I can't get a normal depth map from disparity.
Here is my code:
#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/contrib/contrib.hpp"
#include <cstdio>
#include <iostream>
#include <fstream>
using namespace cv;
using namespace std;
ofstream out("points.txt");
int main()
{
Mat img1, img2;
img1 = imread("images/im7rect.bmp");
img2 = imread("images/im8rect.bmp");
//resize(img1, img1, Size(320, 280));
//resize(img2, img2, Size(320, 280));
Mat g1,g2, disp, disp8;
cvtColor(img1, g1, CV_BGR2GRAY);
cvtColor(img2, g2, CV_BGR2GRAY);
int sadSize = 3;
StereoSGBM sbm;
sbm.SADWindowSize = sadSize;
sbm.numberOfDisparities = 144;//144; 128
sbm.preFilterCap = 10; //63
sbm.minDisparity = 0; //-39; 0
sbm.uniquenessRatio = 10;
sbm.speckleWindowSize = 100;
sbm.speckleRange = 32;
sbm.disp12MaxDiff = 1;
sbm.fullDP = true;
sbm.P1 = sadSize*sadSize*4;
sbm.P2 = sadSize*sadSize*32;
sbm(g1, g2, disp);
normalize(disp, disp8, 0, 255, CV_MINMAX, CV_8U);
Mat dispSGBMscale;
disp.convertTo(dispSGBMscale,CV_32F, 1./16);
imshow("image", img1);
imshow("disparity", disp8);
Mat Q;
FileStorage fs("Q.txt", FileStorage::READ);
fs["Q"] >> Q;
fs.release();
Mat points, points1;
//reprojectImageTo3D(disp, points, Q, true);
reprojectImageTo3D(disp, points, Q, false, CV_32F);
imshow("points", points);
ofstream point_cloud_file;
point_cloud_file.open ("point_cloud.xyz");
for(int i = 0; i < points.rows; i++) {
for(int j = 0; j < points.cols; j++) {
Vec3f point = points.at<Vec3f>(i,j);
if(point[2] < 10) {
point_cloud_file << point[0] << " " << point[1] << " " << point[2]
<< " " << static_cast<unsigned>(img1.at<uchar>(i,j)) << " " << static_cast<unsigned>(img1.at<uchar>(i,j)) << " " << static_cast<unsigned>(img1.at<uchar>(i,j)) << endl;
}
}
}
point_cloud_file.close();
waitKey(0);
return 0;
}
My images are:
Disparity map:
I get something like this point cloud:
Q is equal to:
[ 1., 0., 0., -3.2883545303344727e+02,
  0., 1., 0., -2.3697290992736816e+02,
  0., 0., 0., 5.4497170185417110e+02,
  0., 0., -1.4446083962336606e-02, 0. ]
I have tried many other things and different images, but none of them produced a normal depth map.
What am I doing wrong? Should I use reprojectImageTo3D or some other approach instead? What is the best way to visualize a depth map? (I tried the Point Cloud Library.)
Or could you point me to a working example with a dataset and calibration info that I could run to get a depth map? Or how can I get a depth map from the Middlebury stereo database (http://vision.middlebury.edu/stereo/data/)? I think there isn't enough calibration info there.
Edit:
Now I get something like:
It is of course better, but still not normal.
Edit 2:
I tried what you suggested:
Mat disp;
disp = imread("disparity-image.pgm", CV_LOAD_IMAGE_GRAYSCALE);
Mat disp64;
disp.convertTo(disp64,CV_64F, 1.0/16.0);
imshow("disp", disp);
I get this result with the cv::minMaxIdx(...) line:
And this is what I get when I comment that line out:
PS: Could you also tell me how I can calculate depth knowing only the baseline and the focal length in pixels?
I have made a simple comparison between OpenCV's reprojectImageTo3D() and my own implementation (see below), and also ran a test with a correct disparity map and Q matrix.
// Reproject image to 3D
void customReproject(const cv::Mat& disparity, const cv::Mat& Q, cv::Mat& out3D)
{
CV_Assert(disparity.type() == CV_32F && !disparity.empty());
CV_Assert(Q.type() == CV_32F && Q.cols == 4 && Q.rows == 4);
// 3-channel matrix for containing the reprojected 3D world coordinates
out3D = cv::Mat::zeros(disparity.size(), CV_32FC3);
// Getting the interesting parameters from Q, everything else is zero or one
float Q03 = Q.at<float>(0, 3);
float Q13 = Q.at<float>(1, 3);
float Q23 = Q.at<float>(2, 3);
float Q32 = Q.at<float>(3, 2);
float Q33 = Q.at<float>(3, 3);
// Transforming a single-channel disparity map to a 3-channel image representing a 3D surface
for (int i = 0; i < disparity.rows; i++)
{
const float* disp_ptr = disparity.ptr<float>(i);
cv::Vec3f* out3D_ptr = out3D.ptr<cv::Vec3f>(i);
for (int j = 0; j < disparity.cols; j++)
{
const float pw = 1.0f / (disp_ptr[j] * Q32 + Q33);
cv::Vec3f& point = out3D_ptr[j];
point[0] = (static_cast<float>(j)+Q03) * pw;
point[1] = (static_cast<float>(i)+Q13) * pw;
point[2] = Q23 * pw;
}
}
}
Almost the same results were produced by both methods, and they all seem correct to me. Would you please try it on your disparity map and Q matrix? You can find my test environment on my GitHub.
Note 1: take care not to scale the disparity twice (comment out the line disparity.convertTo(disparity, CV_32F, 1.0 / 16.0); if your disparity is already scaled).
Note 2: it was built with OpenCV 3.0; you may have to change the includes.
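For reference, here is a minimal usage sketch for the function above (the file names are placeholders; it assumes a raw 16-bit SGBM disparity that still needs the 1/16 scaling, as per Note 1):
#include <opencv2/opencv.hpp>
#include <iostream>

void customReproject(const cv::Mat& disparity, const cv::Mat& Q, cv::Mat& out3D); // defined above

int main()
{
    // Load the raw 16-bit SGBM disparity and scale it to true (float) disparities
    cv::Mat disp16 = cv::imread("disparity.png", cv::IMREAD_UNCHANGED);
    cv::Mat disparity;
    disp16.convertTo(disparity, CV_32F, 1.0 / 16.0); // skip the scaling if already done

    // Read Q and convert it to CV_32F to satisfy the asserts in customReproject
    cv::Mat Q;
    cv::FileStorage fs("Q.txt", cv::FileStorage::READ);
    fs["Q"] >> Q;
    fs.release();
    Q.convertTo(Q, CV_32F);

    cv::Mat points3D;
    customReproject(disparity, Q, points3D);

    std::cout << "Reprojected a " << points3D.cols << "x" << points3D.rows << " 3D point grid" << std::endl;
    return 0;
}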
Related
I am working on a robot application in which I have a camera fixed to a robot gripper. In order to calculate the transformation matrix between the camera and the gripper, Hcg, I am using the new calibrateHandEye function provided in OpenCV 4.1.0.
I took 10 pictures of the chessboard with the camera mounted on the gripper and recorded the robot pose at the same time.
The code I am working on:
// My_handeye.cpp : This file contains the 'main' function. Program execution begins and ends there.
//
#include <iostream>
#include <sstream>
#include <string>
#include <ctime>
#include <cstdio>
#include "pch.h"
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace std;
Mat eulerAnglesToRotationMatrix(Vec3f &theta);
Vec3f rotationMatrixToEulerAngles(Mat &R);
float rad2deg(float radian);
float deg2rad(float degree);
int main()
{
// Camera calibration information
std::vector<double> distortionCoefficients(5); // camera distortion
distortionCoefficients[0] = 2.4472856611074989e-01;
distortionCoefficients[1] = -8.1042032574246325e-01;
distortionCoefficients[2] = 0;
distortionCoefficients[3] = 0;
distortionCoefficients[4] = 7.8769462320821060e-01;
double f_x = 1.3624172121852105e+03; // Focal length in x axis
double f_y = 1.3624172121852105e+03; // Focal length in y axis (usually the same?)
double c_x = 960; // Camera primary point x
double c_y = 540; // Camera primary point y
cv::Mat cameraMatrix(3, 3, CV_32FC1);
cameraMatrix.at<float>(0, 0) = f_x;
cameraMatrix.at<float>(0, 1) = 0.0;
cameraMatrix.at<float>(0, 2) = c_x;
cameraMatrix.at<float>(1, 0) = 0.0;
cameraMatrix.at<float>(1, 1) = f_y;
cameraMatrix.at<float>(1, 2) = c_y;
cameraMatrix.at<float>(2, 0) = 0.0;
cameraMatrix.at<float>(2, 1) = 0.0;
cameraMatrix.at<float>(2, 2) = 1.0;
Mat rvec(3, 1, CV_32F), tvec(3, 1, CV_32F);
//
std::vector<Mat> R_gripper2base;
std::vector<Mat> t_gripper2base;
std::vector<Mat> R_target2cam;
std::vector<Mat> t_target2cam;
Mat R_cam2gripper = (Mat_<float>(3, 3));
Mat t_cam2gripper = (Mat_<float>(3, 1));
vector<String> fn;
glob("images/*.bmp", fn, false);
vector<Mat> images;
size_t num_images = fn.size(); //number of bmp files in images folder
Size patternsize(6, 8); //number of centers
vector<Point2f> centers; //this will be filled by the detected centers
float cell_size = 30;
vector<Point3f> obj_points;
R_gripper2base.reserve(num_images);
t_gripper2base.reserve(num_images);
R_target2cam.reserve(num_images);
t_target2cam.reserve(num_images);
for (int i = 0; i < patternsize.height; ++i)
for (int j = 0; j < patternsize.width; ++j)
obj_points.push_back(Point3f(float(j*cell_size),
float(i*cell_size), 0.f));
for (size_t i = 0; i < num_images; i++)
images.push_back(imread(fn[i]));
Mat frame;
for (size_t i = 0; i < num_images; i++)
{
frame = imread(fn[i]); //source image
bool patternfound = findChessboardCorners(frame, patternsize, centers);
if (patternfound)
{
drawChessboardCorners(frame, patternsize, Mat(centers), patternfound);
//imshow("window", frame);
//int key = cv::waitKey(0) & 0xff;
solvePnP(Mat(obj_points), Mat(centers), cameraMatrix, distortionCoefficients, rvec, tvec);
Mat R;
Rodrigues(rvec, R); // R is 3x3
R_target2cam.push_back(R);
t_target2cam.push_back(tvec);
Mat T = Mat::eye(4, 4, R.type()); // T is 4x4
T(Range(0, 3), Range(0, 3)) = R * 1; // copies R into T
T(Range(0, 3), Range(3, 4)) = tvec * 1; // copies tvec into T
cout << "T = " << endl << " " << T << endl << endl;
}
cout << patternfound << endl;
}
Vec3f theta_01{ deg2rad(-153.61), deg2rad(8.3), deg2rad(-91.91) };
Vec3f theta_02{ deg2rad(-166.71), deg2rad(3.04), deg2rad(-93.31) };
Vec3f theta_03{ deg2rad(-170.04), deg2rad(24.92), deg2rad(-88.29) };
Vec3f theta_04{ deg2rad(-165.71), deg2rad(24.68), deg2rad(-84.85) };
Vec3f theta_05{ deg2rad(-160.18), deg2rad(-15.94),deg2rad(-56.24) };
Vec3f theta_06{ deg2rad(175.68), deg2rad(10.95), deg2rad(180) };
Vec3f theta_07{ deg2rad(175.73), deg2rad(45.78), deg2rad(-179.92) };
Vec3f theta_08{ deg2rad(-165.34), deg2rad(47.37), deg2rad(-166.25) };
Vec3f theta_09{ deg2rad(-165.62), deg2rad(17.95), deg2rad(-166.17) };
Vec3f theta_10{ deg2rad(-151.99), deg2rad(-14.59),deg2rad(-94.19) };
Mat robot_rot_01 = eulerAnglesToRotationMatrix(theta_01);
Mat robot_rot_02 = eulerAnglesToRotationMatrix(theta_02);
Mat robot_rot_03 = eulerAnglesToRotationMatrix(theta_03);
Mat robot_rot_04 = eulerAnglesToRotationMatrix(theta_04);
Mat robot_rot_05 = eulerAnglesToRotationMatrix(theta_05);
Mat robot_rot_06 = eulerAnglesToRotationMatrix(theta_06);
Mat robot_rot_07 = eulerAnglesToRotationMatrix(theta_07);
Mat robot_rot_08 = eulerAnglesToRotationMatrix(theta_08);
Mat robot_rot_09 = eulerAnglesToRotationMatrix(theta_09);
Mat robot_rot_10 = eulerAnglesToRotationMatrix(theta_10);
const Mat robot_tr_01 = (Mat_<float>(3, 1) << 781.2, 338.59, 903.48);
const Mat robot_tr_02 = (Mat_<float>(3, 1) << 867.65, 382.52, 884.42);
const Mat robot_tr_03 = (Mat_<float>(3, 1) << 856.91, 172.99, 964.61);
const Mat robot_tr_04 = (Mat_<float>(3, 1) << 748.81, 146.75, 1043.29);
const Mat robot_tr_05 = (Mat_<float>(3, 1) << 627.66, 554.08, 920.85);
const Mat robot_tr_06 = (Mat_<float>(3, 1) << 715.06, 195.96, 889.38);
const Mat robot_tr_07 = (Mat_<float>(3, 1) << 790.9, 196.29, 1117.38);
const Mat robot_tr_08 = (Mat_<float>(3, 1) << 743.5, 283.93, 1131.92);
const Mat robot_tr_09 = (Mat_<float>(3, 1) << 748.9, 288.19, 910.58);
const Mat robot_tr_10 = (Mat_<float>(3, 1) << 813.18, 400.44, 917.16);
R_gripper2base.push_back(robot_rot_01);
R_gripper2base.push_back(robot_rot_02);
R_gripper2base.push_back(robot_rot_03);
R_gripper2base.push_back(robot_rot_04);
R_gripper2base.push_back(robot_rot_05);
R_gripper2base.push_back(robot_rot_06);
R_gripper2base.push_back(robot_rot_07);
R_gripper2base.push_back(robot_rot_08);
R_gripper2base.push_back(robot_rot_09);
R_gripper2base.push_back(robot_rot_10);
t_gripper2base.push_back(robot_tr_01);
t_gripper2base.push_back(robot_tr_02);
t_gripper2base.push_back(robot_tr_03);
t_gripper2base.push_back(robot_tr_04);
t_gripper2base.push_back(robot_tr_05);
t_gripper2base.push_back(robot_tr_06);
t_gripper2base.push_back(robot_tr_07);
t_gripper2base.push_back(robot_tr_08);
t_gripper2base.push_back(robot_tr_09);
t_gripper2base.push_back(robot_tr_10);
calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam, R_cam2gripper, t_cam2gripper, CALIB_HAND_EYE_TSAI);
Vec3f R_cam2gripper_r = rotationMatrixToEulerAngles(R_cam2gripper);
cout << "R_cam2gripper = " << endl << " " << R_cam2gripper << endl << endl;
cout << "R_cam2gripper_r = " << endl << " " << R_cam2gripper_r << endl << endl;
cout << "t_cam2gripper = " << endl << " " << t_cam2gripper << endl << endl;
}
Mat eulerAnglesToRotationMatrix(Vec3f &theta)
{
// Calculate rotation about x axis
Mat R_x = (Mat_<double>(3, 3) <<
1, 0, 0,
0, cos(theta[0]), -sin(theta[0]),
0, sin(theta[0]), cos(theta[0])
);
// Calculate rotation about y axis
Mat R_y = (Mat_<double>(3, 3) <<
cos(theta[1]), 0, sin(theta[1]),
0, 1, 0,
-sin(theta[1]), 0, cos(theta[1])
);
// Calculate rotation about z axis
Mat R_z = (Mat_<double>(3, 3) <<
cos(theta[2]), -sin(theta[2]), 0,
sin(theta[2]), cos(theta[2]), 0,
0, 0, 1);
// Combined rotation matrix
Mat R = R_z * R_y * R_x;
return R;
}
float rad2deg(float radian) {
double pi = 3.14159;
return(radian * (180 / pi));
}
float deg2rad(float degree) {
double pi = 3.14159;
return(degree * (pi / 180));
}
// Checks if a matrix is a valid rotation matrix.
bool isRotationMatrix(Mat &R)
{
Mat Rt;
transpose(R, Rt);
Mat shouldBeIdentity = Rt * R;
Mat I = Mat::eye(3, 3, shouldBeIdentity.type());
return norm(I, shouldBeIdentity) < 1e-6;
}
// Calculates rotation matrix to euler angles
// The result is the same as MATLAB except the order
// of the euler angles ( x and z are swapped ).
Vec3f rotationMatrixToEulerAngles(Mat &R)
{
assert(isRotationMatrix(R));
float sy = sqrt(R.at<double>(0, 0) * R.at<double>(0, 0) + R.at<double>(1, 0) * R.at<double>(1, 0));
bool singular = sy < 1e-6; // If
float x, y, z;
if (!singular)
{
x = atan2(R.at<double>(2, 1), R.at<double>(2, 2));
y = atan2(-R.at<double>(2, 0), sy);
z = atan2(R.at<double>(1, 0), R.at<double>(0, 0));
}
else
{
x = atan2(-R.at<double>(1, 2), R.at<double>(1, 1));
y = atan2(-R.at<double>(2, 0), sy);
z = 0;
}
return Vec3f(x, y, z);
}
The result the function gives me is the following:
R_cam2gripper =
[0.3099803593003124, -0.8923086952824562, -0.3281727733547833;
0.7129271761196039, 0.4465219155360299, -0.5406967916458927;
0.6290047840821058, -0.0663579028402444, 0.7745641421680119]
R_cam2gripper_r =
[-0.0854626, -0.680272, 1.16065]
t_cam2gripper =
[-35.02063730299775;
-74.80633768251272;
-307.6725851251873]
I am getting 'good' results from other software. With those, the robot reaches the exact points I pick in the camera view (I have a 3D camera, from which I get x, y, z in the camera frame), so they are certainly correct, but I am having trouble reproducing the same result with the OpenCV function.
Sorry for the long introduction to my problem. Any idea why the solution is not what it is supposed to be? My guess is that I have a problem understanding the angles or converting them, but I couldn't find a way to solve this. Any hint will be very welcome!
I actually managed to solve this problem. The general idea was correct, but:
I was not interpreting correctly the rotation-vector notation the robot was giving me. It was necessary to multiply the actual values by a factor.
I created a new program that extracts, directly from the robot and the pictures, the matrices that the algorithm requires, and writes these values to a YML file.
The CALIB_HAND_EYE_TSAI method wasn't giving me correct values, but with the other four methods the values seem to converge to the actual values, as sketched below.
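For reference, a minimal sketch of how the five solvers exposed by OpenCV 4.1.0 can be compared against each other (the pose vectors R_gripper2base, t_gripper2base, R_target2cam and t_target2cam are the ones built in the code above):
// Run calibrateHandEye with every available method and print the estimates,
// so they can be compared against the known-good external solution.
HandEyeCalibrationMethod methods[] = { CALIB_HAND_EYE_TSAI, CALIB_HAND_EYE_PARK,
                                       CALIB_HAND_EYE_HORAUD, CALIB_HAND_EYE_ANDREFF,
                                       CALIB_HAND_EYE_DANIILIDIS };
const char* names[] = { "TSAI", "PARK", "HORAUD", "ANDREFF", "DANIILIDIS" };
for (int m = 0; m < 5; m++)
{
    Mat R_est, t_est;
    calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
                     R_est, t_est, methods[m]);
    cout << names[m] << ":" << endl << R_est << endl << t_est.t() << endl << endl;
}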
Anyway, thank you for your help. I am stuck on getting more precision out of the algorithm, but that's for another question.
I am trying to build a test project to compare the OpenCV solvePnP implementation with the OpenGV one.
The OpenCV one is detailed here:
https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#solvepnp
and the OpenGV one here:
https://laurentkneip.github.io/opengv/page_how_to_use.html
Using the OpenCV example code, I find a chessboard in an image and construct the matching 3D points. I run the OpenCV PnP, then set up the OpenGV solver. The OpenCV PnP runs fine and prints these values:
//rotation
-0.003040771263293328, 0.9797142824436152, -0.2003763421317906;
0.0623096853748876, 0.2001735322445355, 0.977777101438374]
//translation
[-12.06549797067309;
-9.533070368412945;
37.6825295047483]
I test by reprojecting the 3D points, and it looks good.
The OpenGV PnP, however, prints nan for all values. I have tried to follow the example code, but I must be making a mistake somewhere. The code is:
int main(int argc, char **argv) {
cv::Mat matImg = cv::imread("chess.jpg");
cv::Size boardSize(8, 6);
//Construct the chessboard model
double squareSize = 2.80;
std::vector<cv::Point3f> objectPoints;
for (int i = 0; i < boardSize.height; i++) {
for (int j = 0; j < boardSize.width; j++) {
objectPoints.push_back(
cv::Point3f(double(j * squareSize), float(i * squareSize), 0));
}
}
cv::Mat rvec, tvec;
cv::Mat cameraMatrix, distCoeffs;
cv::FileStorage fs("CalibrationData.xml", cv::FileStorage::READ);
fs["cameraMatrix"] >> cameraMatrix;
fs["dist_coeffs"] >> distCoeffs;
//Found chessboard corners
std::vector<cv::Point2f> imagePoints;
bool found = cv::findChessboardCorners(matImg, boardSize, imagePoints, cv::CALIB_CB_FAST_CHECK);
if (found) {
cv::drawChessboardCorners(matImg, boardSize, cv::Mat(imagePoints), found);
//SolvePnP
cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
drawAxis(matImg, cameraMatrix, distCoeffs, rvec, tvec, squareSize);
}
//cv to matrix
cv::Mat R;
cv::Rodrigues(rvec, R);
std::cout << "results from cv:" << R << tvec << std::endl;
//START OPEN GV
//vars
bearingVectors_t bearingVectors;
points_t points;
rotation_t rotation;
//add points to the gv type
for (int i = 0; i < objectPoints.size(); ++i)
{
point_t pnt;
pnt.x() = objectPoints[i].x;
pnt.y() = objectPoints[i].y;
pnt.z() = objectPoints[i].z;
points.push_back(pnt);
}
/*
K is the common 3x3 camera matrix that you can compose with cx, cy, fx, and fy.
You put the image point into homogeneous form (append a 1),
multiply it with the inverse of K from the left, which gives you a normalized image point (a spatial direction vector).
You normalize that to norm 1.
*/
//to homogeneous
std::vector<cv::Point3f> imagePointsH;
convertPointsToHomogeneous(imagePoints, imagePointsH);
//multiply by K.Inv
for (int i = 0; i < imagePointsH.size(); i++)
{
cv::Point3f pt = imagePointsH[i];
cv::Mat ptMat(3, 1, cameraMatrix.type());
ptMat.at<double>(0, 0) = pt.x;
ptMat.at<double>(1, 0) = pt.y;
ptMat.at<double>(2, 0) = pt.z;
cv::Mat dstMat = cameraMatrix.inv() * ptMat;
//store as bearing vector
bearingVector_t bvec;
bvec.x() = dstMat.at<double>(0, 0);
bvec.y() = dstMat.at<double>(1, 0);
bvec.z() = dstMat.at<double>(2, 0);
bvec.normalize();
bearingVectors.push_back(bvec);
}
//create a central absolute adapter
absolute_pose::CentralAbsoluteAdapter adapter(
bearingVectors,
points,
rotation);
size_t iterations = 50;
std::cout << "running epnp (all correspondences)" << std::endl;
transformation_t epnp_transformation;
for (size_t i = 0; i < iterations; i++)
epnp_transformation = absolute_pose::epnp(adapter);
std::cout << "results from epnp algorithm:" << std::endl;
std::cout << epnp_transformation << std::endl << std::endl;
return 0;
}
Where am I going wrong in setting up the OpenGV PnP solver?
Years later, I had this same issue and solved it. To convert OpenCV points to OpenGV bearing vectors, you can do this:
bearingVectors_t bearingVectors;
std::vector<cv::Point2f> dd2; // the 2D image points, e.g. the detected chessboard corners
const int N1 = static_cast<int>(dd2.size());
cv::Mat points1_mat = cv::Mat(dd2).reshape(1);
// first rectify points and construct homogeneous points
// construct homogeneous points
cv::Mat ones_col1 = cv::Mat::ones(N1, 1, CV_32F);
cv::hconcat(points1_mat, ones_col1, points1_mat);
// undistort points
cv::Mat points1_rect = points1_mat * cameraMatrix.inv();
// compute bearings
points2bearings3(points1_rect, &bearingVectors);
using this function for the final conversion:
// Convert a set of points to bearing
// points Matrix of size Nx3 with the set of points.
// bearings Vector of bearings.
void points2bearings3(const cv::Mat& points,
opengv::bearingVectors_t* bearings) {
double l;
cv::Vec3f p;
opengv::bearingVector_t bearing;
for (int i = 0; i < points.rows; ++i) {
p = cv::Vec3f(points.row(i));
l = std::sqrt(p[0] * p[0] + p[1] * p[1] + p[2] * p[2]);
for (int j = 0; j < 3; ++j) bearing[j] = p[j] / l;
bearings->push_back(bearing);
}
}
As input data I have a 24-bit RGB image and a palette with 2 to 20 fixed colours. These colours are in no way spread regularly over the full colour range.
Now I have to modify the colours of the input image so that only the colours of the given palette are used, taking the palette colour that is closest to the original colour (not closest mathematically, but closest in terms of a human's visual impression). So what I need is an algorithm that takes an input colour and finds the colour in the target palette that visually fits it best. Please note: I'm not looking for a naive comparison/difference algorithm, but for something that really incorporates the impression a colour makes on humans!
Since this is something that should already have been done, and because I do not want to reinvent the wheel: is there some example source code out there that does this job? Ideally it would really be a piece of code and not a link to a disastrously huge library ;-)
(I'd guess OpenCV does not provide such a function?)
Thanks
You should look at the Lab colour space. It was designed so that distances in the colour space correspond to perceptual distances. So once you have converted your image, you can compute the distances as you would have done before, but you should get a better result from a perceptual point of view. In OpenCV you can use the cvtColor(source, destination, CV_BGR2Lab) function.
Another idea would be to use dithering. The idea is to approximate missing colours by mixing neighbouring pixels. A popular algorithm for this is Floyd-Steinberg dithering.
Here is an example of mine, where I combined an optimized palette computed with k-means, the Lab colour space, and Floyd-Steinberg dithering:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
cv::Mat floydSteinberg(cv::Mat img, cv::Mat palette);
cv::Vec3b findClosestPaletteColor(cv::Vec3b color, cv::Mat palette);
int main(int argc, char** argv)
{
// Number of clusters (colors on result image)
int nrColors = 18;
cv::Mat imgBGR = imread(argv[1],1);
cv::Mat img;
cvtColor(imgBGR, img, CV_BGR2Lab);
cv::Mat colVec = img.reshape(1, img.rows*img.cols); // change to a Nx3 column vector
cv::Mat colVecD;
colVec.convertTo(colVecD, CV_32FC3, 1.0); // convert to floating point
cv::Mat labels, centers;
cv::kmeans(colVecD, nrColors, labels,
cv::TermCriteria(CV_TERMCRIT_ITER, 100, 0.1),
3, cv::KMEANS_PP_CENTERS, centers); // compute k mean centers
// replace pixels by there corresponding image centers
cv::Mat imgPosterized = img.clone();
for(int i = 0; i < img.rows; i++ )
for(int j = 0; j < img.cols; j++ )
for(int k = 0; k < 3; k++)
imgPosterized.at<Vec3b>(i,j)[k] = centers.at<float>(labels.at<int>(j+img.cols*i),k);
// convert palette back to uchar
cv::Mat palette;
centers.convertTo(palette,CV_8UC3,1.0);
// call floyd steinberg dithering algorithm
cv::Mat fs = floydSteinberg(img, palette);
cv::Mat imgPosterizedBGR, fsBGR;
cvtColor(imgPosterized, imgPosterizedBGR, CV_Lab2BGR);
cvtColor(fs, fsBGR, CV_Lab2BGR);
imshow("input",imgBGR); // original image
imshow("result",imgPosterizedBGR); // posterized image
imshow("fs",fsBGR); // floyd steinberg dithering
waitKey();
return 0;
}
cv::Mat floydSteinberg(cv::Mat imgOrig, cv::Mat palette)
{
cv::Mat img = imgOrig.clone();
cv::Mat resImg = img.clone();
for(int i = 0; i < img.rows; i++ )
for(int j = 0; j < img.cols; j++ )
{
cv::Vec3b newpixel = findClosestPaletteColor(img.at<Vec3b>(i,j), palette);
resImg.at<Vec3b>(i,j) = newpixel;
for(int k=0;k<3;k++)
{
int quant_error = (int)img.at<Vec3b>(i,j)[k] - newpixel[k];
if(i+1<img.rows)
img.at<Vec3b>(i+1,j)[k] = min(255,max(0,(int)img.at<Vec3b>(i+1,j)[k] + (7 * quant_error) / 16));
if(i-1 > 0 && j+1 < img.cols)
img.at<Vec3b>(i-1,j+1)[k] = min(255,max(0,(int)img.at<Vec3b>(i-1,j+1)[k] + (3 * quant_error) / 16));
if(j+1 < img.cols)
img.at<Vec3b>(i,j+1)[k] = min(255,max(0,(int)img.at<Vec3b>(i,j+1)[k] + (5 * quant_error) / 16));
if(i+1 < img.rows && j+1 < img.cols)
img.at<Vec3b>(i+1,j+1)[k] = min(255,max(0,(int)img.at<Vec3b>(i+1,j+1)[k] + (1 * quant_error) / 16));
}
}
return resImg;
}
float vec3bDist(cv::Vec3b a, cv::Vec3b b)
{
return sqrt( pow((float)a[0]-b[0],2) + pow((float)a[1]-b[1],2) + pow((float)a[2]-b[2],2) );
}
cv::Vec3b findClosestPaletteColor(cv::Vec3b color, cv::Mat palette)
{
int minI = 0;
float minDistance = vec3bDist(color, palette.at<Vec3b>(0));
for (int i=0;i<palette.rows;i++)
{
float distance = vec3bDist(color, palette.at<Vec3b>(i));
if (distance < minDistance)
{
minDistance = distance;
minI = i;
}
}
return palette.at<Vec3b>(minI);
}
Try this algorithm (it reduces the number of colors, but it computes the palette by itself):
#include <opencv2/opencv.hpp>
#include "opencv2/legacy/legacy.hpp"
#include <vector>
#include <list>
#include <iostream>
using namespace cv;
using namespace std;
int main(void)
{
// Number of clusters (colors on result image)
int NrGMMComponents = 32;
// Source file name
string fname="D:\\ImagesForTest\\tools.jpg";
cv::Mat SampleImg = imread(fname,1);
//cv::GaussianBlur(SampleImg,SampleImg,Size(5,5),3);
int SampleImgHeight = SampleImg.rows;
int SampleImgWidth = SampleImg.cols;
// Pick datapoints
vector<Vec3d> ListSamplePoints;
for (int y=0; y<SampleImgHeight; y++)
{
for (int x=0; x<SampleImgWidth; x++)
{
// Get pixel color at that position
Vec3b bgrPixel = SampleImg.at<Vec3b>(y, x);
uchar b = bgrPixel.val[0];
uchar g = bgrPixel.val[1];
uchar r = bgrPixel.val[2];
if(rand()%25==0) // Don't pick every pixel, only roughly every 25th
{
ListSamplePoints.push_back(Vec3d(b,g,r));
}
} // for (x)
} // for (y)
// Form training matrix
Mat labels;
int NrSamples = ListSamplePoints.size();
Mat samples( NrSamples, 3, CV_32FC1 );
for (int s=0; s<NrSamples; s++)
{
Vec3d v = ListSamplePoints.at(s);
samples.at<float>(s,0) = (float) v[0];
samples.at<float>(s,1) = (float) v[1];
samples.at<float>(s,2) = (float) v[2];
}
cout << "Learning to represent the sample distributions with " << NrGMMComponents << " gaussians." << endl;
// Algorithm parameters
CvEMParams params;
params.covs = NULL;
params.means = NULL;
params.weights = NULL;
params.probs = NULL;
params.nclusters = NrGMMComponents;
params.cov_mat_type = CvEM::COV_MAT_GENERIC; // DIAGONAL, GENERIC, SPHERICAL
params.start_step = CvEM::START_AUTO_STEP;
params.term_crit.max_iter = 1500;
params.term_crit.epsilon = 0.001;
params.term_crit.type = CV_TERMCRIT_ITER|CV_TERMCRIT_EPS;
//params.term_crit.type = CV_TERMCRIT_ITER;
// Train
cout << "Started GMM training" << endl;
CvEM em_model;
em_model.train( samples, Mat(), params, &labels );
cout << "Finished GMM training" << endl;
// Result image
Mat img = Mat::zeros( Size( SampleImgWidth, SampleImgHeight ), CV_8UC3 );
// Ask classifier for each pixel
Mat sample( 1, 3, CV_32FC1 );
Mat means;
means=em_model.getMeans();
for(int i = 0; i < img.rows; i++ )
{
for(int j = 0; j < img.cols; j++ )
{
Vec3b v=SampleImg.at<Vec3b>(i,j);
sample.at<float>(0,0) = (float) v[0];
sample.at<float>(0,1) = (float) v[1];
sample.at<float>(0,2) = (float) v[2];
int response = cvRound(em_model.predict( sample ));
img.at<Vec3b>(i,j)[0]=means.at<double>(response,0);
img.at<Vec3b>(i,j)[1]=means.at<double>(response,1);
img.at<Vec3b>(i,j)[2]=means.at<double>(response,2);
}
}
img.convertTo(img,CV_8UC3);
imshow("result",img);
waitKey();
// Save the result
cv::imwrite("result.png", img);
}
PS: For perceptual color distance measurement it's better to use the L*a*b color space. There is a converter in OpenCV for this purpose. For clustering you can use k-means with fixed cluster centers (your palette entries). After clustering you'll get each point labeled with the index of its palette entry, as in the sketch below.
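A minimal sketch of that idea, assuming a small hard-coded BGR palette (the entries are placeholders) and using a plain nearest-center assignment in L*a*b, which is exactly the assignment step of k-means with fixed centers:
#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat imgBGR = cv::imread("input.png");
    if (imgBGR.empty()) return 1;

    // Hypothetical fixed palette in BGR; replace with your own 2..20 entries
    std::vector<cv::Vec3b> paletteBGR = {
        cv::Vec3b(0, 0, 255), cv::Vec3b(0, 255, 0), cv::Vec3b(255, 0, 0)
    };

    // Convert image and palette to Lab so that Euclidean distance is roughly perceptual
    cv::Mat imgLab, paletteLab;
    cv::cvtColor(imgBGR, imgLab, cv::COLOR_BGR2Lab);
    cv::cvtColor(cv::Mat(paletteBGR), paletteLab, cv::COLOR_BGR2Lab);

    // Assign every pixel the nearest palette entry (measured in Lab)
    cv::Mat out = imgBGR.clone();
    for (int i = 0; i < imgLab.rows; i++)
        for (int j = 0; j < imgLab.cols; j++)
        {
            cv::Vec3d px = imgLab.at<cv::Vec3b>(i, j);
            int best = 0;
            double bestDist = 1e30;
            for (int k = 0; k < paletteLab.rows; k++)
            {
                cv::Vec3d pal = paletteLab.at<cv::Vec3b>(k, 0);
                double d = cv::norm(px - pal);
                if (d < bestDist) { bestDist = d; best = k; }
            }
            out.at<cv::Vec3b>(i, j) = paletteBGR[best];
        }

    cv::imwrite("quantized.png", out);
    return 0;
}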
I'm trying to generate a bird's eye view from an image. For the camera intrinsics and distortions, I'm using hard-coded values that I retrieved from a driving simulator that has a camera mounted on its roof.
The basis for the code is from "Learning OpenCV: Computer Vision with the OpenCV Library", p. 409.
When I run the code on an image containing a chessboard with 3 inner corners per row and 4 inner corners per column, my bird's eye view is upside down. I need the image to be turned into a bird's eye view that is right side up, because I need the homography matrix for another function call.
Here are the input and output images, and the code I'm using:
Input image:
Corners detected:
Output Image/bird's eye (upside down!):
The code:
#include <highgui.h>
#include <cv.h>
#include <cxcore.h>
#include <math.h>
#include <vector>
#include <stdio.h>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char* argv[]) {
if(argc != 4) return -1;
// INPUT PARAMETERS:
//
int board_w = atoi(argv[1]); //inner corners per row
int board_h = atoi(argv[2]); //inner corners per column
int board_n = board_w * board_h;
CvSize board_sz = cvSize( board_w, board_h );
//Hard coded intrinsics for the camera
Mat intrinsicMat = (Mat_<double>(3, 3) <<
418.7490, 0., 236.8528,
0.,558.6650,322.7346,
0., 0., 1.);
//Hard coded distortions for the camera
CvMat* distortion = cvCreateMat(1, 4, CV_32F);
cvmSet(distortion, 0, 0, -0.0019);
cvmSet(distortion, 0, 1, 0.0161);
cvmSet(distortion, 0, 2, 0.0011);
cvmSet(distortion, 0, 3, -0.0016);
IplImage* image = 0;
IplImage* gray_image = 0;
if( (image = cvLoadImage(argv[3])) == 0 ) {
printf("Error: Couldn’t load %s\n",argv[3]);
return -1;
}
gray_image = cvCreateImage( cvGetSize(image), 8, 1 );
cvCvtColor(image, gray_image, CV_BGR2GRAY );
// UNDISTORT OUR IMAGE
//
IplImage* mapx = cvCreateImage( cvGetSize(image), IPL_DEPTH_32F, 1 );
IplImage* mapy = cvCreateImage( cvGetSize(image), IPL_DEPTH_32F, 1 );
CvMat intrinsic (intrinsicMat);
//This initializes rectification matrices
//
cvInitUndistortMap(
&intrinsic,
distortion,
mapx,
mapy
);
IplImage *t = cvCloneImage(image);
// Rectify our image
//
cvRemap( t, image, mapx, mapy );
// GET THE CHESSBOARD ON THE PLANE
//
cvNamedWindow("Chessboard");
CvPoint2D32f* corners = new CvPoint2D32f[ board_n ];
int corner_count = 0;
int found = cvFindChessboardCorners(
image,
board_sz,
corners,
&corner_count,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
);
if(!found){
printf("Couldn’t aquire chessboard on %s, "
"only found %d of %d corners\n",
argv[3],corner_count,board_n
);
return -1;
}
//Get Subpixel accuracy on those corners:
cvFindCornerSubPix(
gray_image,
corners,
corner_count,
cvSize(11,11),
cvSize(-1,-1),
cvTermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1 )
);
//GET THE IMAGE AND OBJECT POINTS:
// We will choose chessboard object points as (r,c):
// (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1).
//
CvPoint2D32f objPts[4], imgPts[4];
imgPts[0] = corners[0];
imgPts[1] = corners[board_w-1];
imgPts[2] = corners[(board_h-1)*board_w];
imgPts[3] = corners[(board_h-1)*board_w + board_w-1];
objPts[0].x = 0; objPts[0].y = 0;
objPts[1].x = board_w -1; objPts[1].y = 0;
objPts[2].x = 0; objPts[2].y = board_h -1;
objPts[3].x = board_w -1; objPts[3].y = board_h -1;
// DRAW THE POINTS in order: B,G,R,YELLOW
//
cvCircle( image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0,0,255), 3); //blue
cvCircle( image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0,255,0), 3); //green
cvCircle( image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255,0,0), 3); //red
cvCircle( image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255,255,0), 3); //yellow
// DRAW THE FOUND CHESSBOARD
//
cvDrawChessboardCorners(
image,
board_sz,
corners,
corner_count,
found
);
cvShowImage( "Chessboard", image );
// FIND THE HOMOGRAPHY
//
CvMat *H = cvCreateMat( 3, 3, CV_32F);
cvGetPerspectiveTransform( objPts, imgPts, H);
Mat homography = H;
cvSave("Homography.xml",H); //We can reuse H for the same camera mounting
/**********************GENERATING 3X4 MATRIX***************************/
// LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
//
float Z = 23;
int key = 0;
IplImage *birds_image = cvCloneImage(image);
cvNamedWindow("Birds_Eye");
// LOOP TO ALLOW USER TO PLAY WITH HEIGHT:
//
// escape key stops
//
while(key != 27) {
// Set the height
//
CV_MAT_ELEM(*H,float,2,2) = Z;
// COMPUTE THE FRONTAL PARALLEL OR BIRD’S-EYE VIEW:
// USING HOMOGRAPHY TO REMAP THE VIEW
//
cvWarpPerspective(
image,
birds_image,
H,
CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS
);
cvShowImage( "Birds_Eye", birds_image );
imwrite("/home/lee/bird.jpg", birds_image);
key = cvWaitKey();
if(key == 'u') Z += 0.5;
if(key == 'd') Z -= 0.5;
}
return 0;
}
The homography result seems correct. Since you're mapping the camera's z-axis to the world's y-axis, the image resulting from the bird's-eye-view (BEV) remap is upside down.
If you really need the BEV image oriented like the camera shot, you can use H = Ty * Rx * H, where Rx is a 180-degree rotation around the x-axis, Ty is a translation along the y-axis, and H is your original homography. The translation is required because the rotation remaps your old BEV onto the negative side of the y-axis.
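A minimal sketch of that composition in homogeneous pixel coordinates (the translation offset is an assumed output height; adjust it to the size of your warped image, and homography is the CV_32F matrix from the code above):
// 180-degree flip about the image x-axis (y -> -y) in homogeneous pixel coordinates
cv::Mat Rx = (cv::Mat_<float>(3, 3) <<
    1,  0, 0,
    0, -1, 0,
    0,  0, 1);
// Translate the flipped content back into the positive y range
float outHeight = 480.0f; // assumed height of the bird's-eye image
cv::Mat Ty = (cv::Mat_<float>(3, 3) <<
    1, 0, 0,
    0, 1, outHeight,
    0, 0, 1);
// Compose with the original homography
cv::Mat Hflipped = Ty * Rx * homography;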
My problem is that the colors in my disparity map are backwards: things that are farther away appear lighter than things that are closer to the camera.
I have tried many things (convertTo, convertScaleAbs, various combinations of values in them, etc.) and cannot seem to get the colors in the disparity map to reverse (i.e. be normal, where things that are closer are lighter than things that are farther away).
I need some help with that.
Also, out of curiosity, how can I change the color mapping of the disparity map to look like the colorful ones from MATLAB that I see online?
Here's my code (also on Pastebin: http://pastebin.com/E3vVN6UU):
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
void show(const char* windowname, Mat image)
{
namedWindow(windowname, CV_WINDOW_AUTOSIZE);
imshow(windowname, image);
}
int main()
{
Mat image1, image2;
Mat camMat1 = (Mat_<double>(3,3) << 793.1338, 0, 337.2309, 0, 792.0555, 256.9991, 0, 0, 1);
Mat camMat2 = (Mat_<double>(3,3) << 799.1271, 0, 319.8581, 0, 797.2460, 243.4638, 0, 0, 1);
Mat dispCoeffs1 = (Mat_<double>(1,5) << 0.0033, -0.1320, -0.0019, 0.0026, 0);
Mat dispCoeffs2 = (Mat_<double>(1,5) << -0.0109, -0.0188, -0.0014, -0.0055, 0);
Mat RotMat = (Mat_<double>(3,3) << 0.9998, -0.0023, 0.0221, 0.0022, 1, 0.0031, -0.0221, -0.0031, 0.9998);
Mat TransMat = (Mat_<double>(3,1) << 374.2306, -1.8319, 5.5745);
//Rectify
Mat R1, R2, P1, P2, Q;
stereoRectify(camMat1, dispCoeffs1, camMat2, dispCoeffs2, Size(640,480), RotMat, TransMat, R1, R2, P1, P2, Q, CV_CALIB_ZERO_DISPARITY, 1, Size(640,480));
//Define the mapping to the done
Mat rx1, ry1;
Mat rx2, ry2;
initUndistortRectifyMap(camMat1, dispCoeffs1, R1, P1, Size(640,480), CV_16SC2, rx1, ry1);
initUndistortRectifyMap(camMat2, dispCoeffs2, R2, P2, Size(640,480), CV_16SC2, rx2, ry2);
//SET THE BM STATE VARIABLES BEGIN - DONE GLOBALLY
StereoBM bm;
bm.state->preFilterSize = 31;
bm.state->preFilterCap = 63;
bm.state->SADWindowSize = 9;
bm.state->minDisparity = -128;
//bm.state->disp12MaxDiff = 2;
bm.state->numberOfDisparities = 128;
bm.state->textureThreshold = 50;
bm.state->uniquenessRatio = 15;
bm.state->speckleWindowSize = 100;
bm.state->speckleRange = 16;
//SET THE BM STATE VARIABLES END
VideoCapture cap3 = VideoCapture(0);
VideoCapture cap4 = VideoCapture(1);
//cap3.set(CV_CAP_PROP_FRAME_WIDTH, 320);
//cap3.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
//cap4.set(CV_CAP_PROP_FRAME_WIDTH, 320);
//cap4.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
cap3 >> image1;
cap4 >> image2;
Size imageSize = image1.size();
Mat gray_image1;
Mat gray_image2;
Mat frame1r;
//frame1r.create(image1.size(), CV_8U);
Mat frame2r;
//frame2r.create(image2.size(), CV_8U);
Mat frame1rf;
Mat frame2rf;
//Mat disp(image1.size(), CV_16S);
//Mat vdisp(image1.size(), CV_8U);
Mat disp, vdisp;
//Mat image3d(image1.size(), CV_32FC3);
Mat image3d;
Mat rectified_pair;
rectified_pair.create(imageSize.height, (imageSize.width)*2, CV_8UC3);
//Actually do the mapping -- based on the mapping definition
while(1)
{
bm.state->preFilterSize = 31;
bm.state->preFilterCap = 63;
bm.state->SADWindowSize = 21;
bm.state->minDisparity = -128;
//bm.state->disp12MaxDiff = 2;
bm.state->numberOfDisparities = 64;
bm.state->textureThreshold = 20;
bm.state->uniquenessRatio = 10;
bm.state->speckleWindowSize = 100;
bm.state->speckleRange = 32;
cvtColor(image1, gray_image1, CV_BGR2GRAY);
cvtColor(image2, gray_image2, CV_BGR2GRAY);
remap(gray_image1, frame1r, rx1, ry1, CV_INTER_LINEAR);
remap(gray_image2, frame2r, rx2, ry2, CV_INTER_LINEAR);
bm(frame1r, frame2r, disp);
normalize(disp, vdisp, 0, 255, NORM_MINMAX, CV_8U);
//convertScaleAbs(vdisp, vdisp, 1, 0);
disp.convertTo(vdisp, CV_8U, 255/(64*16.));
show("disparity", vdisp);
//reprojectImageTo3D(disp, image3d, Q, true);
//show("depth map", image3d);
//display image side by side for rectified window
//copy frame1r to the left side
cvtColor(frame1r, frame1rf, CV_GRAY2BGR);
frame1rf.copyTo(rectified_pair(Rect(0,0,imageSize.width, imageSize.height)));
//copy frame2r to the right side
cvtColor(frame2r, frame2rf, CV_GRAY2BGR);
frame2rf.copyTo(rectified_pair(Rect(imageSize.width,0,imageSize.width, imageSize.height)));
for(int i=0; i<imageSize.height; i+=32)
line(rectified_pair, Point(0,i), Point((imageSize.width)*2, i), CV_RGB(0,255,0));
show("rectified", rectified_pair);
cap3 >> image1;
cap4 >> image2;
if(waitKey(15) == 27)
break;
}
return 0;
}
I'm not using stereo pairs, but I get the same result using a Kinect: far = light, near = dark.
To change this I have used the following:
double min, max;
minMaxLoc(depthImage, &min, &max);
// The negative scale factor inverts the mapping: near (small depth) becomes bright, far (large depth) becomes dark
depthImage.convertTo(rImage, CV_8U, -255.0/max, 255);
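For the MATLAB-style coloring asked about in the question, one option (a sketch; it assumes an 8-bit single-channel image such as rImage above, and applyColorMap lives in imgproc in OpenCV 3.x and in the contrib module in 2.4.x) is to apply a colormap:
// Color-code the 8-bit depth/disparity image with a jet-style colormap
cv::Mat colorMapped;
cv::applyColorMap(rImage, colorMapped, cv::COLORMAP_JET);
cv::imshow("depth (colored)", colorMapped);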
I was facing the same problem; then I tried swapping the right and left images, and it worked!
Now I am getting the correct image.