OpenCV: project points manually

I'm trying to reproduce the behavior of the method projectPoints() from OpenCV.
In the two images below, the red/green/blue axes are obtained with OpenCV's method, whereas the magenta/yellow/cyan axes are obtained with my own method:
image1
image2
With my method, the axes seem to have the correct orientation, but the translations are incorrect.
Here is my code:
void drawVector(float x, float y, float z, float r, float g, float b, cv::Mat &pose, cv::Mat &cameraMatrix, cv::Mat &dst) {
//Origin = (0, 0, 0, 1)
cv::Mat origin(4, 1, CV_64FC1, double(0));
origin.at<double>(3, 0) = 1;
//End = (x, y, z, 1)
cv::Mat end(4, 1, CV_64FC1, double(1));
end.at<double>(0, 0) = x; end.at<double>(1, 0) = y; end.at<double>(2, 0) = z;
//multiplies transformation matrix by camera matrix
cv::Mat mat = cameraMatrix * pose.colRange(0, 4).rowRange(0, 3);
//projects points
origin = mat * origin;
end = mat * end;
//draws corresponding line
cv::line(dst, cv::Point(origin.at<double>(0, 0), origin.at<double>(1, 0)),
cv::Point(end.at<double>(0, 0), end.at<double>(1, 0)),
CV_RGB(255 * r, 255 * g, 255 * b));
}
void drawVector_withProjectPointsMethod(float x, float y, float z, float r, float g, float b, cv::Mat &pose, cv::Mat &cameraMatrix, cv::Mat &dst) {
std::vector<cv::Point3f> points;
std::vector<cv::Point2f> projectedPoints;
//fills input array with 2 points
points.push_back(cv::Point3f(0, 0, 0));
points.push_back(cv::Point3f(x, y, z));
//Gets rotation vector thanks to cv::Rodrigues() method.
cv::Mat rvec;
cv::Rodrigues(pose.colRange(0, 3).rowRange(0, 3), rvec);
//projects points using cv::projectPoints method
cv::projectPoints(points, rvec, pose.colRange(3, 4).rowRange(0, 3), cameraMatrix, std::vector<double>(), projectedPoints);
//draws corresponding line
cv::line(dst, projectedPoints[0], projectedPoints[1],
CV_RGB(255 * r, 255 * g, 255 * b));
}
void drawAxis(cv::Mat &pose, cv::Mat &cameraMatrix, cv::Mat &dst) {
drawVector(0.1, 0, 0, 1, 1, 0, pose, cameraMatrix, dst);
drawVector(0, 0.1, 0, 0, 1, 1, pose, cameraMatrix, dst);
drawVector(0, 0, 0.1, 1, 0, 1, pose, cameraMatrix, dst);
drawVector_withProjectPointsMethod(0.1, 0, 0, 1, 0, 0, pose, cameraMatrix, dst);
drawVector_withProjectPointsMethod(0, 0.1, 0, 0, 1, 0, pose, cameraMatrix, dst);
drawVector_withProjectPointsMethod(0, 0, 0.1, 0, 0, 1, pose, cameraMatrix, dst);
}
What am I doing wrong?

I just forgot to divide the resulting points by their last component after projection:
Given the matrix of the camera that took the image, the projection of any point (x, y, z, 1) in 3D space onto that image is computed as follows:
//point3D has 4 components (x, y, z, w); point2D has 3 (x, y, z).
point2D = cameraMatrix * point3D;
//then divide the first 2 components of point2D by the third.
point2D /= point2D.z;
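For illustration, here is a minimal untested sketch of that projection step with the divide applied; projectManually is a hypothetical helper name, and pose is assumed to hold [R|t] in its first three rows, as in the code above:
//Hypothetical helper: projects (x, y, z) with the camera matrix and pose, then applies the perspective divide.
cv::Point2d projectManually(const cv::Mat &pose, const cv::Mat &cameraMatrix, double x, double y, double z) {
    cv::Mat point3D = (cv::Mat_<double>(4, 1) << x, y, z, 1.0);
    //3x4 projection matrix = cameraMatrix (3x3) * [R|t] (first 3 rows of pose)
    cv::Mat point2D = cameraMatrix * pose.colRange(0, 4).rowRange(0, 3) * point3D;
    //divide the first two components by the third before drawing
    double w = point2D.at<double>(2, 0);
    return cv::Point2d(point2D.at<double>(0, 0) / w, point2D.at<double>(1, 0) / w);
}
The two endpoints returned this way can then be passed to cv::line() exactly as in drawVector().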

Related

Convert camera intrinsic and extrinsic matrices to modelview and projection matrices in OpenGL

I'm trying to convert a camera extrinsic matrix and intrinsic matrix to OpenGL modelview and projection matrices for AR.
The camera intrinsic matrix is estimated by camera calibration, and I have already calculated the extrinsic matrix (I know the correspondences between world coordinates (CAD model) and camera coordinates).
Just to check whether I calculated the camera extrinsic matrix properly, I augmented the CAD model onto the image using:
P = KE
(u, v, w) = P (X, Y, Z, 1)
where P is the projection matrix, K the intrinsic matrix, E the extrinsic matrix,
(u/w, v/w) the image coordinates, and (X, Y, Z) the world coordinates of the CAD model.
The result is shown below and is good; I confirmed that the intrinsic and extrinsic matrices are correct.
However, I failed to convert them to OpenGL (I want to draw the CAD model over the image using OpenGL). Using the code below, the object ends up outside the window (by changing the OpenGL projection matrix, I checked that the code can draw the object, but it draws it at the wrong position).
extern Matrix3f IntrinsicMatrix;
double fx = IntrinsicMatrix(0, 0);
double fy = IntrinsicMatrix(1, 1);
double cx = IntrinsicMatrix(0, 2);
double cy = IntrinsicMatrix(1, 2);
double alpha = IntrinsicMatrix(0, 1);
extern Mat InputImage;
double w = InputImage.cols;
double h = InputImage.rows;
double Znear = 0.1;
double Zfar = 500000;
extern MatrixXf ExtrinsicMatrix;
Matrix4f Extrinsic4f, Temp;
for (unsigned int i = 0; i < 3; i++)
for (unsigned int j = 0; j < 4; j++)
Extrinsic4f(i, j) = ExtrinsicMatrix(i, j);
Extrinsic4f(3, 0) = 0.0f;
Extrinsic4f(3, 1) = 0.0f;
Extrinsic4f(3, 2) = 0.0f;
Extrinsic4f(3, 3) = 1.0f;
for (unsigned int i = 0; i < 4; i++)
for (unsigned int j = 0; j < 4; j++)
Temp(i, j) = 0.0f;
Temp(0, 0) = 1.0f;
Temp(1, 1) = -1.0f;
Temp(2, 2) = -1.0f;
Temp(3, 3) = 1.0f;
Extrinsic4f = Temp*Extrinsic4f;
Matrix4f glViewMatrix = Extrinsic4f.transpose();
GLfloat model[16] = {
glViewMatrix(0, 0), glViewMatrix(0, 1), glViewMatrix(0, 2), glViewMatrix(0, 3),
glViewMatrix(1, 0), glViewMatrix(1, 1), glViewMatrix(1, 2), glViewMatrix(1, 3),
glViewMatrix(2, 0), glViewMatrix(2, 1), glViewMatrix(2, 2), glViewMatrix(2, 3),
glViewMatrix(3, 0), glViewMatrix(3, 1), glViewMatrix(3, 2), glViewMatrix(3, 3),
};
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf(model);
glMatrixMode(GL_PROJECTION);
GLfloat perspMatrix[16]={
2*fx/w, 0.0 , (w-2*cx)/w, 0,
0, -2*fy/h, (-h+2*cy)/h, 0,
0, 0, (-Zfar-Znear)/(Zfar-Znear), -2*Zfar*Znear/(Zfar-Znear),
0, 0, -1, 0};
glLoadMatrixf(perspMatrix);
glColor3f(1.f, 1.f, 1.f);
glBegin(GL_QUADS);
glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 0.0);
glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, 0.0);
glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, 0.0);
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 0.0);
glEnd();
glDisable(GL_TEXTURE_2D);
double fov_y = 2 * atan(height / 2 / fy) * 180 / CV_PI;
gluPerspective(fov_y, (double)width / height, Znear, Zfar); // 39
glViewport(0, 0, width, height);
draw3Dobject();
glutSwapBuffers();
Is there anything I should change?
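One detail worth double-checking with code like this (this note is not from the original post): glLoadMatrixf expects the 16 values in column-major order, while perspMatrix above is written out row by row. A hedged sketch of the same intrinsics-based matrix laid out column by column, reusing the fx, fy, cx, cy, w, h, Znear and Zfar variables defined above, could look like:
// Column-major layout: each group of four values is one column of the matrix.
GLfloat perspColMajor[16] = {
    (GLfloat)(2*fx/w), 0.f, 0.f, 0.f,                              // column 0
    0.f, (GLfloat)(-2*fy/h), 0.f, 0.f,                             // column 1
    (GLfloat)((w - 2*cx)/w), (GLfloat)((-h + 2*cy)/h),
    (GLfloat)((-Zfar - Znear)/(Zfar - Znear)), -1.f,               // column 2
    0.f, 0.f, (GLfloat)(-2*Zfar*Znear/(Zfar - Znear)), 0.f         // column 3
};
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(perspColMajor);
Equivalently, glLoadTransposeMatrixf (OpenGL 1.3+) can load a row-major array directly.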

Plot histogram of Sobel operator magnitude and angle in OpenCV

I want to plot a histogram in OpenCV C++. The x-axis should be the angle and the y-axis the magnitude of the histogram. I calculate the magnitude and angle using the Sobel operator. Now how can I plot a histogram from the magnitude and angle?
Thanks in advance. A simple version of the code is:
// Read image
Mat img = imread("abs.jpg");
img.convertTo(img, CV_32F, 1 / 255.0);
/*GaussianBlur(img, img, Size(3, 3), 0, 0, BORDER_CONSTANT);*/
// Calculate gradients gx, gy
Mat gx, gy;
Sobel(img, gx, CV_32F, 1, 0, 1);
Sobel(img, gy, CV_32F, 0, 1, 1);
// C++ Calculate gradient magnitude and direction (in degrees)
Mat mag, angle;
cartToPolar(gx, gy, mag, angle, 1);
imshow("magnitude of image is", mag);
imshow("angle of image is", angle);
OK, so the first part is to calculate the histogram of each of them. Since they are already separate (each in its own Mat), we do not have to split anything, and we can use them directly in OpenCV's calcHist function.
From the documentation we have:
void calcHist(const Mat* images, int nimages, const int* channels, InputArray mask, OutputArray hist, int dims, const int* histSize, const float** ranges, bool uniform=true, bool accumulate=false )
So you would have to do:
cv::Mat histMag, histAng;
// number of bins of the histogram, adjust to your liking
int histSize = 10;
// degrees go from 0-360; if using radians, change accordingly
float rangeAng[] = { 0, 360} ;
const float* histRangeAng = { rangeAng };
double minval, maxval;
// get the range for the magnitude
cv::minMaxLoc(mag, &minval, &maxval);
float rangeMag[] = { static_cast<float>(minval), static_cast<float>(maxval)} ;
const float* histRangeMag = { rangeMag };
cv::calcHist(&mag, 1, 0, cv::noArray(), histMag, 1, &histSize, &histRangeMag, true, false);
cv::calcHist(&angle, 1, 0, cv::noArray(), histAng, 1, &histSize, &histRangeAng, true, false);
Now you have to plot the two histograms found in histMag and histAng.
In the tutorial I posted in the comments, the plot uses lines; for the angle it would be something like this:
// Draw the angle histogram with lines
int hist_w = 512; int hist_h = 400;
int bin_w = cvRound( (double) hist_w/histSize );
cv::Mat histImage( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
/// Normalize the result to [ 0, histImage.rows ]
cv::normalize(histAng, histAng, 0, histImage.rows, cv::NORM_MINMAX, -1, Mat() );
// Draw the lines
for( int i = 1; i < histSize; i++ )
{
cv::line( histImage, cv::Point( bin_w*(i-1), hist_h - cvRound(histAng.at<float>(i-1)) ) ,
cv::Point( bin_w*(i), hist_h - cvRound(histAng.at<float>(i)) ),
cv::Scalar( 255, 0, 0), 2, 8, 0 );
}
With this you can do the same for the magnitude, or turn it into a function that draws whichever histogram it is given.
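For instance, a small helper along those lines (an untested sketch; drawHistogramLines is a made-up name) could cover both cases:
//Untested sketch: draws a 1-D float histogram (as produced by calcHist) with connected lines.
cv::Mat drawHistogramLines(const cv::Mat &hist, int histSize, int hist_w = 512, int hist_h = 400)
{
    int bin_w = cvRound((double)hist_w / histSize);
    cv::Mat histImage(hist_h, hist_w, CV_8UC3, cv::Scalar(0, 0, 0));
    cv::Mat scaled;
    //normalize the bin counts into the image height
    cv::normalize(hist, scaled, 0, histImage.rows, cv::NORM_MINMAX, -1, cv::Mat());
    for (int i = 1; i < histSize; i++)
    {
        cv::line(histImage,
            cv::Point(bin_w * (i - 1), hist_h - cvRound(scaled.at<float>(i - 1))),
            cv::Point(bin_w * i, hist_h - cvRound(scaled.at<float>(i))),
            cv::Scalar(255, 0, 0), 2, 8, 0);
    }
    return histImage;
}
It would then be called as cv::imshow("angle histogram", drawHistogramLines(histAng, histSize)); and the same with histMag.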
In the documentation there is another option, drawing rectangles as the bins; adapting it to our case, we get something like:
// Draw the angle histogram with rectangles
int hist_w = 512; int hist_h = 400;
int bin_w = std::round( static_cast<double>(hist_w)/static_cast<double>(histSize) );
cv::Mat histImage( hist_h, hist_w, CV_8UC3, Scalar( 0,0,0) );
/// Normalize the result to [ 0, histImage.rows ]
cv::normalize(histAng, histAng, 0, histImage.rows, cv::NORM_MINMAX, -1, Mat() );
for( int i = 1; i < histSize; i++ )
{
cv::rectangle(histImage, cv::Point(bin_w*(i-1), hist_h - static_cast<int>(std::round(histAng.at<float>(i-1)))), cv::Point(bin_w*i, hist_h), cv::Scalar(255, 0, 0), -1 /*filled*/);
}
Again, this can be done for the magnitude in the same way. These are very simple plots; if you need more complex or prettier plots, you may need to call an external library and pass it the data from the calculated histograms. Also, this code has not been tested, so it may contain a typo or error; if something fails, just write a comment and we can find a solution.
I hope this helps you, and sorry for the late answer.

CvSVM predict not returning correct value

I was trying the example detailed in this article.
Training, and the subsequent loop that identifies the hyperplane, work well, i.e.:
// Data for visual representation
int width = 512, height = 512;
Mat image = Mat::zeros(height, width, CV_8UC3);
// Set up training data
float labels[4] = {1.0, -1.0, -1.0, -1.0};
Mat labelsMat(4, 1, CV_32FC1, labels);
float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
// Set up SVM's parameters
CvSVMParams svmparam;
svmparam.svm_type = CvSVM::C_SVC;
svmparam.kernel_type = CvSVM::LINEAR;
svmparam.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);
// Train the SVM
CvSVM svm;
svm.train(trainingDataMat, labelsMat, Mat(), Mat(), svmparam);
svm.save("Training.xml");
Vec3b blue(255, 0 ,0);
Vec3b green(0, 255, 0);
for(int x = 0; x < image.rows; ++x)
{
for(int y = 0; y < image.cols; ++y)
{
Mat sampleMat = (Mat_<float>(1,2) << y,x);
float response = svm.predict(sampleMat);
if(response == 1)
image.at<Vec3b>(x,y) = green;
else if(response == -1)
image.at<Vec3b>(x,y) = blue;
}
}
But when I try to get a support vector using the API (svm.get_support_vector(i)), it returns very small numbers (e.g. 0.000876529e-28). Hence, after casting to "int", the X and Y coordinates become 0 and 0 respectively. So, even after getting the hyperplane, I am unable to get the corresponding support vectors.
i.e.
for (int i = 0; i < c; ++i)
{
const float* v = svm.get_support_vector(i);
cv::Point resCenter((int) v[0], (int) v[1]);
std::cout << v[0] << ":" << v[1] << "= " << resCenter << std::endl;
circle( image, resCenter, 6, Scalar(128, 128, 128), thickness, lineType);
}
I tried normalizing the coordinate positions as
X' = (x - MinR) / (MaxR - MinR) // Here MinR and MaxR span the columns (0, 512)
Y' = (y - MinR) / (MaxR - MinR) // Here MinR and MaxR span the rows (0, 512)
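Purely as an illustration (not from the original post), that min-max scaling applied to the data before training might look like the following sketch; normalizeSamples is a hypothetical helper, and MinR/MaxR are taken as 0 and 512 as described above:
// Hypothetical helper: min-max normalize a CV_32F sample matrix to [0, 1].
// The same scaling must be applied to every sample later passed to svm.predict().
void normalizeSamples(cv::Mat &samples, float minR = 0.f, float maxR = 512.f)
{
    samples = (samples - minR) / (maxR - minR);
}
Calling normalizeSamples(trainingDataMat) before svm.train and normalizeSamples(sampleMat) before each svm.predict keeps the two consistent.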
As I am new to machine learning, I would be thankful if you could suggest something to read on the following questions:
What does train internally do with the feature vectors we pass? (I understand it creates categories with respect to the labels provided, but how does that happen?)
How does predict work internally?
Any pointers to understanding these would help me. Thanks in advance for your time.

Image Rectification Using camera intrinsic and extrinsic parameters

I want to rectify stereo images using intrinsic and extrinsic camera parameters obtained from PhotoModeler software.
I wrote the code below (adapting this gist: https://gist.github.com/anonymous/6586653) and determined the relative rotation and translation parameters, but when I feed in the images the results are not as expected. I tried to find the error but couldn't.
Your help is really appreciated.
I couldn't upload all the images, so I have put them and the results at this link:
https://www.dropbox.com/s/5tmj9rk91tkrot4/RECTIFICATION_TEST_DATA.docx?dl=0
The code is:
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <iomanip>
#include<opencv2/opencv.hpp>
#include <iostream>
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <fstream>
using namespace std;
using namespace cv;
int main(int argc, char** argv)
{
// Mat img1 = imread("E:\\12_0628.tif", 1);
// Mat img2 = imread("E:\\12_0629.tif", 1);
Mat img1 = imread("E:\\DSC_0483.JPG");
Mat img2 = imread("E:\\DSC_0484.JPG");
//EXTERIOR ORIENTATION FOR THE 1ST IMAGE
double omega1 = -172.672440, phi1 = -80.168311, kappa1 = 163.005082, tx1 = -35.100000, ty1 = -56.700000, tz1 = -59.300000;
//EXTERIOR ORIENTATION FOR THE 2ND IMAGE
double omega2 = 27.576999, phi2 = -67.089920, kappa2 = 2.826051, tx2 = -37.600000, ty2 = -18.600000, tz2 = -41.700000;
//Convert angles from degrees to radians
omega1 = omega1 * CV_PI / 180.;
phi1 = phi1 * CV_PI / 180.;
kappa1 = kappa1 * CV_PI / 180.;
omega2 = omega2 * CV_PI / 180.;
phi2 = phi2 * CV_PI / 180.;
kappa2 = kappa2 * CV_PI / 180.;
Mat RX1 = (Mat_<double>(3, 3) <<
1, 0, 0,
0, cos(omega1), sin(omega1),
0, -sin(omega1), cos(omega1));
Mat RY1 = (Mat_<double>(3, 3) <<
cos(phi1), 0, -sin(phi1),
0, 1, 0,
sin(phi1), 0, cos(phi1));
Mat RZ1 = (Mat_<double>(3, 3) <<
cos(kappa1), sin(kappa1), 0,
-sin(kappa1), cos(kappa1), 0,
0, 0, 1);
// Composed rotation matrix with (RX, RY, RZ)
Mat R1 = RX1 * RY1 * RZ1;
Mat T1 = (Mat_<double>(3, 1) <<
tx1,
ty1,
tz1);
/////////////////////Rotation matrices of the 2nd image//////////////////////////////////////////
//Rotation matrices of the 2nd image
Mat RX2 = (Mat_<double>(3, 3) <<
1, 0, 0,
0, cos(omega2), sin(omega2),
0, -sin(omega2), cos(omega2));
Mat RY2 = (Mat_<double>(3, 3) <<
cos(phi2), 0, -sin(phi2),
0, 1, 0,
sin(phi2), 0, cos(phi2));
Mat RZ2 = (Mat_<double>(3, 3) <<
cos(kappa2), sin(kappa2), 0,
-sin(kappa2), cos(kappa2), 0,
0, 0, 1);
// Composed rotation matrix with (RX, RY, RZ)
Mat R2 = RX2 * RY2 * RZ2;
Mat T2 = (Mat_<double>(3, 1) <<
tx2,
ty2,
tz2);
/////////////////////////////////////////////////////////////
double f = 2284.;// the focal length of the Nikon D40 camera in pixels; this is equivalent to 18mm
double w = (double)img1.cols;
double h = (double)img1.rows;
Mat M = (Mat_<double>(3, 3) <<//camera matrix
f , 0. , w/2,
0. , f , h/2,
0. , 0. , 1.);
Mat D = (Mat_<double>(5, 1) <<// distortion coefficients
0,
0,
0,
0,
0.
);
Mat R1inv = R1.inv();
Mat Rrel = R2 * R1inv;
Mat Trel = (-1 * Rrel) * T1+ T2;
Mat T = (Mat_<double>(3, 1) <<//translation matrix
-2376.6,
-740.0,
229.0);
cout << img1.size() << endl;
cout << img2.size() << endl;
//Mat R1, R2,
Mat P1, P2, Q;
stereoRectify(M, D, M, D, img1.size(), Rrel, Trel, //the input data
R1, R2, P1, P2, Q);//the output data
Mat map1x, map1y, map2x, map2y;
Mat imgdst1, imgdst2;
// Size (flaot)imageSize;
// imageSize = img1.size();
initUndistortRectifyMap(M, D, R1, P1, img1.size(), CV_32FC1, map1x, map1y);
initUndistortRectifyMap(M, D, R2, P2, img1.size(), CV_32FC1, map2x, map2y);
remap(img1, imgdst1, map1x, map1y, INTER_LINEAR, BORDER_CONSTANT, Scalar());
remap(img2, imgdst2, map2x, map2y, INTER_LINEAR, BORDER_CONSTANT, Scalar());
namedWindow("image1");
namedWindow("image2");
imshow("image1", imgdst1);
imshow("image2", imgdst2);
// imwrite("DSC_0906_rect.jpg", imgdst1);
// imwrite("DSC_0913_rect.jpg", imgdst2);
imwrite("E:\\Researches\\2016-2017_res\\2_8_epipolar_geometry\\temp_image\\output1.bmp", imgdst1);
imwrite("E:\\Researches\\2016-2017_res\\2_8_epipolar_geometry\\temp_image\\output2.bmp", imgdst2);
waitKey();
return 0;
}

OpenCV Help on cvPerspectiveTransform

I have a problem with the following code. I'm trying to change the perspective of an image using cvPerspectiveTransform, but I get the following error:
OpenCV Error: Assertion failed (scn + 1 == m.cols && (depth == CV_32F || depth == CV_64F))
CvMat* p = cvCreateMat(2, 4, CV_64FC1);
CvMat* h = cvCreateMat(2, 4, CV_64FC1);
CvMat* p2h = cvCreateMat(2, 4, CV_64FC1);
cvZero(p);
cvZero(h);
cvZero(p2h);
//set src points
for (int i = 0; i < 4; i++) {
CvPoint point = verifiedPoints[i];
cvmSet( p, 0, i, point.x );
cvmSet( p, 1, i, point.y );
printf("point %d (%d , %d)\n",i,point.x,point.y);
}
//set dst points
cvmSet( h, 0, 0, 0 );
cvmSet( h, 1, 0, real_height );
cvmSet( h, 0, 1, real_width );
cvmSet( h, 1, 1, real_height );
cvmSet( h, 0, 2, real_width );
cvmSet( h, 1, 2, 0 );
cvmSet( h, 0, 3, 0 );
cvmSet( h, 1, 3, 0);
//cvPerspectiveTransform or cvFindHomography?
cvPerspectiveTransform(p,h,p2h);
cvReleaseMat(&p);
cvReleaseMat(&h);
I tried changing p2h to other values, for example:
CvMat* p2h = cvCreateMat(3, 3, CV_32F)
but then I get another error:
OpenCV Error: Assertion failed (dst.type() == src.type() && dst.channels() == m.rows-1) in cvPerspectiveTransform
Any help?
According to the OpenCV documentation for cvPerspectiveTransform:
src – Source two-channel or three-channel floating-point array. Each element is a 2D/3D vector to be transformed.
dst – Destination array of the same size and type as src.
mtx – 3x3 or 4x4 floating-point transformation matrix.
So you need to declare your matrices this way:
CvMat* p = cvCreateMat(1, 4, CV_64FC2);
CvMat* h = cvCreateMat(1, 4, CV_64FC2);
CvMat* p2h = cvCreateMat(3, 3, CV_64FC1);
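As a follow-up sketch (untested, using the old C API as in the question, and reusing the verifiedPoints, real_width and real_height names from the original code): the two-channel point matrices can be filled with cvSet2D, and since the goal is the mapping from four source points to four destination points, cvFindHomography is the call that estimates the 3x3 matrix, which cvPerspectiveTransform can then apply to points:
//fill the source points: element (0, i) of the 1x4 two-channel matrix holds (x, y)
for (int i = 0; i < 4; i++) {
    CvPoint point = verifiedPoints[i];
    cvSet2D(p, 0, i, cvScalar(point.x, point.y));
}
//fill the destination points the same way, e.g. the first corner:
cvSet2D(h, 0, 0, cvScalar(0, real_height));
//... the remaining three corners ...
//estimate the 3x3 homography mapping p to h, then apply it to points
cvFindHomography(p, h, p2h);
CvMat* transformed = cvCreateMat(1, 4, CV_64FC2);
cvPerspectiveTransform(p, transformed, p2h);
To warp the whole image rather than individual points, cvWarpPerspective would be used with the same 3x3 matrix p2h.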
