3D rotation matrix between two 3D points - OpenCV

I have two known 3D points, OC1 and OC2, which are the origins of two coordinate frames in space, and I need to compute the 3D rotation matrix between them.
I know that using R1 and T1 I can get to OC1, and using R2 and T2 I can get to OC2, but what I need is the 3D rotation matrix between OC1 and OC2. I thought of this rule:
oMc1 = (R1 | T1) and oMc2 = (R2 | T2), and what I want is:
c1Mc2 = (oMc1)^-1 * oMc2
So I tried to implement it; here is my code:
vector <Point3f> listOfPointsOnTable;
cout << "******** DATA *******" << endl;
listOfPointsOnTable.push_back(Point3f(0,0,0));
listOfPointsOnTable.push_back(Point3f(100,0,0));
listOfPointsOnTable.push_back(Point3f(100,100,0));
listOfPointsOnTable.push_back(Point3f(0,100,0));
cout << endl << "Scene points :" << endl;
for (int i = 0; i < listOfPointsOnTable.size(); i++)
{
    cout << listOfPointsOnTable[i] << endl;
}
//Define the optical center of each camera
Point3f centreOfC1 = Point3f(23,0,50);
Point3f centreOfC2 = Point3f(0,42,20);
cout << endl << "Center Of C1: " << centreOfC1 << " , Center of C2 : " << centreOfC2 << endl;
//Define the translation and rotation between main axis and the camera 1 axis
Mat translationOfC1 = (Mat_<double>(3, 1) << (0-centreOfC1.x), (0-centreOfC1.y), (0-centreOfC1.z));
float rotxC1 = 0, rotyC1 = 0, rotzC1 = -45;
int focaleC1 = 2;
Mat rotationOfC1 = rotation3D(rotxC1, rotyC1,rotzC1);
cout << endl << "Translation from default axis to C1: " << translationOfC1 << endl;
cout << "Rotation from default axis to C1: " << rotationOfC1 << endl;
Mat transformationToC1 = buildTransformationMatrix(rotationOfC1, translationOfC1);
cout << "Transformation from default axis to C1: " << transformationToC1 << endl << endl;
//Define the translation and rotation between main axis and the camera 2 axis
Mat translationOfC2 = (Mat_<double>(3, 1) << (0-centreOfC2.x), (0-centreOfC2.y), (0-centreOfC2.z));
float rotxC2 = 0, rotyC2 = 0, rotzC2 = -90;
int focaleC2 = 2;
Mat rotationOfC2 = rotation3D(rotxC2, rotyC2,rotzC2);
cout << endl << "Translation from default axis to C2: " << translationOfC2 << endl;
cout << "Rotation from default axis to C2: " << rotationOfC2 << endl;
Mat transformationToC2 = buildTransformationMatrix(rotationOfC2, translationOfC2);
cout << "Transformation from default axis to C2: " << transformationToC2 << endl << endl;
Mat centreOfC2InMat = (Mat_<double>(3, 1) << centreOfC2.x, centreOfC2.y, centreOfC2.z);
Mat centreOfC2InCamera1 = rotationOfC1 * centreOfC2InMat + translationOfC1;
Mat translationBetweenC1AndC2 = -centreOfC2InCamera1;
cout << endl << "****Translation from C2 to C1" << endl;
cout << translationBetweenC1AndC2 << endl;
Mat centreOfC1InMat = (Mat_<double>(3, 1) << centreOfC1.x, centreOfC1.y, centreOfC1.z);
Mat centreOfC1InCamera2 = rotationOfC2 * centreOfC1InMat + translationOfC2;
Mat translationBetweenC2AndC1 = -centreOfC1InCamera2;
cout << "****Translation from C1 to C2" << endl;
cout << translationBetweenC2AndC1 << endl;
cout << "Tran1-1 * Trans2 = " << transformationToC1.inv() * transformationToC2 << endl;
cout << "Tran2-1 * Trans1 = " << transformationToC2.inv() * transformationToC1 << endl;
Mat rotation3D(float alpha, float beta, float gamma)
{
// Rotation matrices around the X, Y, and Z axis
double alphaInRadian = alpha * M_PI / 180.0;
double betaInRadian = beta * M_PI / 180.0;
double gammaInRadian = gamma * M_PI / 180.0;
Mat RX = (Mat_<double>(3, 3) <<
1, 0, 0,
0, cosf(alphaInRadian), sinf(alphaInRadian),
0, -sinf(alphaInRadian), cosf(alphaInRadian));
Mat RY = (Mat_<double>(3, 3) <<
cosf(betaInRadian), 0, sinf(betaInRadian),
0, 1, 0,
-sinf(betaInRadian), 0, cosf(betaInRadian));
Mat RZ = (Mat_<double>(3, 3) <<
cosf(gammaInRadian), sinf(gammaInRadian), 0,
-sinf(gammaInRadian),cosf(gammaInRadian), 0,
0, 0, 1);
// Composed rotation matrix with (RX, RY, RZ)
Mat R = RX * RY * RZ;
return R;
}
Mat buildTransformationMatrix(Mat rotation, Mat translation)
{
Mat transformation = (Mat_<double>(4, 4) <<
rotation.at<double>(0,0), rotation.at<double>(0,1), rotation.at<double>(0,2), translation.at<double>(0,0),
rotation.at<double>(1,0), rotation.at<double>(1,1), rotation.at<double>(1,2), translation.at<double>(1,0),
rotation.at<double>(2,0), rotation.at<double>(2,1), rotation.at<double>(2,2), translation.at<double>(2,0),
0, 0, 0, 1);
return transformation;
}
Here is the output:
//Origin of 3 axis
O(0,0,0), OC1 (23, 0, 50), OC2 (0, 42, 20)
Translation from default axis to OC1: [-23;
0;
-50]
Rotation from default axis to OC1: [0.7071067690849304, -0.7071067690849304, 0;
0.7071067690849304, 0.7071067690849304, 0;
0, 0, 1]
Trans1 = Transformation from default axis to OC1: [0.7071067690849304, -0.7071067690849304, 0, -23;
0.7071067690849304, 0.7071067690849304, 0, 0;
0, 0, 1, -50;
0, 0, 0, 1]
Translation from default axis to OC2: [0;
-42;
-20]
Rotation from default axis to OC2: [-4.371138828673793e-08, -1, 0;
1, -4.371138828673793e-08, 0;
0, 0, 1]
Trans2 = Transformation from default axis to OC2: [-4.371138828673793e-08, -1, 0, 0;
1, -4.371138828673793e-08, 0, -42;
0, 0, 1, -20;
0, 0, 0, 1]
(Trans1)-1 * (Trans2) = [0.7071067623795453, -0.7071068241967844, 0, -13.43502907247513;
0.7071068241967844, 0.7071067623795453, 0, -45.96194156373071;
0, 0, 1, 30;
0, 0, 0, 1]
(Trans2)-1 * (Trans1) = [0.7071067381763105, 0.7071067999935476, 0, 42.00000100536185;
-0.7071067999935475, 0.7071067381763104, -0, 22.99999816412165;
0, 0, 1, -30;
0, 0, 0, 1]
//Calculation of translation between OC1 and OC2:
****Translation from C2 to C1
[52.69848430156708;
-29.69848430156708;
30]
****Translation from C1 to C2
[1.005361930594972e-06;
19;
-30]
As you can see above, the 4th column of (Trans1)-1 * (Trans2) is equal neither to the translation from C2 to C1 nor to the translation from C1 to C2.
This makes me think that c1Mc2 = (oMc1)^-1 * oMc2 does not give what I want, but I don't really understand why. Is there another way to get what I want?

The rotation matrix of Oc1 is, by definition, built from the components of its local X, Y, Z axes expressed in the reference frame:

      | Xc1[0] Yc1[0] Zc1[0] |
R_1 = | Xc1[1] Yc1[1] Zc1[1] |
      | Xc1[2] Yc1[2] Zc1[2] |

and similarly for R_2 of Oc2.
If the relative rotation between them is R, then you can write
R_2 = R_1 * R
and thus:
R = transpose(R_1) * R_2
That is all you need.
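A minimal sketch of that relation in OpenCV, reusing the Mat names from the question's code (whether the result maps frame 1 to frame 2 or the other way around depends on which convention, world-to-camera or camera-to-world, rotationOfC1 and rotationOfC2 follow):

// Relative rotation, as in the answer: R = transpose(R_1) * R_2
Mat relativeRotation = rotationOfC1.t() * rotationOfC2;
// If the relative translation is needed too, expanding c1Mc2 = (oMc1)^-1 * oMc2
// in block form gives c1Mc2 = [ R_1^T * R_2 | R_1^T * (T_2 - T_1) ],
// since the inverse of a rigid transform [R | T] is [R^T | -R^T * T].
Mat relativeTranslation = rotationOfC1.t() * (translationOfC2 - translationOfC1);
cout << "relative rotation:" << endl << relativeRotation << endl;
cout << "relative translation:" << endl << relativeTranslation << endl;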

Related

camera pose estimation with solvePnP() and SOLVEPNP_IPPE_SQUARE method

I'm working with ARKit and trying to get the camera position from a QR code of known size (0.16 m).
To detect the QR code I'm using the Vision framework, so I can get each corner point in the image.
Data preparation:
let intrinsics = arFrame.camera.intrinsics
let imageResolution = arFrame.camera.imageResolution
let imagePointsArray = [NSValue(cgPoint: visionResult.topLeft), NSValue(cgPoint: visionResult.topRight), NSValue(cgPoint: visionResult.bottomLeft), NSValue(cgPoint: visionResult.bottomRight)]
let intrinsicsArray = (0..<3).flatMap { x in (0..<3).map { y in NSNumber(value: intrinsics[x][y]) } }
let squareLength = NSNumber(value: 0.16)
let res = OpenCVWrapper.findPose(imagePointsArray, intrinsics: intrinsicsArray, size: imageResolution, squareLength: squareLength)
To get the camera position I'm using the OpenCV function solvePnP() with flag = SOLVEPNP_IPPE_SQUARE, called from Objective-C++ (based on this answer):
+(Pose)findPose: (NSArray<NSValue *> *) imagePoints
intrinsics: (NSArray<NSNumber *> *) intrinsics
imageResolution: (CGSize) imageResolution
squareLength: (NSNumber *) squareLength {
cv::Mat distCoeffs(4,1,cv::DataType<double>::type, 0.0);
cv::Mat rvec(3,1,cv::DataType<double>::type);
cv::Mat tvec(3,1,cv::DataType<double>::type);
cv::Mat cameraMatrix = [self intrinsicMatrixWithArray:intrinsics];
vector<Point2f> cvImagePoints = [self convertImagePoints:imagePoints toSize: imageResolution];
vector<Point3f> cvObjectPoints = [self getObjectPointsWithSquareLength:squareLength];
std::cout << "object points: \n" << cvObjectPoints << std::endl;
std::cout << "image points: \n" << cvImagePoints << std::endl;
std::cout << "cameraMatrix points: \n" << cameraMatrix << std::endl;
cv::solvePnP(cvObjectPoints, cvImagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, SOLVEPNP_IPPE_SQUARE);
std::cout << "rvec: \n" << rvec << std::endl;
std::cout << "tvec: \n" << tvec << std::endl;
cv::Mat RotX(3, 3, cv::DataType<double>::type);
cv::setIdentity(RotX);
RotX.at<double>(4) = -1; //cos(180) = -1
RotX.at<double>(8) = -1;
cv::Mat R;
cv::Rodrigues(rvec, R);
R = R.t(); // transpose = inverse of a rotation: camera-to-world
Mat rvecConverted;
Rodrigues(R, rvecConverted);
std::cout << "rvec in world coords:\n" << rvecConverted << std::endl;
rvecConverted = RotX * rvecConverted;
std::cout << "rvec scenekit :\n" << rvecConverted << std::endl;
Mat tvecConverted = -R * tvec;
std::cout << "tvec in world coords:\n" << tvecConverted << std::endl;
tvecConverted = RotX * tvecConverted;
std::cout << "tvec scenekit :\n" << tvecConverted << std::endl;
SCNVector4 rotationVector = SCNVector4Make(rvecConverted.at<double>(0), rvecConverted.at<double>(1), rvecConverted.at<double>(2), norm(rvecConverted));
SCNVector3 translationVector = SCNVector3Make(tvecConverted.at<double>(0), tvecConverted.at<double>(1), tvecConverted.at<double>(2));
return Pose{rotationVector, translationVector};
}
+ (vector<Point3f>) getObjectPointsWithSquareLength: (NSNumber*) squareLength {
vector<Point3f> points;
double squareLengthDouble = [squareLength doubleValue];
points.push_back(Point3f(-squareLengthDouble/2, squareLengthDouble/2, 0));
points.push_back(Point3f(squareLengthDouble/2, squareLengthDouble/2, 0));
points.push_back(Point3f(squareLengthDouble/2, -squareLengthDouble/2, 0));
points.push_back(Point3f(-squareLengthDouble/2, -squareLengthDouble/2, 0));
return points;
}
+ (vector<Point2f>) convertImagePoints: (NSArray<NSValue *> *) array
toSize: (CGSize) size {
vector<Point2f> points;
for (NSValue * value in array) {
    CGPoint point = [value CGPointValue];
    points.push_back(Point2f((point.x * size.width), (point.y * size.height)));
}
return points;
}
+ (cv::Mat) intrinsicMatrixWithArray: (NSArray<NSNumber *> *) intrinsics {
Mat result(3,3,cv::DataType<double>::type);
cv::setIdentity(result);
result.at<double>(0) = [intrinsics[0] doubleValue]; //fx
result.at<double>(4) = [intrinsics[4] doubleValue]; //fy
result.at<double>(2) = [intrinsics[6] doubleValue]; //cx
result.at<double>(5) = [intrinsics[7] doubleValue]; //cy
result.at<double>(8) = [intrinsics[8] doubleValue]; //1
return result;
}
The problem is that when I point the camera directly at the QR code from a distance of 2 meters, translationVector.z (tvec scenekit) should be about 2 meters, but instead I get seemingly random positive or negative numbers.
Output:
Calculated distance to QR 2.0856588
object points:
[-0.079999998, 0.079999998, 0;
0.079999998, 0.079999998, 0;
0.079999998, -0.079999998, 0;
-0.079999998, -0.079999998, 0]
image points:
[795.98724, 717.27045;
684.5592, 715.80487;
793.31567, 826.06146;
684.40692, 824.39771]
cameraMatrix points:
[1454.490478515625, 0, 935.6685791015625;
0, 1454.490478515625, 717.999267578125;
0, 0, 1]
rvec:
[-0.9251278749049585;
1.185890362907954;
-0.9989977018022447]
tvec:
[0.04753833193572054;
-0.009999648596310796;
-0.3527916723601041]
rvec in world coords:
[0.9251278749049584;
-1.185890362907954;
0.9989977018022447]
rvec scenekit :
[0.9251278749049584;
1.185890362907954;
-0.9989977018022447]
tvec in world coords:
[-0.1159248829391864;
-0.3366933247327607;
0.004569098144615695]
tvec scenekit :
[-0.1159248829391864;
0.3366933247327607;
-0.004569098144615695]
Thanks for any help
The estimated translation between the camera and the tag is not correct. The tz is negative, which is physically impossible. See here for details about the camera coordinate system.
You have to be sure that each 3D object point matches the corresponding 2D image point.
If I plot the 2D coordinates, I get the following image (with RGBM being the order of the points).
If you swap the last two image points, you should get:
rvec: [0.1217246105180353, 0.1224686744740433, -3.116495036698598]
tvec: [-0.2866576939480562, 0.07760414675470864, 2.127895748451679]
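A sketch of that fix, reusing cvObjectPoints and cvImagePoints from the wrapper above. The object points are declared in the order top-left, top-right, bottom-right, bottom-left of the square, while the Vision corners arrive as topLeft, topRight, bottomLeft, bottomRight, so only the last two image points need to change places:

// Reorder the image points so both lists are TL, TR, BR, BL
std::swap(cvImagePoints[2], cvImagePoints[3]); // BL <-> BR
cv::solvePnP(cvObjectPoints, cvImagePoints, cameraMatrix, distCoeffs,
             rvec, tvec, false, SOLVEPNP_IPPE_SQUARE);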

OpenCV: Wrong result in calibrateHandEye function

I am working on a robot application in which a camera is fixed to the robot gripper. To calculate the transformation matrix between the camera and the gripper, Hcg, I am using the new calibrateHandEye function provided in OpenCV 4.1.0.
I took 10 pictures of the chessboard with the camera mounted on the gripper and at the same time recorded the robot pose.
The code I am working on:
// My_handeye.cpp : This file contains the 'main' function. Program execution begins and ends there.
//
#include <iostream>
#include <sstream>
#include <string>
#include <ctime>
#include <cstdio>
#include "pch.h"
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
using namespace cv;
using namespace std;
Mat eulerAnglesToRotationMatrix(Vec3f &theta);
Vec3f rotationMatrixToEulerAngles(Mat &R);
float rad2deg(float radian);
float deg2rad(float degree);
int main()
{
// Camera calibration information
std::vector<double> distortionCoefficients(5); // camera distortion
distortionCoefficients[0] = 2.4472856611074989e-01;
distortionCoefficients[1] = -8.1042032574246325e-01;
distortionCoefficients[2] = 0;
distortionCoefficients[3] = 0;
distortionCoefficients[4] = 7.8769462320821060e-01;
double f_x = 1.3624172121852105e+03; // Focal length in x axis
double f_y = 1.3624172121852105e+03; // Focal length in y axis (usually the same?)
double c_x = 960; // Camera primary point x
double c_y = 540; // Camera primary point y
cv::Mat cameraMatrix(3, 3, CV_32FC1);
cameraMatrix.at<float>(0, 0) = f_x;
cameraMatrix.at<float>(0, 1) = 0.0;
cameraMatrix.at<float>(0, 2) = c_x;
cameraMatrix.at<float>(1, 0) = 0.0;
cameraMatrix.at<float>(1, 1) = f_y;
cameraMatrix.at<float>(1, 2) = c_y;
cameraMatrix.at<float>(2, 0) = 0.0;
cameraMatrix.at<float>(2, 1) = 0.0;
cameraMatrix.at<float>(2, 2) = 1.0;
Mat rvec(3, 1, CV_32F), tvec(3, 1, CV_32F);
//
std::vector<Mat> R_gripper2base;
std::vector<Mat> t_gripper2base;
std::vector<Mat> R_target2cam;
std::vector<Mat> t_target2cam;
Mat R_cam2gripper = (Mat_<float>(3, 3));
Mat t_cam2gripper = (Mat_<float>(3, 1));
vector<String> fn;
glob("images/*.bmp", fn, false);
vector<Mat> images;
size_t num_images = fn.size(); //number of bmp files in images folder
Size patternsize(6, 8); //number of centers
vector<Point2f> centers; //this will be filled by the detected centers
float cell_size = 30;
vector<Point3f> obj_points;
R_gripper2base.reserve(num_images);
t_gripper2base.reserve(num_images);
R_target2cam.reserve(num_images);
t_target2cam.reserve(num_images);
for (int i = 0; i < patternsize.height; ++i)
    for (int j = 0; j < patternsize.width; ++j)
        obj_points.push_back(Point3f(float(j*cell_size),
                                     float(i*cell_size), 0.f));
for (size_t i = 0; i < num_images; i++)
    images.push_back(imread(fn[i]));
Mat frame;
for (size_t i = 0; i < num_images; i++)
{
    frame = imread(fn[i]); //source image
    bool patternfound = findChessboardCorners(frame, patternsize, centers);
    if (patternfound)
    {
        drawChessboardCorners(frame, patternsize, Mat(centers), patternfound);
        //imshow("window", frame);
        //int key = cv::waitKey(0) & 0xff;
        solvePnP(Mat(obj_points), Mat(centers), cameraMatrix, distortionCoefficients, rvec, tvec);
        Mat R;
        Rodrigues(rvec, R); // R is 3x3
        R_target2cam.push_back(R);
        t_target2cam.push_back(tvec);
        Mat T = Mat::eye(4, 4, R.type()); // T is 4x4
        T(Range(0, 3), Range(0, 3)) = R * 1; // copies R into T
        T(Range(0, 3), Range(3, 4)) = tvec * 1; // copies tvec into T
        cout << "T = " << endl << " " << T << endl << endl;
    }
    cout << patternfound << endl;
}
Vec3f theta_01{ deg2rad(-153.61), deg2rad(8.3), deg2rad(-91.91) };
Vec3f theta_02{ deg2rad(-166.71), deg2rad(3.04), deg2rad(-93.31) };
Vec3f theta_03{ deg2rad(-170.04), deg2rad(24.92), deg2rad(-88.29) };
Vec3f theta_04{ deg2rad(-165.71), deg2rad(24.68), deg2rad(-84.85) };
Vec3f theta_05{ deg2rad(-160.18), deg2rad(-15.94),deg2rad(-56.24) };
Vec3f theta_06{ deg2rad(175.68), deg2rad(10.95), deg2rad(180) };
Vec3f theta_07{ deg2rad(175.73), deg2rad(45.78), deg2rad(-179.92) };
Vec3f theta_08{ deg2rad(-165.34), deg2rad(47.37), deg2rad(-166.25) };
Vec3f theta_09{ deg2rad(-165.62), deg2rad(17.95), deg2rad(-166.17) };
Vec3f theta_10{ deg2rad(-151.99), deg2rad(-14.59),deg2rad(-94.19) };
Mat robot_rot_01 = eulerAnglesToRotationMatrix(theta_01);
Mat robot_rot_02 = eulerAnglesToRotationMatrix(theta_02);
Mat robot_rot_03 = eulerAnglesToRotationMatrix(theta_03);
Mat robot_rot_04 = eulerAnglesToRotationMatrix(theta_04);
Mat robot_rot_05 = eulerAnglesToRotationMatrix(theta_05);
Mat robot_rot_06 = eulerAnglesToRotationMatrix(theta_06);
Mat robot_rot_07 = eulerAnglesToRotationMatrix(theta_07);
Mat robot_rot_08 = eulerAnglesToRotationMatrix(theta_08);
Mat robot_rot_09 = eulerAnglesToRotationMatrix(theta_09);
Mat robot_rot_10 = eulerAnglesToRotationMatrix(theta_10);
const Mat robot_tr_01 = (Mat_<float>(3, 1) << 781.2, 338.59, 903.48);
const Mat robot_tr_02 = (Mat_<float>(3, 1) << 867.65, 382.52, 884.42);
const Mat robot_tr_03 = (Mat_<float>(3, 1) << 856.91, 172.99, 964.61);
const Mat robot_tr_04 = (Mat_<float>(3, 1) << 748.81, 146.75, 1043.29);
const Mat robot_tr_05 = (Mat_<float>(3, 1) << 627.66, 554.08, 920.85);
const Mat robot_tr_06 = (Mat_<float>(3, 1) << 715.06, 195.96, 889.38);
const Mat robot_tr_07 = (Mat_<float>(3, 1) << 790.9, 196.29, 1117.38);
const Mat robot_tr_08 = (Mat_<float>(3, 1) << 743.5, 283.93, 1131.92);
const Mat robot_tr_09 = (Mat_<float>(3, 1) << 748.9, 288.19, 910.58);
const Mat robot_tr_10 = (Mat_<float>(3, 1) << 813.18, 400.44, 917.16);
R_gripper2base.push_back(robot_rot_01);
R_gripper2base.push_back(robot_rot_02);
R_gripper2base.push_back(robot_rot_03);
R_gripper2base.push_back(robot_rot_04);
R_gripper2base.push_back(robot_rot_05);
R_gripper2base.push_back(robot_rot_06);
R_gripper2base.push_back(robot_rot_07);
R_gripper2base.push_back(robot_rot_08);
R_gripper2base.push_back(robot_rot_09);
R_gripper2base.push_back(robot_rot_10);
t_gripper2base.push_back(robot_tr_01);
t_gripper2base.push_back(robot_tr_02);
t_gripper2base.push_back(robot_tr_03);
t_gripper2base.push_back(robot_tr_04);
t_gripper2base.push_back(robot_tr_05);
t_gripper2base.push_back(robot_tr_06);
t_gripper2base.push_back(robot_tr_07);
t_gripper2base.push_back(robot_tr_08);
t_gripper2base.push_back(robot_tr_09);
t_gripper2base.push_back(robot_tr_10);
calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam, R_cam2gripper, t_cam2gripper, CALIB_HAND_EYE_TSAI);
Vec3f R_cam2gripper_r = rotationMatrixToEulerAngles(R_cam2gripper);
cout << "R_cam2gripper = " << endl << " " << R_cam2gripper << endl << endl;
cout << "R_cam2gripper_r = " << endl << " " << R_cam2gripper_r << endl << endl;
cout << "t_cam2gripper = " << endl << " " << t_cam2gripper << endl << endl;
}
Mat eulerAnglesToRotationMatrix(Vec3f &theta)
{
// Calculate rotation about x axis
Mat R_x = (Mat_<double>(3, 3) <<
1, 0, 0,
0, cos(theta[0]), -sin(theta[0]),
0, sin(theta[0]), cos(theta[0])
);
// Calculate rotation about y axis
Mat R_y = (Mat_<double>(3, 3) <<
cos(theta[1]), 0, sin(theta[1]),
0, 1, 0,
-sin(theta[1]), 0, cos(theta[1])
);
// Calculate rotation about z axis
Mat R_z = (Mat_<double>(3, 3) <<
cos(theta[2]), -sin(theta[2]), 0,
sin(theta[2]), cos(theta[2]), 0,
0, 0, 1);
// Combined rotation matrix
Mat R = R_z * R_y * R_x;
return R;
}
float rad2deg(float radian) {
double pi = 3.14159;
return(radian * (180 / pi));
}
float deg2rad(float degree) {
double pi = 3.14159;
return(degree * (pi / 180));
}
// Checks if a matrix is a valid rotation matrix.
bool isRotationMatrix(Mat &R)
{
Mat Rt;
transpose(R, Rt);
Mat shouldBeIdentity = Rt * R;
Mat I = Mat::eye(3, 3, shouldBeIdentity.type());
return norm(I, shouldBeIdentity) < 1e-6;
}
// Calculates rotation matrix to euler angles
// The result is the same as MATLAB except the order
// of the euler angles ( x and z are swapped ).
Vec3f rotationMatrixToEulerAngles(Mat &R)
{
assert(isRotationMatrix(R));
float sy = sqrt(R.at<double>(0, 0) * R.at<double>(0, 0) + R.at<double>(1, 0) * R.at<double>(1, 0));
bool singular = sy < 1e-6; // if sy is close to zero, the matrix is near gimbal lock
float x, y, z;
if (!singular)
{
    x = atan2(R.at<double>(2, 1), R.at<double>(2, 2));
    y = atan2(-R.at<double>(2, 0), sy);
    z = atan2(R.at<double>(1, 0), R.at<double>(0, 0));
}
else
{
    x = atan2(-R.at<double>(1, 2), R.at<double>(1, 1));
    y = atan2(-R.at<double>(2, 0), sy);
    z = 0;
}
return Vec3f(x, y, z);
}
The result the function gives me is the following:
R_cam2gripper =
[0.3099803593003124, -0.8923086952824562, -0.3281727733547833;
0.7129271761196039, 0.4465219155360299, -0.5406967916458927;
0.6290047840821058, -0.0663579028402444, 0.7745641421680119]
R_cam2gripper_r =
[-0.0854626, -0.680272, 1.16065]
t_cam2gripper =
[-35.02063730299775;
-74.80633768251272;
-307.6725851251873]
I am getting 'good' results from other software: with those, the robot reaches the exact points I pick in the camera image (I have a 3D camera, from which I get x, y, z in the camera frame), so they are certainly correct, but I am having trouble reproducing the same result with the OpenCV function.
Sorry for the long introduction to my problem. Any idea why the solution is not what it is supposed to be? My guess is that I have a problem understanding the angles or converting them, but I couldn't find a way to solve this. Any hint is welcome!
I actually managed to solve this problem. The general idea was correct, but:
- I was not interpreting correctly the rotation-vector notation the robot was giving me; the values had to be multiplied by a factor.
- I created a new program that extracts, directly from the robot and the pictures, the matrices that the algorithm requires and writes these values to a YML file.
- The CALIB_HAND_EYE_TSAI method wasn't giving me correct values, but with the other four methods the values seem to converge to the actual ones.
Anyway, thank you for your help. I am now stuck trying to get more precision out of the algorithm, but that's for another question.
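On the third point, a minimal sketch of how the available methods can be compared on the same data (reusing the R_gripper2base / t_gripper2base / R_target2cam / t_target2cam vectors built in the code above; the method list is the one offered by OpenCV 4.1):

// Run every hand-eye method on the same input and print the results side by side
vector<pair<HandEyeCalibrationMethod, string>> methods = {
    { CALIB_HAND_EYE_TSAI,       "TSAI" },
    { CALIB_HAND_EYE_PARK,       "PARK" },
    { CALIB_HAND_EYE_HORAUD,     "HORAUD" },
    { CALIB_HAND_EYE_ANDREFF,    "ANDREFF" },
    { CALIB_HAND_EYE_DANIILIDIS, "DANIILIDIS" }
};
for (const auto& m : methods)
{
    Mat R_cg, t_cg;
    calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
                     R_cg, t_cg, m.first);
    cout << m.second << ":" << endl << " R = " << R_cg << endl << " t = " << t_cg << endl;
}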

Using getOptimalNewCameraMatrix to recover the whole original image

[Screenshots: program output and the distorted input image]
To recover all the pixels of the original image I used getOptimalNewCameraMatrix, but the output is terrible; my intrinsics and distortion coefficients are definitely correct.
Here is my program:
Mat src = imread("E:\\40_office\\distorted_bot\\0.jpg");
Size newsize(2280,3072);
cout << src.rows;
namedWindow("", WINDOW_AUTOSIZE);
imshow("", src);
waitKey();
Mat mapx,mapy;//size(x,y)
Mat cameraMatrix = (Mat_<double>(3, 3) << 1224.1, 0, 761.7497, 0, 1209.8, 1043.6, 0, 0, 1);
//Mat cameraMatrix = (Mat_<double>(3, 3) << 1224.1, 0, 1141.7497, 0, 1209.8, 1442.6, 0, 0, 1);
//Mat cameraMatrix = (Mat_<double>(3, 3) << 1209.8, 0, 1043.6, 0, 1224.1, 761.7497, 0, 0, 1);
Mat distCoeffs = (Mat_<double>(5, 1) << -0.3649, 0.1451, -0.0273, -0.000035214,0.0012);
double alpha = 1;
Mat newcameramatrix = getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, src.size(), alpha, newsize,0);
cout << newcameramatrix<<"src.size"<<src.size();
fisheye::initUndistortRectifyMap(cameraMatrix, distCoeffs,Mat(), newcameramatrix ,newsize, CV_16SC2, mapx,mapy);
Mat newimage=Mat(newsize,CV_8UC3);
remap(src, newimage, mapx, mapy, INTER_LINEAR);
imwrite("C:\\Users\\wk\\Desktop\\1_cali100.jpg", newimage);
return;
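One thing that stands out (my observation, not a confirmed diagnosis): getOptimalNewCameraMatrix and a 5-element distCoeffs vector belong to the standard pinhole distortion model, while fisheye::initUndistortRectifyMap expects the 4-coefficient fisheye model. A minimal sketch of the standard, non-fisheye path with the same matrices would be:

// Standard (non-fisheye) undistortion, reusing cameraMatrix, distCoeffs, src and newsize
Mat newCameraMatrix = getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, src.size(), 1.0, newsize, 0);
Mat mapx2, mapy2;
initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(), newCameraMatrix, newsize, CV_16SC2, mapx2, mapy2);
Mat undistorted;
remap(src, undistorted, mapx2, mapy2, INTER_LINEAR);
// If the lens really is a fisheye, the fisheye:: functions expect a 4-element
// distortion vector and fisheye::estimateNewCameraMatrixForUndistortRectify
// instead of getOptimalNewCameraMatrix.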

Bug in cv::warpAffine?

I think the following example shows a bug in warpAffine (OpenCV 3.1 with precompiled Win64 DLLs):
Mat x(1,20, CV_32FC1);
for (int iCol(0); iCol<x.cols; iCol++) { x.col(iCol).setTo(iCol); }
Mat crop;
Point2d c(10., 0.);
double scale(1.3);
int cropSz(11);
double vals[6] = { scale, 0.0, c.x-(cropSz/2)*scale, 0.0, scale, c.y };
Mat map(2, 3, CV_64FC1, vals);
warpAffine(x, crop, map, Size(cropSz, 1), WARP_INVERSE_MAP | INTER_LINEAR);
float dx = (crop.at<float>(0, crop.cols-1) - crop.at<float>(0, 0))/(crop.cols-1);
Mat constGrad = crop.clone().setTo(0);
for (int iCol(0); iCol<constGrad.cols; iCol++) {
    constGrad.col(iCol) = c.x + (iCol-cropSz/2)*scale;
}
Mat diff = crop - constGrad;
double err = norm(diff, NORM_INF);
if (err>1e-4) {
    cout << "Problem:" << endl;
    cout << "computed output: " << crop << endl;
    cout << "expected output: " << constGrad << endl;
    cout << "difference: " << diff << endl;
    Mat dxImg;
    Mat dxFilt(1, 2, CV_32FC1);
    dxFilt.at<float>(0) = -1.0f;
    dxFilt.at<float>(1) = 1.0f;
    filter2D(crop, dxImg, crop.depth(), dxFilt);
    cout << "x-derivative in computed output: " << dxImg(Rect(1,0,10,1)) << endl;
    cout << "Note: We expect a constant difference of 1.3" << endl;
}
Here is the program output:
Problem:
computed output: [3.5, 4.8125, 6.09375, 7.40625, 8.6875, 10, 11.3125, 12.59375, 13.90625, 15.1875, 16.5]
expected output: [3.5, 4.8000002, 6.0999999, 7.4000001, 8.6999998, 10, 11.3, 12.6, 13.9, 15.2, 16.5]
difference: [0, 0.012499809, -0.0062499046, 0.0062499046, -0.012499809, 0, 0.012499809, -0.0062503815, 0.0062503815, -0.012499809, 0]
x-derivative in computed output: [1.3125, 1.28125, 1.3125, 1.28125, 1.3125, 1.3125, 1.28125, 1.3125, 1.28125, 1.3125]
Note: We expect a constant difference of 1.3
I create an image with entries 0, 1, 2, ..., n-1 and cut out a region around (10, 0) with scale 1.3. I also create the expected image constGrad. However, they are not the same. Moreover, since the input image has a constant derivative in the x-direction and the mapping is affine, I also expect a constant gradient in the resulting image.
The problem is not a boundary issue; the same thing happens in the interior of an image. It is also not related to WARP_INVERSE_MAP.
Is this a known issue? Any comments on this?
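Not an official statement about the library internals, but warpAffine with INTER_LINEAR uses fixed-point interpolation, so the fractional part of each source coordinate is quantized to 1/32 of a pixel. Under that assumption the output above can be predicted exactly, because the input image's value equals its x coordinate; a sketch reusing crop, c, scale and cropSz from the example:

// Predict warpAffine's result by quantizing the source x coordinate to multiples of 1/32
Mat quantized = crop.clone().setTo(0);
for (int iCol(0); iCol < quantized.cols; iCol++) {
    double srcX = c.x + (iCol - cropSz/2)*scale;    // exact source coordinate
    quantized.col(iCol) = cvRound(srcX*32.0)/32.0;  // 5-bit fixed-point fraction
}
cout << "fixed-point prediction: " << quantized << endl;
// Prints [3.5, 4.8125, 6.09375, 7.40625, 8.6875, 10, ...], which matches the
// "computed output" above, suggesting quantization rather than a bug.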

Application error (0xc000007b) when running OpenCV example in Visual Studio 2015

I am running the Canny edge example in Visual Studio 2015 and I got this error:
The application was unable to start correctly (0xc000007b).
Then Visual Studio shows this error:
Unhandled exception at 0x77A2D5B2 (ntdll.dll) in Canny Edge.exe: 0xC000007B: %hs is either not designed to run on Windows or it contains an error. Try installing the program again using the original installation media or contact your system administrator or the software vendor for support. Error status 0x.
I am quite sure this code works, as I ran it before in Visual Studio 2013. Here is my code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <algorithm>
using namespace cv;
using namespace std;
void help()
{
cout << "\nThis program demonstrates line finding with the Hough transform.\n"
"Usage:\n"
"./houghlines <image_name>, Default is pic1.jpg\n" << endl;
}
bool less_by_y(const cv::Point& lhs, const cv::Point& rhs)
{
return lhs.y < rhs.y;
}
int main(int argc, char** argv)
{
const char* filename = argc >= 2 ? argv[1] : "pic1.jpg";
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
Rect roi;
Mat src = imread("test_4_1.png");
if (src.empty())
{
    help();
    cout << "can not open " << filename << endl;
    return -1;
}
Mat dst, cdst;
Canny(src, dst, 50, 200, 3);
cvtColor(dst, cdst, CV_GRAY2BGR);
findContours(dst, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
//vector<Vec2f> lines;
//HoughLines(dst, lines, 1, CV_PI / 180, 50, 0, 0);
//for (size_t i = 0; i < lines.size(); i++)
//{
// float rho = lines[i][0], theta = lines[i][1];
// Point pt1, pt2;
// double a = cos(theta), b = sin(theta);
// double x0 = a*rho, y0 = b*rho;
// pt1.x = cvRound(x0 + 1000 * (-b));
// pt1.y = cvRound(y0 + 1000 * (a));
// pt2.x = cvRound(x0 - 1000 * (-b));
// pt2.y = cvRound(y0 - 1000 * (a));
// line(cdst, pt1, pt2, Scalar(0, 0, 255), 1, CV_AA);
// cout << pt1 << " " << pt2 << endl;
//}
vector<Vec4i> lines;
HoughLinesP(dst, lines, 1, CV_PI / 180, 30, 50, 10);
for (size_t i = 0; i < lines.size(); i++)
{
    Vec4i l = lines[i];
    line(cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 0, 255), 1, CV_AA);
    cout << l << endl;
}
cout << endl << lines.size() << endl;
cout << arcLength(contours[0], true) << endl;
cout << dst.size() << endl << endl;
for (int a = 0; a < contours[0].size(); a++) {
    cout << contours[0][a] << " ";
}
vector<Point> test = contours[0];
auto mmx = std::minmax_element(test.begin(), test.end(), less_by_y);
cout << endl << *mmx.first._Ptr << endl << *mmx.second._Ptr;
vector<Point> test2 = contours[1];
auto mmx_1 = std::minmax_element(test2.begin(), test2.end(), less_by_y);
cout << endl << *mmx_1.first._Ptr << endl << *mmx_1.second._Ptr;
imshow("source", src);
imshow("detected lines", cdst);
/* ROI by creating mask for the parallelogram */
Mat mask = cvCreateMat(dst.size().height, dst.size().width, CV_8UC1);
// Create black image with the same size as the original
for (int i = 0; i < mask.cols; i++)
    for (int j = 0; j < mask.rows; j++)
        mask.at<uchar>(Point(i, j)) = 0;
cout <<endl<<endl<< *mmx.first._Ptr << *mmx.second._Ptr << *mmx_1.first._Ptr << *mmx_1.second._Ptr << endl;
// Create Polygon from vertices
vector<Point> ROI_Vertices = { *mmx.first._Ptr, *mmx.second._Ptr, *mmx_1.first._Ptr, *mmx_1.second._Ptr};
vector<Point> ROI_Poly;
approxPolyDP(ROI_Vertices, ROI_Poly, 1.0, false);
// Fill polygon white
fillConvexPoly(mask, &ROI_Poly[0], ROI_Poly.size(), 255, 8, 0);
cout << ROI_Poly.size() << endl;
// Create new image for result storage
Mat imageDest = cvCreateMat(dst.size().height, dst.size().width, CV_8UC3);
// Cut out ROI and store it in imageDest
src.copyTo(imageDest, mask);
imshow("mask", mask);
imshow("image", imageDest);
waitKey();
return 0;
}
Actually my comment is the answer, with some additions:
Which OpenCV libs are you linking against? Are you linking against the vc12 (VS2013) binaries? For MSVS 2015 you need libraries built with the vc14 toolset.
OpenCV doesn't come with Visual Studio 2015 pre-built binaries, so you need to build OpenCV yourself for VS2015.
This person seems to have had a similar problem and talks you through how to compile for VS2015.
