I want to rectify stereo images using intrinsic and extrinsic camera parameters obtained from the PhotoModeler software.
I wrote the code below (adapting this gist: https://gist.github.com/anonymous/6586653) and computed the relative rotation and translation parameters, but when I feed the images in, the results are not as expected. I tried to find the error but couldn't.
Your help is really appreciated.
I couldn't upload all the input images here, so I have put the images and the results at this link:
https://www.dropbox.com/s/5tmj9rk91tkrot4/RECTIFICATION_TEST_DATA.docx?dl=0
The code is:
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <iomanip>
#include <fstream>
using namespace std;
using namespace cv;
int main(int argc, char** argv)
{
// Mat img1 = imread("E:\\12_0628.tif", 1);
// Mat img2 = imread("E:\\12_0629.tif", 1);
Mat img1 = imread("E:\\DSC_0483.JPG");
Mat img2 = imread("E:\\DSC_0484.JPG");
//EXTERIOR ORIENTATION FOR THE 1ST IMAGE
double omega1 = -172.672440, phi1 = -80.168311, kappa1 = 163.005082, tx1 = -35.100000, ty1 = -56.700000, tz1 = -59.300000;
//EXTERIOR ORIENTATION FOR THE 2ND IMAGE
double omega2 = 27.576999, phi2 = -67.089920, kappa2 = 2.826051, tx2 = -37.600000, ty2 = -18.600000, tz2 = -41.700000;
//Convert the rotation angles of both images from degrees to radians
omega1 = omega1 * CV_PI / 180.;
phi1 = phi1 * CV_PI / 180.;
kappa1 = kappa1 * CV_PI / 180.;
omega2 = omega2 * CV_PI / 180.;
phi2 = phi2 * CV_PI / 180.;
kappa2 = kappa2 * CV_PI / 180.;
//Rotation matrices of the 1st image
Mat RX1 = (Mat_<double>(3, 3) <<
1, 0, 0,
0, cos(omega1), sin(omega1),
0, -sin(omega1), cos(omega1));
Mat RY1 = (Mat_<double>(3, 3) <<
cos(phi1), 0, -sin(phi1),
0, 1, 0,
sin(phi1), 0, cos(phi1));
Mat RZ1 = (Mat_<double>(3, 3) <<
cos(kappa1), sin(kappa1), 0,
-sin(kappa1), cos(kappa1), 0,
0, 0, 1);
// Composed rotation matrix with (RX, RY, RZ)
Mat R1 = RX1 * RY1 * RZ1;
Mat T1 = (Mat_<double>(3, 1) <<
tx1,
ty1,
tz1);
/////////////////////Rotation matrices of the 2nd image//////////////////////////////////////////
Mat RX2 = (Mat_<double>(3, 3) <<
1, 0, 0,
0, cos(omega2), sin(omega2),
0, -sin(omega2), cos(omega2));
Mat RY2 = (Mat_<double>(3, 3) <<
cos(phi2), 0, -sin(phi2),
0, 1, 0,
sin(phi2), 0, cos(phi2));
Mat RZ2 = (Mat_<double>(3, 3) <<
cos(kappa2), sin(kappa2), 0,
-sin(kappa2), cos(kappa2), 0,
0, 0, 1);
// Composed rotation matrix with (RX, RY, RZ)
Mat R2 = RX2 * RY2 * RZ2;
Mat T2 = (Mat_<double>(3, 1) <<
tx2,
ty2,
tz2);
/////////////////////////////////////////////////////////////
double f = 2284.;// focal length in pixels for the Nikon D40, equivalent to the 18 mm lens focal length
double w = (double)img1.cols;
double h = (double)img1.rows;
Mat M = (Mat_<double>(3, 3) <<//camera matrix
f , 0. , w/2,
0. , f , h/2,
0. , 0. , 1.);
Mat D = (Mat_<double>(5, 1) <<// distortion coefficients
0,
0,
0,
0,
0.
);
Mat R1inv = R1.inv();
Mat Rrel = R2 * R1inv;// relative rotation from camera 1 to camera 2
Mat Trel = (-1 * Rrel) * T1 + T2;// relative translation from camera 1 to camera 2
Mat T = (Mat_<double>(3, 1) <<// translation matrix (hard-coded; not used below, Trel is passed to stereoRectify)
-2376.6,
-740.0,
229.0);
cout << img1.size() << endl;
cout << img2.size() << endl;
//Mat R1, R2; // R1 and R2 from above are reused here as the stereoRectify output (rectification rotations)
Mat P1, P2, Q;
stereoRectify(M, D, M, D, img1.size(), Rrel, Trel, //the input data
R1, R2, P1, P2, Q);//the output data
Mat map1x, map1y, map2x, map2y;
Mat imgdst1, imgdst2;
// Size (flaot)imageSize;
// imageSize = img1.size();
initUndistortRectifyMap(M, D, R1, P1, img1.size(), CV_32FC1, map1x, map1y);
initUndistortRectifyMap(M, D, R2, P2, img1.size(), CV_32FC1, map2x, map2y);
remap(img1, imgdst1, map1x, map1y, INTER_LINEAR, BORDER_CONSTANT, Scalar());
remap(img2, imgdst2, map2x, map2y, INTER_LINEAR, BORDER_CONSTANT, Scalar());
namedWindow("image1");
namedWindow("image2");
imshow("image1", imgdst1);
imshow("image2", imgdst2);
// imwrite("DSC_0906_rect.jpg", imgdst1);
// imwrite("DSC_0913_rect.jpg", imgdst2);
imwrite("E:\\Researches\\2016-2017_res\\2_8_epipolar_geometry\\temp_image\\output1.bmp", imgdst1);
imwrite("E:\\Researches\\2016-2017_res\\2_8_epipolar_geometry\\temp_image\\output2.bmp", imgdst2);
waitKey();
return 0;
}
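For completeness, here is the sanity check I use for the relative pose: a minimal sketch, assuming the exterior orientation is a world-to-camera transform (x_cam = R * X_world + T). It has to run before the stereoRectify call, because R1 and R2 are reused there as output arguments.
// Hypothetical check (not part of the code above): push an arbitrary world
// point through both cameras and through the relative transform.
Mat Xw = (Mat_<double>(3, 1) << 10.0, 5.0, 2.0); // arbitrary world point
Mat xc1 = R1 * Xw + T1;                          // point in the camera-1 frame
Mat xc2 = R2 * Xw + T2;                          // point in the camera-2 frame
Mat xc2_check = Rrel * xc1 + Trel;               // should reproduce xc2
cout << "relative-pose residual: " << norm(xc2 - xc2_check) << endl;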
I am currently converting an image from RGB to YCrCb format using the OpenCV function cvtColor. I would like to perform the conversion on my own with equations similar to:
//equations for RGB to YUV conversion
Y' = 0.299 R + 0.587 G + 0.114 B
U = -0.147 R - 0.289 G + 0.436 B
V = 0.615 R - 0.515 G - 0.100 B.
I am not able to understand OpenCV's image matrix operations. I would like to access the RGB pixel values from the image Mat so that I can perform the conversion myself. How can I get the R, G, B values from the image, and how do I then apply the transformation? My current code is below.
int main (int argc, char *argv[])
{
// Load in image
cv::Mat src = cv::imread("C:\\openv2410\\frames\\frame_0.png", 1);
// Create a vector for the channels and split the original image into B G R colour channels.
// Keep in mind that OpenCV uses BGR and not RGB images
vector<cv::Mat> spl;
split(src,spl);
// Create a zero (black) single-channel image for filling purposes - will become clear later
// Also create container images for B G R channels as colour images
cv::Mat empty_image = cv::Mat::zeros(src.rows, src.cols, CV_8UC1);
cv::Mat empty_channel = cv::Mat::zeros(src.rows, src.cols, CV_8UC1);
cv::Mat result_blue(src.rows, src.cols, CV_8UC3); // notice the 3 channels here!
cv::Mat result_green(src.rows, src.cols, CV_8UC3); // notice the 3 channels here!
cv::Mat result_red(src.rows, src.cols, CV_8UC3); // notice the 3 channels here!
// Create blue channel
cv::Mat in1[] = { spl[0], empty_image, empty_image };
int from_to1[] = { 0,0, 1,1, 2,2 };
mixChannels( in1, 3, &result_blue, 1, from_to1, 3 );
// Create green channel
cv::Mat in2[] = { empty_channel, spl[1], empty_image };
int from_to2[] = { 0,0, 1,1, 2,2 };
mixChannels( in2, 3, &result_green, 1, from_to2, 3 );
// Create red channel
cv::Mat in3[] = { empty_channel, empty_channel, spl[2]};
int from_to3[] = { 0,0, 1,1, 2,2 };
mixChannels( in3, 3, &result_red, 1, from_to3, 3 );
imshow("blue channel",result_blue);
imshow("green channel",result_green);
imshow("red channel",result_red);
cv::waitKey(0);
return 0;
}
Sample code for conversion from BGR to YCrCb. Source(1).
//sample input and output
float data[3] = { 98, 76, 88 };// one sample BGR pixel
Mat input(1, 1, CV_32FC3, data);
Mat output(1, 1, CV_32FC3);
//iterate over all pixels
for(int i = 0; i < input.rows; i++) {
for(int j = 0; j < input.cols; j++) {
//get bgr pixel
Vec3f bgrPixel = input.at<Vec3f>(i, j);
float B = bgrPixel[0];
float G = bgrPixel[1];
float R = bgrPixel[2];
//actual conversion from BGR to YCrCb
float delta = 0.5f;
float Y = 0.299f * R + 0.587f * G + 0.114f * B;
float Cb = (B - Y) * 0.564f + delta;
float Cr = (R - Y) * 0.713f + delta;
//store into result image
Vec3f yCrCbPixel( Y, Cr, Cb );
output.at<Vec3f>(i, j) = yCrCbPixel;
}
}
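The sample above operates on a CV_32FC3 Mat. A minimal sketch of the same per-pixel conversion applied directly to an 8-bit BGR image (assuming OpenCV's CV_8U convention, where the chroma offset delta is 128) could look like this:
// Hypothetical sketch: per-pixel BGR -> YCrCb for an 8-bit image.
cv::Mat bgr = cv::imread("C:\\openv2410\\frames\\frame_0.png", 1);
cv::Mat ycrcb(bgr.size(), CV_8UC3);
for (int i = 0; i < bgr.rows; i++) {
    for (int j = 0; j < bgr.cols; j++) {
        cv::Vec3b p = bgr.at<cv::Vec3b>(i, j);
        float B = p[0], G = p[1], R = p[2];
        float Y  = 0.299f * R + 0.587f * G + 0.114f * B;
        float Cr = (R - Y) * 0.713f + 128.f;
        float Cb = (B - Y) * 0.564f + 128.f;
        ycrcb.at<cv::Vec3b>(i, j) = cv::Vec3b(
            cv::saturate_cast<uchar>(Y),
            cv::saturate_cast<uchar>(Cr),
            cv::saturate_cast<uchar>(Cb));
    }
}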
I am having problems with OpenCV. I used the sample code from the book "Learning OpenCV". The code computes all of the intrinsics and extrinsics of the two cameras, but when I go to remap the images, all I get is a blank image. I use 6 images from each camera, with a 9x6 chessboard. The input file alternates between left and right images (the lr = i % 2 made me think that); the list-file format I assume is sketched after the code.
Below is my code. I only added the cvRemap() call towards the end.
#undef _GLIBCXX_DEBUG
#include <opencv\cv.h>
#include <opencv\cxmisc.h>
#include <opencv\highgui.h>
#include <vector>
#include <string>
#include <algorithm>
#include <stdio.h>
#include <ctype.h>
#include <Windows.h>
using namespace std;
//
// Given a list of chessboard images, the number of corners (nx, ny)
// on the chessboards, and a flag: useCalibrated for calibrated (0) or
// uncalibrated (1: use cvStereoCalibrate(), 2: compute fundamental
// matrix separately) stereo. Calibrate the cameras and display the
// rectified results along with the computed disparity images.
//
static void
StereoCalib(const char* imageList, int useUncalibrated)
{
IplImage* L_img1 = cvLoadImage("bad1.bmp");
IplImage* R_img1 = cvLoadImage("good1.bmp");
IplImage* fixed_L = cvCloneImage(L_img1);
IplImage* fixed_R = cvCloneImage(R_img1);
CvRect roi1, roi2;
int nx = 0, ny = 0;
int displayCorners = 1;
int showUndistorted = 1;
bool isVerticalStereo = false; //OpenCV can handle left-right
//or up-down camera arrangements
const int maxScale = 1;
const float squareSize = 1.f; //Set this to your actual square size
FILE* f = fopen(imageList, "rt");
int i, j, lr, nframes = 0, n, N = 0;
vector<string> imageNames[2];
vector<CvPoint3D32f> objectPoints;
vector<CvPoint2D32f> points[2];
vector<CvPoint2D32f> temp_points[2];
vector<int> npoints;
//vector<uchar> active[2];
int is_found[2] = {0, 0};
vector<CvPoint2D32f> temp;
CvSize imageSize = {0,0};
// ARRAY AND VECTOR STORAGE:
double M1[3][3], M2[3][3], D1[5], D2[5];
double R[3][3], T[3], E[3][3], F[3][3];
double Q[4][4];
CvMat _M1 = cvMat(3, 3, CV_64F, M1 );
CvMat _M2 = cvMat(3, 3, CV_64F, M2 );
CvMat _D1 = cvMat(1, 5, CV_64F, D1 );
CvMat _D2 = cvMat(1, 5, CV_64F, D2 );
CvMat _R = cvMat(3, 3, CV_64F, R );
CvMat _T = cvMat(3, 1, CV_64F, T );
CvMat _E = cvMat(3, 3, CV_64F, E );
CvMat _F = cvMat(3, 3, CV_64F, F );
CvMat _Q = cvMat(4, 4, CV_64FC1, Q);
char buf[1024];
if( displayCorners )
cvNamedWindow( "corners", 1 );
// READ IN THE LIST OF CHESSBOARDS:
if( !f )
{
fprintf(stderr, "can not open file %s\n", imageList );
Sleep(2000);
return;
}
if( !fgets(buf, sizeof(buf)-3, f) || sscanf(buf, "%d%d", &nx, &ny) != 2 )
return;
n = nx*ny;
temp.resize(n);
temp_points[0].resize(n);
temp_points[1].resize(n);
for(i=0;;i++)
{
int count = 0, result=0;
lr = i % 2;
vector<CvPoint2D32f>& pts = temp_points[lr];//points[lr];
if( !fgets( buf, sizeof(buf)-3, f ))
break;
size_t len = strlen(buf);
while( len > 0 && isspace(buf[len-1]))
buf[--len] = '\0';
if( buf[0] == '#')
continue;
IplImage* img = cvLoadImage( buf, 0 );
if( !img )
break;
imageSize = cvGetSize(img);
imageNames[lr].push_back(buf);
//FIND CHESSBOARDS AND CORNERS THEREIN:
for( int s = 1; s <= maxScale; s++ )
{
IplImage* timg = img;
if( s > 1 )
{
timg = cvCreateImage(
cvSize(img->width*s,img->height*s),
img->depth, img->nChannels
);
cvResize( img, timg, CV_INTER_CUBIC );
}
result = cvFindChessboardCorners(
timg, cvSize(nx, ny),
&temp[0], &count,
CV_CALIB_CB_ADAPTIVE_THRESH |
CV_CALIB_CB_NORMALIZE_IMAGE
);
if( timg != img )
cvReleaseImage( &timg );
if( result || s == maxScale )
for( j = 0; j < count; j++ )
{
temp[j].x /= s;
temp[j].y /= s;
}
if( result )
break;
}
if( displayCorners )
{
printf("%s\n", buf);
IplImage* cimg = cvCreateImage( imageSize, 8, 3 );
cvCvtColor( img, cimg, CV_GRAY2BGR );
cvDrawChessboardCorners(
cimg, cvSize(nx, ny), &temp[0],
count, result
);
IplImage* cimg1 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
cvResize(cimg, cimg1);
cvShowImage( "corners", cimg1 );
cvReleaseImage( &cimg );
cvReleaseImage( &cimg1 );
int c = cvWaitKey(1000);
if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
exit(-1);
}
else
putchar('.');
//N = pts.size();
//pts.resize(N + n, cvPoint2D32f(0,0));
//active[lr].push_back((uchar)result);
is_found[lr] = result > 0 ? 1 : 0;
//assert( result != 0 );
if( result )
{
//Calibration will suffer without subpixel interpolation
cvFindCornerSubPix(
img, &temp[0], count,
cvSize(11, 11), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 0.01)
);
copy( temp.begin(), temp.end(), pts.begin() );
}
cvReleaseImage( &img );
if(lr)
{
if(is_found[0] == 1 && is_found[1] == 1)
{
assert(temp_points[0].size() == temp_points[1].size());
int current_size = points[0].size();
points[0].resize(current_size + temp_points[0].size(), cvPoint2D32f(0.0, 0.0));
points[1].resize(current_size + temp_points[1].size(), cvPoint2D32f(0.0, 0.0));
copy(temp_points[0].begin(), temp_points[0].end(), points[0].begin() + current_size);
copy(temp_points[1].begin(), temp_points[1].end(), points[1].begin() + current_size);
nframes++;
printf("Pair successfully detected...\n");
}
is_found[0] = 0;
is_found[1] = 0;
}
}
fclose(f);
printf("\n");
// HARVEST CHESSBOARD 3D OBJECT POINT LIST:
objectPoints.resize(nframes*n);
for( i = 0; i < ny; i++ )
for( j = 0; j < nx; j++ )
objectPoints[i*nx + j] = cvPoint3D32f(i*squareSize, j*squareSize, 0);
for( i = 1; i < nframes; i++ )
copy(
objectPoints.begin(), objectPoints.begin() + n,
objectPoints.begin() + i*n
);
npoints.resize(nframes,n);
N = nframes*n;
CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
cvSetIdentity(&_M1);
cvSetIdentity(&_M2);
cvZero(&_D1);
cvZero(&_D2);
// CALIBRATE THE STEREO CAMERAS
printf("Running stereo calibration ...");
fflush(stdout);
cvStereoCalibrate(
&_objectPoints, &_imagePoints1,
&_imagePoints2, &_npoints,
&_M1, &_D1, &_M2, &_D2,
imageSize, &_R, &_T, &_E, &_F,
cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
CV_CALIB_FIX_ASPECT_RATIO +
CV_CALIB_ZERO_TANGENT_DIST +
CV_CALIB_SAME_FOCAL_LENGTH +
CV_CALIB_FIX_K3
);
printf(" done\n");
// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
vector<CvPoint3D32f> lines[2];
points[0].resize(N);
points[1].resize(N);
_imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
_imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
lines[0].resize(N);
lines[1].resize(N);
CvMat _L1 = cvMat(1, N, CV_32FC3, &lines[0][0]);
CvMat _L2 = cvMat(1, N, CV_32FC3, &lines[1][0]);
//Always work in undistorted space
cvUndistortPoints(
&_imagePoints1, &_imagePoints1,
&_M1, &_D1, 0, &_M1
);
cvUndistortPoints(
&_imagePoints2, &_imagePoints2,
&_M2, &_D2, 0, &_M2
);
cvComputeCorrespondEpilines( &_imagePoints1, 1, &_F, &_L1 );
cvComputeCorrespondEpilines( &_imagePoints2, 2, &_F, &_L2 );
double avgErr = 0;
for( i = 0; i < N; i++ )
{
double err =
fabs(
points[0][i].x*lines[1][i].x +
points[0][i].y*lines[1][i].y + lines[1][i].z
) +
fabs(
points[1][i].x*lines[0][i].x +
points[1][i].y*lines[0][i].y + lines[0][i].z
);
avgErr += err;
}
printf( "avg err = %g\n", avgErr/(nframes*n) );
// save intrinsic parameters
CvFileStorage* fstorage = cvOpenFileStorage("intrinsics.yml", NULL, CV_STORAGE_WRITE);
cvWrite(fstorage, "M1", &_M1);
cvWrite(fstorage, "D1", &_D1);
cvWrite(fstorage, "M2", &_M2);
cvWrite(fstorage, "D2", &_D2);
cvReleaseFileStorage(&fstorage);
//COMPUTE AND DISPLAY RECTIFICATION
if( showUndistorted )
{
CvMat* mx1 = cvCreateMat( imageSize.height, imageSize.width, CV_32F );
CvMat* my1 = cvCreateMat( imageSize.height, imageSize.width, CV_32F );
CvMat* mx2 = cvCreateMat( imageSize.height, imageSize.width, CV_32F );
CvMat* my2 = cvCreateMat( imageSize.height, imageSize.width, CV_32F );
CvMat* img1r = cvCreateMat( imageSize.height, imageSize.width, CV_8U );
CvMat* img2r = cvCreateMat( imageSize.height, imageSize.width, CV_8U );
CvMat* disp = cvCreateMat( imageSize.height, imageSize.width, CV_16S );
double R1[3][3], R2[3][3], P1[3][4], P2[3][4];
CvMat _R1 = cvMat(3, 3, CV_64F, R1);
CvMat _R2 = cvMat(3, 3, CV_64F, R2);
// IF BY CALIBRATED (BOUGUET'S METHOD)
if( useUncalibrated == 0 )
{
CvMat _P1 = cvMat(3, 4, CV_64F, P1);
CvMat _P2 = cvMat(3, 4, CV_64F, P2);
cvStereoRectify(
&_M1, &_M2, &_D1, &_D2, imageSize,
&_R, &_T,
&_R1, &_R2, &_P1, &_P2, &_Q,
CV_CALIB_ZERO_DISPARITY,
1, imageSize, &roi1, &roi2
);
CvFileStorage* file = cvOpenFileStorage("extrinsics.yml", NULL, CV_STORAGE_WRITE);
cvWrite(file, "R", &_R);
cvWrite(file, "T", &_T);
cvWrite(file, "R1", &_R1);
cvWrite(file, "R2", &_R2);
cvWrite(file, "P1", &_P1);
cvWrite(file, "P2", &_P2);
cvWrite(file, "Q", &_Q);
cvReleaseFileStorage(&file);
isVerticalStereo = fabs(P2[1][3]) > fabs(P2[0][3]);
if(!isVerticalStereo)
roi2.x += imageSize.width;
else
roi2.y += imageSize.height;
//Precompute maps for cvRemap()
cvNamedWindow( "Original" );
cvNamedWindow( "Fixed" );
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_P1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_P2,mx2,my2);
cvRemap(R_img1, fixed_R, mx2, my2);
cvShowImage("Original", R_img1);
cvShowImage("Fixed", fixed_R);
while(1){
int c = cvWaitKey(15);
if(c == 'p') {
c = 0;
while(c != 'p' && c != 27) {
c = cvWaitKey(250);
}
}
if(c == 27)
break;
}// end while
}
//OR ELSE HARTLEY'S METHOD
else if( useUncalibrated == 1 || useUncalibrated == 2 )
// use intrinsic parameters of each camera, but
// compute the rectification transformation directly
// from the fundamental matrix
{
double H1[3][3], H2[3][3], iM[3][3];
CvMat _H1 = cvMat(3, 3, CV_64F, H1);
CvMat _H2 = cvMat(3, 3, CV_64F, H2);
CvMat _iM = cvMat(3, 3, CV_64F, iM);
//Just to show you could have independently used F
if( useUncalibrated == 2 )
cvFindFundamentalMat(&_imagePoints1, &_imagePoints2, &_F);
cvStereoRectifyUncalibrated(
&_imagePoints1, &_imagePoints2, &_F,
imageSize,
&_H1, &_H2, 3
);
cvInvert(&_M1, &_iM);
cvMatMul(&_H1, &_M1, &_R1);
cvMatMul(&_iM, &_R1, &_R1);
cvInvert(&_M2, &_iM);
cvMatMul(&_H2, &_M2, &_R2);
cvMatMul(&_iM, &_R2, &_R2);
//Precompute map for cvRemap()
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_M1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_M2,mx2,my2);
}
else
assert(0);
cvReleaseMat( &mx1 );
cvReleaseMat( &my1 );
cvReleaseMat( &mx2 );
cvReleaseMat( &my2 );
cvReleaseMat( &img1r );
cvReleaseMat( &img2r );
cvReleaseMat( &disp );
}
}
int main(int argc, char** argv)
{
StereoCalib(argc > 1 ? argv[1] : "stereo_calib.txt", 0);
return 0;
}
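For context, the image list file passed to StereoCalib (stereo_calib.txt) follows the format the code parses: the first line holds the corner counts nx and ny, lines starting with # are skipped, and the remaining lines alternate left and right image paths. The file names below are placeholders:
9 6
left01.bmp
right01.bmp
left02.bmp
right02.bmp
# ... and so on for the remaining pairs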
Below are the extrinsic matrices obtained from the program.
R: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 9.9997887582765532e-001, 4.2746998112201760e-003,
-4.8964109286960510e-003, -4.1317666335754111e-003,
9.9957553950354616e-001, 2.8838677686057253e-002,
5.0176092857428471e-003, -2.8817837665560161e-002,
9.9957208635962669e-001 ]
T: !!opencv-matrix
rows: 3
cols: 1
dt: d
data: [ -8.3141294302865210e-001, -3.2181226087457654e-001,
-4.5924165239318537e-001 ]
R1: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 8.3000228682826938e-001, 3.1110786082949388e-001,
4.6293423160308594e-001, -3.1818678207964091e-001,
9.4578880995670123e-001, -6.5120647036789381e-002,
-4.5809756119155060e-001, -9.3249267508025396e-002,
8.8399728423766677e-001 ]
R2: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 8.2904793019998391e-001, 3.2089684317297251e-001,
4.5793530708249980e-001, -3.1381823995200708e-001,
9.4482404014772625e-001, -9.3944906367255512e-002,
-4.6281491084940990e-001, -6.5823621903907531e-002,
8.8400769741835628e-001 ]
P1: !!opencv-matrix
rows: 3
cols: 4
dt: d
data: [ -4.4953673002726404e+001, 0., -1.3375267505645752e+001, 0.,
0., -4.4953673002726404e+001, 2.4430860614776611e+002, 0., 0., 0.,
1., 0. ]
P2: !!opencv-matrix
rows: 3
cols: 4
dt: d
data: [ -4.4953673002726404e+001, 0., -1.3375267505645752e+001,
4.5081911684079330e+001, 0., -4.4953673002726404e+001,
2.4430860614776611e+002, 0., 0., 0., 1., 0. ]
And the intrinsic parameters found are as follows.
M1: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 4.3107336978610317e+002, 0., 3.4686501809547735e+002, 0.,
4.3107336978610317e+002, 1.9221944996848421e+002, 0., 0., 1. ]
D1: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [ -1.6825480517169825e-001, 1.0756945282000266e-001, 0., 0., 0. ]
M2: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 4.3107336978610317e+002, 0., 3.5310162800332756e+002, 0.,
4.3107336978610317e+002, 1.8963116073129768e+002, 0., 0., 1. ]
D2: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [ -1.9546177300030809e-001, 1.7624631189915094e-001, 0., 0., 0. ]
Any help would be much appreciated. I am not very experienced with OpenCV, and I have a hard time wrapping my head around what most of the functions are even doing, so I can't tell where things are going wrong.
I think I found the answer. After much experimenting, it turned out that the CV_CALIB_SAME_FOCAL_LENGTH flag passed to cvStereoCalibrate caused my output images to appear warped and/or not work at all; dropping it fixed the output (the call I ended up with is sketched below). Also, I took many more chessboard pictures with a larger chessboard, and this improved my results quite a bit.
Hope this helps anyone in the future.
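For reference, this is roughly the call I ended up with: the same cvStereoCalibrate invocation as in the question, just without the CV_CALIB_SAME_FOCAL_LENGTH flag.
cvStereoCalibrate(
    &_objectPoints, &_imagePoints1,
    &_imagePoints2, &_npoints,
    &_M1, &_D1, &_M2, &_D2,
    imageSize, &_R, &_T, &_E, &_F,
    cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
    CV_CALIB_FIX_ASPECT_RATIO +
    CV_CALIB_ZERO_TANGENT_DIST +
    CV_CALIB_FIX_K3
);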