Implementing the Lucas-Kanade Method - opencv

I have a problem with the execution of my code.
I am trying to make this work:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int main( )
{
Mat src1,src2;
namedWindow( "corner", CV_WINDOW_AUTOSIZE );
namedWindow( "result", CV_WINDOW_AUTOSIZE );
src1 = imread("RGB_32.png", CV_LOAD_IMAGE_COLOR);
src2 = imread("RGB_33.png", CV_LOAD_IMAGE_COLOR);
//*********************************************************************************
Mat im1,im2;
// imread loads images in BGR order, so CV_BGR2GRAY is the correct conversion code
cvtColor(src1, im1, CV_BGR2GRAY);
cvtColor(src2, im2, CV_BGR2GRAY);
//******************ReduceImageSizeAndDetectCorner**********************************
Mat im2c, tmp = im2;
pyrDown(tmp,im2c,Size( tmp.cols/2, tmp.rows/2));
Mat dst = Mat::zeros( im2c.size(), CV_32FC1 );
int blockSize = 3;
int apertureSize = 3;
double c = 0.09;
cornerHarris(im2c,dst,blockSize, apertureSize, c, BORDER_DEFAULT);
Mat dst_norm, dst_norm_scaled;
normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
convertScaleAbs( dst_norm, dst_norm_scaled );
int w=23,nb_pt=0,l=0, alpha=143;
/// Drawing a circle around corners
for( int j = 0+w; j < dst_norm.rows-w ; j++ ){
for( int i = 0+w; i < dst_norm.cols-w; i++ ){
if( (int) dst_norm.at<float>(j,i) > alpha )
{
circle( im2c, Point( i, j ), 1, Scalar::all(255), 4, 8, 0 ); // radius is an int (0.5 truncates to 0), and im2c is single-channel, so a BGR color does not apply
nb_pt ++ ;
}
}
}
Mat C = Mat::zeros(nb_pt, 2, CV_32FC1);
for( int j = 0+w; j < dst_norm.rows-w ; j++ ){
for( int i = 0+w; i < dst_norm.cols-w; i++ ){
if( (int) dst_norm.at<float>(j,i) > alpha ){
C.at <float> (l,0)=j;
C.at <float> (l,1)=i;
l++;
}
}
}
C=2*C;
//******************ImplementLucas&KanadeMethod**************************************
Mat1f Cx,Cy;
Mat Ix_m,Iy_m,It_m,It1,It2;
Cx = (Mat_<float>(2,2) << -1,1,-1,1);
Cy= (Mat_<float>(2,2) << -1,-1,1,1);
Mat Ct1 = (Mat_<float>(2,2) << -1,-1,-1,-1);
Mat Ct2 = Mat::ones( 2, 2, CV_32F );
// request CV_32F output so negative derivative values are not clipped to zero
filter2D(im1,Ix_m,CV_32F,Cx,Point(-1,-1),0,BORDER_DEFAULT);
filter2D(im1,Iy_m,CV_32F,Cy,Point(-1,-1),0,BORDER_DEFAULT);
filter2D(im1,It1,CV_32F,Ct1,Point(-1,-1),0,BORDER_DEFAULT);
// the temporal derivative needs the second frame; with im1 in both terms, It_m is identically zero
filter2D(im2,It2,CV_32F,Ct2,Point(-1,-1),0,BORDER_DEFAULT);
add(It1,It2,It_m);
//initialiser le flot
//Mat u = Mat::zeros( 1, nb_pt, CV_32FC1 );
//Mat v = Mat::zeros( 1, nb_pt, CV_32FC1 );
Mat Ix,Ixd,Iy,Iyd,It,Itd,b,nu,A;
int u,v ;
cv::Scalar color(0,0,255);
int size = 10 ;
int thickness = 10 ;
for (int k=0 ; k < nb_pt ; ++k) {
int j= C.at <float> (k,0);
int i= C.at <float> (k,1);
Ix= Ix_m(Range(j-w,j+w),Range(i-w,i+w));
Iy= Iy_m(Range(j-w,j+w),Range(i-w,i+w));
It= It_m(Range(j-w,j+w),Range(i-w,i+w));
redim(Ix,Ixd);
redim(Iy,Iyd);
redim(It,Itd);
multi(Itd,b);
funct(Ixd,Iyd,A);
Mat inv = A.inv(DECOMP_SVD);
nu = inv * b ;
u= nu.at<float> (0,0);
v= nu.at<float> (1,0); // nu is 2x1, so the second component is at row 1, not column 1
//cout << "u = "<< u << endl ;
//cout << "v = "<< v << endl ;
cvQuiver(src2,i,j,u,v,color, size, thickness);
}
imshow( "result", src2 );*/
waitKey();
}
I have a problem with the for loop: when k = 2, I get a "core dumped" error. But when I run the code case by case, that is to say k = 0, then k = 1, 2, ..., it works.
Any help please?
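Since redim, multi and funct are not shown in the question, the most likely culprit is a shape mismatch inside one of them. Below is a self-contained sketch of the loop body, assuming those helpers do the standard Lucas-Kanade packing (flatten each patch to a column vector, b = -It, A = [Ix Iy]); it is a guess at the intent, not the asker's actual helpers, and it assumes the derivative images are CV_32F:
Mat Ixd = Ix.clone().reshape(0, (int)Ix.total()); // (2w x 2w) patch flattened to an N x 1 column
Mat Iyd = Iy.clone().reshape(0, (int)Iy.total());
Mat Itd = It.clone().reshape(0, (int)It.total());
Mat A;
hconcat(Ixd, Iyd, A); // A = [Ix Iy], N x 2
Mat b = -Itd; // b = -It, N x 1
Mat nu = (A.t() * A).inv(DECOMP_SVD) * (A.t() * b); // 2 x 1 least-squares flow
float u = nu.at<float>(0, 0);
float v = nu.at<float>(1, 0);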

Related

Accurate subpixel edge location in C (OPENCV)

I want to find subpixel edge locations, and I have researched this topic. The subpixel coordinates should look like 152.6, 49.3, ...
I found this document in OpenCV: http://docs.opencv.org/2.4/modules/imgproc/doc/feature_detection.html?highlight=cornersubpix#cornersubpix
And I tried this code:
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src, src_gray;
int maxCorners = 10;
int maxTrackbar = 50;
RNG rng(11111);
char* source_window = "Image";
void goodFeaturesToTrack_Demo( int, void* );
int main( int argc, char** argv )
{
src = imread( "a.png", 1 );
cvtColor( src, src_gray, CV_BGR2GRAY );
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo);
imshow( source_window, src );
goodFeaturesToTrack_Demo( 0, 0 );
waitKey(0);
return(0);
}
void goodFeaturesToTrack_Demo( int, void* )
{
if( maxCorners < 1 )
{ maxCorners = 1; }
vector<Point2f> corners;
double qualityLevel = 0.01;
double minDistance = 10;
int blockSize = 3;
bool useHarrisDetector = false;
double k = 0.04;
Mat copy;
copy = src.clone();
goodFeaturesToTrack( src_gray,corners,maxCorners,qualityLevel,minDistance,Mat(),blockSize,useHarrisDetector,k );
cout<<"** Number of corners detected: "<<corners.size()<<endl;
int r = 4;
for( int i = 0; i < corners.size(); i++ )
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255),rng.uniform(0,255)), -1, 8, 0 ); }
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, copy );
Size winSize = Size( 10, 10 );
Size zeroZone = Size( -1, -1 );
TermCriteria criteria = TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001 );
cornerSubPix( src_gray, corners, winSize, zeroZone, criteria );
for( int i = 0; i < corners.size(); i++ )
{ cout<<" -- Refined Corner ["<<i<<"] ("<<corners[i].x<<","<<corners[i].y<<")"<<endl; }
}
But I got this result: the code finds only the corners at subpixel precision, and I want to find the subpixel positions of edges.
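One common approach (a sketch, not from the original post): refine each Canny edge pixel by fitting a parabola to the gradient magnitude sampled along the gradient direction; the parabola's vertex gives a subpixel offset. The file name a.png is taken from the question:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <cmath>
#include <iostream>
using namespace cv;
int main()
{
Mat gray = imread("a.png", 0);
Mat gx, gy, mag, edges;
Sobel(gray, gx, CV_32F, 1, 0);
Sobel(gray, gy, CV_32F, 0, 1);
magnitude(gx, gy, mag);
Canny(gray, edges, 50, 150);
for (int y = 1; y < edges.rows - 1; y++)
for (int x = 1; x < edges.cols - 1; x++)
{
if (!edges.at<uchar>(y, x)) continue;
float dx = gx.at<float>(y, x), dy = gy.at<float>(y, x);
float n = std::sqrt(dx * dx + dy * dy);
if (n < 1e-3f) continue;
dx /= n; dy /= n; // unit gradient direction
// gradient magnitude at the pixel and one step on either side (nearest neighbour)
float m0 = mag.at<float>(y, x);
float mA = mag.at<float>(y - cvRound(dy), x - cvRound(dx));
float mB = mag.at<float>(y + cvRound(dy), x + cvRound(dx));
float denom = mA - 2 * m0 + mB;
if (std::abs(denom) < 1e-3f) continue;
float offset = 0.5f * (mA - mB) / denom; // parabola vertex, in [-0.5, 0.5]
std::cout << "edge at (" << x + offset * dx << ", " << y + offset * dy << ")" << std::endl;
}
return 0;
}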

OpenCV draw rectangle from webcam with 2 largest objects

I need to draw rectangles around the 2 largest objects from the webcam. I have already managed to draw the contours of the 2 largest objects, but now I am confused about how to draw the 2 largest rectangles. Can someone show me the code, please?
//find and draw contours
void showconvex(Mat &thresh,Mat &frame) {
int largestIndex = 0;
int largestContour = 0;
int secondLargestIndex = 0;
int secondLargestContour = 0;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
//find contours
findContours(thresh, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
/// Find the convex hull object for each contour
vector<vector<Point> >hull(contours.size());
vector<vector<int> >inthull(contours.size());
vector<vector<Vec4i> >defects(contours.size());
for (int i = 0; i < contours.size(); i++)
{
convexHull(Mat(contours[i]), hull[i], false);
convexHull(Mat(contours[i]),inthull[i], false);
if (inthull[i].size()>3)
convexityDefects(contours[i], inthull[i], defects[i]);
}
//find 2 largest contour
for( int i = 0; i< contours.size(); i++ )
{
if(contours[i].size() > largestContour)
{
secondLargestContour = largestContour;
secondLargestIndex = largestIndex;
largestContour = contours[i].size();
largestIndex = i;
}
else if(contours[i].size() > secondLargestContour)
{
secondLargestContour = contours[i].size();
secondLargestIndex = i;
}
}
//show contours of 2 biggest and hull as well
if(contours.size()>0)
{
//check for contouraea function if error occur
//draw the 2 largest contour using previously stored index.
drawContours(frame, contours, largestIndex, CV_RGB(0,255,0), 2, 8, hierarchy);
drawContours(frame, contours, secondLargestIndex, CV_RGB(0,255,0), 2, 8, hierarchy);
}
}
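In terms of the question's own code, the missing step is just a boundingRect() call on the two indices the loop already stores; a minimal sketch reusing the question's variable names:
if (contours.size() > 0)
{
rectangle(frame, boundingRect(contours[largestIndex]), CV_RGB(255, 0, 0), 2);
if (contours.size() > 1)
rectangle(frame, boundingRect(contours[secondLargestIndex]), CV_RGB(0, 0, 255), 2);
}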
Take a look at the code below; it is based on sorting the contours by bounding box or by contour area.
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
using namespace std;
struct contour_sorter_dsc // sorts contours by their bounding boxes descending
{
bool operator ()( const vector<Point>& a, const vector<Point> & b )
{
Rect ra( boundingRect(a) );
Rect rb( boundingRect(b) );
return ( ( rb.width * rb.height ) < ( ra.width * ra.height ) );
}
};
struct contour_sorter_dsc_area // sorts contours by their areas descending
{
bool operator ()( const vector<Point>& a, const vector<Point> & b )
{
double area_a = contourArea( a );
double area_b = contourArea( b );
return ( area_b < area_a );
}
};
int main( int argc, char** argv )
{
Mat src = imread( argv[1] );
if( src.empty() )
{
return -1;
}
Mat canvas1 = src.clone();
Mat canvas2 = src.clone();
Mat gray;
cvtColor( src, gray, COLOR_BGR2GRAY );
gray = gray > 127; // binarize image
vector<vector<Point> > contours;
findContours( gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE );
sort(contours.begin(), contours.end(), contour_sorter_dsc());
for( size_t i = 0; i< 2; i++ )
{ // checks if the first contour is image boundary
if( contours[0][0] == Point( 1, 1 ) && contours[0][1] == Point( 1, gray.rows - 2 )
&& contours[0][2] == Point( gray.cols - 2, gray.rows - 2 ) && contours[0][3] == Point( gray.cols - 2, 1 ) )
{
contours[0] = contours[1];
contours[1] = contours[2];
}
if( i < contours.size())
{
drawContours( canvas1, contours, i, Scalar( 255,255,0 ) );
Rect minRect = boundingRect( Mat(contours[i]) );
rectangle( canvas1, minRect, Scalar( 0, 0, 255 ) );
}
}
imshow( "result of sorting contours by bounding boxes ", canvas1 );
sort(contours.begin(), contours.end(), contour_sorter_dsc_area());
for( size_t i = 0; i< 2; i++ )
{ // checks if the first contour is image boundary
if( contours[0][0] == Point( 1, 1 ) && contours[0][1] == Point( 1, gray.rows - 2 )
&& contours[0][2] == Point( gray.cols - 2, gray.rows - 2 ) && contours[0][3] == Point( gray.cols - 2, 1 ) )
{
contours[0] = contours[1];
contours[1] = contours[2];
}
if( i < contours.size())
{
drawContours( canvas2, contours, i, Scalar( 255,255,0 ) );
Rect minRect = boundingRect( Mat(contours[i]) );
rectangle( canvas2, minRect, Scalar( 0, 0, 255 ) );
}
}
imshow( "result of sorting contours by areas ", canvas2 );
waitKey();
return 0;
}
Input image
Result images according to sort type

Extract Contours path attributes

I am currently working on extracting contour path attributes from a particular image file. I am able to extract contours using the OpenCV function findContours(), and the output looks like this:
[98, 81][97, 80][95, 80][94, 79][93, 79][92, 78][91, 78][88, 75][87, 75][85, 73][84, 73][83, 72][82, 72]
But my desired output looks like this:
M 398.7,106.8 c -5.5,-2.7 -20.7,-4.7 -36.1,-4.6 -15.4,0.1
How can I get it?
This is my code:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
Mat src_grays;
int threshs = 100;
int max_threshs = 255;
RNG rng(12345);
void thresh_callbacks(int, void* );
int main( )
{
Mat src = imread( "F:/academic/pro4/t/download.jpg" );
imshow("real Image", src);
Mat gray,edge,edges, draw,draws;
Mat samples(src.rows * src.cols, 3, CV_32F);
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
for( int z = 0; z < 3; z++)
samples.at<float>(y + x*src.rows, z) = src.at<Vec3b>(y,x)[z];
int clusterCount = 5;
Mat labels;
int attempts = 10;
Mat centers;
kmeans(samples, clusterCount, labels, TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10000, 0.0001), attempts, KMEANS_PP_CENTERS, centers );
Mat new_image( src.size(), src.type() );
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
{
int cluster_idx = labels.at<int>(y + x*src.rows,0);
new_image.at<Vec3b>(y,x)[0] = centers.at<float>(cluster_idx, 0);
new_image.at<Vec3b>(y,x)[1] = centers.at<float>(cluster_idx, 1);
new_image.at<Vec3b>(y,x)[2] = centers.at<float>(cluster_idx, 2);
}
imshow( "clustered image", new_image );
char filename[80];
sprintf(filename,"F:/academic/pro4/t/seg.png");
imwrite(filename, new_image);
cvtColor(src, gray, CV_BGR2GRAY);
Canny( new_image, edges, 50, 150, 3);
edges.convertTo(draws, CV_8U);
namedWindow("imageAfterSegmnetation", CV_WINDOW_AUTOSIZE);
imshow("imagesAfterCluster", draws);
cvtColor( new_image, src_grays, CV_BGR2GRAY );
blur( src_grays, src_grays, Size(3,3) );
char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &threshs, max_threshs, thresh_callbacks );
thresh_callbacks( 0, 0 );
waitKey( 0 );
}
void thresh_callbacks(int, void* )
{
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using canny
Canny( src_grays, canny_output, threshs, threshs*2, 3 );
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
for(int i= 0; i < contours.size(); i++)
{
for(int j= 0; j < contours[i].size();j++) // run until j < contours[i].size();
{
int a= contours[i][j].x ;
int b =contours[i][j].y ;
// printf("Point(x,y)=" + a, b);
std::cout << contours[i][j] << std::endl;
}
printf ("%i", i + "\n");
}
/// Draw contours
int a=contours.size();
for( int i = 0; i<contours.size(); i++ )
{
Mat drawing_i = Mat::zeros( canny_output.size(), CV_8UC3 );
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing_i, contours, i, color, 2, 8, hierarchy, 0, Point() );
namedWindow( "Contours_i", CV_WINDOW_AUTOSIZE );
imshow( "Contours_i", drawing_i );
}
}
Note:
I need the contour path, that is, how the contour points are connected, just like an SVG path: M = moveto, L = lineto, H = horizontal lineto, V = vertical lineto, C = curveto, S = smooth curveto, Q = quadratic Bézier curveto, T = smooth quadratic Bézier curveto, A = elliptical arc, Z = closepath.
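findContours() only returns polygonal point chains, so the simplest mapping to an SVG path string is one M command, a run of L commands, and a Z per contour; real curve commands (C, Q, A) would need a separate curve-fitting step. A sketch (contourToSvgPath is a hypothetical helper, not an OpenCV function):
#include <sstream>
std::string contourToSvgPath(const std::vector<cv::Point>& contour)
{
std::vector<cv::Point> poly;
cv::approxPolyDP(contour, poly, 1.0, true); // simplify with 1 px tolerance, closed curve
if (poly.empty()) return "";
std::ostringstream path;
path << "M " << poly[0].x << "," << poly[0].y;
for (size_t i = 1; i < poly.size(); i++)
path << " L " << poly[i].x << "," << poly[i].y;
path << " Z";
return path.str();
}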

Why are the lines of optical flow not drawn in my code

I'm trying to use optical flow, but the flow lines are not drawn; only points appear. What's the problem?
Here is the source code of the project. I looked through it with the debugger: GDB shows that always p0.x == p1.x and p0.y == p1.y. But why? Sorry for my bad English.
#include "opencv/cv.h"
#include "opencv2/core/core.hpp"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <stdio.h>
std::vector<cv::Point2f> corners;
std::vector<cv::Point2f> corners_b;
double qualityLevel = 0.01;
double minDistance = 10;
int blockSize = 3;
bool useHarrisDetector = false;
double k = 0.04;
int maxCorners = 200;
int maxTrackbar = 100;
void MotionDetection(cv::Mat frame1, cv::Mat frame2)
{
cv::Mat prev, next;
cvtColor(frame1, prev, CV_BGR2GRAY);
cvtColor(frame2, next, CV_BGR2GRAY);
goodFeaturesToTrack( prev,
corners,
maxCorners,
qualityLevel,
minDistance,
cv::Mat(),
blockSize,
useHarrisDetector,
k );
cornerSubPix(prev,
corners,
cvSize( 10, 10 ) ,
cvSize( -1, -1 ),
cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );
std::vector<uchar> features_found;
features_found.reserve(maxCorners);
std::vector<float> feature_errors;
feature_errors.reserve(maxCorners);
calcOpticalFlowPyrLK(prev, next, corners, corners_b, features_found,
feature_errors, cvSize( 10, 10 ), 5, cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0);
IplImage g = next;
for( size_t i = 0; i < corners.size(); ++i ) // corners.size() may be smaller than maxCorners
{
if( !features_found[i] )
continue; // skip points the tracker lost
CvPoint p0 = cvPoint( cvRound( corners[i].x ), cvRound( corners[i].y ) );
CvPoint p1 = cvPoint( cvRound( corners_b[i].x ), cvRound( corners_b[i].y ) );
cvLine( &g, p0, p1, CV_RGB(255,0,0), 3, CV_AA );
}
cv::Mat rs(&g);
imshow( "result window", rs );
int key = cv::waitKey(5);
}
int main(int argc, char* argv[])
{
cv::VideoCapture cap(0);
if(!cap.isOpened())
{
std::cout<<"[!] Error: cant open camera!"<<std::endl;
return -1;
}
cv::Mat edges;
cv::namedWindow("result window", 1);
cv::Mat frame, frame2;
cap >> frame;
while(1)
{
cap >> frame2;
MotionDetection(frame, frame2);
}
return 0;
}
In your main function, frame ends up being a clone of frame2: VideoCapture reuses its internal buffer, so both Mats can point to the same pixel data, which is why p0 always equals p1. I think you need
cap >> frame2;
frame2.copyTo( frame );
instead of
cap >> frame;
That's all.
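The fixed capture loop as a sketch: compare consecutive frames, and deep-copy frame2 into frame at the end of each iteration so the two Mats never share a buffer.
cv::Mat frame, frame2;
cap >> frame2;
frame2.copyTo(frame); // start with a deep copy of the first frame
while (true)
{
cap >> frame2;
MotionDetection(frame, frame2);
frame2.copyTo(frame); // deep copy; plain assignment would share data
}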

OpenCV: Problems with Stereo Rectification (using sample code from book)

So I am having problems with OpenCV. I used the sample code from the book "Learning OpenCV". I got the code to compute all of the intrinsics and extrinsics of the two cameras, but when I go to remap the images, all I get is a blank image. I use 6 images from both cameras, with a 9x6 chessboard. The input file alternates left and right images (the lr = i%2 made me think that).
Below is my code. I only added the cvRemap() call towards the end.
#undef _GLIBCXX_DEBUG
#include <opencv\cv.h>
#include <opencv\cxmisc.h>
#include <opencv\highgui.h>
#include <vector>
#include <string>
#include <algorithm>
#include <stdio.h>
#include <ctype.h>
#include <Windows.h>
using namespace std;
//
// Given a list of chessboard images, the number of corners (nx, ny)
// on the chessboards, and a flag: useCalibrated for calibrated (0) or
// uncalibrated (1: use cvStereoCalibrate(), 2: compute fundamental
// matrix separately) stereo. Calibrate the cameras and display the
// rectified results along with the computed disparity images.
//
static void
StereoCalib(const char* imageList, int useUncalibrated)
{
IplImage* L_img1 = cvLoadImage("bad1.bmp");
IplImage* R_img1 = cvLoadImage("good1.bmp");
IplImage* fixed_L = cvCloneImage(L_img1);
IplImage* fixed_R = cvCloneImage(R_img1);
CvRect roi1, roi2;
int nx = 0, ny = 0;
int displayCorners = 1;
int showUndistorted = 1;
bool isVerticalStereo = false; //OpenCV can handle left-right
//or up-down camera arrangements
const int maxScale = 1;
const float squareSize = 1.f; //Set this to your actual square size
FILE* f = fopen(imageList, "rt");
int i, j, lr, nframes = 0, n, N = 0;
vector<string> imageNames[2];
vector<CvPoint3D32f> objectPoints;
vector<CvPoint2D32f> points[2];
vector<CvPoint2D32f> temp_points[2];
vector<int> npoints;
//vector<uchar> active[2];
int is_found[2] = {0, 0};
vector<CvPoint2D32f> temp;
CvSize imageSize = {0,0};
// ARRAY AND VECTOR STORAGE:
double M1[3][3], M2[3][3], D1[5], D2[5];
double R[3][3], T[3], E[3][3], F[3][3];
double Q[4][4];
CvMat _M1 = cvMat(3, 3, CV_64F, M1 );
CvMat _M2 = cvMat(3, 3, CV_64F, M2 );
CvMat _D1 = cvMat(1, 5, CV_64F, D1 );
CvMat _D2 = cvMat(1, 5, CV_64F, D2 );
CvMat _R = cvMat(3, 3, CV_64F, R );
CvMat _T = cvMat(3, 1, CV_64F, T );
CvMat _E = cvMat(3, 3, CV_64F, E );
CvMat _F = cvMat(3, 3, CV_64F, F );
CvMat _Q = cvMat(4, 4, CV_64FC1, Q);
char buf[1024];
if( displayCorners )
cvNamedWindow( "corners", 1 );
// READ IN THE LIST OF CHESSBOARDS:
if( !f )
{
fprintf(stderr, "can not open file %s\n", imageList );
Sleep(2000);
return;
}
if( !fgets(buf, sizeof(buf)-3, f) || sscanf(buf, "%d%d", &nx, &ny) != 2 )
return;
n = nx*ny;
temp.resize(n);
temp_points[0].resize(n);
temp_points[1].resize(n);
for(i=0;;i++)
{
int count = 0, result=0;
lr = i % 2;
vector<CvPoint2D32f>& pts = temp_points[lr];//points[lr];
if( !fgets( buf, sizeof(buf)-3, f ))
break;
size_t len = strlen(buf);
while( len > 0 && isspace(buf[len-1]))
buf[--len] = '\0';
if( buf[0] == '#')
continue;
IplImage* img = cvLoadImage( buf, 0 );
if( !img )
break;
imageSize = cvGetSize(img);
imageNames[lr].push_back(buf);
//FIND CHESSBOARDS AND CORNERS THEREIN:
for( int s = 1; s <= maxScale; s++ )
{
IplImage* timg = img;
if( s > 1 )
{
timg = cvCreateImage(
cvSize(img->width*s,img->height*s),
img->depth, img->nChannels
);
cvResize( img, timg, CV_INTER_CUBIC );
}
result = cvFindChessboardCorners(
timg, cvSize(nx, ny),
&temp[0], &count,
CV_CALIB_CB_ADAPTIVE_THRESH |
CV_CALIB_CB_NORMALIZE_IMAGE
);
if( timg != img )
cvReleaseImage( &timg );
if( result || s == maxScale )
for( j = 0; j < count; j++ )
{
temp[j].x /= s;
temp[j].y /= s;
}
if( result )
break;
}
if( displayCorners )
{
printf("%s\n", buf);
IplImage* cimg = cvCreateImage( imageSize, 8, 3 );
cvCvtColor( img, cimg, CV_GRAY2BGR );
cvDrawChessboardCorners(
cimg, cvSize(nx, ny), &temp[0],
count, result
);
IplImage* cimg1 = cvCreateImage(cvSize(640, 480), IPL_DEPTH_8U, 3);
cvResize(cimg, cimg1);
cvShowImage( "corners", cimg1 );
cvReleaseImage( &cimg );
cvReleaseImage( &cimg1 );
int c = cvWaitKey(1000);
if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
exit(-1);
}
else
putchar('.');
//N = pts.size();
//pts.resize(N + n, cvPoint2D32f(0,0));
//active[lr].push_back((uchar)result);
is_found[lr] = result > 0 ? 1 : 0;
//assert( result != 0 );
if( result )
{
//Calibration will suffer without subpixel interpolation
cvFindCornerSubPix(
img, &temp[0], count,
cvSize(11, 11), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 0.01)
);
copy( temp.begin(), temp.end(), pts.begin() );
}
cvReleaseImage( &img );
if(lr)
{
if(is_found[0] == 1 && is_found[1] == 1)
{
assert(temp_points[0].size() == temp_points[1].size());
int current_size = points[0].size();
points[0].resize(current_size + temp_points[0].size(), cvPoint2D32f(0.0, 0.0));
points[1].resize(current_size + temp_points[1].size(), cvPoint2D32f(0.0, 0.0));
copy(temp_points[0].begin(), temp_points[0].end(), points[0].begin() + current_size);
copy(temp_points[1].begin(), temp_points[1].end(), points[1].begin() + current_size);
nframes++;
printf("Pair successfully detected...\n");
}
is_found[0] = 0;
is_found[1] = 0;
}
}
fclose(f);
printf("\n");
// HARVEST CHESSBOARD 3D OBJECT POINT LIST:
objectPoints.resize(nframes*n);
for( i = 0; i < ny; i++ )
for( j = 0; j < nx; j++ )
objectPoints[i*nx + j] = cvPoint3D32f(i*squareSize, j*squareSize, 0);
for( i = 1; i < nframes; i++ )
copy(
objectPoints.begin(), objectPoints.begin() + n,
objectPoints.begin() + i*n
);
npoints.resize(nframes,n);
N = nframes*n;
CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
cvSetIdentity(&_M1);
cvSetIdentity(&_M2);
cvZero(&_D1);
cvZero(&_D2);
// CALIBRATE THE STEREO CAMERAS
printf("Running stereo calibration ...");
fflush(stdout);
cvStereoCalibrate(
&_objectPoints, &_imagePoints1,
&_imagePoints2, &_npoints,
&_M1, &_D1, &_M2, &_D2,
imageSize, &_R, &_T, &_E, &_F,
cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
CV_CALIB_FIX_ASPECT_RATIO +
CV_CALIB_ZERO_TANGENT_DIST +
CV_CALIB_SAME_FOCAL_LENGTH +
CV_CALIB_FIX_K3
);
printf(" done\n");
// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
vector<CvPoint3D32f> lines[2];
points[0].resize(N);
points[1].resize(N);
_imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
_imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
lines[0].resize(N);
lines[1].resize(N);
CvMat _L1 = cvMat(1, N, CV_32FC3, &lines[0][0]);
CvMat _L2 = cvMat(1, N, CV_32FC3, &lines[1][0]);
//Always work in undistorted space
cvUndistortPoints(
&_imagePoints1, &_imagePoints1,
&_M1, &_D1, 0, &_M1
);
cvUndistortPoints(
&_imagePoints2, &_imagePoints2,
&_M2, &_D2, 0, &_M2
);
cvComputeCorrespondEpilines( &_imagePoints1, 1, &_F, &_L1 );
cvComputeCorrespondEpilines( &_imagePoints2, 2, &_F, &_L2 );
double avgErr = 0;
for( i = 0; i < N; i++ )
{
double err =
fabs(
points[0][i].x*lines[1][i].x +
points[0][i].y*lines[1][i].y + lines[1][i].z
) +
fabs(
points[1][i].x*lines[0][i].x +
points[1][i].y*lines[0][i].y + lines[0][i].z
);
avgErr += err;
}
printf( "avg err = %g\n", avgErr/(nframes*n) );
// save intrinsic parameters
CvFileStorage* fstorage = cvOpenFileStorage("intrinsics.yml", NULL, CV_STORAGE_WRITE);
cvWrite(fstorage, "M1", &_M1);
cvWrite(fstorage, "D1", &_D1);
cvWrite(fstorage, "M2", &_M2);
cvWrite(fstorage, "D2", &_D2);
cvReleaseFileStorage(&fstorage);
//COMPUTE AND DISPLAY RECTIFICATION
if( showUndistorted )
{
CvMat* mx1 = cvCreateMat( imageSize.height, imageSize.width, CV_32F );
CvMat* my1 = cvCreateMat( imageSize.height, imageSize.width, CV_32F );
CvMat* mx2 = cvCreateMat( imageSize.height, imageSize.width, CV_32F );
CvMat* my2 = cvCreateMat( imageSize.height, imageSize.width, CV_32F );
CvMat* img1r = cvCreateMat( imageSize.height, imageSize.width, CV_8U );
CvMat* img2r = cvCreateMat( imageSize.height, imageSize.width, CV_8U );
CvMat* disp = cvCreateMat( imageSize.height, imageSize.width, CV_16S );
double R1[3][3], R2[3][3], P1[3][4], P2[3][4];
CvMat _R1 = cvMat(3, 3, CV_64F, R1);
CvMat _R2 = cvMat(3, 3, CV_64F, R2);
// IF BY CALIBRATED (BOUGUET'S METHOD)
if( useUncalibrated == 0 )
{
CvMat _P1 = cvMat(3, 4, CV_64F, P1);
CvMat _P2 = cvMat(3, 4, CV_64F, P2);
cvStereoRectify(
&_M1, &_M2, &_D1, &_D2, imageSize,
&_R, &_T,
&_R1, &_R2, &_P1, &_P2, &_Q,
CV_CALIB_ZERO_DISPARITY,
1, imageSize, &roi1, &roi2
);
CvFileStorage* file = cvOpenFileStorage("extrinsics.yml", NULL, CV_STORAGE_WRITE);
cvWrite(file, "R", &_R);
cvWrite(file, "T", &_T);
cvWrite(file, "R1", &_R1);
cvWrite(file, "R2", &_R2);
cvWrite(file, "P1", &_P1);
cvWrite(file, "P2", &_P2);
cvWrite(file, "Q", &_Q);
cvReleaseFileStorage(&file);
isVerticalStereo = fabs(P2[1][3]) > fabs(P2[0][3]);
if(!isVerticalStereo)
roi2.x += imageSize.width;
else
roi2.y += imageSize.height;
//Precompute maps for cvRemap()
cvNamedWindow( "Original" );
cvNamedWindow( "Fixed" );
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_P1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_P2,mx2,my2);
cvRemap(R_img1, fixed_R, mx2, my2);
cvShowImage("Original", R_img1);
cvShowImage("Fixed", fixed_R);
while(1){
int c = cvWaitKey(15);
if(c == 'p') {
c = 0;
while(c != 'p' && c != 27) {
c = cvWaitKey(250);
}
}
if(c == 27)
break;
}// end while
}
//OR ELSE HARTLEY'S METHOD
else if( useUncalibrated == 1 || useUncalibrated == 2 )
// use intrinsic parameters of each camera, but
// compute the rectification transformation directly
// from the fundamental matrix
{
double H1[3][3], H2[3][3], iM[3][3];
CvMat _H1 = cvMat(3, 3, CV_64F, H1);
CvMat _H2 = cvMat(3, 3, CV_64F, H2);
CvMat _iM = cvMat(3, 3, CV_64F, iM);
//Just to show you could have independently used F
if( useUncalibrated == 2 )
cvFindFundamentalMat(&_imagePoints1, &_imagePoints2, &_F);
cvStereoRectifyUncalibrated(
&_imagePoints1, &_imagePoints2, &_F,
imageSize,
&_H1, &_H2, 3
);
cvInvert(&_M1, &_iM);
cvMatMul(&_H1, &_M1, &_R1);
cvMatMul(&_iM, &_R1, &_R1);
cvInvert(&_M2, &_iM);
cvMatMul(&_H2, &_M2, &_R2);
cvMatMul(&_iM, &_R2, &_R2);
//Precompute map for cvRemap()
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_M1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D1,&_R2,&_M2,mx2,my2);
}
else
assert(0);
cvReleaseMat( &mx1 );
cvReleaseMat( &my1 );
cvReleaseMat( &mx2 );
cvReleaseMat( &my2 );
cvReleaseMat( &img1r );
cvReleaseMat( &img2r );
cvReleaseMat( &disp );
}
}
int main(int argc, char** argv)
{
StereoCalib(argc > 1 ? argv[1] : "stereo_calib.txt", 0);
return 0;
}
Below are the extrinsic matrices obtained from the program.
R: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 9.9997887582765532e-001, 4.2746998112201760e-003,
-4.8964109286960510e-003, -4.1317666335754111e-003,
9.9957553950354616e-001, 2.8838677686057253e-002,
5.0176092857428471e-003, -2.8817837665560161e-002,
9.9957208635962669e-001 ]
T: !!opencv-matrix
rows: 3
cols: 1
dt: d
data: [ -8.3141294302865210e-001, -3.2181226087457654e-001,
-4.5924165239318537e-001 ]
R1: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 8.3000228682826938e-001, 3.1110786082949388e-001,
4.6293423160308594e-001, -3.1818678207964091e-001,
9.4578880995670123e-001, -6.5120647036789381e-002,
-4.5809756119155060e-001, -9.3249267508025396e-002,
8.8399728423766677e-001 ]
R2: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 8.2904793019998391e-001, 3.2089684317297251e-001,
4.5793530708249980e-001, -3.1381823995200708e-001,
9.4482404014772625e-001, -9.3944906367255512e-002,
-4.6281491084940990e-001, -6.5823621903907531e-002,
8.8400769741835628e-001 ]
P1: !!opencv-matrix
rows: 3
cols: 4
dt: d
data: [ -4.4953673002726404e+001, 0., -1.3375267505645752e+001, 0.,
0., -4.4953673002726404e+001, 2.4430860614776611e+002, 0., 0., 0.,
1., 0. ]
P2: !!opencv-matrix
rows: 3
cols: 4
dt: d
data: [ -4.4953673002726404e+001, 0., -1.3375267505645752e+001,
4.5081911684079330e+001, 0., -4.4953673002726404e+001,
2.4430860614776611e+002, 0., 0., 0., 1., 0. ]
And the intrinsic parameters found are as follows.
M1: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 4.3107336978610317e+002, 0., 3.4686501809547735e+002, 0.,
4.3107336978610317e+002, 1.9221944996848421e+002, 0., 0., 1. ]
D1: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [ -1.6825480517169825e-001, 1.0756945282000266e-001, 0., 0., 0. ]
M2: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 4.3107336978610317e+002, 0., 3.5310162800332756e+002, 0.,
4.3107336978610317e+002, 1.8963116073129768e+002, 0., 0., 1. ]
D2: !!opencv-matrix
rows: 1
cols: 5
dt: d
data: [ -1.9546177300030809e-001, 1.7624631189915094e-001, 0., 0., 0. ]
Any help would be much appreciated. I am not very experienced with OpenCV, and I have a hard time wrapping my head around what most of the functions are even doing, so I can't tell where it is going wrong.
I think I found the answer. After much experimenting, it turned out that the CV_CALIB_SAME_FOCAL_LENGTH flag for cvStereoCalibrate caused my output images to appear warped and/or not work. Also, I took many more chessboard pictures with a larger chessboard, and this helped my results quite a bit.
Hope this helps anyone in the future.
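For anyone porting this away from the C API, a minimal sketch of the rectify-and-remap step with the C++ interface; it assumes M1, D1, M2, D2, R, T have already been loaded as cv::Mat (for example from the intrinsics.yml / extrinsics.yml files saved above), imageSize is the cv::Size of the input images, and leftImage / rightImage are hypothetical input frames:
cv::Mat R1, R2, P1, P2, Q;
cv::stereoRectify(M1, D1, M2, D2, imageSize, R, T,
R1, R2, P1, P2, Q, cv::CALIB_ZERO_DISPARITY, 1, imageSize);
cv::Mat mapLx, mapLy, mapRx, mapRy;
cv::initUndistortRectifyMap(M1, D1, R1, P1, imageSize, CV_32FC1, mapLx, mapLy);
cv::initUndistortRectifyMap(M2, D2, R2, P2, imageSize, CV_32FC1, mapRx, mapRy);
cv::Mat rectL, rectR;
cv::remap(leftImage, rectL, mapLx, mapLy, cv::INTER_LINEAR); // a blank output here usually means bad R/T or swapped left/right inputs
cv::remap(rightImage, rectR, mapRx, mapRy, cv::INTER_LINEAR);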
