OpenCV: Fundamental matrix accuracy

I am trying to calculate the fundamental matrix of 2 images (different photos of a static scene taken by the same camera).
I calculated it using findFundamentalMat and used the result to calculate other matrices (Essential, Rotation, ...). The results were obviously wrong, so I tried to verify the accuracy of the calculated fundamental matrix.
Using the epipolar constraint equation, I computed the fundamental matrix error. The error is very high (on the order of a few hundred). I do not know what is wrong with my code and I would really appreciate any help. In particular: is there anything I am missing in the fundamental matrix calculation, and is the way I calculate the error correct?
I also ran the code with very different numbers of matches. There are usually lots of outliers; e.g. in a case with more than 80 matches, there were only 10 inliers.
Mat img_1 = imread( "imgl.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( "imgr.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 1000;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors with a brute force matcher
BFMatcher matcher(NORM_L1, true);
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
vector<Point2f>imgpts1,imgpts2;
for( unsigned int i = 0; i<matches.size(); i++ )
{
// queryIdx is the "left" image
imgpts1.push_back(keypoints_1[matches[i].queryIdx].pt);
// trainIdx is the "right" image
imgpts2.push_back(keypoints_2[matches[i].trainIdx].pt);
}
//-- Step 4: Calculate Fundamental matrix
Mat f_mask;
Mat F = findFundamentalMat (imgpts1, imgpts2, FM_RANSAC, 0.5, 0.99, f_mask);
//-- Step 5: Calculate Fundamental matrix error
//Camera intrinsics
double data[] = {1189.46 , 0.0, 805.49,
0.0, 1191.78, 597.44,
0.0, 0.0, 1.0};
Mat K(3, 3, CV_64F, data);
//Camera distortion parameters
double dist[] = { -0.03432, 0.05332, -0.00347, 0.00106, 0.00000};
Mat D(1, 5, CV_64F, dist);
//working with undistorted points
vector<Point2f> undistorted_1,undistorted_2;
vector<Point3f> line_1, line_2;
undistortPoints(imgpts1,undistorted_1,K,D);
undistortPoints(imgpts2,undistorted_2,K,D);
computeCorrespondEpilines(undistorted_1,1,F,line_1);
computeCorrespondEpilines(undistorted_2,2,F,line_2);
double f_err=0.0;
double fx,fy,cx,cy;
fx=K.at<double>(0,0);fy=K.at<double>(1,1);cx=K.at<double>(0,2);cy=K.at<double>(1,2);
Point2f pt1, pt2;
int inliers=0;
//calculation of fundamental matrix error for inliers
for (int i=0; i<f_mask.size().height; i++)
if (f_mask.at<uchar>(i)==1)
{
inliers++;
//calculate non-normalized values
pt1.x = undistorted_1[i].x * fx + cx;
pt1.y = undistorted_1[i].y * fy + cy;
pt2.x = undistorted_2[i].x * fx + cx;
pt2.y = undistorted_2[i].y * fy + cy;
f_err += fabs(pt1.x*line_2[i].x +
pt1.y*line_2[i].y + line_2[i].z)
+ fabs(pt2.x*line_1[i].x +
pt2.y*line_1[i].y + line_1[i].z);
}
double AvrErr = f_err/inliers;

I believe the problem is that you calculated the fundamental matrix from the brute-force matches only; you should filter those correspondences further, for example with a ratio test and a symmetry test.
I recommend you read page 233 of the book "OpenCV 2 Computer Vision Application Programming Cookbook", Chapter 9.
It is explained very well there!
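For illustration, here is a minimal sketch of the ratio test (it assumes the descriptors_1 and descriptors_2 mats computed in the question; the 0.7 threshold is a typical choice, not a value from the original answer):
BFMatcher matcher(NORM_L2); // no cross-check here, knnMatch needs the 2 nearest neighbours
std::vector< std::vector<DMatch> > knn_matches;
matcher.knnMatch(descriptors_1, descriptors_2, knn_matches, 2);
std::vector<DMatch> good_matches;
const float ratio_thresh = 0.7f; // Lowe's ratio; tune for your data
for (size_t i = 0; i < knn_matches.size(); i++)
{
    if (knn_matches[i].size() == 2 &&
        knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance)
        good_matches.push_back(knn_matches[i][0]);
}
// build imgpts1/imgpts2 from good_matches (instead of matches) before findFundamentalMat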

Given that we are supplied with the intrinsic matrix K and the distortion coefficients D, we should undistort the image points before feeding them to findFundamentalMat and work with undistorted image coordinates from then on (i.e. also when computing the error). I found that this simple change reduced the maximum error of any image point pair from 176.0 to 0.2, and the number of inliers increased from 18 to 77.
I also toyed with normalizing the undistorted image points before feeding them to findFundamentalMat, which reduced the maximum error of any image point pair to almost zero, though it does not increase the number of inliers any further.
const float kEpsilon = 1.0e-6f;
float sampsonError(const Mat &dblFMat, const Point2f &pt1, const Point2f &pt2)
{
Mat m_pt1(3, 1 , CV_64FC1 );//m_pt1(pt1);
Mat m_pt2(3, 1 , CV_64FC1 );
m_pt1.at<double>(0,0) = pt1.x; m_pt1.at<double>(1,0) = pt1.y; m_pt1.at<double>(2,0) = 1.0f;
m_pt2.at<double>(0,0) = pt2.x; m_pt2.at<double>(1,0) = pt2.y; m_pt2.at<double>(2,0) = 1.0f;
assert(dblFMat.rows==3 && dblFMat.cols==3);
assert(m_pt1.rows==3 && m_pt1.cols==1);
assert(m_pt2.rows==3 && m_pt2.cols==1);
Mat dblFMatT(dblFMat.t());
Mat dblFMatp1=(dblFMat * m_pt1);
Mat dblFMatTp2=(dblFMatT * m_pt2);
assert(dblFMatp1.rows==3 && dblFMatp1.cols==1);
assert(dblFMatTp2.rows==3 && dblFMatTp2.cols==1);
Mat numerMat=m_pt2.t() * dblFMatp1;
double numer=numerMat.at<double>(0,0);
if (fabs(numer) < kEpsilon)
{
return 0;
} else {
// Sampson denominator: sum of squares of the first two components of F*x1 and F^T*x2
double denom = dblFMatp1.at<double>(0,0)*dblFMatp1.at<double>(0,0)
             + dblFMatp1.at<double>(1,0)*dblFMatp1.at<double>(1,0)
             + dblFMatTp2.at<double>(0,0)*dblFMatTp2.at<double>(0,0)
             + dblFMatTp2.at<double>(1,0)*dblFMatTp2.at<double>(1,0);
return (numer*numer)/denom;
}
}
#define UNDISTORT_IMG_PTS 1
#define NORMALIZE_IMG_PTS 1
int filter_imgpts_pairs_with_epipolar_constraint(
const vector<Point2f> &raw_imgpts_1,
const vector<Point2f> &raw_imgpts_2,
int imgW,
int imgH
)
{
#if UNDISTORT_IMG_PTS
//Camera intrinsics
double data[] = {1189.46 , 0.0, 805.49,
0.0, 1191.78, 597.44,
0.0, 0.0, 1.0};
Mat K(3, 3, CV_64F, data);
//Camera distortion parameters
double dist[] = { -0.03432, 0.05332, -0.00347, 0.00106, 0.00000};
Mat D(1, 5, CV_64F, dist);
//working with undistorted points
vector<Point2f> unnormalized_imgpts_1,unnormalized_imgpts_2;
undistortPoints(raw_imgpts_1,unnormalized_imgpts_1,K,D);
undistortPoints(raw_imgpts_2,unnormalized_imgpts_2,K,D);
#else
vector<Point2f> unnormalized_imgpts_1(raw_imgpts_1);
vector<Point2f> unnormalized_imgpts_2(raw_imgpts_2);
#endif
#if NORMALIZE_IMG_PTS
float c_col=imgW/2.0f;
float c_row=imgH/2.0f;
float multiply_factor= 2.0f/(imgW+imgH);
vector<Point2f> final_imgpts_1(unnormalized_imgpts_1);
vector<Point2f> final_imgpts_2(unnormalized_imgpts_2);
for( auto iit=final_imgpts_1.begin(); iit != final_imgpts_1.end(); ++ iit)
{
Point2f &imgpt(*iit);
imgpt.x=(imgpt.x - c_col)*multiply_factor;
imgpt.y=(imgpt.y - c_row)*multiply_factor;
}
for( auto iit=final_imgpts_2.begin(); iit != final_imgpts_2.end(); ++ iit)
{
Point2f &imgpt(*iit);
imgpt.x=(imgpt.x - c_col)*multiply_factor;
imgpt.y=(imgpt.y - c_row)*multiply_factor;
}
#else
vector<Point2f> final_imgpts_1(unnormalized_imgpts_1);
vector<Point2f> final_imgpts_2(unnormalized_imgpts_2);
#endif
int algorithm=FM_RANSAC;
//int algorithm=FM_LMEDS;
vector<uchar>status;
Mat F = findFundamentalMat (final_imgpts_1, final_imgpts_2, algorithm, 0.5, 0.99, status);
int n_inliers = std::accumulate(status.begin(), status.end(), 0);
assert(final_imgpts_1.size() == final_imgpts_2.size());
vector<float> serr;
for( unsigned int i = 0; i< final_imgpts_1.size(); i++ )
{
const Point2f &p_1(final_imgpts_1[i]);
const Point2f &p_2(final_imgpts_2[i]);
float err= sampsonError(F, p_1, p_2);
serr.push_back(err);
}
float max_serr=*max_element(serr.begin(), serr.end());
cout << "found " << raw_imgpts_1.size() << "matches " << endl;
cout << " and " << n_inliners << " inliners" << endl;
cout << " max sampson err" << max_serr << endl;
return 0;
}
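As a side note: if you prefer to keep the undistorted points in pixel coordinates rather than normalized coordinates, undistortPoints accepts an optional new camera matrix. A minimal sketch, reusing the K and D matrices defined above (not part of the original answer):
vector<Point2f> undist_px_1, undist_px_2;
// passing K as the last (P) argument re-projects the normalized points back to pixel units
undistortPoints(imgpts1, undist_px_1, K, D, noArray(), K);
undistortPoints(imgpts2, undist_px_2, K, D, noArray(), K);
// these can be fed directly to findFundamentalMat with a pixel-unit RANSAC threshold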

Related

Comparing openCv PnP with openGv PnP

I am trying to build a test project to compare the openCv solvePnP implementation with the openGv one.
the opencv is detailed here:
https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#solvepnp
and the openGv here:
https://laurentkneip.github.io/opengv/page_how_to_use.html
Using the OpenCV example code, I am finding a chessboard in an image and constructing the matching 3D points. I run the cv PnP, then set up the Gv solver. The cv PnP runs fine and prints the values:
//rotation
-0.003040771263293328, 0.9797142824436152, -0.2003763421317906;
0.0623096853748876, 0.2001735322445355, 0.977777101438374]
//translation
[-12.06549797067309;
-9.533070368412945;
37.6825295047483]
I test by reprojecting the 3d points, and it looks good.
The Gv PnP, however, prints nan for all values. I have tried to follow the example code, but I must be making a mistake somewhere. The code is:
int main(int argc, char **argv) {
cv::Mat matImg = cv::imread("chess.jpg");
cv::Size boardSize(8, 6);
//Construct the chessboard model
double squareSize = 2.80;
std::vector<cv::Point3f> objectPoints;
for (int i = 0; i < boardSize.height; i++) {
for (int j = 0; j < boardSize.width; j++) {
objectPoints.push_back(
cv::Point3f(double(j * squareSize), float(i * squareSize), 0));
}
}
cv::Mat rvec, tvec;
cv::Mat cameraMatrix, distCoeffs;
cv::FileStorage fs("CalibrationData.xml", cv::FileStorage::READ);
fs["cameraMatrix"] >> cameraMatrix;
fs["dist_coeffs"] >> distCoeffs;
//Found chessboard corners
std::vector<cv::Point2f> imagePoints;
bool found = cv::findChessboardCorners(matImg, boardSize, imagePoints, cv::CALIB_CB_FAST_CHECK);
if (found) {
cv::drawChessboardCorners(matImg, boardSize, cv::Mat(imagePoints), found);
//SolvePnP
cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
drawAxis(matImg, cameraMatrix, distCoeffs, rvec, tvec, squareSize);
}
//cv to matrix
cv::Mat R;
cv::Rodrigues(rvec, R);
std::cout << "results from cv:" << R << tvec << std::endl;
//START OPEN GV
//vars
bearingVectors_t bearingVectors;
points_t points;
rotation_t rotation;
//add points to the gv type
for (int i = 0; i < objectPoints.size(); ++i)
{
point_t pnt;
pnt.x() = objectPoints[i].x;
pnt.y() = objectPoints[i].y;
pnt.z() = objectPoints[i].z;
points.push_back(pnt);
}
/*
K is the common 3x3 camera matrix that you can compose with cx, cy, fx, and fy.
You put the image point into homogeneous form (append a 1),
multiply it with the inverse of K from the left, which gives you a normalized image point (a spatial direction vector).
You normalize that to norm 1.
*/
//to homogeneous
std::vector<cv::Point3f> imagePointsH;
convertPointsToHomogeneous(imagePoints, imagePointsH);
//multiply by K.Inv
for (int i = 0; i < imagePointsH.size(); i++)
{
cv::Point3f pt = imagePointsH[i];
cv::Mat ptMat(3, 1, cameraMatrix.type());
ptMat.at<double>(0, 0) = pt.x;
ptMat.at<double>(1, 0) = pt.y;
ptMat.at<double>(2, 0) = pt.z;
cv::Mat dstMat = cameraMatrix.inv() * ptMat;
//store as bearing vector
bearingVector_t bvec;
bvec.x() = dstMat.at<double>(0, 0);
bvec.y() = dstMat.at<double>(1, 0);
bvec.z() = dstMat.at<double>(2, 0);
bvec.normalize();
bearingVectors.push_back(bvec);
}
//create a central absolute adapter
absolute_pose::CentralAbsoluteAdapter adapter(
bearingVectors,
points,
rotation);
size_t iterations = 50;
std::cout << "running epnp (all correspondences)" << std::endl;
transformation_t epnp_transformation;
for (size_t i = 0; i < iterations; i++)
epnp_transformation = absolute_pose::epnp(adapter);
std::cout << "results from epnp algorithm:" << std::endl;
std::cout << epnp_transformation << std::endl << std::endl;
return 0;
}
Where am I going wrong in setting up the openGv PnP solver?
Years later, I had this same issue and solved it. To convert OpenCV points to openGV bearing vectors, you can do this:
bearingVectors_t bearingVectors;
std::vector<cv::Point2f> dd2; // fill with your detected 2D image points
const int N1 = static_cast<int>(dd2.size());
cv::Mat points1_mat = cv::Mat(dd2).reshape(1);
// first rectify points and construct homogeneous points
// construct homogeneous points
cv::Mat ones_col1 = cv::Mat::ones(N1, 1, CV_32F);
cv::hconcat(points1_mat, ones_col1, points1_mat);
// undistort points
cv::Mat points1_rect = points1_mat * cameraMatrix.inv();
// compute bearings
points2bearings3(points1_rect, &bearingVectors);
using this function for the final conversion:
// Convert a set of points to bearing
// points Matrix of size Nx3 with the set of points.
// bearings Vector of bearings.
void points2bearings3(const cv::Mat& points,
opengv::bearingVectors_t* bearings) {
double l;
cv::Vec3f p;
opengv::bearingVector_t bearing;
for (int i = 0; i < points.rows; ++i) {
p = cv::Vec3f(points.row(i));
l = std::sqrt(p[0] * p[0] + p[1] * p[1] + p[2] * p[2]);
for (int j = 0; j < 3; ++j) bearing[j] = p[j] / l;
bearings->push_back(bearing);
}
}

Features2d + Homography not giving appropriate results

I am trying to detect an object using the SurfFeatureDetector and the FLANN matcher. However, the code is not able to detect the image accurately. I have also posted the results in pictorial format.
Here's my code from the opencv tutorial website
void readme();
int main(int argc, char** argv){
if (argc != 3){
readme(); return -1;
}
Mat img_object = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
Mat img_scene = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
if (!img_object.data || !img_scene.data)
{
std::cout << " --(!) Error reading images " << std::endl; return -1;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 100;
SurfFeatureDetector detector(minHessian);
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect(img_object, keypoints_object);
detector.detect(img_scene, keypoints_scene);
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute(img_object, keypoints_object, descriptors_object);
extractor.compute(img_scene, keypoints_scene, descriptors_scene);
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match(descriptors_object, descriptors_scene, matches);
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_object.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for (int i = 0; i < descriptors_object.rows; i++)
{
if (matches[i].distance < 3 * min_dist)
{
good_matches.push_back(matches[i]);
}
}
Mat img_matches;
drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for (int i = 0; i < good_matches.size(); i++)
{
//-- Get the keypoints from the good matches
obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
}
Mat H = findHomography(obj, scene, CV_RANSAC);
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0, 0); obj_corners[1] = cvPoint(img_object.cols, 0);
obj_corners[2] = cvPoint(img_object.cols, img_object.rows); obj_corners[3] = cvPoint(0, img_object.rows);
std::vector<Point2f> scene_corners(4);
perspectiveTransform(obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line(img_matches, scene_corners[0] + Point2f(img_object.cols, 0), scene_corners[1] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[1] + Point2f(img_object.cols, 0), scene_corners[2] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[2] + Point2f(img_object.cols, 0), scene_corners[3] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[3] + Point2f(img_object.cols, 0), scene_corners[0] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
//-- Show detected matches
imshow("Good Matches & Object detection", img_matches);
waitKey(0);
return 0;}
/** @function readme */
void readme()
{
std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl;}
That is a very common failure. The problem is that the homography has 8 degrees of freedom (8 DOF). This means that you need at least 4 correct correspondences to calculate a good homography: the homography has 8 parameters (the last parameter h33 is just a scale factor).
The problem arises because, besides keeping the good correspondences (inliers), you need to filter out the bad correspondences (outliers). When there are more outliers than inliers (outliers/total > 50%), the RANSAC procedure cannot reliably separate them and you obtain weird results.
Solutions to this problem are not easy. You could:
Use a training image with a similar out-of-plane rotation (and a similar scale) of the object in your query image.
Or, use a transformation with fewer degrees of freedom (such as a similarity transform). That way you need fewer inliers, although older OpenCV versions lack a robust fitting method for this simpler transformation (see the sketch below).
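For reference, newer OpenCV releases (3.2 and later) do ship a robust estimator for a 4-DOF similarity transform. A minimal sketch, assuming the obj and scene point vectors built in the question:
std::vector<uchar> inlier_mask;
// estimate rotation + uniform scale + translation (4 DOF) with RANSAC
cv::Mat S = cv::estimateAffinePartial2D(obj, scene, inlier_mask, cv::RANSAC, 3.0);
int n_inliers = cv::countNonZero(inlier_mask);
// S is a 2x3 matrix; with fewer parameters to fit, RANSAC tolerates a higher outlier
// ratio than findHomography, at the cost of not modelling perspective.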

OpenCV 2.4.X slow on square detection with WebCam vs OpenCV 2.1.X

I have tried to port the square detection sample to OpenCV 2.4.1-2.4.4, but the results seem very slow. I was keen to move to newer versions of OpenCV because of the new functionality they offer, but the code runs much more slowly than before.
My OpenCV code for versions 2.4.X is:
// The "Square Detector" program.
// It loads several images sequentially and tries to find squares in
// each image
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <math.h>
#include <string.h>
using namespace cv;
using namespace std;
int thresh = 50, N = 11;
const char* wndname = "Square Detection Demo";
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
static double angle( Point pt1, Point pt2, Point pt0 )
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
static void findSquares( const Mat& image, vector<vector<Point> >& squares )
{
squares.clear();
Mat pyr, timg, gray0(image.size(), CV_8U), gray;
// down-scale and upscale the image to filter out the noise
pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
pyrUp(pyr, timg, image.size());
vector<vector<Point> > contours;
// find squares in every color plane of the image
for( int c = 0; c < 3; c++ )
{
int ch[] = {c, 0};
mixChannels(&timg, 1, &gray0, 1, ch, 1);
// try several threshold levels
for( int l = 0; l < N; l++ )
{
// hack: use Canny instead of zero threshold level.
// Canny helps to catch squares with gradient shading
if( l == 0 )
{
// apply Canny. Take the upper threshold from slider
// and set the lower to 0 (which forces edges merging)
Canny(gray0, gray, 0, thresh, 5);
// dilate canny output to remove potential
// holes between edge segments
dilate(gray, gray, Mat(), Point(-1,-1));
}
else
{
// apply threshold if l!=0:
// tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
gray = gray0 >= (l+1)*255/N;
}
// find contours and store them all as a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
vector<Point> approx;
// test each contour
for( size_t i = 0; i < contours.size(); i++ )
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if( approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)) )
{
double maxCosine = 0;
for( int j = 2; j < 5; j++ )
{
// find the maximum cosine of the angle between joint edges
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
// if cosines of all angles are small
// (all angles are ~90 degree) then write quandrange
// vertices to resultant sequence
if( maxCosine < 0.3 )
squares.push_back(approx);
}
}
}
}
}
// the function draws all the squares in the image
static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
{
for( size_t i = 0; i < squares.size(); i++ )
{
const Point* p = &squares[i][0];
int n = (int)squares[i].size();
polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, CV_AA);
}
imshow(wndname, image);
}
int main()
{
VideoCapture cap;
cap.open(0);
Mat frame,image;
namedWindow( "Square Detection Demo", 1 );
vector<vector<Point> > squares;
for(;;)
{
cap >> frame;
if( frame.empty() ){
break;
}
frame.copyTo(image);
if( image.empty() )
{
cout << "Couldn't load image" << endl;
continue;
}
findSquares(image, squares);
drawSquares(image, squares);
//imshow("Window", image);
int c = waitKey(1);
if( (char)c == 27 )
break;
}
return 0;
}
You can see that the code is a simple mix of webcam capture and the squares sample code, both provided with OpenCV 2.4.X.
However, the equivalent code for version 2.1 of OpenCV, which I will show now, is a lot faster:
#include <cv.h>
#include <highgui.h>
int thresh = 50;
IplImage* img = 0;
IplImage* img0 = 0;
CvMemStorage* storage = 0;
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
double angle( CvPoint* pt1, CvPoint* pt2, CvPoint* pt0 )
{
double dx1 = pt1->x - pt0->x;
double dy1 = pt1->y - pt0->y;
double dx2 = pt2->x - pt0->x;
double dy2 = pt2->y - pt0->y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
CvSeq* findSquares4( IplImage* img, CvMemStorage* storage )
{
CvSeq* contours;
int i, c, l, N = 11;
CvSize sz = cvSize( img->width & -2, img->height & -2 );
IplImage* timg = cvCloneImage( img ); // make a copy of input image
IplImage* gray = cvCreateImage( sz, 8, 1 );
IplImage* pyr = cvCreateImage( cvSize(sz.width/2, sz.height/2), 8, 3 );
IplImage* tgray;
CvSeq* result;
double s, t;
// create empty sequence that will contain points -
// 4 points per square (the square's vertices)
CvSeq* squares = cvCreateSeq( 0, sizeof(CvSeq), sizeof(CvPoint), storage );
// select the maximum ROI in the image
// with the width and height divisible by 2
cvSetImageROI( timg, cvRect( 0, 0, sz.width, sz.height ));
//cvSetImageROI( timg, cvRect( 0,0,50, 50 ));
// down-scale and upscale the image to filter out the noise
cvPyrDown( timg, pyr, 7 );
cvPyrUp( pyr, timg, 7 );
tgray = cvCreateImage( sz, 8, 1 );
// find squares in every color plane of the image
for( c = 0; c < 3; c++ )
{
// extract the c-th color plane
cvSetImageCOI( timg, c+1 );
cvCopy( timg, tgray, 0 );
// try several threshold levels
for( l = 0; l < N; l++ )
{
// hack: use Canny instead of zero threshold level.
// Canny helps to catch squares with gradient shading
if( l == 0 )
{
// apply Canny. Take the upper threshold from slider
// and set the lower to 0 (which forces edges merging)
cvCanny( tgray, gray, 0, thresh, 5 );
// dilate canny output to remove potential
// holes between edge segments
cvDilate( gray, gray, 0, 1 );
}
else
{
// apply threshold if l!=0:
// tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
cvThreshold( tgray, gray, (l+1)*255/N, 255, CV_THRESH_BINARY );
}
// find contours and store them all as a list
cvFindContours( gray, storage, &contours, sizeof(CvContour),
CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );
// test each contour
while( contours )
{
// approximate contour with accuracy proportional
// to the contour perimeter
result = cvApproxPoly( contours, sizeof(CvContour), storage,
CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0 );
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if( result->total == 4 &&
cvContourArea(result,CV_WHOLE_SEQ,0) > 1000 &&
cvCheckContourConvexity(result) )
{
s = 0;
for( i = 0; i < 5; i++ )
{
// find minimum angle between joint
// edges (maximum of cosine)
if( i >= 2 )
{
t = fabs(angle(
(CvPoint*)cvGetSeqElem( result, i ),
(CvPoint*)cvGetSeqElem( result, i-2 ),
(CvPoint*)cvGetSeqElem( result, i-1 )));
s = s > t ? s : t;
}
}
// if cosines of all angles are small
// (all angles are ~90 degree) then write quandrange
// vertices to resultant sequence
if( s < 0.3 )
for( i = 0; i < 4; i++ )
cvSeqPush( squares,
(CvPoint*)cvGetSeqElem( result, i ));
}
// take the next contour
contours = contours->h_next;
}
}
}
// release all the temporary images
cvReleaseImage( &gray );
cvReleaseImage( &pyr );
cvReleaseImage( &tgray );
cvReleaseImage( &timg );
return squares;
}
// the function draws all the squares in the image
void drawSquares( IplImage* img, CvSeq* squares )
{
CvSeqReader reader;
IplImage* cpy = cvCloneImage( img );
int i;
// initialize reader of the sequence
cvStartReadSeq( squares, &reader, 0 );
// read 4 sequence elements at a time (all vertices of a square)
for( i = 0; i < squares->total; i += 4 )
{
CvPoint pt[4], *rect = pt;
int count = 4;
// read 4 vertices
CV_READ_SEQ_ELEM( pt[0], reader );
CV_READ_SEQ_ELEM( pt[1], reader );
CV_READ_SEQ_ELEM( pt[2], reader );
CV_READ_SEQ_ELEM( pt[3], reader );
// draw the square as a closed polyline
cvPolyLine( cpy, &rect, &count, 1, 1, CV_RGB(0,255,0), 3, CV_AA, 0 );
}
// show the resultant image
cvShowImage( "Squares", cpy );
cvReleaseImage( &cpy );
}
int main(int argc, char** argv){
// Create a window called Original Image with a default size.
cvNamedWindow("Original Image", CV_WINDOW_AUTOSIZE);
cvNamedWindow("Squares", CV_WINDOW_AUTOSIZE);
// Create the connection to the webcam.
CvCapture* capture = cvCreateCameraCapture(0);
if( !capture ){
throw "Error when reading steam_avi";
}
storage = cvCreateMemStorage(0);
while(true)
{
// Put the captured frame into the originalImg image.
img0 = cvQueryFrame(capture);
if(!img0){
break;
}
img = cvCloneImage( img0 );
// find and draw the squares
drawSquares( img, findSquares4( img, storage ) );
cvShowImage("Original Image", img0);
cvReleaseImage(&img);
// clear memory storage - reset free space position
cvClearMemStorage( storage );
// Wait for ESC to be pressed to exit the infinite loop.
char c = cvWaitKey(10);
if( c == 27 ) break;
}
//cvReleaseImage(&img);
cvReleaseImage(&img0);
// clear memory storage - reset free space position
cvClearMemStorage( storage );
// Destroy the "Original Image" window.
cvDestroyWindow("Original Image");
cvDestroyWindow("Squares");
// Release the memory used by the capture variable.
cvReleaseCapture(&capture);
}
I am aware that I can use a single colour channel for a 3x speed-up, and tweak other parameters as well, but I wonder why equivalent code gives such different execution times.
Is there anything basic which I am missing out on?
I have tried to put working code up for everyone to try, so as not to waste anybody's time with vague questions such as: OpenCV 2.4.X is slow.
Finally, I left out Canny and checked that the area of each square stays below a certain value (less than 20% of the image area) so that unwanted squares were not detected. As for getting multiple results for the same square, I am not too bothered with it at the moment, as I can use the detected squares as possible template images for comparison. Now off to recognizing the image inside the square. Thanks Chris for at least reading this comment (I can't give you points for an answer as it was only a comment, but either way, thank you).
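For reference, a minimal sketch of that area check inside findSquares (the 20% threshold is the value mentioned above; approx and image are the variables already in scope there):
double area = fabs(contourArea(Mat(approx)));
double maxArea = 0.2 * image.cols * image.rows; // reject squares covering more than ~20% of the frame
if( approx.size() == 4 &&
    area > 1000 && area < maxArea &&
    isContourConvex(Mat(approx)) )
{
    // ... keep the existing cosine test and squares.push_back(approx)
}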

Re-distort points with camera intrinsics/extrinsics

Given a set of 2D points, how can I apply the opposite of undistortPoints?
I have the camera intrinsics and distCoeffs and would like to (for example) create a square, and distort it as if the camera had viewed it through the lens.
I have found a 'distort' patch here : http://code.opencv.org/issues/1387 but it would seem this is only good for images, I want to work on sparse points.
This question is rather old but since I ended up here from a google search without seeing a neat answer I decided to answer it anyway.
There is a function called projectPoints that does exactly this. The C version is used internally by OpenCV when estimating camera parameters with functions like calibrateCamera and stereoCalibrate
EDIT:
To use 2D points as input, we can set all z-coordinates to 1 with convertPointsToHomogeneous and use projectPoints with no rotation and no translation.
cv::Mat points2d = ...;
cv::Mat points3d;
cv::Mat distorted_points2d;
convertPointsToHomogeneous(points2d, points3d);
projectPoints(points3d, cv::Vec3f(0,0,0), cv::Vec3f(0,0,0), camera_matrix, dist_coeffs, distorted_points2d);
A simple solution is to use initUndistortRectifyMap to obtain a map from undistorted coordinates to distorted ones:
cv::Mat K = ...; // 3x3 intrinsic parameters
cv::Mat D = ...; // 4x1 or similar distortion parameters
int W = 640; // image width
int H = 480; // image height
cv::Mat mapx, mapy;
cv::initUndistortRectifyMap(K, D, cv::Mat(), K, cv::Size(W, H),
CV_32F, mapx, mapy);
float distorted_x = mapx.at<float>(y, x);
float distorted_y = mapy.at<float>(y, x);
I am editing to clarify that the code is correct, citing the documentation of initUndistortRectifyMap:
for each pixel (u, v) in the destination (corrected and rectified) image, the function computes the corresponding coordinates in the source image (that is, in the original image from the camera):
map_x(u,v) = x''*f_x + c_x
map_y(u,v) = y''*f_y + c_y
undistortPoints is essentially the reverse of projectPoints.
In my case I would like to do the following:
Undistort points:
int undisortPoints(const vector<cv::Point2f> &uv, vector<cv::Point2f> &xy, const cv::Mat &M, const cv::Mat &d)
{
cv::undistortPoints(uv, xy, M, d, cv::Mat(), M);
return 0;
}
This undistorts the points to coordinates very similar to the original image coordinates, but without distortion. This is the default behaviour of the cv::undistort() function.
Redistort points:
int distortPoints(const vector<cv::Point2f> &xy, vector<cv::Point2f> &uv, const cv::Mat &M, const cv::Mat &d)
{
vector<cv::Point2f> xy2;
vector<cv::Point3f> xyz;
cv::undistortPoints(xy, xy2, M, cv::Mat());
for (cv::Point2f p : xy2)xyz.push_back(cv::Point3f(p.x, p.y, 1));
cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64FC1);
cv::Mat tvec = cv::Mat::zeros(3, 1, CV_64FC1);
cv::projectPoints(xyz, rvec, tvec, M, d, uv);
return 0;
}
The little tricky thing here is to first project the points to the z=1 plane with a linear camera model. After that, you must project them with the original camera model.
I found these useful; I hope they work for you too.
I have had exactly the same need.
Here is a possible solution :
void MyDistortPoints(const std::vector<cv::Point2d> & src, std::vector<cv::Point2d> & dst,
const cv::Mat & cameraMatrix, const cv::Mat & distorsionMatrix)
{
dst.clear();
double fx = cameraMatrix.at<double>(0,0);
double fy = cameraMatrix.at<double>(1,1);
double ux = cameraMatrix.at<double>(0,2);
double uy = cameraMatrix.at<double>(1,2);
double k1 = distorsionMatrix.at<double>(0, 0);
double k2 = distorsionMatrix.at<double>(0, 1);
double p1 = distorsionMatrix.at<double>(0, 2);
double p2 = distorsionMatrix.at<double>(0, 3);
double k3 = distorsionMatrix.at<double>(0, 4);
//BOOST_FOREACH(const cv::Point2d &p, src)
for (unsigned int i = 0; i < src.size(); i++)
{
const cv::Point2d &p = src[i];
double x = p.x;
double y = p.y;
double xCorrected, yCorrected;
//Step 1 : correct distorsion
{
double r2 = x*x + y*y;
//radial distorsion
xCorrected = x * (1. + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);
yCorrected = y * (1. + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);
//tangential distorsion
//The "Learning OpenCV" book is wrong here !!!
//False equations from the "Learning OpenCv" book
//xCorrected = xCorrected + (2. * p1 * y + p2 * (r2 + 2. * x * x));
//yCorrected = yCorrected + (p1 * (r2 + 2. * y * y) + 2. * p2 * x);
//Correct formulae found at : http://www.vision.caltech.edu/bouguetj/calib_doc/htmls/parameters.html
xCorrected = xCorrected + (2. * p1 * x * y + p2 * (r2 + 2. * x * x));
yCorrected = yCorrected + (p1 * (r2 + 2. * y * y) + 2. * p2 * x * y);
}
//Step 2 : ideal coordinates => actual coordinates
{
xCorrected = xCorrected * fx + ux;
yCorrected = yCorrected * fy + uy;
}
dst.push_back(cv::Point2d(xCorrected, yCorrected));
}
}
void MyDistortPoints(const std::vector<cv::Point2d> & src, std::vector<cv::Point2d> & dst,
const cv::Matx33d & cameraMatrix, const cv::Matx<double, 1, 5> & distorsionMatrix)
{
cv::Mat cameraMatrix2(cameraMatrix);
cv::Mat distorsionMatrix2(distorsionMatrix);
return MyDistortPoints(src, dst, cameraMatrix2, distorsionMatrix2);
}
void TestDistort()
{
cv::Matx33d cameraMatrix = 0.;
{
//cameraMatrix Init
double fx = 1000., fy = 950.;
double ux = 324., uy = 249.;
cameraMatrix(0, 0) = fx;
cameraMatrix(1, 1) = fy;
cameraMatrix(0, 2) = ux;
cameraMatrix(1, 2) = uy;
cameraMatrix(2, 2) = 1.;
}
cv::Matx<double, 1, 5> distorsionMatrix;
{
//distorsion Init
const double k1 = 0.5, k2 = -0.5, k3 = 0.000005, p1 = 0.07, p2 = -0.05;
distorsionMatrix(0, 0) = k1;
distorsionMatrix(0, 1) = k2;
distorsionMatrix(0, 2) = p1;
distorsionMatrix(0, 3) = p2;
distorsionMatrix(0, 4) = k3;
}
std::vector<cv::Point2d> distortedPoints;
std::vector<cv::Point2d> undistortedPoints;
std::vector<cv::Point2d> redistortedPoints;
distortedPoints.push_back(cv::Point2d(324., 249.));// equals to optical center
distortedPoints.push_back(cv::Point2d(340., 200));
distortedPoints.push_back(cv::Point2d(785., 345.));
distortedPoints.push_back(cv::Point2d(0., 0.));
cv::undistortPoints(distortedPoints, undistortedPoints, cameraMatrix, distorsionMatrix);
MyDistortPoints(undistortedPoints, redistortedPoints, cameraMatrix, distorsionMatrix);
cv::undistortPoints(redistortedPoints, undistortedPoints, cameraMatrix, distorsionMatrix);
//Poor man's unit test ensuring we have an accuracy that is better than 0.001 pixel
for (unsigned int i = 0; i < undistortedPoints.size(); i++)
{
cv::Point2d dist = redistortedPoints[i] - distortedPoints[i];
double norm = sqrt(dist.dot(dist));
std::cout << "norm = " << norm << std::endl;
assert(norm < 1E-3);
}
}
For those still searching, here is a simple python function that will distort points back:
import cv2
import numpy as np

def distortPoints(undistortedPoints, k, d):
undistorted = np.float32(undistortedPoints[:, np.newaxis, :])
kInv = np.linalg.inv(k)
for i in range(len(undistorted)):
srcv = np.array([undistorted[i][0][0], undistorted[i][0][1], 1])
dstv = kInv.dot(srcv)
undistorted[i][0][0] = dstv[0]
undistorted[i][0][1] = dstv[1]
distorted = cv2.fisheye.distortPoints(undistorted, k, d)
return distorted
Example:
undistorted = np.array([(639.64, 362.09), (234, 567)])
distorted = distortPoints(undistorted, camK, camD)
print(distorted)
This question and its related questions on SO have been around for nearly a decade, but there still isn't an answer that satisfies the criteria below, so I'm proposing a new answer that
uses methods readily available in OpenCV,
works for points, not images, (and also points at subpixel locations),
can be used beyond fisheye distortion models,
does not involve manual interpolation or maps and
can be used in the context of rectification
Preliminaries
It is important to distinguish between ideal coordinates (also called 'normalized' or 'sensor' coordinates), which are the input variables to the distortion model, or 'x' and 'y' in the OpenCV docs, vs. observed coordinates (also called 'image' coordinates), or 'u' and 'v' in the OpenCV docs. Ideal coordinates have been normalized by the intrinsic parameters, so that they are scaled by the focal length and are relative to the image centroid at (cx,cy). This is important to point out because the undistortPoints() method can return either ideal or observed coordinates depending on the input arguments.
undistortPoints() can essentially do any combination of two things: remove distortions and apply a rotational transformation with the output either being in ideal or observed coordinates, depending on if a projection mat (InputArray P) is provided in the input. The input coordinates (InputArray src) for undistortPoints() is always in observed or image coordinates.
At a high level undistortPoints() converts the input coordinates from observed to ideal coordinates and uses an iterative process to remove distortions from the ideal or normalized points. The reason the process is iterative is because the OpenCV distortion model is not easy to invert analytically.
In the example below, we use undistortPoints() twice. First, we apply a reverse rotational transformation to undo image rectification. This step can be skipped if you are not working with rectified images. The output of this first step is in observed coordinates so we use undistortPoints() again to convert these to ideal coordinates. The conversion to ideal coordinates makes setting up the input for projectPoints() easier (which we use to apply the distortions). With the ideal coordinates, we can simply convert them to homogeneous by appending a 1 to each point. This is equivalent to projecting the points to a plane in 3D world coordinates with a linear camera model.
Currently there isn't a method in OpenCV to apply distortions to a set of ideal coordinates (with the exception of fisheye distortions using distort()), so we employ the projectPoints() method, which can apply distortions as well as transformations as part of its projection algorithm. The tricky part about using projectPoints() is that the input is in terms of world or model coordinates in 3D, which is why we homogenized the output of the second use of undistortPoints(). By using projectPoints() with a dummy, zero-valued rotation vector (InputArray rvec) and translation vector (InputArray tvec), the result is simply a distorted set of coordinates, which is conveniently output in observed or image coordinates.
Some helpful links
Difference between undistortPoints() and projectPoints() in OpenCV
https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga1019495a2c8d1743ed5cc23fa0daff8c
https://docs.opencv.org/3.4/da/d54/group__imgproc__transform.html#ga55c716492470bfe86b0ee9bf3a1f0f7e
Re-distort points with camera intrinsics/extrinsics
https://stackoverflow.com/questions/28678985/exact-definition-of-the-matrices-in-opencv-stereorectify#:~:text=Normally%20the%20definition%20of%20a,matrix%20with%20the%20extrinsic%20parameters
https://docs.opencv.org/4.x/db/d58/group__calib3d__fisheye.html#ga75d8877a98e38d0b29b6892c5f8d7765
https://docs.opencv.org/3.4/d9/d0c/group__calib3d.html#ga617b1685d4059c6040827800e72ad2b6
Does OpenCV's undistortPoints also rectify them?
Removing distortions in rectified image coordinates
Before providing the solution to recovering the original image coordinates with distortions we provide a short snippet to convert from the original distorted image coordinates to the corresponding rectified, undistorted coordinates that can be used for testing the reverse solution below.
The rotation matrix R1 and the projection matrix P1 come from stereoRectify(). The intrinsic parameters M1 and distortion parameters D1 come from stereoCalibrate().
const size_t img_w = 2448;
const size_t img_h = 2048;
const size_t num_rand_pts = 100;
// observed coordinates of the points in the original
// distorted image (used as a benchmark for testing)
std::vector<cv::Point2f> benchmark_obs_dist_points;
// undistorted and rectified obnserved coordinates
std::vector<cv::Point2f> obs_rect_undist_points;
// initialize with uniform random numbers
cv::RNG rng( 0xFFFFFFFF );
for(size_t i =0;i<num_rand_pts;++i)
benchmark_obs_dist_points.push_back(
cv::Point2f(rng.uniform(0.0,(double)img_w),
rng.uniform(0.0,(double)img_h))
);
// undistort and rectify
cv::undistortPoints(benchmark_obs_dist_points,obs_rect_undist_points,
M1,D1,R1,P1);
Re-distorting and unrectifying points to recover the original image coordinates
We will need three mats to reverse the rectification: the inverse of the rectification rotation matrix R1 from stereoRectify, plus two others that 'swap' the roles of the P1 and M1 projections inside undistortPoints(). P1_prime is the 3x3 sub-portion of the rectified projection matrix P1, and M1_prime is the intrinsic matrix M1 expressed as a 3x4 projection matrix with no translation. Note this only works if the output of stereoRectify has no translation, i.e. the last column of P1 is zero, which can easily be verified.
assert(cv::norm(P1(cv::Rect(3,0,1,3))) == 0.0);
// create a 3x3 shallow copy of the rotation matrix portion of the projection P1
cv::Mat P1_prime = P1(cv::Rect(0,0,3,3));
// create a 3x4 projection matrix with the rotation portion of
// the rectification rotation matrix R1
cv::Mat M1_prime = cv::Mat::zeros(3,4,CV_64F);
M1.copyTo(M1_prime(cv::Rect(0,0,3,3)));
With these mats, the reversal can proceed as follows
// reverse the image rectification transformation
// (result will still be undistorted)
std::vector<cv::Point2f> obs_undist_points;
cv::undistortPoints(obs_rect_undist_points,obs_undist_points,
P1_prime,cv::Mat(),R1.inv(),M1_prime);
// convert the image coordinates into sensor or normalized or ideal coordinates
// (again, still undistorted)
std::vector<cv::Point2f> ideal_undist_points;
cv::undistortPoints(obs_undist_points,ideal_undist_points,M1,cv::Mat());
// artificially project the ideal 2d points to a plane in world coordinates
// using a linear camera model (z=1)
std::vector<cv::Point3f> world_undist_points;
for (cv::Point2f pt : ideal_undist_points)
world_undist_points.push_back(cv::Point3f(pt.x,pt.y,1));
// add the distortions back in to get the original coordinates
cv::Mat rvec = cv::Mat::zeros(3,1,CV_64FC1); // dummy zero rotation vec
cv::Mat tvec = cv::Mat::zeros(3,1,CV_64FC1); // dummy zero translation vec
std::vector<cv::Point2f> obs_dist_points;
cv::projectPoints(world_undist_points,rvec,tvec,M1,D1,obs_dist_points);
To test the results, we can compare them to the benchmark values
for(size_t i=0;i<num_rand_pts;++i)
std::cout << "benchmark_x: " << benchmark_obs_dist_points[i].x
<< " benchmark_y: " << benchmark_obs_dist_points[i].y
<< " computed_x: " << obs_dist_points[i].x
<< " computed_y: " << obs_dist_points[i].y
<< " diff_x: "
<< std::abs(benchmark_obs_dist_points[i].x-obs_dist_points[i].x)
<< " diff_y: "
<< std::abs(benchmark_obs_dist_points[i].y-obs_dist_points[i].y)
<< std::endl;
This is main.cpp. It is self-sufficient and needs nothing but OpenCV. I don't remember where I found it; it works, and I used it in my project. The program consumes a set of standard chessboard images and generates XML/YAML files with all the distortion parameters of the camera.
#include <iostream>
#include <sstream>
#include <time.h>
#include <stdio.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/highgui/highgui.hpp>
#ifndef _CRT_SECURE_NO_WARNINGS
# define _CRT_SECURE_NO_WARNINGS
#endif
using namespace cv;
using namespace std;
static void help()
{
cout << "This is a camera calibration sample." << endl
<< "Usage: calibration configurationFile" << endl
<< "Near the sample file you'll find the configuration file, which has detailed help of "
"how to edit it. It may be any OpenCV supported file format XML/YAML." << endl;
}
class Settings
{
public:
Settings() : goodInput(false) {}
enum Pattern { NOT_EXISTING, CHESSBOARD, CIRCLES_GRID, ASYMMETRIC_CIRCLES_GRID };
enum InputType {INVALID, CAMERA, VIDEO_FILE, IMAGE_LIST};
void write(FileStorage& fs) const //Write serialization for this class
{
fs << "{" << "BoardSize_Width" << boardSize.width
<< "BoardSize_Height" << boardSize.height
<< "Square_Size" << squareSize
<< "Calibrate_Pattern" << patternToUse
<< "Calibrate_NrOfFrameToUse" << nrFrames
<< "Calibrate_FixAspectRatio" << aspectRatio
<< "Calibrate_AssumeZeroTangentialDistortion" << calibZeroTangentDist
<< "Calibrate_FixPrincipalPointAtTheCenter" << calibFixPrincipalPoint
<< "Write_DetectedFeaturePoints" << bwritePoints
<< "Write_extrinsicParameters" << bwriteExtrinsics
<< "Write_outputFileName" << outputFileName
<< "Show_UndistortedImage" << showUndistorsed
<< "Input_FlipAroundHorizontalAxis" << flipVertical
<< "Input_Delay" << delay
<< "Input" << input
<< "}";
}
void read(const FileNode& node) //Read serialization for this class
{
node["BoardSize_Width" ] >> boardSize.width;
node["BoardSize_Height"] >> boardSize.height;
node["Calibrate_Pattern"] >> patternToUse;
node["Square_Size"] >> squareSize;
node["Calibrate_NrOfFrameToUse"] >> nrFrames;
node["Calibrate_FixAspectRatio"] >> aspectRatio;
node["Write_DetectedFeaturePoints"] >> bwritePoints;
node["Write_extrinsicParameters"] >> bwriteExtrinsics;
node["Write_outputFileName"] >> outputFileName;
node["Calibrate_AssumeZeroTangentialDistortion"] >> calibZeroTangentDist;
node["Calibrate_FixPrincipalPointAtTheCenter"] >> calibFixPrincipalPoint;
node["Input_FlipAroundHorizontalAxis"] >> flipVertical;
node["Show_UndistortedImage"] >> showUndistorsed;
node["Input"] >> input;
node["Input_Delay"] >> delay;
interprate();
}
void interprate()
{
goodInput = true;
if (boardSize.width <= 0 || boardSize.height <= 0)
{
cerr << "Invalid Board size: " << boardSize.width << " " << boardSize.height << endl;
goodInput = false;
}
if (squareSize <= 10e-6)
{
cerr << "Invalid square size " << squareSize << endl;
goodInput = false;
}
if (nrFrames <= 0)
{
cerr << "Invalid number of frames " << nrFrames << endl;
goodInput = false;
}
if (input.empty()) // Check for valid input
inputType = INVALID;
else
{
if (input[0] >= '0' && input[0] <= '9')
{
stringstream ss(input);
ss >> cameraID;
inputType = CAMERA;
}
else
{
if (readStringList(input, imageList))
{
inputType = IMAGE_LIST;
nrFrames = (nrFrames < (int)imageList.size()) ? nrFrames : (int)imageList.size();
}
else
inputType = VIDEO_FILE;
}
if (inputType == CAMERA)
inputCapture.open(cameraID);
if (inputType == VIDEO_FILE)
inputCapture.open(input);
if (inputType != IMAGE_LIST && !inputCapture.isOpened())
inputType = INVALID;
}
if (inputType == INVALID)
{
cerr << " Inexistent input: " << input << endl;
goodInput = false;
}
flag = 0;
if(calibFixPrincipalPoint) flag |= CV_CALIB_FIX_PRINCIPAL_POINT;
if(calibZeroTangentDist) flag |= CV_CALIB_ZERO_TANGENT_DIST;
if(aspectRatio) flag |= CV_CALIB_FIX_ASPECT_RATIO;
calibrationPattern = NOT_EXISTING;
if (!patternToUse.compare("CHESSBOARD")) calibrationPattern = CHESSBOARD;
if (!patternToUse.compare("CIRCLES_GRID")) calibrationPattern = CIRCLES_GRID;
if (!patternToUse.compare("ASYMMETRIC_CIRCLES_GRID")) calibrationPattern = ASYMMETRIC_CIRCLES_GRID;
if (calibrationPattern == NOT_EXISTING)
{
cerr << " Inexistent camera calibration mode: " << patternToUse << endl;
goodInput = false;
}
atImageList = 0;
}
Mat nextImage()
{
Mat result;
if( inputCapture.isOpened() )
{
Mat view0;
inputCapture >> view0;
view0.copyTo(result);
}
else if( atImageList < (int)imageList.size() )
result = imread(imageList[atImageList++], CV_LOAD_IMAGE_COLOR);
return result;
}
static bool readStringList( const string& filename, vector<string>& l )
{
l.clear();
FileStorage fs(filename, FileStorage::READ);
if( !fs.isOpened() )
return false;
FileNode n = fs.getFirstTopLevelNode();
if( n.type() != FileNode::SEQ )
return false;
FileNodeIterator it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
l.push_back((string)*it);
return true;
}
public:
Size boardSize; // The size of the board -> Number of items by width and height
Pattern calibrationPattern;// One of the Chessboard, circles, or asymmetric circle pattern
float squareSize; // The size of a square in your defined unit (point, millimeter,etc).
int nrFrames; // The number of frames to use from the input for calibration
float aspectRatio; // The aspect ratio
int delay; // In case of a video input
bool bwritePoints; // Write detected feature points
bool bwriteExtrinsics; // Write extrinsic parameters
bool calibZeroTangentDist; // Assume zero tangential distortion
bool calibFixPrincipalPoint;// Fix the principal point at the center
bool flipVertical; // Flip the captured images around the horizontal axis
string outputFileName; // The name of the file where to write
bool showUndistorsed; // Show undistorted images after calibration
string input; // The input ->
int cameraID;
vector<string> imageList;
int atImageList;
VideoCapture inputCapture;
InputType inputType;
bool goodInput;
int flag;
private:
string patternToUse;
};
static void read(const FileNode& node, Settings& x, const Settings& default_value = Settings())
{
if(node.empty())
x = default_value;
else
x.read(node);
}
enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,
vector<vector<Point2f> > imagePoints );
int main(int argc, char* argv[])
{
// help();
Settings s;
const string inputSettingsFile = argc > 1 ? argv[1] : "default.xml";
FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
if (!fs.isOpened())
{
cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
return -1;
}
fs["Settings"] >> s;
fs.release(); // close Settings file
if (!s.goodInput)
{
cout << "Invalid input detected. Application stopping. " << endl;
return -1;
}
vector<vector<Point2f> > imagePoints;
Mat cameraMatrix, distCoeffs;
Size imageSize;
int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;
clock_t prevTimestamp = 0;
const Scalar RED(0,0,255), GREEN(0,255,0);
const char ESC_KEY = 27;
for(int i = 0;;++i)
{
Mat view;
bool blinkOutput = false;
view = s.nextImage();
//----- If no more image, or got enough, then stop calibration and show result -------------
if( mode == CAPTURING && imagePoints.size() >= (unsigned)s.nrFrames )
{
if( runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints))
mode = CALIBRATED;
else
mode = DETECTION;
}
if(view.empty()) // If no more images then run calibration, save and stop loop.
{
if( imagePoints.size() > 0 )
runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
break;
}
imageSize = view.size(); // Format input image.
if( s.flipVertical ) flip( view, view, 0 );
vector<Point2f> pointBuf;
bool found;
switch( s.calibrationPattern ) // Find feature points on the input format
{
case Settings::CHESSBOARD:
found = findChessboardCorners( view, s.boardSize, pointBuf,
CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
break;
case Settings::CIRCLES_GRID:
found = findCirclesGrid( view, s.boardSize, pointBuf );
break;
case Settings::ASYMMETRIC_CIRCLES_GRID:
found = findCirclesGrid( view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID );
break;
default:
found = false;
break;
}
if ( found) // If done with success,
{
// improve the found corners' coordinate accuracy for chessboard
if( s.calibrationPattern == Settings::CHESSBOARD)
{
Mat viewGray;
cvtColor(view, viewGray, COLOR_BGR2GRAY);
cornerSubPix( viewGray, pointBuf, Size(11,11),
Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
}
if( mode == CAPTURING && // For camera only take new samples after delay time
(!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC) )
{
imagePoints.push_back(pointBuf);
prevTimestamp = clock();
blinkOutput = s.inputCapture.isOpened();
}
// Draw the corners.
drawChessboardCorners( view, s.boardSize, Mat(pointBuf), found );
}
//----------------------------- Output Text ------------------------------------------------
string msg = (mode == CAPTURING) ? "100/100" :
mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
int baseLine = 0;
Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);
if( mode == CAPTURING )
{
if(s.showUndistorsed)
msg = format( "%d/%d Undist", (int)imagePoints.size(), s.nrFrames );
else
msg = format( "%d/%d", (int)imagePoints.size(), s.nrFrames );
}
putText( view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED);
if( blinkOutput )
bitwise_not(view, view);
//------------------------- Video capture output undistorted ------------------------------
if( mode == CALIBRATED && s.showUndistorsed )
{
Mat temp = view.clone();
undistort(temp, view, cameraMatrix, distCoeffs);
}
//------------------------------ Show image and check for input commands -------------------
imshow("Image View", view);
char key = (char)waitKey(s.inputCapture.isOpened() ? 50 : s.delay);
if( key == ESC_KEY )
break;
if( key == 'u' && mode == CALIBRATED )
s.showUndistorsed = !s.showUndistorsed;
if( s.inputCapture.isOpened() && key == 'g' )
{
mode = CAPTURING;
imagePoints.clear();
}
}
// -----------------------Show the undistorted image for the image list ------------------------
if( s.inputType == Settings::IMAGE_LIST && s.showUndistorsed )
{
Mat view, rview, map1, map2;
initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
imageSize, CV_16SC2, map1, map2);
for(int i = 0; i < (int)s.imageList.size(); i++ )
{
view = imread(s.imageList[i], 1);
if(view.empty())
continue;
remap(view, rview, map1, map2, INTER_LINEAR);
imshow("Image View", rview);
char c = (char)waitKey();
if( c == ESC_KEY || c == 'q' || c == 'Q' )
break;
}
}
return 0;
}
static double computeReprojectionErrors( const vector<vector<Point3f> >& objectPoints,
const vector<vector<Point2f> >& imagePoints,
const vector<Mat>& rvecs, const vector<Mat>& tvecs,
const Mat& cameraMatrix , const Mat& distCoeffs,
vector<float>& perViewErrors)
{
vector<Point2f> imagePoints2;
int i, totalPoints = 0;
double totalErr = 0, err;
perViewErrors.resize(objectPoints.size());
for( i = 0; i < (int)objectPoints.size(); ++i )
{
projectPoints( Mat(objectPoints[i]), rvecs[i], tvecs[i], cameraMatrix,
distCoeffs, imagePoints2);
err = norm(Mat(imagePoints[i]), Mat(imagePoints2), CV_L2);
int n = (int)objectPoints[i].size();
perViewErrors[i] = (float) std::sqrt(err*err/n);
totalErr += err*err;
totalPoints += n;
}
return std::sqrt(totalErr/totalPoints);
}
static void calcBoardCornerPositions(Size boardSize, float squareSize, vector<Point3f>& corners,
Settings::Pattern patternType /*= Settings::CHESSBOARD*/)
{
corners.clear();
switch(patternType)
{
case Settings::CHESSBOARD:
case Settings::CIRCLES_GRID:
for( int i = 0; i < boardSize.height; ++i )
for( int j = 0; j < boardSize.width; ++j )
corners.push_back(Point3f(float( j*squareSize ), float( i*squareSize ), 0));
break;
case Settings::ASYMMETRIC_CIRCLES_GRID:
for( int i = 0; i < boardSize.height; i++ )
for( int j = 0; j < boardSize.width; j++ )
corners.push_back(Point3f(float((2*j + i % 2)*squareSize), float(i*squareSize), 0));
break;
default:
break;
}
}
static bool runCalibration( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs,
vector<vector<Point2f> > imagePoints, vector<Mat>& rvecs, vector<Mat>& tvecs,
vector<float>& reprojErrs, double& totalAvgErr)
{
cameraMatrix = Mat::eye(3, 3, CV_64F);
if( s.flag & CV_CALIB_FIX_ASPECT_RATIO )
cameraMatrix.at<double>(0,0) = 1.0;
distCoeffs = Mat::zeros(8, 1, CV_64F);
vector<vector<Point3f> > objectPoints(1);
calcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0], s.calibrationPattern);
objectPoints.resize(imagePoints.size(),objectPoints[0]);
//Find intrinsic and extrinsic camera parameters
double rms = calibrateCamera(objectPoints, imagePoints, imageSize, cameraMatrix,
distCoeffs, rvecs, tvecs, s.flag|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5);
cout << "Re-projection error reported by calibrateCamera: "<< rms << endl;
bool ok = checkRange(cameraMatrix) && checkRange(distCoeffs);
totalAvgErr = computeReprojectionErrors(objectPoints, imagePoints,
rvecs, tvecs, cameraMatrix, distCoeffs, reprojErrs);
return ok;
}
// Print camera parameters to the output file
static void saveCameraParams( Settings& s, Size& imageSize, Mat& cameraMatrix, Mat& distCoeffs,
const vector<Mat>& rvecs, const vector<Mat>& tvecs,
const vector<float>& reprojErrs, const vector<vector<Point2f> >& imagePoints,
double totalAvgErr )
{
FileStorage fs( s.outputFileName, FileStorage::WRITE );
time_t tm;
time( &tm );
struct tm *t2 = localtime( &tm );
char buf[1024];
strftime( buf, sizeof(buf)-1, "%c", t2 );
fs << "calibration_Time" << buf;
if( !rvecs.empty() || !reprojErrs.empty() )
fs << "nrOfFrames" << (int)std::max(rvecs.size(), reprojErrs.size());
fs << "image_Width" << imageSize.width;
fs << "image_Height" << imageSize.height;
fs << "board_Width" << s.boardSize.width;
fs << "board_Height" << s.boardSize.height;
fs << "square_Size" << s.squareSize;
if( s.flag & CV_CALIB_FIX_ASPECT_RATIO )
fs << "FixAspectRatio" << s.aspectRatio;
if( s.flag )
{
sprintf( buf, "flags: %s%s%s%s",
s.flag & CV_CALIB_USE_INTRINSIC_GUESS ? " +use_intrinsic_guess" : "",
s.flag & CV_CALIB_FIX_ASPECT_RATIO ? " +fix_aspectRatio" : "",
s.flag & CV_CALIB_FIX_PRINCIPAL_POINT ? " +fix_principal_point" : "",
s.flag & CV_CALIB_ZERO_TANGENT_DIST ? " +zero_tangent_dist" : "" );
cvWriteComment( *fs, buf, 0 );
}
fs << "flagValue" << s.flag;
fs << "Camera_Matrix" << cameraMatrix;
fs << "Distortion_Coefficients" << distCoeffs;
fs << "Avg_Reprojection_Error" << totalAvgErr;
if( !reprojErrs.empty() )
fs << "Per_View_Reprojection_Errors" << Mat(reprojErrs);
if( !rvecs.empty() && !tvecs.empty() )
{
CV_Assert(rvecs[0].type() == tvecs[0].type());
Mat bigmat((int)rvecs.size(), 6, rvecs[0].type());
for( int i = 0; i < (int)rvecs.size(); i++ )
{
Mat r = bigmat(Range(i, i+1), Range(0,3));
Mat t = bigmat(Range(i, i+1), Range(3,6));
CV_Assert(rvecs[i].rows == 3 && rvecs[i].cols == 1);
CV_Assert(tvecs[i].rows == 3 && tvecs[i].cols == 1);
//*.t() is MatExpr (not Mat) so we can use assignment operator
r = rvecs[i].t();
t = tvecs[i].t();
}
cvWriteComment( *fs, "a set of 6-tuples (rotation vector + translation vector) for each view", 0 );
fs << "Extrinsic_Parameters" << bigmat;
}
if( !imagePoints.empty() )
{
Mat imagePtMat((int)imagePoints.size(), (int)imagePoints[0].size(), CV_32FC2);
for( int i = 0; i < (int)imagePoints.size(); i++ )
{
Mat r = imagePtMat.row(i).reshape(2, imagePtMat.cols);
Mat imgpti(imagePoints[i]);
imgpti.copyTo(r);
}
fs << "Image_points" << imagePtMat;
}
}
bool runCalibrationAndSave(Settings& s, Size imageSize, Mat& cameraMatrix, Mat& distCoeffs,vector<vector<Point2f> > imagePoints )
{
vector<Mat> rvecs, tvecs;
vector<float> reprojErrs;
double totalAvgErr = 0;
bool ok = runCalibration(s,imageSize, cameraMatrix, distCoeffs, imagePoints, rvecs, tvecs,
reprojErrs, totalAvgErr);
cout << (ok ? "Calibration succeeded" : "Calibration failed")
<< ". avg re projection error = " << totalAvgErr ;
if( ok )
saveCameraParams( s, imageSize, cameraMatrix, distCoeffs, rvecs ,tvecs, reprojErrs,
imagePoints, totalAvgErr);
return ok;
}

OpenCV polar transform selective region

I want to restrict the operating region of the polar transform in OpenCV's cvLogPolar function. I would consider rewriting the function from scratch. I am unwrapping a fisheye lens image to yield a panorama, and I want to make it as efficient as possible. Much of the image is cropped away after the transform, giving a donut-shaped region of interest in the input image:
This means much processing is wasted on black pixels.
This should be pretty simple, right? The function should take two additional arguments for clipping extents, radius1 and radius2. Here is the relevant pol-to-cart portion of the cvLogPolar function from imgwarp.cpp:
cvLogPolar( const CvArr* srcarr, CvArr* dstarr,
CvPoint2D32f center, double M, int flags )
{
cv::Ptr<CvMat> mapx, mapy;
CvMat srcstub, *src = cvGetMat(srcarr, &srcstub);
CvMat dststub, *dst = cvGetMat(dstarr, &dststub);
CvSize ssize, dsize;
if( !CV_ARE_TYPES_EQ( src, dst ))
CV_Error( CV_StsUnmatchedFormats, "" );
if( M <= 0 )
CV_Error( CV_StsOutOfRange, "M should be >0" );
ssize = cvGetMatSize(src);
dsize = cvGetMatSize(dst);
mapx = cvCreateMat( dsize.height, dsize.width, CV_32F );
mapy = cvCreateMat( dsize.height, dsize.width, CV_32F );
if( !(flags & CV_WARP_INVERSE_MAP) )
//---snip---
else
{
int x, y;
CvMat bufx, bufy, bufp, bufa;
double ascale = ssize.height/(2*CV_PI);
cv::AutoBuffer<float> _buf(4*dsize.width);
float* buf = _buf;
bufx = cvMat( 1, dsize.width, CV_32F, buf );
bufy = cvMat( 1, dsize.width, CV_32F, buf + dsize.width );
bufp = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*2 );
bufa = cvMat( 1, dsize.width, CV_32F, buf + dsize.width*3 );
for( x = 0; x < dsize.width; x++ )
bufx.data.fl[x] = (float)x - center.x;
for( y = 0; y < dsize.height; y++ )
{
float* mx = (float*)(mapx->data.ptr + y*mapx->step);
float* my = (float*)(mapy->data.ptr + y*mapy->step);
for( x = 0; x < dsize.width; x++ )
bufy.data.fl[x] = (float)y - center.y;
#if 1
cvCartToPolar( &bufx, &bufy, &bufp, &bufa );
for( x = 0; x < dsize.width; x++ )
bufp.data.fl[x] += 1.f;
cvLog( &bufp, &bufp );
for( x = 0; x < dsize.width; x++ )
{
double rho = bufp.data.fl[x]*M;
double phi = bufa.data.fl[x]*ascale;
mx[x] = (float)rho;
my[x] = (float)phi;
}
#else
//---snip---
#endif
}
}
cvRemap( src, dst, mapx, mapy, flags, cvScalarAll(0) );
}
Since the routine works by iterating through pixels in the destination image, the r1 and r2 clipping region would just need to be translated to a y1..y2 row region. Then we just change the for loop: for( y = 0; y < dsize.height; y++ ) becomes for( y = y1; y < y2; y++ ).
Correct?
What about constraining cvRemap? I am hoping it ignores the untouched pixels, or that their computational cost is negligible.
I ended up doing a different optimization: I store the result of the polar transform operation in persistent remapping matrices. This helps a LOT. If you're doing a polar unwrap on full-motion video using the same polar transform mapping for every frame, you don't want to recalculate the transform with a million sin/cos operations every single frame. So this just required a small modification to the logPolar/linearPolar operations in the OpenCV source to save the remap matrices somewhere outside.
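A minimal sketch of that idea: build the (linear) polar maps once for the donut region, then each frame costs only a single remap. The function name, radii and output size below are illustrative assumptions, not the original modification:
#include <cmath>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
// Precompute destination->source maps that unwrap the annulus [r1, r2] around 'center'.
static void buildPolarMaps(cv::Size dstSize, cv::Point2f center,
                           double r1, double r2, cv::Mat &mapx, cv::Mat &mapy)
{
    mapx.create(dstSize, CV_32F);
    mapy.create(dstSize, CV_32F);
    for (int y = 0; y < dstSize.height; y++)              // y = angle bin
    {
        double angle = 2.0 * CV_PI * y / dstSize.height;
        for (int x = 0; x < dstSize.width; x++)           // x = radius bin
        {
            double r = r1 + (r2 - r1) * x / dstSize.width;
            mapx.at<float>(y, x) = (float)(center.x + r * std::cos(angle));
            mapy.at<float>(y, x) = (float)(center.y + r * std::sin(angle));
        }
    }
}
// per frame, with the cached maps:
//   cv::remap(frame, panorama, mapx, mapy, cv::INTER_LINEAR);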
