I am doing an image stitching project using OpenCV. I have the homography H1 between img1 and img2, and the homography H2 between img2 and img3. I now need to compute the homography between img1 and img3, but simply multiplying H1*H2 is not working.
Does anyone have ideas on how to calculate the new homography between img1 and img3?
for me computing H1 * H2 works well and gives the right results.
Here, H1 = H2_1 since it warps from image2 to image1.
H2 = H3_2 since it warps from image3 to image2.
H1 * H2 = H3_1 since it warps from image3 to image 1.
// Demonstrates composing homographies: H3_1 = H2_1 * H3_2 maps a point from
// image3 directly into image1, and must match applying the two warps in
// sequence (image3 -> image2, then image2 -> image1).
int main()
{
    try {
        // H2_1 warps image2 coordinates into image1 (a pure translation here).
        cv::Mat H2_1 = (cv::Mat_<double>(3, 3) << 1.0e+00, 0.0e+00, 1.8e+03, 0.0e+00, 1.0e+00, 5.0e+01, 0.0e+00, 0.0e+00, 1.0e+00);
        // H3_2 warps image3 coordinates into image2 (a full projective transform).
        cv::Mat H3_2 = (cv::Mat_<double>(3, 3) << 0.949534471, -0.00581765975, 66.7917766, -0.0113810490, 0.981450515, -0.672362563, -0.000147974585, 0.00000992770511, 1.0);
        // Matrix product composes the warps right-to-left: image3 -> image2 -> image1.
        cv::Mat H3_1 = H2_1 * H3_2;

        // One test point in image3 coordinates (the original pushed it twice by mistake).
        cv::Point2f p3(500, 500);
        std::vector<cv::Point2f> src3;
        src3.push_back(p3);

        // Step 1: image3 -> image2.
        std::vector<cv::Point2f> dst2;
        cv::perspectiveTransform(src3, dst2, H3_2);
        std::cout << "p3: " << p3 << std::endl;  // was mislabeled "p1:" -- this is the image3 point
        std::cout << "p2: " << dst2[0] << std::endl;

        // Step 2: image2 -> image1.
        std::vector<cv::Point2f> dst1;
        cv::perspectiveTransform(dst2, dst1, H2_1);
        std::cout << "p1 from p2: " << dst1[0] << std::endl;

        // Single step with the composed homography must yield the same point.
        dst1.clear();
        cv::perspectiveTransform(src3, dst1, H3_1);
        std::cout << "p1 from p3: " << dst1[0] << std::endl;
    }
    catch (const std::exception& e)
    {
        std::cout << e.what() << std::endl;
    }
    std::cin.get();  // keep the console window open
}
results:
Related
I am trying to do 3D reconstruction using this code from OpenCV. As far as I understood, I need a text file with the 2D points in the given format. I am wondering if anyone could help me obtain the 2D points this program mentions. I have a set of images with the calibration parameters, but I have not been able to understand how I could track and save these 2D points — e.g. so that the first point in frame 1 is the same point in frame 2, in the format they specified.
#include <opencv2/core.hpp>
#include <opencv2/sfm.hpp>
#include <opencv2/viz.hpp>
#include <iostream>
#include <fstream>
#include <string>
using namespace std;
using namespace cv;
using namespace cv::sfm;
/* Print the command-line usage message for this sample, describing the
 * expected tracks-file format and the intrinsic parameters f, cx, cy. */
static void help() {
  std::cout
      << "\n------------------------------------------------------------\n"
      << " This program shows the camera trajectory reconstruction capabilities\n"
      << " in the OpenCV Structure From Motion (SFM) module.\n"
      << " \n"
      << " Usage:\n"
      << " example_sfm_trajectory_reconstruction <path_to_tracks_file> <f> <cx> <cy>\n"
      << " where: is the tracks file absolute path into your system. \n"
      << " \n"
      << " The file must have the following format: \n"
      << " row1 : x1 y1 x2 y2 ... x36 y36 for track 1\n"
      << " row2 : x1 y1 x2 y2 ... x36 y36 for track 2\n"
      << " etc\n"
      << " \n"
      << " i.e. a row gives the 2D measured position of a point as it is tracked\n"
      << " through frames 1 to 36. If there is no match found in a view then x\n"
      << " and y are -1.\n"
      << " \n"
      << " Each row corresponds to a different point.\n"
      << " \n"
      << " f is the focal length in pixels. \n"  // fixed typo: "lenght" -> "length"
      << " cx is the image principal point x coordinates in pixels. \n"
      << " cy is the image principal point y coordinates in pixels. \n"
      << "------------------------------------------------------------------\n\n"
      << std::endl;
}
/* Parse a tracks text file and build the following data structure:
 *
 *            frame1        frame2          frameN
 *  track1 | (x11,y11) | -> | (x12,y12) | -> | (x1N,y1N) |
 *  track2 | (x21,y21) | -> | (x22,y22) | -> | (x2N,y2N) |
 *  trackN | (xN1,yN1) | -> | (xN2,yN2) | -> | (xNN,yNN) |
 *
 * Each input row holds "x1 y1 x2 y2 ...": one tracked point's 2D position
 * through all frames, with (-1,-1) marking frames where the point was not
 * found. On success, points2d receives one 2 x n_tracks matrix per frame,
 * the layout expected by cv::sfm::reconstruct().
 * Exits the process with a non-zero status if the file cannot be opened.
 */
void
parser_2D_tracks(const string &_filename, std::vector<Mat> &points2d )
{
  ifstream myfile(_filename.c_str());
  if (!myfile.is_open())
  {
    cout << "Unable to read file: " << _filename << endl;
    exit(1);  // was exit(0): failure must report a non-zero status
  } else {
    double x, y;
    string line_str;
    int n_frames = 0, n_tracks = 0;
    // Extract data from the text file, one track (row) at a time.
    vector<vector<Vec2d> > tracks;
    for ( ; getline(myfile,line_str); ++n_tracks)
    {
      istringstream line(line_str);
      vector<Vec2d> track;
      for ( n_frames = 0; line >> x >> y; ++n_frames)
      {
        // Coordinates <= 0 (the file uses -1) mean "not visible in this frame".
        if ( x > 0 && y > 0)
          track.push_back(Vec2d(x,y));
        else
          track.push_back(Vec2d(-1,-1));  // was Vec2d(-1): the y coordinate stayed 0, not -1
      }
      tracks.push_back(track);
    }
    // Re-pack per-track rows as per-frame 2 x n_tracks matrices.
    // n_frames carries the length of the LAST parsed row; shorter rows are
    // padded with (-1,-1) instead of reading out of bounds.
    for (int i = 0; i < n_frames; ++i)
    {
      Mat_<double> frame(2, n_tracks);
      for (int j = 0; j < n_tracks; ++j)
      {
        if (i < static_cast<int>(tracks[j].size()))
        {
          frame(0,j) = tracks[j][i][0];
          frame(1,j) = tracks[j][i][1];
        }
        else
        {
          frame(0,j) = -1;  // missing measurement marker
          frame(1,j) = -1;
        }
      }
      points2d.push_back(Mat(frame));
    }
    myfile.close();
  }
}
/* Keyboard callback to control the 3D visualization: pressing 's' toggles
 * between the world view and the current camera's point of view. */
bool camera_pov = false;  // true: render the scene from the current camera pose

// event.action == 0 is presumably the key-up action of the viz API -- confirm
// against viz::KeyboardEvent::KEY_UP.
void keyboard_callback(const viz::KeyboardEvent &event, void* cookie)
{
  (void)cookie;  // unused
  if ( event.action == 0 && event.symbol == "s" )  // was !compare(): same test, idiomatic form
    camera_pov = !camera_pov;
}
/* Sample main code.
 *
 * Reconstructs a camera trajectory from a 2D tracks file with the SFM module
 * and renders an animated visualization of the estimated cameras and points.
 * Usage: <tracks_file> <f> <cx> <cy> (see help()).
 */
int main(int argc, char** argv)
{
  // Read input parameters.
  if ( argc != 5 )
  {
    help();
    exit(1);  // was exit(0): wrong usage should report a non-zero status
  }
  // Read 2D points from text file.
  std::vector<Mat> points2d;
  parser_2D_tracks( argv[1], points2d );
  // Set the camera calibration matrix from f, cx, cy.
  const double f = atof(argv[2]),
               cx = atof(argv[3]), cy = atof(argv[4]);
  Matx33d K = Matx33d( f, 0, cx,
                       0, f, cy,
                       0, 0,  1);
  bool is_projective = true;
  vector<Mat> Rs_est, ts_est, points3d_estimated;
  reconstruct(points2d, Rs_est, ts_est, K, points3d_estimated, is_projective);
  // Print output.
  cout << "\n----------------------------\n" << endl;
  cout << "Reconstruction: " << endl;
  cout << "============================" << endl;
  cout << "Estimated 3D points: " << points3d_estimated.size() << endl;
  cout << "Estimated cameras: " << Rs_est.size() << endl;
  cout << "Refined intrinsics: " << endl << K << endl << endl;
  cout << "3D Visualization: " << endl;
  cout << "============================" << endl;
  viz::Viz3d window_est("Estimation Coordinate Frame");
  window_est.setBackgroundColor(); // black by default
  window_est.registerKeyboardCallback(&keyboard_callback);
  // Create the point cloud from the estimated 3D points.
  cout << "Recovering points ... ";
  vector<Vec3f> point_cloud_est;
  for (size_t i = 0; i < points3d_estimated.size(); ++i)  // size_t: avoid signed/unsigned mix
    point_cloud_est.push_back(Vec3f(points3d_estimated[i]));
  cout << "[DONE]" << endl;
  cout << "Recovering cameras ... ";
  vector<Affine3d> path_est;
  for (size_t i = 0; i < Rs_est.size(); ++i)
    path_est.push_back(Affine3d(Rs_est[i],ts_est[i]));
  cout << "[DONE]" << endl;
  cout << "Rendering Trajectory ... ";
  cout << endl << "Press: " << endl;
  cout << " 's' to switch the camera pov" << endl;
  cout << " 'q' to close the windows " << endl;
  if ( path_est.size() > 0 )
  {
    // Animated trajectory: idx ping-pongs through the camera path.
    int idx = 0, forw = -1, n = static_cast<int>(path_est.size());
    while(!window_est.wasStopped())
    {
      // Draw each estimated 3D point as a small cube.
      for (size_t i = 0; i < point_cloud_est.size(); ++i)
      {
        Vec3d point = point_cloud_est[i];
        Affine3d point_pose(Mat::eye(3,3,CV_64F), point);
        char buffer[50];
        snprintf(buffer, sizeof(buffer), "%d", static_cast<int>(i));  // bounded; was sprintf
        viz::WCube cube_widget(Point3f(0.1,0.1,0.0), Point3f(0.0,0.0,-0.1), true, viz::Color::blue());
        cube_widget.setRenderingProperty(viz::LINE_WIDTH, 2.0);
        window_est.showWidget("Cube"+string(buffer), cube_widget, point_pose);
      }
      Affine3d cam_pose = path_est[idx];
      viz::WCameraPosition cpw(0.25); // Coordinate axes
      viz::WCameraPosition cpw_frustum(K, 0.3, viz::Color::yellow()); // Camera frustum
      if ( camera_pov )
        window_est.setViewerPose(cam_pose);
      else
      {
        // Render the complete trajectory plus the current camera widgets.
        window_est.showWidget("cameras_frames_and_lines_est", viz::WTrajectory(path_est, viz::WTrajectory::PATH, 1.0, viz::Color::green()));
        window_est.showWidget("CPW", cpw, cam_pose);
        window_est.showWidget("CPW_FRUSTUM", cpw_frustum, cam_pose);
      }
      // Update trajectory index (spring effect). Bounce at n-1, not n:
      // the original "idx==n" test let idx reach n, so path_est[idx] above
      // read one past the end on the next iteration.
      if (n > 1)
      {
        forw *= (idx == n-1 || idx == 0) ? -1 : 1;
        idx += forw;
      }
      // frame rate 1s
      window_est.spinOnce(1, true);
      window_est.removeAllWidgets();
    }
  }
  return 0;
}
I would be really grateful if someone can help me through this. Thank you.
I am working on a queue bypass detection project and I need to select a region of interest, or a boundary. If a person crosses the boundary, we should get an alert. Please help me select a region of interest in live video, similar to the one in the image.
please see this image
After doing some research I found what you need on github
#include "opencv2/opencv.hpp"
#include <iostream>
using namespace cv;
using namespace std;
/*~~~~~~~~~~~~~~~~~~*/
char ky;                                  // last key pressed after the 4th ROI point
bool got_roi = false;                     // true once 4 corner points are confirmed
Point points_array[4];                    // clicked ROI corners; prompts suggest TL, TR, BR, BL order -- confirm
Mat src, ROI_Img,backup,ROI_MASK;         // current frame, cropped ROI, clean copy for reset, masked frame
Rect2d ROI_Select;
int width_roi = 0, height_roi = 0,min_x,min_y,max_x,max_y;  // bounding box of the 4 points
Rect ROI_RECT ;                           // axis-aligned rectangle enclosing the ROI polygon
vector< vector<Point> > co_ordinates;     // single-polygon contour list used by drawContours()
/*~~~~~~~~~~~~~~~~~~*/
/*~~~~~~~~~~~~~~~~~~*/
// Mouse callback: each left-button-down stores its x-y coordinate in
// points_array until 4 corner points are collected. After the 4th point the
// user either confirms the ROI (any key) or clears the points with 'c'/'C'
// and starts over. On confirmation the bounding rectangle (ROI_RECT) and the
// mask contour (co_ordinates) are built from the 4 corners.
void mouse_click(int event, int x, int y, int flags, void *param)
{
    static int count=0;  // corner points collected so far
    switch (event)
    {
    case CV_EVENT_LBUTTONDOWN:
    {
        // Prompt for the NEXT point to select.
        switch (count) // number of set Point
        {
        case 0:
            cout << "Select top-right point" << endl;
            break;
        case 1:
            cout << "Select bottom-right point" << endl;
            break;
        case 2:
            cout << "Select bottom-left point" << endl << endl;
            break;
        default:
            break;
        }
        if (!got_roi) // ROI not selected yet
        {
            points_array[count] = Point(x,y);
            circle(src, points_array[count], 2, Scalar(0, 255, 0), 2); //show points on image
            imshow("My_Win", src);
            count++;
            if (count == 4) // all 4 points selected
            {
                cout << "ROI x & y points :" << endl;
                cout << points_array[0] << endl;
                cout << points_array[1] << endl;
                cout << points_array[2] << endl;
                cout << points_array[3] << endl;
                cout << endl << "ROI Saved You can continue with double press any keys except 'c' " << endl << "once press 'c' or 'C' to clear points and retry select ROI " << endl << endl;
                ky = waitKey(0) & 0xFF;
                if (ky == 'c' || ky == 'C') // was the magic numbers 99 || 67 (same ASCII codes)
                {
                    // Restore the clean frame and reset the selection state.
                    backup.copyTo(src);
                    points_array[0] = Point(0, 0);
                    points_array[1] = Point(0, 0);
                    points_array[2] = Point(0, 0);
                    points_array[3] = Point(0, 0);
                    imshow("My_Win", src);
                    count = 0;
                    cout << endl << endl << endl << "#--------------------- Clear Points! ------------------# " << endl << endl << endl ;
                }
                else // user accepted the points and doesn't want to clear them
                {
                    // Axis-aligned bounding rectangle of the 4 corners.
                    min_x = std::min(points_array[0].x, points_array[3].x); //find rectangle for minimum ROI surround it!
                    max_x = std::max(points_array[1].x, points_array[2].x);
                    min_y = std::min(points_array[0].y, points_array[1].y);
                    max_y = std::max(points_array[3].y, points_array[2].y);
                    height_roi = max_y - min_y;
                    width_roi = max_x - min_x;
                    ROI_RECT = Rect(min_x, min_y, width_roi, height_roi);
                    got_roi = true;
                    // Store the polygon as a single contour for drawContours().
                    co_ordinates.push_back(vector<Point>());
                    co_ordinates[0].push_back(points_array[0]);
                    co_ordinates[0].push_back(points_array[1]);
                    co_ordinates[0].push_back(points_array[2]);
                    co_ordinates[0].push_back(points_array[3]);
                }
            }
        }
        else { // got_roi is true => an ROI was already selected
            cout << endl << "You Select ROI Before " << endl << "if you want to clear point press 'c' or double press other keys to continue" << endl << endl;
        }
        break;
    }
    }
}
/*~~~~~~~~~~~~~~~~~~*/
int main()
{
// replace all "My_Win" with your window name
/*~~~~~~~~~~~~~~~~~~*/
namedWindow("My_Win", 1);
/*~~~~~~~~~~~~~~~~~~*/
VideoCapture input_video("Video_path");
// Set source imafe as [src]
/*~~~~~~~~~~~~~~~~~~*/
input_video >> src;
imshow("My_Win", src);
src.copyTo(backup);
setMouseCallback("My_Win", mouse_click, 0);
waitKey(0);
Mat mask(src.rows, src.cols, CV_8UC1, cv::Scalar(0));
drawContours(mask, co_ordinates, 0, Scalar(255), CV_FILLED, 8);
/*~~~~~~~~~~~~~~~~~~*/
while (1)
{
input_video >> src;
/*~~~~~~~~~~~~~~~~~~*/
//Need to copy Select ROI as MASK
src.copyTo(ROI_MASK, mask);
//Creat a rectangle around the Mask to reduce size of mask
ROI_Img = ROI_MASK(ROI_RECT);
/*~~~~~~~~~~~~~~~~~~*/
//Show Image
imshow("My_Win", ROI_Img);
// Do remaining processing here on capture roi for every frame
if(char (waitKey(1)& 0xFF) == 27) break;
}
}
I think the following examples shows a bug in warpAffine (OpenCV 3.1 with precompiled Win64 dlls):
// Test image: 1 row, 20 columns, pixel value == column index,
// i.e. an image with an exactly constant x-gradient of 1.
Mat x(1,20, CV_32FC1);
for (int iCol(0); iCol<x.cols; iCol++) { x.col(iCol).setTo(iCol); }
Mat crop;
// Crop center, scale factor, and output width.
Point2d c(10., 0.);
double scale(1.3);
int cropSz(11);
// Affine map used with WARP_INVERSE_MAP: maps output (crop) coordinates back
// to source coordinates, src_x = scale*dst_x + c.x - (cropSz/2)*scale.
double vals[6] = { scale, 0.0, c.x-(cropSz/2)*scale, 0.0, scale, c.y };
Mat map(2, 3, CV_64FC1, vals);
warpAffine(x, crop, map, Size(cropSz, 1), WARP_INVERSE_MAP | INTER_LINEAR);
// Mean x-gradient over the whole crop (expected: scale == 1.3; unused below).
float dx = (crop.at<float>(0, crop.cols-1) - crop.at<float>(0, 0))/(crop.cols-1);
// Analytically expected output: the value sampled at c.x + (iCol - cropSz/2)*scale.
Mat constGrad = crop.clone().setTo(0);
for (int iCol(0); iCol<constGrad.cols; iCol++) {
constGrad.col(iCol) = c.x + (iCol-cropSz/2)*scale;
}
Mat diff = crop - constGrad;
double err = norm(diff, NORM_INF);
if (err>1e-4) {
cout << "Problem:" << endl;
cout << "computed output: " << crop << endl;
cout << "expected output: " << constGrad << endl;
cout << "difference: " << diff << endl;
// Convolve with [-1, 1]: the per-pixel x-derivative. For an affine warp of a
// constant-gradient image this should be constant (1.3), but it oscillates.
Mat dxImg;
Mat dxFilt(1, 2, CV_32FC1);
dxFilt.at<float>(0) = -1.0f;
dxFilt.at<float>(1) = 1.0f;
filter2D(crop, dxImg, crop.depth(), dxFilt);
cout << "x-derivative in computed output: " << dxImg(Rect(1,0,10,1)) << endl;
cout << "Note: We expect a constant difference of 1.3" << endl;
// NOTE(review): the small oscillating error pattern looks consistent with
// warpAffine quantizing interpolation coefficients to fixed point
// (INTER_BITS) rather than a logic bug -- confirm against OpenCV sources.
}
Here is the program output:
Problem:
computed output: [3.5, 4.8125, 6.09375, 7.40625, 8.6875, 10, 11.3125, 12.59375, 13.90625, 15.1875, 16.5]
expected output: [3.5, 4.8000002, 6.0999999, 7.4000001, 8.6999998, 10, 11.3, 12.6, 13.9, 15.2, 16.5]
difference: [0, 0.012499809, -0.0062499046, 0.0062499046, -0.012499809, 0, 0.012499809, -0.0062503815, 0.0062503815, -0.012499809, 0]
x-derivative in computed output: [1.3125, 1.28125, 1.3125, 1.28125, 1.3125, 1.3125, 1.28125, 1.3125, 1.28125, 1.3125]
Note: We expect a constant difference of 1.3
I create an image with entries 0, 1, 2, ...n-1, and cut a region around (10,0) with scale 1.3. I also create an expected image constGrad. However, they are not the same. Even more, since the input image has a constant derivative in x-direction and the mapping is affine, I expect also a constant gradient in the resulting image.
The problem is not a boundary stuff problem, the same happens at the inner of an image. It's also not related to WARP_INVERSE_MAP.
Is this a known issue? Any comments on this?
I am trying to get a pixel value before doing some calculation. When I run the code below at location (10,10), I get the answers shown. I am not sure why the color value is different from the others. When I do the same for location (0,0), I get the same answer for all of them. The image is black and white with 3 channels. Or is it that for 3 channels I should always use Vec3b?
// imread with the default flag loads 3-channel BGR data, even for
// grayscale files -- hence channels() == 3 below.
char imageName[] = "0001.png";
cv::Mat img = cv::imread(imageName);
cout << "Image rows is: " << img.rows << " cols is : " << img.cols << endl;
cout << "Image channels " << img.channels() << endl;
cout << "Image depth " << img.depth() << endl; //CV_8U == 0
// at<uchar>(10, 10) indexes the row as if it were single-channel: it returns
// byte 10 of row 10, which in 3-channel data belongs to pixel (10, 3), not
// pixel (10, 10). That is why this value differs from the Vec3b read below.
cout << (int)img.at<uchar>(10, 10) << endl;
// ptr(10)[10] is the same raw byte of row 10 as above.
cout << (int)img.ptr(10)[10] << endl;
// Scalar is built from the single (wrong) byte; remaining entries are 0.
Scalar intensity = img.at<uchar>(10, 10);
cout << intensity << endl;
// Correct access for a 3-channel 8-bit image: one Vec3b (B, G, R) per pixel.
Vec3b color = img.at<Vec3b>(10, 10);
cout << color << endl;
output:
Image rows is: 96 cols is : 72
Image channels 3
Image depth 0
103
103
[103, 0, 0, 0]
[164, 164, 164]
I have some problems while computing the rectification of stereo pairs with OpenCV: stereoCalibrate returns a high RMS error and I obtain badly rectified pairs.
I tried both my rectification program and the stereo_calib.cpp provided with OpenCV. They both return similar RMS errors. Plus, I ran my program with the sample stereo pairs in opencv/sample/cpp and I got correctly rectified images. So I believe the problem is in the way I take the stereo pictures — is that possible?
I use the stereo camera of an Htc Evo 3D (a 3D smartphone) taking pictures of a chessboard pattern. I tried to change the number and the set of pictures used as input but the littlest stereoCalibration rms i got was around 1.5 and the rectified images are totally wrong.
Is there any "suggested" way to take a set of pictures for calibration?
Thanks, Andrea
Check the detected chessboard pattern pairs (e.g. with drawChessboardCorners). In some cases the order of the points differs between the two images (see figure; in the top image the pattern is recognised from right to left, in the bottom picture from left to right). If that is the case, the points no longer correspond to each other. This leads to a high average reprojection error in stereoCalibrate. When checking the images for each camera individually with calibrateCamera, the rms can at the same time be very low for the intrinsics, as the differing order in the stereo images doesn't matter there.
I solved the problem by checking the found chessboard patterns and removing the image pairs where the pattern is detected in different order. In my case this improved the rms from 15 (only one or few wrong pairs) to less than 1. In cases where the pairs are corrupted more often or when using higher resolution images (with more pixels) the error will be much higher, I guess.
Another solution could be the usage of ArUco and ChArUco patterns where the order shouldn't be ambiguous, but I haven't tested that approach.
Check out the frequent mistakes guide here to see if you have made any of these common errors.
http://www.cvlibs.net/software/calibration/mistakes.php
Also when calibrating a stereo camera, you may want to first calibrate each camera on it's own, then calibrate them as a stereo pair given their pre-estimated camera matrices.
another alternative toolbox, as well as cvlibs (link above), is here:
http://www.vision.caltech.edu/bouguetj/calib_doc/
cheers
I take 9 calibration pictures of a 12x18-corner chessboard, forbid fisheye distortion by fixing k3=0, and refine the found corners to subpixel positions. The minimum achieved error is something like 0.2 at a resolution of 640x480. I suggest looking at cornerSubPix(), TermCriteria, and the flags for stereoCalibrate() in the OpenCV docs. The code looks like this:
// NOTE(review): video_l/video_r, frame_*, corners_*, cornervector_*,
// point3d(vector), board_w/board_h, board_dt, framenumber and the output
// matrices (intrinsics_*, distortion_*, R, T, E, F, ...) are declared
// outside this snippet.
namedWindow( "Webcaml", CV_WINDOW_AUTOSIZE );
namedWindow( "Webcamr", CV_WINDOW_AUTOSIZE );
int successes=0;  // stereo pairs accepted so far
int n_boards=9;   // pairs required before calibration starts
// Grab frames until enough chessboard pairs have been collected.
while(successes<n_boards)
{
video_l.read( frame_l );
video_r.read( frame_r );
// Only test every board_dt-th frame (and skip the very first).
if( framenumber++ % board_dt == 0 && framenumber != 0)
{
bool patternfoundl = findChessboardCorners( frame_l, Size( board_w, board_h ), corners_l, CV_CALIB_CB_FILTER_QUADS + CV_CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_FAST_CHECK );
bool patternfoundr = findChessboardCorners( frame_r, Size( board_w, board_h ), corners_r, CV_CALIB_CB_FILTER_QUADS + CV_CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_FAST_CHECK );
if(patternfoundl && patternfoundr)
{
// Refine corner locations to subpixel accuracy on grayscale copies.
cvtColor(frame_l,frame_l_grey,CV_RGB2GRAY);
cvtColor(frame_r,frame_r_grey,CV_RGB2GRAY);
cornerSubPix(frame_l_grey,corners_l,Size(6,6),Size(-1,-1),TermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,50000000000,0.0000000000001));
cornerSubPix(frame_r_grey,corners_r,Size(6,6),Size(-1,-1),TermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,50000000000,0.0000000000001));
}
drawChessboardCorners( frame_l, Size( board_w, board_h ), corners_l, patternfoundl );
drawChessboardCorners( frame_r, Size( board_w, board_h ), corners_r, patternfoundr );
imshow( "Webcaml", frame_l );
imshow( "Webcamr", frame_r );
// Accept the pair only if the full grid was found in BOTH images.
if( corners_l.size() == (board_w*board_h) && corners_r.size() == (board_w*board_h) )
{
cornervector_l.push_back( corners_l );
cornervector_r.push_back( corners_r );
point3dvector.push_back( point3d );
successes++;
int c = cvWaitKey( 1000 );  // pause so the board can be moved to a new pose
}
}
else
{
imshow( "Webcaml", frame_l);
imshow( "Webcamr", frame_r);
}
char c = cvWaitKey( 1 );
if( c == 27 )break;  // ESC aborts collection
}
destroyAllWindows();
// Calibrate each camera individually; CV_CALIB_FIX_K3 keeps k3 at 0.
rms_l = calibrateCamera( point3dvector, cornervector_l, Size( video_r.get( CV_CAP_PROP_FRAME_WIDTH ), video_r.get( CV_CAP_PROP_FRAME_HEIGHT )),
intrinsics_l, distortion_l, rvecs_l, tvecs_l, CV_CALIB_FIX_K3 , cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,150000000000000000,DBL_EPSILON ) );
rms_r = calibrateCamera( point3dvector, cornervector_r, Size( video_r.get( CV_CAP_PROP_FRAME_WIDTH ), video_r.get( CV_CAP_PROP_FRAME_HEIGHT )),
intrinsics_r, distortion_r, rvecs_r, tvecs_r, CV_CALIB_FIX_K3 , cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,150000000000000000,DBL_EPSILON ) );
cout << "intrinsic_l = " << endl << format(intrinsics_l,"C" ) << endl << endl;
cout << "intrinsic_r = " << endl << format(intrinsics_r,"C" ) << endl << endl;
cout << "distortion_l = " << endl << format(distortion_l,"C" ) << endl << endl;
cout << "distortion_r = " << endl << format(distortion_r,"C" ) << endl << endl;
cout << "tvecs_l = " << endl << format(tvecs_l[0],"C" ) << endl << endl;
cout << "rvecs_l = " << endl << format(rvecs_l[0],"C" ) << endl << endl;
// Stereo calibration with the per-camera intrinsics held fixed.
double rms_stereo = stereoCalibrate( point3dvector, cornervector_l, cornervector_r, intrinsics_l, distortion_l, intrinsics_r, distortion_r,
Size( video_r.get( CV_CAP_PROP_FRAME_WIDTH ),video_r.get( CV_CAP_PROP_FRAME_HEIGHT )), R, T, E, F,
TermCriteria( TermCriteria::COUNT+TermCriteria::EPS, 150000000000000000,DBL_EPSILON ), CV_CALIB_FIX_K3+CV_CALIB_FIX_INTRINSIC);
// NOTE(review): this replaces the 3x3 rotation matrix R with its Rodrigues
// vector in place -- apparently just for printing, but R is then passed to
// stereoRectify below, which documents a rotation MATRIX input. Confirm
// whether stereoRectify accepts a rotation vector here.
Rodrigues(R, R);
cout << "R = " << endl << format(R,"C" ) << endl << endl;
cout << "T = " << endl << format(T,"C" ) << endl << endl;
cout << "E = " << endl << format(E,"C" ) << endl << endl;
cout << "F = " << endl << format(F,"C" ) << endl << endl;
cout << "RMS Fehler l,r,stereo: " << rms_l << rms_r << rms_stereo << endl;
// Compute the rectification transforms and projection matrices.
stereoRectify( intrinsics_l, distortion_l, intrinsics_r, distortion_r, Size( video_r.get( CV_CAP_PROP_FRAME_WIDTH ),video_r.get( CV_CAP_PROP_FRAME_HEIGHT )), R, T,
rectify_l, rectify_r, projection_l, projection_r, Q);
I did some more research and found this post on OpenCV Answers.
The phone seems to adjust the intrinsic matrix continuously by changing focus, so I am unable to get a good calibration.