Maybe the problem is that I'm not good at English.
I am new to OpenCV. I want to know which area the stitcher merges (the overlap region between the two input images in the stitched result).
If you know the order in which your images were taken, then you may follow this code for stitching them together. If the order is not known, the solution becomes more complex. Also, this code is designed for images of the same size; if your camera is shifted, it may produce erroneous results, so implement some sanity checks. You may refer to the article http://ramsrigoutham.com/2012/11/22/panorama-image-stitching-in-opencv/ for a fuller explanation of the stitching function that is called twice in main.
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
void stitching( cv::Mat&,cv::Mat& ,cv::Mat& );
int main()
{
Mat image1= imread("image1.jpg");
Mat image2= imread("image2.jpg");
Mat image3= imread("image3.jpg");
Mat gray_image1;
Mat gray_image2;
Mat gray_image3;
Mat result1,result2;
// Convert to grayscale (imread loads images as BGR, so use CV_BGR2GRAY)
cvtColor( image1, gray_image1, CV_BGR2GRAY );
cvtColor( image2, gray_image2, CV_BGR2GRAY );
cvtColor( image3, gray_image3, CV_BGR2GRAY );
stitching(gray_image1,gray_image2,result1);
stitching(result1,gray_image3,result2);
cv::imshow("stitched image", result2);
cv::waitKey(0);
return 0;
}
void stitching( cv::Mat& im1,cv::Mat& im2,cv::Mat& stitch_im)
{
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector< KeyPoint > keypoints_object, keypoints_scene;
detector.detect(im1, keypoints_object );
detector.detect(im2, keypoints_scene );
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( im1, keypoints_object, descriptors_object );
extractor.compute( im2, keypoints_scene, descriptors_scene );
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
double max_dist = 0; double min_dist = 100;
for( int i = 0; i < descriptors_object.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
std::vector< Point2f > obj;
std::vector< Point2f > scene;
for( int i = 0; i < good_matches.size(); i++ )
{
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, CV_RANSAC );
// Warp im1 into im2's frame, then copy im2 into the left half to complete the stitch
warpPerspective( im1, stitch_im, H, cv::Size( im1.cols + im2.cols, im1.rows ) );
cv::Mat half( stitch_im, cv::Rect( 0, 0, im2.cols, im2.rows ) );
im2.copyTo( half );
}
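Coming back to the original question, the merged (overlapping) area can be estimated from the homography itself. A minimal sketch, assuming H maps im1 coordinates into the im2/result frame exactly as in the function above:
std::vector<cv::Point2f> im1_corners(4), warped_corners(4);
im1_corners[0] = cv::Point2f( 0, 0 );
im1_corners[1] = cv::Point2f( (float)im1.cols, 0 );
im1_corners[2] = cv::Point2f( (float)im1.cols, (float)im1.rows );
im1_corners[3] = cv::Point2f( 0, (float)im1.rows );
// Project im1's corners through H; their footprint clipped against im2's
// rectangle approximates the region where the two images are merged.
cv::perspectiveTransform( im1_corners, warped_corners, H );
cv::Rect overlap = cv::boundingRect( warped_corners ) & cv::Rect( 0, 0, im2.cols, im2.rows );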
I'm using OpenCV 2.4, where I'm using FlannBasedMatcher to match keypoints. However, instead of using SurfFeatureDetector::detect() to extract keypoints, I'm passing the image corners as keypoints.
Sadly, I'm getting zero matches for all values of minHessian.
Below is my code:
void matches(int, void*)
{
SurfFeatureDetector detector( minHessian );
vector<KeyPoint> keypoints_frame,keypoints_trng;
Mat descriptors_frame,descriptors_trng;
//detect keypoints
for(int i=0;i<corners.size();i++)
{
keypoints_frame.push_back(KeyPoint(corners[i].x,corners[i].y,0));
cout<<"\n"<<keypoints_frame[i].pt.x<<"\t"<<keypoints_frame[i].pt.y;
}
keypoints_trng.push_back(KeyPoint(337,288,0));
keypoints_trng.push_back(KeyPoint(337,241,0));
keypoints_trng.push_back(KeyPoint(370,288,0));
keypoints_trng.push_back(KeyPoint(370,241,0));
keypoints_trng.push_back(KeyPoint(291,239,0));
keypoints_trng.push_back(KeyPoint(287,203,0));
keypoints_trng.push_back(KeyPoint(288,329,0));
keypoints_trng.push_back(KeyPoint(426,237,0));
keypoints_trng.push_back(KeyPoint(428,326,0));
keypoints_trng.push_back(KeyPoint(426,201,0));
keypoints_trng.push_back(KeyPoint(427,293,0));
keypoints_trng.push_back(KeyPoint(287,297,0));
for(int i=0;i<corners.size();i++)
{
cout<<"\n"<<keypoints_trng[i].pt.x<<"\t"<<keypoints_trng[i].pt.y;
}
//describe keypoints
SurfDescriptorExtractor extractor;
extractor.compute( src_gray_resized, keypoints_frame, descriptors_frame);
extractor.compute( trng_gray_resized, keypoints_trng, descriptors_trng);
//matching the keypoints
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_frame, descriptors_trng, matches );
cout<<"\n\n no of matches: "<<matches.size();
std::vector< DMatch > good_matches;
if (matches.size()>20)
{minHessian=minHessian+100;}
for( int i = 0; i < descriptors_frame.rows; i++ )
{ if( matches[i].distance <= max(2*100.00, 0.02) )
{ good_matches.push_back( matches[i]); }
}
//cout<<"\nno of good matches"<<good_matches.size();
//-- Draw only "good" matches
Mat img_matches;
drawMatches( src_gray, keypoints_frame, trng_gray, keypoints_trng,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
namedWindow("Good Matches",CV_WINDOW_NORMAL);
imshow( "Good Matches", img_matches );
}
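A likely cause, offered as an assumption rather than a confirmed diagnosis: KeyPoint(x, y, 0) gives each hand-made keypoint a size of 0, and the SURF extractor needs a positive size to define the patch it samples, so size-0 keypoints tend to be dropped and yield no descriptors (and therefore no matches). A sketch of the idea, where the 20-pixel size is an arbitrary choice:
for( int i = 0; i < corners.size(); i++ )
{
    // size must be > 0 so SURF has a support region to describe
    keypoints_frame.push_back( KeyPoint( corners[i].x, corners[i].y, 20.0f ) );
}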
I am using SIFT to detect keypoints in two images of 3264x2466. My code follows below. However, I get an error saying "OpenCV Error: insufficient memory". Is there anything wrong?
Here is the image: http://img42.imageshack.us/img42/6963/v839.jpg and I am running the program on Win7 x86, OpenCV 2.4.7.
#include <opencv/cv.h>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <string>
#include <vector>
#include <cmath>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
using namespace std;
using namespace cv;
int main(int argc, char **argv)
{
cv::initModule_nonfree();
Mat image1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat image2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE );
//1. compute the keypoints
int minHessian = 400;
Ptr<FeatureDetector> detector = FeatureDetector::create("SIFT");
vector<KeyPoint> keypoints1,keypoints2;
detector->detect(image1, keypoints1);
detector->detect(image2, keypoints2);
//2. compute the descriptor
Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("SIFT");
Mat descriptors1, descriptors2;
extractor->compute( image1, keypoints1, descriptors1);
extractor->compute( image2, keypoints2, descriptors2);
//3. match
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
std::vector< DMatch > matches;
matcher->match( descriptors1, descriptors2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors1.rows; i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors1.rows; i++ )
{
if( matches[i].distance <= 2*min_dist ) {
good_matches.push_back( matches[i]);
}
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( image1, keypoints1, image2, keypoints2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imwrite("C:\\Users\\flex\\Desktop\\output2.jpg", img_matches);
//imshow( "Good Matches", img_matches );
//waitKey(0);
return 0;
}
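A common mitigation, offered as a sketch under the assumption that SIFT on 3264-pixel-wide images is what exhausts memory in a 32-bit process, is to downscale before detection; the 0.5 factor is arbitrary, and the resulting keypoint coordinates then refer to the downscaled images:
Mat small1, small2;
resize(image1, small1, Size(), 0.5, 0.5, INTER_AREA); // halve each dimension
resize(image2, small2, Size(), 0.5, 0.5, INTER_AREA);
detector->detect(small1, keypoints1); // detect on the smaller copies
detector->detect(small2, keypoints2);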
I'm new to OpenCV.
I'm trying to draw matches of features between two images using FLANN/SURF in OpenCV on iOS.
I'm following this example:
http://docs.opencv.org/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.html#feature-matching-with-flann
Here's my code with some small modifications (I wrapped the code from the example in a function that returns a UIImage as a result, and I read the starting images from the bundle):
UIImage* SURFRecognition::test()
{
UIImage *img1 = [UIImage imageNamed:@"wallet"];
UIImage *img2 = [UIImage imageNamed:@"wallet2"];
Mat img_1;
Mat img_2;
UIImageToMat(img1, img_1);
UIImageToMat(img2, img_2);
if( !img_1.data || !img_2.data )
{
std::cout<< " --(!) Error reading images " << std::endl;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance <= 2*min_dist )
{ good_matches.push_back( matches[i]); }
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
//imshow( "Good Matches", img_matches );
UIImage *imgTemp = MatToUIImage(img_matches);
for( int i = 0; i < good_matches.size(); i++ )
{
printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx );
}
return imgTemp;
}
The result of the function above is:
Only the lines that connect the matches are shown, but the two original images are not shown. If I understood correctly, the drawMatches function returns a cv::Mat which contains both images and the connections between similar features. Is this correct, or am I missing something? Can someone help me?
I've found the solution by myself.
It seems, after searching a lot, that drawMatches needs img1 and img2 to have 1 to 3 channels. I was opening PNGs with an alpha channel, so these were 4-channel images.
Here's my revised code. I added:
UIImageToMat(img1, img_1);
UIImageToMat(img2, img_2);
cvtColor(img_1, img_1, CV_BGRA2BGR);
cvtColor(img_2, img_2, CV_BGRA2BGR);
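For completeness, a defensive guard one could place before calling drawMatches (a sketch, not part of the original fix):
// drawMatches rejects 4-channel input, so assert 1-3 channels up front
CV_Assert( img_1.channels() >= 1 && img_1.channels() <= 3 );
CV_Assert( img_2.channels() >= 1 && img_2.channels() <= 3 );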
Is anyone familiar with this error? I tested a SURF descriptor in real time; I want to use it to recognize different species of fish by these features. Sometimes the program runs fine, but sometimes it gets an error. The build compiles successfully; the error appears only at run time.
#include <opencv2/imgproc/imgproc_c.h>
#include <stdio.h>
#include <math.h>
#include <opencv/highgui.h>
#include <opencv/cv.h>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>
using namespace cv;
#define nimg 3
int main()
{
Mat object = imread( "D:/galunggong.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat object1 = imread( "D:/sapsap.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat object2 = imread( "D:/bisugo.jpg",CV_LOAD_IMAGE_GRAYSCALE );
// Mat object3 = imread( "4.jpg", CV_LOAD_IMAGE_GRAYSCALE );
//Mat object4 = imread( "5.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
if( !object1.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
if( !object2.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
//Detect the keypoints using SURF Detector
int minHessian = 1000;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> kp_object;
std::vector<KeyPoint> kp_object1;
std::vector<KeyPoint> kp_object2;
std::vector<KeyPoint> kp_object3;
std::vector<KeyPoint> kp_object4;
detector.detect( object, kp_object );
detector.detect( object1, kp_object1 );
detector.detect( object2, kp_object2 );
//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat des_object;
Mat des_object1;
Mat des_object2;
extractor.compute( object, kp_object, des_object );
extractor.compute( object1, kp_object1, des_object1 );
extractor.compute( object2, kp_object2, des_object2 );
FlannBasedMatcher matcher;
CvCapture* cap = cvCreateCameraCapture(0);
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, 320);
cvSetCaptureProperty( cap, CV_CAP_PROP_FRAME_HEIGHT, 240 );
namedWindow("Good Matches");
std::vector<Point2f> obj_corners(4);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
char key = 'a';
int framecount = 0;
Mat frame;
Mat des_image, img_matches;
char vect[nimg];
char contor;
char ok, ko;
while (1)
{
frame = cvQueryFrame(cap);
if (framecount < 5)
{
framecount++;
continue;
}
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<vector<DMatch > > matches1;
std::vector<vector<DMatch > > matches2;
std::vector<DMatch > good_matches;
std::vector<DMatch > good_matches1;
std::vector<DMatch > good_matches2;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;
cvtColor(frame, image, CV_BGR2GRAY); // camera frames are BGR
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);
// printf("d \n");
//////////////////////////////////////////////////////
contor=0;
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int)
matches[i].size()<=2 && (int) matches[i].size()>0))
{
good_matches.push_back(matches[i][0]);
}
}
vect[contor]=good_matches.size();
/////////////////////////////////////////////////////
contor=1;
matcher.knnMatch(des_object1, des_image, matches1, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches1.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches1[i][0].distance < 0.6*(matches1[i][1].distance)) && ((int)
matches1[i].size()<=2 && (int) matches1[i].size()>0))
{
good_matches1.push_back(matches1[i][0]);
}
}
vect[contor]=good_matches1.size();
/////////////////////////////////////////////////////
contor=2;
matcher.knnMatch(des_object2, des_image, matches2, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches2.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches2[i][0].distance < 0.6*(matches2[i][1].distance)) && ((int)
matches2[i].size()<=2 && (int) matches2[i].size()>0))
{
good_matches2.push_back(matches2[i][0]);
}
}
vect[contor]=good_matches2.size();
////////////////////////////////////////////////////
/*
contor =3;
matcher.knnMatch(des_object, des_image, matches3, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches3.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches3[i][0].distance < 0.6*(matches3[i][1].distance)) && ((int)
matches3[i].size()<=2 && (int) matches3[i].size()>0))
{
good_matches3.push_back(matches[i][0]);
}
}
vect[contor]=good_matches3.size();
//////////////////////////////////////////////////
contor=4;
matcher.knnMatch(des_object, des_image, matches4, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches4.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches4[i][0].distance < 0.6*(matches4[i][1].distance)) && ((int)
matches4[i].size()<=2 && (int) matches4[i].size()>0))
{
good_matches4.push_back(matches[i][0]);
}
}
vect[contor]=good_matches4.size();
*/
printf("%d %d %d \n ",vect[0],vect[1],vect[2]);
ok=0;
for (contor=1;contor<nimg;contor++)
if (vect[contor]>vect[contor-1])
ok=contor;
for (ko=10;ko>3;ko--) // count down from 10; setting ko=2 inside the body exits early
{
if (ok==0 && vect[ok]>ko)
{printf("Forward \n");
ko=2;}
else if (ok==1 && vect[ok]>ko)
{printf("Turn Left \n");
ko=2;}
else if (ok==2 && vect[ok]>ko)
{printf("Turn Right \n");
ko=2;}
}
//Show detected matches
imshow( "Good Matches",frame /*img_matches*/ );
key = waitKey(1);
}
}
Sometimes SURF cannot find any feature points, so it may return an empty descriptor matrix.
You should check the descriptors for data:
if(des_object.empty()||des_object1.empty()||des_object2.empty())
{
std::cout<<"Empty descriptor(s)."<<std::endl;
return -1;
}
Is anyone familiar with this error? I tested a SURF descriptor in real time and it worked well, but after a few seconds it crashes and I get this error.
It is related to frames where no points are detected. I ran my code again and kept the detected object in view for more than 2 minutes with no error, but when I removed the object so that no points were detected, it crashed again after 40 seconds.
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/legacy/legacy.hpp"
using namespace cv;
using namespace std;
char key = 'a';
int framecount = 0;
SurfFeatureDetector detector(1000);
SurfDescriptorExtractor extractor;
FlannBasedMatcher matcher;
Mat frame, des_object, image;
Mat des_image, img_matches, H;
std::vector<KeyPoint> kp_object;
std::vector<Point2f> obj_corners(4);
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
int main()
{
//reference image
Mat object = imread("D:/milo.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
//compute detectors and descriptors of reference image
detector.detect( object, kp_object );
extractor.compute( object, kp_object, des_object );
//create video capture object
CvCapture* capture = cvCaptureFromCAM(0);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 270);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 190);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
//while loop for real-time detection
while (key != 27)
{
Mat frame;
frame = cvQueryFrame(capture);
if (framecount < 5)
{
framecount++;
continue;
}
Mat des_image, img_matches;
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;
cvtColor(frame, image, CV_BGR2GRAY); // camera frames are BGR
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);
int goodMatchesCounter =0;
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if(((int)matches[i].size()<=2 && (int)matches[i].size()>0) && (matches[i][0].distance<0.6*(matches[i][1].distance)))
{
// good_matches.push_back(matches[i][0]);
obj.push_back( kp_object[ matches[i][0].queryIdx ].pt );
scene.push_back( kp_image[ matches[i][0].trainIdx ].pt );
goodMatchesCounter++;
}
}
//Draw only "good" matches
// drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
if (goodMatchesCounter >= 4)
{
H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
//Draw lines between the corners (the mapped object in the scene image )
line( image, scene_corners[0], scene_corners[1], Scalar( 0, 0, 0), 4 );
line( image, scene_corners[1], scene_corners[2], Scalar( 0, 0, 0), 4 );
line( image, scene_corners[2], scene_corners[3], Scalar( 0, 0, 0), 4 );
line( image, scene_corners[3], scene_corners[0], Scalar( 0, 0, 0), 4 );
}
//Show detected matches
imshow( "Good Matches", image );
key = waitKey(1);
}
return 0;
}
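Building on the earlier answer about empty descriptors, a per-frame guard inside the while loop is a minimal sketch of a fix, under the assumption that the crash happens on frames where SURF finds no keypoints, so des_image is empty (or has fewer than 2 rows) when knnMatch asks for 2 neighbors:
extractor.compute( image, kp_image, des_image );
if( des_image.empty() || des_image.rows < 2 )
{
    // nothing usable in this frame; display it and skip matching
    imshow( "Good Matches", image );
    key = waitKey(1);
    continue;
}
matcher.knnMatch(des_object, des_image, matches, 2);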