Strange result of SURF_GPU and BruteForceMatcher_GPU with knnMatch - opencv

OpenCV 2.4.5, CUDA 5.0
I tried to port my SURF matcher from the CPU to the GPU and got a strange result. I use knnMatch and findHomography + perspectiveTransform, together with my own function, isSmallAngle, which checks the corners of the resulting bounding box to make the detection more precise.
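The isSmallAngle implementation is not shown below; a minimal sketch of what such a corner check might look like (the 20-degree threshold and the exact geometry test are assumptions, not the author's values):
//Sketch (assumption, the original implementation is not shown): flag the
//detection as false when any corner of the projected quadrilateral is nearly
//degenerate, which usually indicates a bogus homography.
bool isSmallAngle(const std::vector<cv::Point2f>& c)
{
    for (int i = 0; i < 4; ++i)
    {
        cv::Point2f a = c[(i + 3) % 4] - c[i]; //edge to previous corner
        cv::Point2f b = c[(i + 1) % 4] - c[i]; //edge to next corner
        double la = std::sqrt((double)(a.x * a.x + a.y * a.y));
        double lb = std::sqrt((double)(b.x * b.x + b.y * b.y));
        double cosang = (a.x * b.x + a.y * b.y) / (la * lb + 1e-9);
        double deg = std::acos(std::max(-1.0, std::min(1.0, cosang))) * 180.0 / CV_PI;
        if (deg < 20.0) //assumed threshold
            return true; //corner too sharp: likely a false detection
    }
    return false;
}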
GPU part:
const int baseImagesSize = baseImages.size();
SURF_GPU surf(1500);
surf.extended = false;
GpuMat keypoints_test_GPU, descriptors_test_GPU;
surf(frame, GpuMat(), keypoints_test_GPU, descriptors_test_GPU);
vector<float> descriptors_test_CPU;
surf.downloadDescriptors(descriptors_test_GPU, descriptors_test_CPU);
Mat descriptors_test_CPU_Mat(descriptors_test_CPU);
vector<Point2f> objs_corners(4);
BruteForceMatcher_GPU< L2<float> > matcher;
vector<KeyPoint> keypoints_test_CPU;
surf.downloadKeypoints(keypoints_test_GPU, keypoints_test_CPU);
for (int i = 0; i < baseImagesSize; ++i)
{
//Get the corners from the object
objs_corners[0] = cvPoint(0,0);
objs_corners[1] = cvPoint( baseImages[i].cols, 0 );
objs_corners[2] = cvPoint( baseImages[i].cols, baseImages[i].rows );
objs_corners[3] = cvPoint( 0, baseImages[i].rows );
//cout<<endl<<objs_corners[0]<<" "<<objs_corners[1]<<" "<<objs_corners[2]<<" "<<objs_corners[3]<<endl;
GpuMat keypoints_tmp_GPU, descriptors_tmp_GPU;
surf(baseImages[i], GpuMat(), keypoints_tmp_GPU, descriptors_tmp_GPU);
GpuMat trainIdx, distance;
vector< vector<DMatch> > matches;
matcher.knnMatch(descriptors_test_GPU, descriptors_tmp_GPU, matches, 2);
vector<KeyPoint> keypoints_tmp_CPU;
surf.downloadKeypoints(keypoints_tmp_GPU, keypoints_tmp_CPU);
std::vector<DMatch > good_matches;
for(int k = 0; k < min(descriptors_test_CPU_Mat.rows-1,(int) matches.size()); k++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches[k][0].distance < 0.6*(matches[k][1].distance)) && ((int) matches[k].size()<=2 && (int) matches[k].size()>0))
{
good_matches.push_back(matches[k][0]);
}
}
vector<Point2f> obj;
vector<Point2f> scene;
vector<Point2f> scene_corners(4);
Mat H;
Mat img (baseImages[i]), img_matches, frame_cpu (frame);
std::ostringstream o_stream;
o_stream<<"Logo_save/"<<baseImagesNames[i];
try
{
drawMatches( img, keypoints_tmp_CPU, frame_cpu, keypoints_test_CPU, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
imwrite(o_stream.str(),img_matches);
}
catch(...)
{
cout<<"Error in drawMatches name: "<< baseImagesNames[i]<<endl;
}
if (good_matches.size() >= 4)
{
for( int k = 0; k < good_matches.size(); k++ )
{
//Get the keypoints from the good matches
obj.push_back( (keypoints_tmp_CPU)[ good_matches[k].queryIdx ].pt );
scene.push_back( keypoints_test_CPU[ good_matches[k].trainIdx ].pt );
}
cout<<good_matches.size()<<" "<<baseImagesNames[i]<<endl;
H = findHomography( obj, scene, CV_RANSAC);
perspectiveTransform( objs_corners, scene_corners, H);
bool falseDetect = isSmallAngle(scene_corners);
//cout<< falseDetect<< endl;
if(!falseDetect)
{
cout<<"DETECT "<<baseImagesNames[i]<<endl;
}
}
matcher.clear();
}
Bad result on GPU (MIN_HESSIAN==1500):
Bad result on GPU (MIN_HESSIAN==400):
CPU part:
SurfFeatureDetector detector( MIN_HESSIAN );//MIN_HESSIAN==400
const int baseImagesSize = baseImages.size();
vector< vector<KeyPoint> > kp_objects(baseImagesSize);
//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
vector<Mat> des_objects(baseImagesSize);
FlannBasedMatcher matcher;
//namedWindow("SURF feature detector");
vector< vector<Point2f> > objs_corners(baseImagesSize,vector<Point2f>(4));
for (int i = 0; i < baseImagesSize; ++i)
{
detector.detect(baseImages[i], kp_objects[i]);
extractor.compute(baseImages[i], kp_objects[i], des_objects[i]);
//Get the corners from the object
(objs_corners[i])[0] = cvPoint(0,0);
(objs_corners[i])[1] = cvPoint( baseImages[i].cols, 0 );
(objs_corners[i])[2] = cvPoint( baseImages[i].cols, baseImages[i].rows );
(objs_corners[i])[3] = cvPoint( 0, baseImages[i].rows );
}
Mat des_image;
std::vector<KeyPoint> kp_image;
Mat image;
cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
for (int i = 0; i < baseImagesSize; ++i)
{
Mat img_matches;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
matcher.knnMatch(des_objects[i], des_image, matches, 2);
for(int k = 0; k < min(des_image.rows-1,(int) matches.size()); k++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches[k][0].distance < 0.6*(matches[k][1].distance)) && ((int) matches[k].size()<=2 && (int) matches[k].size()>0))
{
good_matches.push_back(matches[k][0]);
}
}
//Draw only "good" matches
std::ostringstream o_stream;
o_stream<<"Logo_save/"<<baseImagesNames[i];
try
{
drawMatches( baseImages[i], kp_objects[i], image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
imwrite(o_stream.str(),img_matches);
}
catch(...)
{
cout<<"Error in drawMatches name: "<< baseImagesNames[i]<<endl;
}
if (good_matches.size() >= 4)
{
for( int k = 0; k < good_matches.size(); k++ )
{
//Get the keypoints from the good matches
obj.push_back( (kp_objects[i])[ good_matches[k].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[k].trainIdx ].pt );
}
H = findHomography( obj, scene, CV_RANSAC);
perspectiveTransform( objs_corners[i], scene_corners, H);
bool falseDetect = isSmallAngle(scene_corners);
if(!falseDetect)
{
cout<<"DETECT "<<baseImagesNames[i]<<endl;
}
}
}
Good result on CPU (MIN_HESSIAN==400):

Problem solved. Two fixes were needed.
Swap the image/keypoint parameters in the drawMatches call, since the query side (the frame) must come first:
drawMatches(frame_cpu, keypoints_test_CPU, img, keypoints_tmp_CPU, ...);
Swap queryIdx and trainIdx when collecting the points, since queryIdx indexes the frame (test) keypoints and trainIdx the base-image (tmp) keypoints:
obj.push_back( keypoints_tmp_CPU[ good_matches[k].trainIdx ].pt );
scene.push_back( keypoints_test_CPU[ good_matches[k].queryIdx ].pt );
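An equivalent fix (a sketch, not from the original answer) is to swap the knnMatch arguments instead, so the query/train order matches the CPU path; then the original index usage and the original drawMatches call remain valid as written:
//Alternative sketch: match with query = base image, train = frame,
//mirroring the CPU code path, so queryIdx/trainIdx keep their original meaning.
matcher.knnMatch(descriptors_tmp_GPU, descriptors_test_GPU, matches, 2);
//...ratio test as before, then:
obj.push_back( keypoints_tmp_CPU[ good_matches[k].queryIdx ].pt );
scene.push_back( keypoints_test_CPU[ good_matches[k].trainIdx ].pt );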

Related

Cannot perform corner matching between two images

I'm using OpenCV 2.4, where I'm using FlannBasedMatcher to match keypoints. However, instead of using SurfFeatureDetector::detect() to extract keypoints, I'm passing the image corners as keypoints.
Sadly I'm getting zero matches for all values of minHessian.
Below is my code:
void matches(int, void*)
{
SurfFeatureDetector detector( minHessian );
vector<KeyPoint> keypoints_frame,keypoints_trng;
Mat descriptors_frame,descriptors_trng;
//detect keypoints
for(int i=0;i<corners.size();i++)
{
keypoints_frame.push_back(KeyPoint(corners[i].x,corners[i].y,0));
cout<<"\n"<<keypoints_frame[i].pt.x<<"\t"<<keypoints_frame[i].pt.y;
}
keypoints_trng.push_back(KeyPoint(337,288,0));
keypoints_trng.push_back(KeyPoint(337,241,0));
keypoints_trng.push_back(KeyPoint(370,288,0));
keypoints_trng.push_back(KeyPoint(370,241,0));
keypoints_trng.push_back(KeyPoint(291,239,0));
keypoints_trng.push_back(KeyPoint(287,203,0));
keypoints_trng.push_back(KeyPoint(288,329,0));
keypoints_trng.push_back(KeyPoint(426,237,0));
keypoints_trng.push_back(KeyPoint(428,326,0));
keypoints_trng.push_back(KeyPoint(426,201,0));
keypoints_trng.push_back(KeyPoint(427,293,0));
keypoints_trng.push_back(KeyPoint(287,297,0));
for(int i=0;i<corners.size();i++)
{
cout<<"\n"<<keypoints_trng[i].pt.x<<"\t"<<keypoints_trng[i].pt.y;
}
//describe keypoints
SurfDescriptorExtractor extractor;
extractor.compute( src_gray_resized, keypoints_frame, descriptors_frame);
extractor.compute( trng_gray_resized, keypoints_trng, descriptors_trng);
//matching the keypoints
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_frame, descriptors_trng, matches );
cout<<"\n\n no of matches: "<<matches.size();
std::vector< DMatch > good_matches;
if (matches.size()>20)
{minHessian=minHessian+100;}
for( int i = 0; i < descriptors_frame.rows; i++ )
{ if( matches[i].distance <= max(2*100.00, 0.02) )
{ good_matches.push_back( matches[i]); }
}
//cout<<"\nno of good matches"<<good_matches.size();
//-- Draw only "good" matches
Mat img_matches;
drawMatches( src_gray, keypoints_frame, trng_gray, keypoints_trng,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
namedWindow("Good Matches",CV_WINDOW_NORMAL);
imshow( "Good Matches", img_matches );
}
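One observation (not from the original post): the keypoints are built with a size of 0, and SURF computes its descriptor from a window proportional to the keypoint size, so size-0 keypoints give the extractor nothing meaningful to sample, which alone can explain zero matches. A sketch of the same construction with an assumed, non-zero support size:
//Sketch (assumption): give manually-built keypoints a real support size;
//the value 20 is arbitrary, SURF scales its sampling window by it.
for (size_t i = 0; i < corners.size(); i++)
    keypoints_frame.push_back(KeyPoint(corners[i].x, corners[i].y, 20));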

OpenCV draw rectangle from webcam with 2 largest objects

I need to draw rectangles around the 2 largest objects from the webcam. I already managed to draw the contours of the 2 largest objects, but now I'm confused about how to draw the 2 largest rectangles.
Can someone show me the code, please?
//find and draw contours
void showconvex(Mat &thresh,Mat &frame) {
int largestIndex = 0;
int largestContour = 0;
int secondLargestIndex = 0;
int secondLargestContour = 0;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
//find contours
findContours(thresh, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
/// Find the convex hull object for each contour
vector<vector<Point> >hull(contours.size());
vector<vector<int> >inthull(contours.size());
vector<vector<Vec4i> >defects(contours.size());
for (int i = 0; i < contours.size(); i++)
{
convexHull(Mat(contours[i]), hull[i], false);
convexHull(Mat(contours[i]),inthull[i], false);
if (inthull[i].size()>3)
convexityDefects(contours[i], inthull[i], defects[i]);
}
//find 2 largest contour
for( int i = 0; i< contours.size(); i++ )
{
if(contours[i].size() > largestContour)
{
secondLargestContour = largestContour;
secondLargestIndex = largestIndex;
largestContour = contours[i].size();
largestIndex = i;
}
else if(contours[i].size() > secondLargestContour)
{
secondLargestContour = contours[i].size();
secondLargestIndex = i;
}
}
//show contours of 2 biggest and hull as well
if(contours.size()>0)
{
//check for contouraea function if error occur
//draw the 2 largest contour using previously stored index.
drawContours(frame, contours, largestIndex, CV_RGB(0,255,0), 2, 8, hierarchy);
drawContours(frame, contours, secondLargestIndex, CV_RGB(0,255,0), 2, 8, hierarchy);
}
}
Take a look at the code below. It is based on sorting contours either by their bounding boxes or by their areas; note that the two orderings can disagree, since a long, thin diagonal contour can have a large bounding box but a small area.
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
using namespace std;
struct contour_sorter_dsc // sorts contours by their bounding boxes descending
{
bool operator ()( const vector<Point>& a, const vector<Point> & b )
{
Rect ra( boundingRect(a) );
Rect rb( boundingRect(b) );
return ( ( rb.width * rb.height ) < ( ra.width * ra.height ) );
}
};
struct contour_sorter_dsc_area // sorts contours by their areas descending
{
bool operator ()( const vector<Point>& a, const vector<Point> & b )
{
double area_a = contourArea( a );
double area_b = contourArea( b );
return ( area_b < area_a );
}
};
int main( int argc, char** argv )
{
Mat src = imread( argv[1] );
if( src.empty() )
{
return -1;
}
Mat canvas1 = src.clone();
Mat canvas2 = src.clone();
Mat gray;
cvtColor( src, gray, COLOR_BGR2GRAY );
gray = gray > 127; // binarize image
vector<vector<Point> > contours;
findContours( gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE );
sort(contours.begin(), contours.end(), contour_sorter_dsc());
for( size_t i = 0; i< 2; i++ )
{ // checks if the first contour is image boundary
if( contours[0][0] == Point( 1, 1 ) && contours[0][1] == Point( 1, gray.rows - 2 )
&& contours[0][2] == Point( gray.cols - 2, gray.rows - 2 ) && contours[0][3] == Point( gray.cols - 2, 1 ) )
{
contours[0] = contours[1];
contours[1] = contours[2];
}
if( i < contours.size())
{
drawContours( canvas1, contours, i, Scalar( 255,255,0 ) );
Rect minRect = boundingRect( Mat(contours[i]) );
rectangle( canvas1, minRect, Scalar( 0, 0, 255 ) );
}
}
imshow( "result of sorting contours by bounding boxes ", canvas1 );
sort(contours.begin(), contours.end(), contour_sorter_dsc_area());
for( size_t i = 0; i< 2; i++ )
{ // checks if the first contour is image boundary
if( contours[0][0] == Point( 1, 1 ) && contours[0][1] == Point( 1, gray.rows - 2 )
&& contours[0][2] == Point( gray.cols - 2, gray.rows - 2 ) && contours[0][3] == Point( gray.cols - 2, 1 ) )
{
contours[0] = contours[1];
contours[1] = contours[2];
}
if( i < contours.size())
{
drawContours( canvas2, contours, i, Scalar( 255,255,0 ) );
Rect minRect = boundingRect( Mat(contours[i]) );
rectangle( canvas2, minRect, Scalar( 0, 0, 255 ) );
}
}
imshow( "result of sorting contours by areas ", canvas2 );
waitKey();
return 0;
}
Input image
Result images according to sort type

Extract Contours path attributes

I am currently working on extracting contour path attributes from a particular image file. I am able to extract contours using the OpenCV function findContours(); the output looks like this:
[98, 81][97, 80][95, 80][94, 79][93, 79][92, 78][91, 78][88, 75][87, 75][85, 73][84, 73][83, 72][82, 72]
But my desired output looks like this:
M 398.7,106.8 c -5.5,-2.7 -20.7,-4.7 -36.1,-4.6 -15.4,0.1
How can I get it?
This is my code:
using namespace cv;
using namespace std;
Mat src_grays;
int threshs = 100;
int max_threshs = 255;
RNG rng(12345);
void thresh_callbacks(int, void* );
void main( )
{
Mat src = imread( "F:/academic/pro4/t/download.jpg" );
imshow("real Image", src);
Mat gray,edge,edges, draw,draws;
Mat samples(src.rows * src.cols, 3, CV_32F);
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
for( int z = 0; z < 3; z++)
samples.at<float>(y + x*src.rows, z) = src.at<Vec3b>(y,x)[z];
int clusterCount = 5;
Mat labels;
int attempts = 10;
Mat centers;
kmeans(samples, clusterCount, labels, TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10000, 0.0001), attempts, KMEANS_PP_CENTERS, centers );
Mat new_image( src.size(), src.type() );
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
{
int cluster_idx = labels.at<int>(y + x*src.rows,0);
new_image.at<Vec3b>(y,x)[0] = centers.at<float>(cluster_idx, 0);
new_image.at<Vec3b>(y,x)[1] = centers.at<float>(cluster_idx, 1);
new_image.at<Vec3b>(y,x)[2] = centers.at<float>(cluster_idx, 2);
}
imshow( "clustered image", new_image );
char filename[80];
sprintf(filename,"F:/academic/pro4/t/seg.png");
imwrite(filename, new_image);
cvtColor(src, gray, CV_BGR2GRAY);
Canny( new_image, edges, 50, 150, 3);
edges.convertTo(draws, CV_8U);
namedWindow("imageAfterSegmnetation", CV_WINDOW_AUTOSIZE);
imshow("imagesAfterCluster", draws);
cvtColor( new_image, src_grays, CV_BGR2GRAY );
blur( src_grays, src_grays, Size(3,3) );
char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &threshs, max_threshs, thresh_callbacks );
thresh_callbacks( 0, 0 );
waitKey( 0 );
}
void thresh_callbacks(int, void* )
{
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using canny
Canny( src_grays, canny_output, threshs, threshs*2, 3 );
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
for(int i= 0; i < contours.size(); i++)
{
for(int j= 0; j < contours[i].size();j++) // run until j < contours[i].size();
{
int a= contours[i][j].x ;
int b =contours[i][j].y ;
// printf("Point(x,y)=" + a, b);
std::cout << contours[i][j] << std::endl;
}
printf ("%i", i + "\n");
}
/// Draw contours
int a=contours.size();
for( int i = 0; i<contours.size(); i++ )
{
Mat drawing_i = Mat::zeros( canny_output.size(), CV_8UC3 );
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing_i, contours, i, color, 2, 8, hierarchy, 0, Point() );
namedWindow( "Contours_i", CV_WINDOW_AUTOSIZE );
imshow( "Contours_i", drawing_i );
}
}
Note:
I need the contour path, i.e., how the contour points are connected. For example: M = moveto, L = lineto, H = horizontal lineto, V = vertical lineto, C = curveto, S = smooth curveto, Q = quadratic Bézier curveto, T = smooth quadratic Bézier curveto, A = elliptical arc, Z = closepath, just like an SVG path.
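OpenCV itself does not produce SVG path commands; since findContours returns ordered point lists, only a polyline-style path can be emitted directly. A sketch under that assumption (applying approxPolyDP first would thin the points; real curve commands like C, S, Q would need separate curve fitting):
//Sketch (not from the original post): emit one contour as an SVG path string
//using only polyline commands (M/L/Z). Needs <sstream> and <string>.
std::string contourToSvgPath(const std::vector<cv::Point>& contour)
{
    std::ostringstream path;
    for (size_t j = 0; j < contour.size(); ++j)
        path << (j == 0 ? "M " : "L ") << contour[j].x << "," << contour[j].y << " ";
    path << "Z"; //closepath
    return path.str();
}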

convert cvSeq* to vector<vector<Point> >

How can I convert a CvSeq* to vector< vector< Point > >?
This is my code:
first_contour = NULL;
cvFindContours(
src_copy_gray,
storage,
&first_contour,
sizeof(CvContour),
CV_RETR_TREE );
// Finding convex Hull
CvMemStorage* hull_storage = cvCreateMemStorage();
CvSeq* retHulls = NULL;
CvMat* tmp;
cv::Mat tmp22;
// drawing contours
ff = cv::Mat::zeros( tmp4.size(), CV_8UC3 );
draw = new IplImage(ff);
for(CvSeq* i = first_contour; i != 0; i = i->h_next){
cvDrawContours(draw,i,cv::Scalar(255,255,0,0),cv::Scalar(255,0,0,0),0,1,8);
retHulls = cvConvexHull2(i,hull_storage,CV_CLOCKWISE,1);
cvDrawContours(draw,retHulls,cv::Scalar(255,0,0,0),cv::Scalar(255,0,0,0),0,1,8);
for (int k = 0; k < vertices_number; k++)
{
if(pointPolygonTest( ??????, cv::Point2f(verticesB[k].x,verticesB[k].y), false )>0){
cv::line( ff,cv::Point2f(verticesB[k].x,verticesB[k].y), cv::Point2f(verticesB[k].x,verticesB[k].y), cv::Scalar(255,0,255 ), 2, 8 );
}
}
}
What should I put instead of ?????
I need a cv::_InputArray, but I don't know how to convert the CvSeq* retHulls to a cv::_InputArray such as a vector of vectors.
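A sketch of one way to do it (assuming returnPoints=1 in cvConvexHull2, as in the call above, so retHulls stores point elements): copy the sequence into a std::vector<cv::Point> with cvCvtSeqToArray, which pointPolygonTest accepts as an InputArray.
//Sketch: copy the hull point sequence into a vector usable as cv::InputArray.
std::vector<cv::Point> hullPts(retHulls->total);
if (retHulls->total > 0)
    cvCvtSeqToArray(retHulls, &hullPts[0], CV_WHOLE_SEQ);
//then use hullPts in place of ?????:
if (pointPolygonTest(hullPts, cv::Point2f(verticesB[k].x, verticesB[k].y), false) > 0)
    cv::line(ff, cv::Point2f(verticesB[k].x, verticesB[k].y), cv::Point2f(verticesB[k].x, verticesB[k].y), cv::Scalar(255, 0, 255), 2, 8);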

OPENCV (Unsupported format or combination formats) SURF

Is anyone familiar with this error? I tested a SURF descriptor in real time; I want to use it to recognize different species of fish by these features. Sometimes the program runs fine, but sometimes it gets an error. The build succeeds; the error shows up at run time.
#include <opencv2\imgproc\imgproc_c.h>
#include <stdio.h>
#include <math.h>
#include <opencv\highgui.h>
#include <opencv\cv.h>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>
using namespace cv;
#define nimg 3
int main()
{
Mat object = imread( "D:/galunggong.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat object1 = imread( "D:/sapsap.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat object2 = imread( "D:/bisugo.jpg",CV_LOAD_IMAGE_GRAYSCALE );
// Mat object3 = imread( "4.jpg", CV_LOAD_IMAGE_GRAYSCALE );
//Mat object4 = imread( "5.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
if( !object1.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
if( !object2.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
//Detect the keypoints using SURF Detector
int minHessian = 1000;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> kp_object;
std::vector<KeyPoint> kp_object1;
std::vector<KeyPoint> kp_object2;
std::vector<KeyPoint> kp_object3;
std::vector<KeyPoint> kp_object4;
detector.detect( object, kp_object );
detector.detect( object1, kp_object1 );
detector.detect( object2, kp_object2 );
//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat des_object;
Mat des_object1;
Mat des_object2;
extractor.compute( object, kp_object, des_object );
extractor.compute( object1, kp_object1, des_object1 );
extractor.compute( object2, kp_object2, des_object2 );
FlannBasedMatcher matcher;
CvCapture* cap = cvCreateCameraCapture(0);
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, 320);
cvSetCaptureProperty( cap, CV_CAP_PROP_FRAME_HEIGHT, 240 );
namedWindow("Good Matches");
std::vector<Point2f> obj_corners(4);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
char key = 'a';
int framecount = 0;
Mat frame;
Mat des_image, img_matches;
char vect[nimg];
char contor;
char ok, ko;
while (1)
{
frame = cvQueryFrame(cap);
if (framecount < 5)
{
framecount++;
continue;
}
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<vector<DMatch > > matches1;
std::vector<vector<DMatch > > matches2;
std::vector<DMatch > good_matches;
std::vector<DMatch > good_matches1;
std::vector<DMatch > good_matches2;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;
cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);
// printf("d \n");
//////////////////////////////////////////////////////
contor=0;
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS
SENSITIVE TO SEGFAULTS
{
if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int)
matches[i].size()<=2 && (int) matches[i].size()>0))
{
good_matches.push_back(matches[i][0]);
}
}
vect[contor]=good_matches.size();
/////////////////////////////////////////////////////
contor=1;
matcher.knnMatch(des_object1, des_image, matches1, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches1.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches1[i][0].distance < 0.6*(matches1[i][1].distance)) && ((int) matches1[i].size()<=2 && (int) matches1[i].size()>0))
{
good_matches1.push_back(matches1[i][0]);
}
}
vect[contor]=good_matches1.size();
/////////////////////////////////////////////////////
contor=2;
matcher.knnMatch(des_object2, des_image, matches2, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches2.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches2[i][0].distance < 0.6*(matches2[i][1].distance)) && ((int) matches2[i].size()<=2 && (int) matches2[i].size()>0))
{
good_matches2.push_back(matches2[i][0]);
}
}
vect[contor]=good_matches2.size();
////////////////////////////////////////////////////
/*
contor =3;
matcher.knnMatch(des_object, des_image, matches3, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches3.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches3[i][0].distance < 0.6*(matches3[i][1].distance)) && ((int) matches3[i].size()<=2 && (int) matches3[i].size()>0))
{
good_matches3.push_back(matches[i][0]);
}
}
vect[contor]=good_matches3.size();
//////////////////////////////////////////////////
contor=4;
matcher.knnMatch(des_object, des_image, matches4, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches4.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches4[i][0].distance < 0.6*(matches4[i][1].distance)) && ((int) matches4[i].size()<=2 && (int) matches4[i].size()>0))
{
good_matches4.push_back(matches[i][0]);
}
}
vect[contor]=good_matches4.size();
*/
printf("%d %d %d \n ",vect[0],vect[1],vect[2]);
ok=0;
for (contor=1;contor<nimg;contor++)
if (vect[contor]>vect[contor-1])
ok=contor;
for (ko=10;ko>3;ko++)
{
if (ok==0 && vect[ok]>ko)
{printf("Forward \n");
ko=2;}
else if (ok==1 && vect[ok]>ko)
{printf("Turn Left \n");
ko=2;}
else if (ok==2 && vect[ok]>ko)
{printf("Turn Right \n");
ko=2;}
}
//Show detected matches
imshow( "Good Matches",frame /*img_matches*/ );
key = waitKey(1);
}
}
Sometimes SURF cannot find any feature points so it may return an empty descriptor.
You should check the descriptors for data:
if(des_object.empty()||des_object1.empty()||des_object2.empty())
{
std::cout<<"Empty descriptor(s)."<<std::endl;
return -1;
}
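The same can happen to the live frame (an addition beyond the original answer, not part of it): a frame with no detectable features yields an empty des_image, so it is worth guarding each iteration before calling knnMatch:
//Sketch: skip matching for frames where SURF found no keypoints.
if (des_image.empty())
{
    imshow("Good Matches", frame);
    key = waitKey(1);
    continue;
}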
