OpenCV: matching multiple objects with rotation using a single simple template

I am trying to match multiple objects with rotation using a simple template, such as a smiley-face template, and I want to detect them in a test image.
I have tried using Features2D and Homography for the detection; however, there are several problems.
P1: This keypoint-matching method does not seem accurate for a SIMPLE template (I have tried it on a much more complicated template, and the matching result was better). Is there a method that handles this problem?
P2: This method is clearly not suitable for a multi-object test image. How can I match multiple objects using a single template? (The premise is that I do not know the number or the locations of the objects in the test image.)
Below is my code:
//-- Load the template (object) and the test (scene) images
Mat img_object = imread( "2.png", CV_LOAD_IMAGE_GRAYSCALE );
Mat img_scene = imread( "1.png", CV_LOAD_IMAGE_GRAYSCALE );
//-- Step 1: Detect the keypoints using the SURF detector
SurfFeatureDetector detector( hessian ); // hessian threshold defined elsewhere
vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect( img_object, keypoints_object );
detector.detect( img_scene, keypoints_scene );
//-- Step 2: Compute SURF descriptors for the keypoints
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( img_object, keypoints_object, descriptors_object );
extractor.compute( img_scene, keypoints_scene, descriptors_scene );
//-- Step 3: Match descriptor vectors using the FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
//-- Quick calculation of max and min distances between keypoints
double max_dist = 0; double min_dist = 100;
for( int i = 0; i < descriptors_object.rows; i++ )
{
    double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
}
//-- Keep only "good" matches (distance less than 3*min_dist)
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{
    if( matches[i].distance < 3*min_dist )
    { good_matches.push_back( matches[i] ); }
}
Mat img_matches;
drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
             good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
             vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( size_t i = 0; i < good_matches.size(); i++ )
{
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, CV_RANSAC );
//-- Get the corners of the template image (the object to be "detected")
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint( 0, 0 ); obj_corners[1] = cvPoint( img_object.cols, 0 );
obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H );
//-- Draw lines between the corners (the mapped object in the scene image)
line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
I am a beginner in computer vision, and it is my first time asking on this forum. Many thanks for your help!

If your problem is to detect only that kind of image, a simple thing you can do is use a circle detector. You can then group the points of the bigger circle (the head) and the points of the eyes. If you know the positions of the centroids of those three circles, you can recover the position and rotation of the face by studying where the eyes are.
In the image, the red points represent the centroids of the circles. You can get the head position by finding where the main centroid is; alpha is the angle between the right eye and the main centroid. If you can find the new angle, you can compute theta, which indicates the rotation of the face, and this might even work under scale changes.
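A minimal sketch of that idea (my illustration, not the answerer's code; the file name, blur size, and all Hough parameters are placeholder assumptions that would need tuning for real images):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    cv::Mat gray = cv::imread("test.png", CV_LOAD_IMAGE_GRAYSCALE);
    cv::GaussianBlur(gray, gray, cv::Size(9, 9), 2);

    // Each detected circle is (center_x, center_y, radius).
    std::vector<cv::Vec3f> circles;
    cv::HoughCircles(gray, circles, CV_HOUGH_GRADIENT,
                     1,        // dp: accumulator resolution (same as image)
                     20,       // minimum distance between circle centers
                     100,      // Canny high threshold
                     30,       // accumulator threshold (lower -> more circles)
                     5, 100);  // min/max radius

    // In the full solution you would group each big circle (head) with the
    // two small circles inside it (eyes); this only shows how the rotation
    // falls out of two centroids once a head/eye pair is known.
    if (circles.size() >= 2)
    {
        cv::Point2f head(circles[0][0], circles[0][1]);
        cv::Point2f eye(circles[1][0], circles[1][1]);
        double theta = std::atan2(eye.y - head.y, eye.x - head.x);
        std::printf("face rotation estimate: %f rad\n", theta);
    }
    return 0;
}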

Related

Cannot perform corner matching between two images

I'm using OpenCV 2.4, where I'm using FlannBasedMatcher to match keypoints. However, instead of using SurfFeatureDetector::detect() to extract keypoints, I'm passing the image corners as keypoints.
Sadly, I'm getting zero matches for all values of minHessian.
Below is my code:
void matches(int, void*)
{
SurfFeatureDetector detector( minHessian );
vector<KeyPoint> keypoints_frame,keypoints_trng;
Mat descriptors_frame,descriptors_trng;
//detect keypoints
for(int i=0;i<corners.size();i++)
{
keypoints_frame.push_back(KeyPoint(corners[i].x,corners[i].y,0));
cout<<"\n"<<keypoints_frame[i].pt.x<<"\t"<<keypoints_frame[i].pt.y;
}
keypoints_trng.push_back(KeyPoint(337,288,0));
keypoints_trng.push_back(KeyPoint(337,241,0));
keypoints_trng.push_back(KeyPoint(370,288,0));
keypoints_trng.push_back(KeyPoint(370,241,0));
keypoints_trng.push_back(KeyPoint(291,239,0));
keypoints_trng.push_back(KeyPoint(287,203,0));
keypoints_trng.push_back(KeyPoint(288,329,0));
keypoints_trng.push_back(KeyPoint(426,237,0));
keypoints_trng.push_back(KeyPoint(428,326,0));
keypoints_trng.push_back(KeyPoint(426,201,0));
keypoints_trng.push_back(KeyPoint(427,293,0));
keypoints_trng.push_back(KeyPoint(287,297,0));
for(int i=0;i<corners.size();i++)
{
cout<<"\n"<<keypoints_trng[i].pt.x<<"\t"<<keypoints_trng[i].pt.y;
}
//describe keypoints
SurfDescriptorExtractor extractor;
extractor.compute( src_gray_resized, keypoints_frame, descriptors_frame);
extractor.compute( trng_gray_resized, keypoints_trng, descriptors_trng);
//matching the keypoints
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_frame, descriptors_trng, matches );
cout<<"\n\n no of matches: "<<matches.size();
std::vector< DMatch > good_matches;
if (matches.size()>20)
{minHessian=minHessian+100;}
for( int i = 0; i < descriptors_frame.rows; i++ )
{ if( matches[i].distance <= max(2*100.00, 0.02) )
{ good_matches.push_back( matches[i]); }
}
//cout<<"\nno of good matches"<<good_matches.size();
//-- Draw only "good" matches
Mat img_matches;
drawMatches( src_gray, keypoints_frame, trng_gray, keypoints_trng,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
namedWindow("Good Matches",CV_WINDOW_NORMAL);
imshow( "Good Matches", img_matches );
}
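One detail worth checking (my note, not part of the original question): the third argument of the KeyPoint constructor is the keypoint size (patch diameter), and SURF needs a positive size to know at which scale to compute a descriptor. With size 0, SurfDescriptorExtractor::compute() can drop the keypoints or produce empty descriptors, which would explain zero matches regardless of minHessian. A minimal sketch with an assumed, tunable patch size:

// Sketch: give each corner keypoint a plausible, non-zero size so that
// SURF has a patch to describe (the value 20 is an arbitrary guess).
for (int i = 0; i < corners.size(); i++)
{
    keypoints_frame.push_back(KeyPoint(corners[i].x, corners[i].y, 20));
}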

Using SURF for hand detection

I'm using the SURF algorithm for real-time hand detection. I let the user put his hand in a rectangle and then use that region as my object to find the hand in the first frame; in the next loop I set the detected hand as my object, and so on...
But when it runs, it shows the following error:
OpenCV Error: Assertion failed (count >= 4) in cvFindHomography, file /build/buildd/opencv-2.4.8+dfsg1/modules/calib3d/src/fundam.cpp, line 235
terminate called after throwing an instance of 'cv::Exception'
what(): /build/buildd/opencv-2.4.8+dfsg1/modules/calib3d/src/fundam.cpp:235: error: (-215) count >= 4 in function cvFindHomography
Here's my code:
#include <stdio.h>
#include <iostream>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;
using namespace std;
int main()
{
char k;
Mat img_object,img_scene,frame;
VideoCapture cap(0);
if(cap.isOpened()==0)
{
cout<<"ERROR";
return -1;
}
while(1)
{
cap>>frame;
rectangle(frame,Point(100,100),Point(300,300),Scalar(0,255,0),4,8,0);
imshow("gig",frame);
k=waitKey(1);
if(k=='q')
{
Mat img_object1(frame,Rect(25,25,100,100));
img_object=img_object1.clone();
cvtColor(img_object,img_object,CV_BGR2GRAY);
break;
}
}
while(1)
{
cap>>img_scene;
cvtColor(img_scene,img_scene,CV_BGR2GRAY);
int minHessian = 50;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect( img_object, keypoints_object );
detector.detect( img_scene, keypoints_scene );
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( img_object, keypoints_object, descriptors_object );
extractor.compute( img_scene, keypoints_scene, descriptors_scene );
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
double max_dist = 0; double min_dist = 100;
for( int i = 0; i < descriptors_object.rows; i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{
if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
Mat img_matches;
drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( int i = 0; i < good_matches.size(); i++ )
{
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, CV_RANSAC );
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
// Rect takes (x, y, width, height), not two corner points
Mat nn(img_scene,Rect(scene_corners[0].x, scene_corners[0].y, scene_corners[2].x - scene_corners[0].x, scene_corners[2].y - scene_corners[0].y));
imshow( "Good Matches & Object detection", img_matches );
img_object=nn.clone();
k=waitKey(1);
if(k=='q')
{
break;
}
}
return 0;
}
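The assertion "count >= 4" comes from cvFindHomography: a homography needs at least 4 point correspondences, and in frames where the hand is blurred or occluded, good_matches can shrink below that. A minimal guard (my sketch, to be placed just before the findHomography call in the code above) would be:

// Sketch: skip homography estimation when fewer than 4 good matches
// survive the distance filter, instead of letting cvFindHomography assert.
if (good_matches.size() < 4)
{
    imshow( "Good Matches & Object detection", img_matches );
    if (waitKey(1) == 'q') break;
    continue; // too few correspondences this frame; grab the next one
}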

Features2d + Homography not giving appropriate results

I am trying to detect an object using SurfFeatureDetector and the FLANN matcher. However, the code is not able to detect the object accurately. I have also posted the results in pictorial format.
Here's my code, from the OpenCV tutorial website:
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;
using namespace std;
void readme(); // forward declaration: readme() is defined after main()
int main(int argc, char** argv){
if (argc != 3){
readme(); return -1;
}
Mat img_object = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
Mat img_scene = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
if (!img_object.data || !img_scene.data)
{
std::cout << " --(!) Error reading images " << std::endl; return -1;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 100;
SurfFeatureDetector detector(minHessian);
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect(img_object, keypoints_object);
detector.detect(img_scene, keypoints_scene);
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute(img_object, keypoints_object, descriptors_object);
extractor.compute(img_scene, keypoints_scene, descriptors_scene);
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match(descriptors_object, descriptors_scene, matches);
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_object.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for (int i = 0; i < descriptors_object.rows; i++)
{
if (matches[i].distance < 3 * min_dist)
{
good_matches.push_back(matches[i]);
}
}
Mat img_matches;
drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for (int i = 0; i < good_matches.size(); i++)
{
//-- Get the keypoints from the good matches
obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
}
Mat H = findHomography(obj, scene, CV_RANSAC);
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0, 0); obj_corners[1] = cvPoint(img_object.cols, 0);
obj_corners[2] = cvPoint(img_object.cols, img_object.rows); obj_corners[3] = cvPoint(0, img_object.rows);
std::vector<Point2f> scene_corners(4);
perspectiveTransform(obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line(img_matches, scene_corners[0] + Point2f(img_object.cols, 0), scene_corners[1] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[1] + Point2f(img_object.cols, 0), scene_corners[2] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[2] + Point2f(img_object.cols, 0), scene_corners[3] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[3] + Point2f(img_object.cols, 0), scene_corners[0] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
//-- Show detected matches
imshow("Good Matches & Object detection", img_matches);
waitKey(0);
return 0;
}
/** @function readme */
void readme()
{
std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl;
}
That is a very common failure. The problem is that the homography has 8 degrees of freedom (8 DOF). This means that you need at least 4 correct correspondences to calculate a good homography:
H = [ h11 h12 h13 ; h21 h22 h23 ; h31 h32 h33 ]
As you can see, the homography has 8 free parameters (the last one, h33, is just a scale factor).
The problem arises because, in addition to keeping the good correspondences (inliers), you need to filter out the bad correspondences (outliers). When there are more outliers than inliers (an outlier ratio above 50%), the RANSAC procedure cannot separate them and you obtain weird results.
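One way to detect this failure mode (my sketch, not part of the original answer) is to ask findHomography for its RANSAC inlier mask and reject results with a low inlier ratio; the 0.5 threshold mirrors the 50% figure above:

// Sketch: count the RANSAC inliers reported by findHomography and treat
// a low inlier ratio as a failed detection instead of drawing weird corners.
std::vector<uchar> inlier_mask;
Mat H = findHomography(obj, scene, CV_RANSAC, 3, inlier_mask);
int inliers = countNonZero(inlier_mask);
double inlier_ratio = obj.empty() ? 0.0 : (double)inliers / (double)obj.size();
if (H.empty() || inlier_ratio < 0.5)
{
    // More outliers than inliers: do not trust this homography.
}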
Solutions to this problem are not easy. You could:
Use a training image with an out-of-plane rotation (and a scale) similar to the object in your query image.
Or, use a transformation with fewer degrees of freedom (such as a similarity transform), so that fewer inliers are needed. Unfortunately, OpenCV lacks support for fitting this simpler transformation with a robust method.
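As a rough sketch of the second suggestion (my example, not the answerer's): OpenCV 2.4 does at least offer estimateRigidTransform() in the video module, which fits a 4-DOF partial affine transform (rotation, uniform scale, translation) to the matched points, although it is not the full robust similarity fit the answer says is missing:

// Sketch: fit a 4-DOF partial affine to the matched points (obj and scene
// filled as in the code above) instead of an 8-DOF homography.
// Requires: #include "opencv2/video/tracking.hpp"
Mat S = estimateRigidTransform(obj, scene, false /* fullAffine */);
if (!S.empty())
{
    // S is 2x3, so map the object corners with transform()
    // rather than perspectiveTransform().
    std::vector<Point2f> corners_out(4);
    transform(obj_corners, corners_out, S);
}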

Calculate similarity score between scene and template object

How can I calculate a comparable similarity score that tells me how similar img_scene is to img_object?
When I render img_matches, the homography successfully draws the boundaries of the found object in the scene, but I need a comparable score, something like if (score > THRESHOLD) { /* have match */ } else { /* don't have match */ }.
Mat img_scene = srcImage;
Mat img_object = _templateImage;
//-- Step 1: Detect the keypoints using SURF Detector
SurfFeatureDetector detector(_minHessian);
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect(img_object, keypoints_object);
detector.detect(img_scene, keypoints_scene);
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute(img_object, keypoints_object, descriptors_object);
extractor.compute(img_scene, keypoints_scene, descriptors_scene);
if (descriptors_object.type() != descriptors_scene.type())
return;
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector<DMatch> matches;
matcher.match(descriptors_object, descriptors_scene, matches);
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (size_t i = 0; i < (size_t)descriptors_object.rows; i++ ) {
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
std::vector<DMatch> good_matches;
for(size_t i = 0; i < (size_t)descriptors_object.rows; i++) {
if (matches[i].distance < 2 * min_dist) {
good_matches.push_back(matches[i]);
}
}
if (good_matches.size() < 4)
return;
Mat img_matches;
drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for (size_t i = 0; i < (size_t)good_matches.size(); i++) {
//-- Get the keypoints from the good matches
obj.push_back(keypoints_object[(size_t)good_matches[i].queryIdx].pt);
scene.push_back(keypoints_scene[(size_t)good_matches[i].trainIdx].pt);
}
vector<uchar> mask;
Mat H = findHomography(obj, scene, CV_RANSAC, 3, mask);
//-- Get the corners from the image_1 (the object to be "detected")
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0, 0);
obj_corners[1] = cvPoint(img_object.cols, 0);
obj_corners[2] = cvPoint(img_object.cols, img_object.rows);
obj_corners[3] = cvPoint(0, img_object.rows);
std::vector<Point2f> scene_corners(4);
perspectiveTransform(obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line(img_matches, scene_corners[0] + Point2f(img_object.cols, 0), scene_corners[1] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[1] + Point2f(img_object.cols, 0), scene_corners[2] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[2] + Point2f(img_object.cols, 0), scene_corners[3] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
line(img_matches, scene_corners[3] + Point2f(img_object.cols, 0), scene_corners[0] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
UPDATE:
Here is the working solution, as @mikesapi proposed:
...
//-- Keep only "good" matches (i.e. whose distance is less than max(2*min_dist, 0.02) )
std::vector<DMatch> good_matches;
double good_matches_sum = 0.0;
for (size_t i = 0; i < matches.size(); i++ ) {
if( matches[i].distance < max(2*min_dist, 0.02) ) {
good_matches.push_back(matches[i]);
good_matches_sum += matches[i].distance;
}
}
double score = (double)good_matches_sum / (double)good_matches.size();
if (score < 0.18) {
// have match
} else {
// dont have match
}
...
A similarity score is greater when the object and scene are more similar (as opposed to a dissimilarity score, where a higher score means they are more dissimilar). Since you are using distances with FLANN (which I assume is giving you approximate Euclidean distances between descriptors), a dissimilarity score is easier to generate: the Euclidean distance is large if descriptors are far apart in descriptor space, and small if they are close together.
One simple way to generate a dissimilarity score would be to:
1. For each descriptor in the object image: calculate the minimum distance to each descriptor in the scene image.
2. Sum the (minimum) distances, and normalize by the number of descriptors in the object image.
Then you will have a single score quantifying the match between the object and the scene.
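A small sketch of that recipe (my illustration; it assumes descriptor matrices laid out as in the question's code, one descriptor per row):

// Sketch: mean of the per-object-descriptor minimum L2 distances to the
// scene descriptors; lower means the object and scene are more alike.
// Requires: #include <limits>
double dissimilarityScore(const cv::Mat& descriptors_object,
                          const cv::Mat& descriptors_scene)
{
    if (descriptors_object.rows == 0 || descriptors_scene.rows == 0)
        return std::numeric_limits<double>::max();
    double sum = 0.0;
    for (int i = 0; i < descriptors_object.rows; i++)
    {
        double best = std::numeric_limits<double>::max();
        for (int j = 0; j < descriptors_scene.rows; j++)
        {
            double d = cv::norm(descriptors_object.row(i),
                                descriptors_scene.row(j), cv::NORM_L2);
            if (d < best) best = d; // minimum distance for this descriptor
        }
        sum += best;
    }
    // Normalize by the number of object descriptors.
    return sum / descriptors_object.rows;
}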

Matching a template image (scaled) to a main/larger image

I want to find/check for a subimage/template image in a main image and to know its coordinates.
I have used the code given at the following link to implement it:
Check presence of subimage in image in iOS
It works fine if the size of the template image is exactly the same as the size of the matching part of the larger image.
But it does not give proper results if the subimage is scaled down or scaled up compared to the matching part of the larger image.
Use OpenCV feature detection; it is more accurate than template matching.
Please try this code:
-(void)featureDetection:(UIImage*)largerImage withImage:(UIImage*)subImage
{
cv::Mat tempMat1 = [largerImage CVMat];
cv::Mat tempMat2 = [subImage CVMat];
cv::cvtColor(tempMat1, tempMat1, CV_RGB2GRAY);
cv::cvtColor(tempMat2, tempMat2, CV_RGB2GRAY);
if( !tempMat1.data || !tempMat2.data ) {
return;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 25;
cv::SurfFeatureDetector detector( minHessian ); // more accurate, but takes more time
//cv::FastFeatureDetector detector( minHessian ); // less accurate, but takes less time
std::vector<cv::KeyPoint> keypoints_1, keypoints_2;
detector.detect( tempMat1, keypoints_1 );
detector.detect( tempMat2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
cv::SurfDescriptorExtractor extractor;
cv::Mat descriptors_1, descriptors_2;
extractor.compute( tempMat1, keypoints_1, descriptors_1 );
extractor.compute( tempMat2, keypoints_2, descriptors_2 );
std::vector<cv::Point2f> obj_corners(4);
//Get the corners from the object
obj_corners[0] = (cvPoint(0,0));
obj_corners[1] = (cvPoint(tempMat2.cols,0));
obj_corners[2] = (cvPoint(tempMat2.cols,tempMat2.rows));
obj_corners[3] = (cvPoint(0, tempMat2.rows));
//-- Step 3: Matching descriptor vectors with a brute force matcher
//cv::BruteForceMatcher < cv::L2<float> > matcher;
cv::FlannBasedMatcher matcher;
//std::vector< cv::DMatch > matches;
std::vector<std::vector<cv::DMatch> > matches;
std::vector<cv::DMatch > good_matches;
std::vector<cv::Point2f> obj;
std::vector<cv::Point2f> scene;
std::vector<cv::Point2f> scene_corners(4);
cv::Mat H;
matcher.knnMatch( descriptors_2, descriptors_1, matches,2);
for(int i = 0; i < cv::min(tempMat1.rows-1,(int) matches.size()); i++) {
if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0)) {
good_matches.push_back(matches[i][0]);
}
}
cv::Mat img_matches;
drawMatches( tempMat2, keypoints_2, tempMat1, keypoints_1, good_matches, img_matches );
NSLog(#"good matches %lu",good_matches.size());
if (good_matches.size() >= 4) {
for( int i = 0; i < good_matches.size(); i++ ) {
//Get the keypoints from the good matches
obj.push_back( keypoints_2[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_1[ good_matches[i].trainIdx ].pt );
}
H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
NSLog(#"%f %f",scene_corners[0].x,scene_corners[0].y);
NSLog(#"%f %f",scene_corners[1].x,scene_corners[1].y);
NSLog(#"%f %f",scene_corners[2].x,scene_corners[2].y);
NSLog(#"%f %f",scene_corners[3].x,scene_corners[3].y);
//Draw lines between the corners (the mapped object in the scene image )
line( tempMat1, scene_corners[0], scene_corners[1], cvScalar(0, 255, 0), 4 );
line( tempMat1, scene_corners[1], scene_corners[2], cvScalar( 0, 255, 0), 4 );
line( tempMat1, scene_corners[2], scene_corners[3], cvScalar( 0, 255, 0), 4 );
line( tempMat1, scene_corners[3], scene_corners[0], cvScalar( 0, 255, 0), 4 );
}
// View matching..
UIImage *resultimage = [UIImage imageWithCVMat:img_matches];
UIImageView *imageview = [[UIImageView alloc] initWithImage:resultimage];
imageview.frame = CGRectMake(0, 0, 320, 240);
[self.view addSubview:imageview];
// View Result
UIImage *resultimage2 = [UIImage imageWithCVMat:tempMat1];
UIImageView *imageview2 = [[UIImageView alloc] initWithImage:resultimage2];
imageview2.frame = CGRectMake(0, 240, 320, 240);
[self.view addSubview:imageview2];
}
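A note on the matching step in this answer: the loop after knnMatch() implements Lowe's ratio test. For each descriptor the two nearest neighbors are retrieved, and a match is kept only when the best distance is below 0.6 times the second-best distance, which discards ambiguous matches. That filtering, rather than a fixed distance threshold, is what lets the approach hold up when the subimage is scaled relative to the larger image.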
