How to match keypoints in SIFT? - image-processing

I have calculated a 128-dimensional descriptor vector for each keypoint in an image.
Let I1 be the original image and I2 the same image rotated by 45 degrees.
I got 130 keypoints for I1 and 104 keypoints for I2, i.e. descriptor matrices of size 128x130 and 128x104.
I calculated the Euclidean distance between each keypoint of I1 and all keypoints of I2, which gives a distance matrix of size 130x104 (one row per I1 keypoint, one column per I2 keypoint).
Now I need to choose the nearest keypoint from this distance matrix. How can I select, for each keypoint of I1, the minimum-distance match out of the 130x104 matrix?

Since you have already calculated the distances between the keypoints, to match them: for each keypoint of I1, take the I2 keypoint with the smallest distance as its candidate match, then keep only those matches whose distance is below constant*min_dist (i.e. within some percentage of the smallest distance found) as 'good matches'.
OpenCV also provides BruteForceMatcher, knnMatch and FlannBasedMatcher (URLs below):
http://docs.opencv.org/2.4/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.html#feature-flann-matcher
http://docs.opencv.org/2.4/modules/features2d/doc/common_interfaces_of_descriptor_matchers.html#descriptormatcher-knnmatch
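For reference, a minimal sketch of that approach using OpenCV's BFMatcher with Lowe's ratio test. It assumes descriptors1 and descriptors2 are CV_32F matrices with one 128-dimensional row per keypoint (i.e. 130x128 and 104x128, the transpose of your layout), and the 0.75 ratio is a conventional value, not a fixed rule:
#include <opencv2/features2d/features2d.hpp>
#include <vector>

// Sketch: match two sets of SIFT descriptors with a brute-force matcher
// and keep only matches that pass Lowe's ratio test.
std::vector<cv::DMatch> ratioTestMatch(const cv::Mat& descriptors1,
                                       const cv::Mat& descriptors2)
{
    cv::BFMatcher matcher(cv::NORM_L2); // L2 norm suits SIFT's float descriptors
    std::vector<std::vector<cv::DMatch> > knnMatches;
    matcher.knnMatch(descriptors1, descriptors2, knnMatches, 2); // 2 nearest neighbors

    std::vector<cv::DMatch> goodMatches;
    for (size_t i = 0; i < knnMatches.size(); i++)
    {
        // Accept a match only if it is clearly better than the second-best candidate.
        if (knnMatches[i].size() == 2 &&
            knnMatches[i][0].distance < 0.75f * knnMatches[i][1].distance)
            goodMatches.push_back(knnMatches[i][0]);
    }
    return goodMatches;
}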
Also, have a look at these questions and their responses.
1) Trying to match two images using sift in OpenCv, but too many matches
2) Efficient way for SIFT descriptor matching
Just for completeness, here is some very rough code for your reference.
Suppose you have:
class SIFTDemo
{
private:
    Mat image;
    vector<cv::KeyPoint> keypoints;
    Mat descriptors;
    Mat sift_output;
    vector<DMatch> matches;
public:
    SIFTDemo();
    ~SIFTDemo();
    SIFTDemo(Mat m);
    void extractSiftFeatures();
    vector<DMatch> FindMatchesEuclidian(SIFTDemo &m2);
};
Then one can have something like this:
void SIFTDemo::extractSiftFeatures()
{
    SIFT siftobject;
    siftobject.operator()(image, Mat(), keypoints, descriptors); // detect keypoints and compute descriptors
}
vector<DMatch> SIFTDemo::FindMatchesEuclidian(SIFTDemo &m2)
{
    // Calculate the Euclidean distance between keypoints to find the best matching pairs.
    // Create a two-dimensional vector for storing the Euclidean distances.
    vector< vector<float> > vec1, unsortedvec1;
    for (int i = 0; i < this->keypoints.size(); i++)
    {
        vec1.push_back(vector<float>()); // add an empty row
        unsortedvec1.push_back(vector<float>());
    }
    // Create a vector of DMatch for storing the matched points.
    vector<DMatch> matches1;
    DMatch dm1;
    // Loop through keypoints1.
    for (int i = 0; i < this->keypoints.size(); i++)
    {
        // Get the 128 dimensions in a vector.
        vector<float> k1;
        for (int x = 0; x < 128; x++)
        {
            k1.push_back((float)this->descriptors.at<float>(i, x));
        }
        // Loop through keypoints2.
        for (int j = 0; j < m2.keypoints.size(); j++)
        {
            double temp = 0;
            // Calculate the Euclidean distance.
            for (int x = 0; x < 128; x++)
            {
                temp += pow((k1[x] - (float)m2.descriptors.at<float>(j, x)), 2.0);
            }
            vec1[i].push_back((float)sqrt(temp)); // store the distance to each keypoint in image 2
            unsortedvec1[i] = vec1[i];
        }
        sort(vec1[i].begin(), vec1[i].end()); // sort the distances; the first element is the shortest
        // Find the position of the shortest distance.
        int pos = (int)(find(unsortedvec1[i].begin(), unsortedvec1[i].end(), vec1[i][0]) - unsortedvec1[i].begin());
        // Assign that matching feature to the DMatch variable dm1.
        dm1.queryIdx = i;
        dm1.trainIdx = pos;
        dm1.distance = vec1[i][0];
        matches1.push_back(dm1);
        this->matches.push_back(dm1);
        //cout << pos << endl;
    }
    // Create a two-dimensional vector for storing the Euclidean distances (other direction).
    vector< vector<float> > vec2, unsortedvec2;
    for (int i = 0; i < m2.keypoints.size(); i++)
    {
        vec2.push_back(vector<float>()); // add an empty row
        unsortedvec2.push_back(vector<float>());
    }
    // Create a vector of DMatch for storing the matched points.
    vector<DMatch> matches2;
    DMatch dm2;
    // Loop through keypoints2.
    for (int i = 0; i < m2.keypoints.size(); i++)
    {
        // Get the 128 dimensions in a vector.
        vector<float> k1;
        for (int x = 0; x < 128; x++)
        {
            k1.push_back((float)m2.descriptors.at<float>(i, x));
        }
        // Loop through keypoints1.
        for (int j = 0; j < this->keypoints.size(); j++)
        {
            double temp = 0;
            // Calculate the Euclidean distance.
            for (int x = 0; x < 128; x++)
            {
                temp += pow((k1[x] - (float)this->descriptors.at<float>(j, x)), 2.0);
            }
            vec2[i].push_back((float)sqrt(temp)); // store the distance to each keypoint in image 1
            unsortedvec2[i] = vec2[i];
        }
        sort(vec2[i].begin(), vec2[i].end()); // sort the distances; the first element is the shortest
        // Find the position of the shortest distance.
        int pos = (int)(find(unsortedvec2[i].begin(), unsortedvec2[i].end(), vec2[i][0]) - unsortedvec2[i].begin());
        // Assign that matching feature to the DMatch variable dm2.
        dm2.queryIdx = i;
        dm2.trainIdx = pos;
        dm2.distance = vec2[i][0];
        matches2.push_back(dm2);
        m2.matches.push_back(dm2);
        //cout << pos << endl;
    }
    // Ref: http://docs.opencv.org/2.4/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.html#feature-flann-matcher
    //-- Quick calculation of max and min distances between keypoints1
    double max_dist = 0;
    double min_dist = 500.0;
    for (int i = 0; i < matches1.size(); i++)
    {
        double dist = matches1[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    // Keep only "good" matches1 (i.e. those whose distance is less than 2*min_dist).
    vector<DMatch> good_matches1;
    for (int i = 0; i < matches1.size(); i++)
    {
        if (matches1[i].distance <= 2 * min_dist)
        {
            good_matches1.push_back(matches1[i]);
        }
    }
    // Quick calculation of max and min distances between keypoints2 (not used further).
    for (int i = 0; i < matches2.size(); i++)
    {
        double dist = matches2[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    // Keep only "good" matches by cross-checking that (ft1 gives ft2) and (ft2 gives ft1).
    vector<DMatch> good_matches;
    for (unsigned int i = 0; i < good_matches1.size(); i++)
    {
        // Check ft1=ft2 and ft2=ft1.
        if (good_matches1[i].queryIdx == matches2[good_matches1[i].trainIdx].trainIdx)
            good_matches.push_back(good_matches1[i]);
    }
    return good_matches;
}
Finally, as mentioned in the comments, also look at RANSAC for this. I won't dive into it here to keep the answer short, but you can find resources online and on SO.
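As a rough illustration of that RANSAC step, a minimal sketch (assuming keypoints1, keypoints2 and good_matches come from the matching stage above) using cv::findHomography:
// Sketch: reject geometrically inconsistent matches with RANSAC.
// Requires #include <opencv2/calib3d/calib3d.hpp> and at least 4 matches.
std::vector<cv::Point2f> pts1, pts2;
for (size_t i = 0; i < good_matches.size(); i++)
{
    pts1.push_back(keypoints1[good_matches[i].queryIdx].pt);
    pts2.push_back(keypoints2[good_matches[i].trainIdx].pt);
}
cv::Mat inlierMask;
// Fit a homography between the matched locations; matches that do not
// agree with it (within 3 px reprojection error) are marked as outliers.
cv::Mat H = cv::findHomography(pts1, pts2, CV_RANSAC, 3.0, inlierMask);
std::vector<cv::DMatch> ransac_matches;
for (size_t i = 0; i < good_matches.size(); i++)
{
    if (inlierMask.at<uchar>((int)i))
        ransac_matches.push_back(good_matches[i]);
}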

Related

Opencv Feature Matching is not matching correctly for cropped images of different sizes and images taken from various sources?

I am trying to match two images: one is a screenshot of a mobile screen and the template image is an app icon. If I match a source and a template cropped from the same image, it matches perfectly. But when I use an app icon cropped from a different mobile screenshot, it does not match properly.
For image matching I am working with the following code:
int main( int argc, char** argv )
{
    Mat objectImg = imread("source.jpg", cv::IMREAD_GRAYSCALE);
    Mat sceneImg = imread("note4-3.jpg", cv::IMREAD_GRAYSCALE);
    //cv::resize(sceneImg, sceneImg, objectImg.size(), 0, 0, CV_INTER_CUBIC);
    if( !objectImg.data || !sceneImg.data )
    {
        printf( " No image data \n " );
        return -1337;
    }
    std::vector<cv::KeyPoint> objectKeypoints;
    std::vector<cv::KeyPoint> sceneKeypoints;
    cv::Mat objectDescriptors;
    cv::Mat sceneDescriptors;
    // Detect keypoints with MSER and describe them with ORB
    Ptr<FeatureDetector> detector;
    detector = cv::MSER::create();
    detector->detect(objectImg, objectKeypoints);
    detector->detect(sceneImg, sceneKeypoints);
    Ptr<DescriptorExtractor> extractor = cv::ORB::create();
    extractor->compute( objectImg, objectKeypoints, objectDescriptors );
    extractor->compute( sceneImg, sceneKeypoints, sceneDescriptors );
    // The BruteForce (L2) matcher expects float descriptors
    if(objectDescriptors.type() != CV_32F) {
        objectDescriptors.convertTo(objectDescriptors, CV_32F);
    }
    if(sceneDescriptors.type() != CV_32F) {
        sceneDescriptors.convertTo(sceneDescriptors, CV_32F);
    }
    vector< vector<DMatch> > matches;
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce");
    matcher->knnMatch( objectDescriptors, sceneDescriptors, matches, 8 );
    double max_dist = 0; double min_dist = 100;
    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < objectDescriptors.rows; i++ )
    {
        double dist = matches[i][0].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    std::vector<cv::DMatch> good_matches;
    for( int i = 0; i < objectDescriptors.rows; i++ )
    {
        if( matches[i][0].distance <= max(2*min_dist, 0.02) ) {
            good_matches.push_back( matches[i][0]);
        }
    }
    // Look whether the match is inside a defined area of the image
    // (only 25% of the maximum possible distance)
    /*double tresholdDist = 0.50 * sqrt(double(sceneImg.size().height*sceneImg.size().height + sceneImg.size().width*sceneImg.size().width));
    vector< DMatch > good_matches2;
    good_matches2.reserve(matches.size());
    for (size_t i = 0; i < matches.size(); ++i)
    {
        for (int j = 0; j < matches[i].size(); j++)
        {
            Point2f from = objectKeypoints[matches[i][j].queryIdx].pt;
            Point2f to = sceneKeypoints[matches[i][j].trainIdx].pt;
            // calculate local distance for each possible match
            double dist = sqrt((from.x - to.x) * (from.x - to.x) + (from.y - to.y) * (from.y - to.y));
            // save as best match if local distance is in specified area and on same height
            if (dist < tresholdDist && abs(from.y - to.y) < 5)
            {
                good_matches2.push_back(matches[i][j]);
                j = matches[i].size();
            }
        }
    }*/
    Mat allmatchs;
    drawMatches(objectImg, objectKeypoints, sceneImg, sceneKeypoints, good_matches, allmatchs, Scalar::all(-1), Scalar::all(-1), vector<char>(), 0);
    namedWindow("Matchs", CV_WINDOW_NORMAL);
    imshow( "Matchs", allmatchs);
    waitKey(0);
}
[Wrong match when cropped from a different source][1]
The above result is obtained when matching a source from one mobile screenshot against a template from a different screenshot.
I am using OpenCV 3.0.
Please advise whether I have to make changes in the code, or whether I have to use template matching or some other technique. I cannot use SURF detectors since I cannot use paid versions due to license conflicts.
Sample images:
Source Image
Template
Looking at the images you've provided, I can suggest some changes which will help you out.
Remove the "good matches" selection; it creates issues when sharp features are present. Sharp features have a much smaller Hamming distance than other good matches, so when you select with 2*min_dist you indirectly ignore possible good matches.
Make sure you have a reasonable number of feature points in the object image.
If this feature detector and descriptor combination doesn't work out, try other feature detectors and descriptors such as STAR-BRIEF or SURF, which are far better than MSER-ORB.
In your situation the detector-matcher need not be rotation invariant, but it should be scale invariant, so try resizing the object image (a rough sketch follows below).
Hope my suggestions help you.
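To illustrate the resizing point, a rough sketch (the scale set is an assumption, and ORB with cross-checked Hamming matching stands in for whatever detector/descriptor you settle on) that tries a few object-image scales and reports which one yields the most matches:
#include <opencv2/features2d.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>

// Sketch: brute-force over a few scales of the object image and keep
// the scale that produces the most cross-checked matches.
double bestObjectScale(const cv::Mat& objectImg, const cv::Mat& sceneImg)
{
    cv::Ptr<cv::ORB> orb = cv::ORB::create();
    std::vector<cv::KeyPoint> kpScene;
    cv::Mat descScene;
    orb->detectAndCompute(sceneImg, cv::noArray(), kpScene, descScene);

    cv::BFMatcher matcher(cv::NORM_HAMMING, true); // cross-check enabled
    const double scales[] = {0.5, 0.75, 1.0, 1.5, 2.0}; // assumed scale set
    double best = 1.0;
    size_t bestCount = 0;
    for (int i = 0; i < 5; i++)
    {
        cv::Mat resized;
        cv::resize(objectImg, resized, cv::Size(), scales[i], scales[i]);
        std::vector<cv::KeyPoint> kpObj;
        cv::Mat descObj;
        orb->detectAndCompute(resized, cv::noArray(), kpObj, descObj);
        std::vector<cv::DMatch> matches;
        if (!descObj.empty() && !descScene.empty())
            matcher.match(descObj, descScene, matches);
        if (matches.size() > bestCount) { bestCount = matches.size(); best = scales[i]; }
    }
    return best;
}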
I got better matches with the following combination:
KAZE detector
KAZE extractor
BruteForce-L1 matcher
combined with the cross-check matching given in the following link:
http://ecee.colorado.edu/~siewerts/extra/ecen5043/ecen5043_code/sift/descriptor_extractor_matcher.cpp
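Not the code from that link, but a minimal sketch of the same cross-check idea with the combination above (OpenCV 3.x API assumed; BFMatcher's crossCheck flag keeps a match only when each descriptor is the other's nearest neighbor):
#include <opencv2/features2d.hpp>
#include <vector>

// Sketch: symmetric (cross-check) matching with KAZE features
// and an L1 brute-force matcher.
std::vector<cv::DMatch> crossCheckMatch(const cv::Mat& objectImg, const cv::Mat& sceneImg)
{
    std::vector<cv::KeyPoint> kp1, kp2;
    cv::Mat desc1, desc2;
    cv::Ptr<cv::KAZE> kaze = cv::KAZE::create();
    kaze->detectAndCompute(objectImg, cv::noArray(), kp1, desc1);
    kaze->detectAndCompute(sceneImg, cv::noArray(), kp2, desc2);

    // crossCheck=true: keep (i, j) only if j is i's nearest neighbor and vice versa.
    cv::BFMatcher matcher(cv::NORM_L1, true);
    std::vector<cv::DMatch> matches;
    matcher.match(desc1, desc2, matches);
    return matches;
}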

What algorithm does cv::arcLength use to compute the perimeter?

I am currently doing a project which requires some structural analysis, like finding the perimeter and area. I have successfully obtained the contour of the object.
When I use the contour.size() function it returns 1108 (in this case).
When I use the cv::arcLength(contour) function it returns 1200.
Shouldn't the perimeter be the number of points of the contour (the contour is the external boundary of the object)? Which should I trust?
Not necessarily: cv::arcLength sums the Euclidean distances between consecutive points on the curve.
Here is a code snippet from cv::arcLength:
...
const Point2f* ptf = curve.ptr<Point2f>();
...
for( i = 0; i < count; i++ )
{
    Point2f p = ptf[i];
    float dx = p.x - prev.x, dy = p.y - prev.y;
    buf[j] = dx*dx + dy*dy;
    if( ++j == N || i == count-1 )
    {
        Mat bufmat(1, j, CV_32F, buf);
        sqrt(bufmat, bufmat);
        for( ; j > 0; j-- )
            perimeter += buf[j-1];
    }
    prev = p;
}
return perimeter;
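To make the distinction concrete, a small self-contained example (values are illustrative): a 10x10 square stored as a 4-point contour has contour.size() == 4, while its perimeter is 40:
#include <opencv2/imgproc/imgproc.hpp>
#include <cmath>
#include <iostream>
#include <vector>

int main()
{
    // A 10x10 axis-aligned square stored as a 4-point contour.
    std::vector<cv::Point> contour;
    contour.push_back(cv::Point(0, 0));
    contour.push_back(cv::Point(10, 0));
    contour.push_back(cv::Point(10, 10));
    contour.push_back(cv::Point(0, 10));

    // Sum of Euclidean distances between consecutive points (closed curve).
    double perimeter = 0;
    for (size_t i = 0; i < contour.size(); i++)
    {
        cv::Point a = contour[i];
        cv::Point b = contour[(i + 1) % contour.size()];
        perimeter += std::sqrt(double((a.x-b.x)*(a.x-b.x) + (a.y-b.y)*(a.y-b.y)));
    }
    std::cout << "points: "    << contour.size()                 // 4
              << " manual: "   << perimeter                      // 40
              << " arcLength: " << cv::arcLength(contour, true)  // 40
              << std::endl;
    return 0;
}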

Detecting edges of a card with rounded corners

Hi, currently I am working on an OCR reading app where I have successfully been able to capture the card image using the AVFoundation framework.
For the next step, I need to find the edges of the card so that I can crop the card image from the main captured image and later send it to the OCR engine for processing.
The main problem now is finding the edges of the card. I am using the code below (taken from another open-source project), which uses OpenCV for this purpose. It works fine if the card is a purely rectangular card or paper, but it fails to detect a card with rounded corners (e.g. a driving license). I don't have much expertise in OpenCV; can anyone help me solve this issue?
- (void)detectEdges
{
    cv::Mat original = [MAOpenCV cvMatFromUIImage:_adjustedImage];
    CGSize targetSize = _sourceImageView.contentSize;
    cv::resize(original, original, cvSize(targetSize.width, targetSize.height));
    cv::vector<cv::vector<cv::Point>> squares;
    cv::vector<cv::Point> largest_square;
    find_squares(original, squares);
    find_largest_square(squares, largest_square);
    if (largest_square.size() == 4)
    {
        // Manually sorting points, needs major improvement. Sorry.
        NSMutableArray *points = [NSMutableArray array];
        NSMutableDictionary *sortedPoints = [NSMutableDictionary dictionary];
        for (int i = 0; i < 4; i++)
        {
            NSDictionary *dict = [NSDictionary dictionaryWithObjectsAndKeys:[NSValue valueWithCGPoint:CGPointMake(largest_square[i].x, largest_square[i].y)], @"point", [NSNumber numberWithInt:(largest_square[i].x + largest_square[i].y)], @"value", nil];
            [points addObject:dict];
        }
        int min = [[points valueForKeyPath:@"@min.value"] intValue];
        int max = [[points valueForKeyPath:@"@max.value"] intValue];
        int minIndex;
        int maxIndex;
        int missingIndexOne;
        int missingIndexTwo;
        for (int i = 0; i < 4; i++)
        {
            NSDictionary *dict = [points objectAtIndex:i];
            if ([[dict objectForKey:@"value"] intValue] == min)
            {
                [sortedPoints setObject:[dict objectForKey:@"point"] forKey:@"0"];
                minIndex = i;
                continue;
            }
            if ([[dict objectForKey:@"value"] intValue] == max)
            {
                [sortedPoints setObject:[dict objectForKey:@"point"] forKey:@"2"];
                maxIndex = i;
                continue;
            }
            NSLog(@"MISSING %i", i);
            missingIndexOne = i;
        }
        for (int i = 0; i < 4; i++)
        {
            if (missingIndexOne != i && minIndex != i && maxIndex != i)
            {
                missingIndexTwo = i;
            }
        }
        if (largest_square[missingIndexOne].x < largest_square[missingIndexTwo].x)
        {
            // 2nd point found
            [sortedPoints setObject:[[points objectAtIndex:missingIndexOne] objectForKey:@"point"] forKey:@"3"];
            [sortedPoints setObject:[[points objectAtIndex:missingIndexTwo] objectForKey:@"point"] forKey:@"1"];
        }
        else
        {
            // 4th point found
            [sortedPoints setObject:[[points objectAtIndex:missingIndexOne] objectForKey:@"point"] forKey:@"1"];
            [sortedPoints setObject:[[points objectAtIndex:missingIndexTwo] objectForKey:@"point"] forKey:@"3"];
        }
        [_adjustRect topLeftCornerToCGPoint:[(NSValue *)[sortedPoints objectForKey:@"0"] CGPointValue]];
        [_adjustRect topRightCornerToCGPoint:[(NSValue *)[sortedPoints objectForKey:@"1"] CGPointValue]];
        [_adjustRect bottomRightCornerToCGPoint:[(NSValue *)[sortedPoints objectForKey:@"2"] CGPointValue]];
        [_adjustRect bottomLeftCornerToCGPoint:[(NSValue *)[sortedPoints objectForKey:@"3"] CGPointValue]];
    }
    original.release();
}
This naive implementation is based on some of the techniques demonstrated in squares.cpp, available in the OpenCV sample directory. The following posts also discuss similar applications:
OpenCV C++/Obj-C: Detecting a sheet of paper / Square Detection
Square detection doesn't find squares
Find corner of papers
@John, the code below has been tested with the sample image you provided and another one I created:
The processing pipeline starts with findSquares(), a simplification of the same function implemented by OpenCV's squares.cpp demo. This function converts the input image to grayscale and applies a blur to improve the detection of the edges (Canny):
The edge detection is good, but a morphological operation (dilation) is needed to join nearby lines:
After that we try to find the contours (edges) and assemble squares out of them. If we tried to draw all the detected squares on the input images, this would be the result:
It looks good, but it's not exactly what we are looking for since there are too many detected squares. However, the largest square is actually the card, so from here on it's pretty simple and we just figure out which of the squares is the largest. That's exactly what findLargestSquare() does.
Once we know the largest square, we simply paint red dots at the corners of the square for debugging purposes:
As you can see, the detection is not perfect but it seems good enough for most uses. This is not a robust solution and I only wanted to share one approach to solve the problem. I'm sure that there are other ways to deal with this that might be more interesting to you. Good luck!
#include <iostream>
#include <cmath>
#include <vector>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgproc/imgproc_c.h>

/* angle: finds the cosine of the angle between vectors pt0->pt1 and pt0->pt2
 */
double angle(cv::Point pt1, cv::Point pt2, cv::Point pt0)
{
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}

/* findSquares: returns a sequence of squares detected in the image
 */
void findSquares(const cv::Mat& src, std::vector<std::vector<cv::Point> >& squares)
{
    cv::Mat src_gray;
    cv::cvtColor(src, src_gray, cv::COLOR_BGR2GRAY);

    // Blur helps to decrease the amount of detected edges
    cv::Mat filtered;
    cv::blur(src_gray, filtered, cv::Size(3, 3));
    cv::imwrite("out_blur.jpg", filtered);

    // Detect edges
    cv::Mat edges;
    int thresh = 128;
    cv::Canny(filtered, edges, thresh, thresh*2, 3);
    cv::imwrite("out_edges.jpg", edges);

    // Dilate helps to connect nearby line segments
    cv::Mat dilated_edges;
    cv::dilate(edges, dilated_edges, cv::Mat(), cv::Point(-1, -1), 2, 1, 1); // default 3x3 kernel
    cv::imwrite("out_dilated.jpg", dilated_edges);

    // Find contours and store them in a list
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(dilated_edges, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);

    // Test contours and assemble squares out of them
    std::vector<cv::Point> approx;
    for (size_t i = 0; i < contours.size(); i++)
    {
        // approximate contour with accuracy proportional to the contour perimeter
        cv::approxPolyDP(cv::Mat(contours[i]), approx, cv::arcLength(cv::Mat(contours[i]), true)*0.02, true);

        // Note: the absolute value of the area is used because the
        // area may be positive or negative, in accordance with the
        // contour orientation
        if (approx.size() == 4 && std::fabs(contourArea(cv::Mat(approx))) > 1000 &&
            cv::isContourConvex(cv::Mat(approx)))
        {
            double maxCosine = 0;
            for (int j = 2; j < 5; j++)
            {
                double cosine = std::fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                maxCosine = MAX(maxCosine, cosine);
            }

            if (maxCosine < 0.3)
                squares.push_back(approx);
        }
    }
}

/* findLargestSquare: find the largest square within a set of squares
 */
void findLargestSquare(const std::vector<std::vector<cv::Point> >& squares,
                       std::vector<cv::Point>& biggest_square)
{
    if (!squares.size())
    {
        std::cout << "findLargestSquare !!! No squares detected, nothing to do." << std::endl;
        return;
    }

    int max_width = 0;
    int max_height = 0;
    int max_square_idx = 0;
    for (size_t i = 0; i < squares.size(); i++)
    {
        // Convert a set of 4 unordered Points into a meaningful cv::Rect structure.
        cv::Rect rectangle = cv::boundingRect(cv::Mat(squares[i]));
        //std::cout << "find_largest_square: #" << i << " rectangle x:" << rectangle.x << " y:" << rectangle.y << " " << rectangle.width << "x" << rectangle.height << endl;

        // Store the index position of the biggest square found
        if ((rectangle.width >= max_width) && (rectangle.height >= max_height))
        {
            max_width = rectangle.width;
            max_height = rectangle.height;
            max_square_idx = i;
        }
    }
    biggest_square = squares[max_square_idx];
}

int main()
{
    cv::Mat src = cv::imread("cc.png");
    if (src.empty())
    {
        std::cout << "!!! Failed to open image" << std::endl;
        return -1;
    }

    std::vector<std::vector<cv::Point> > squares;
    findSquares(src, squares);

    // Draw all detected squares
    cv::Mat src_squares = src.clone();
    for (size_t i = 0; i < squares.size(); i++)
    {
        const cv::Point* p = &squares[i][0];
        int n = (int)squares[i].size();
        cv::polylines(src_squares, &p, &n, 1, true, cv::Scalar(0, 255, 0), 2, CV_AA);
    }
    cv::imwrite("out_squares.jpg", src_squares);
    cv::imshow("Squares", src_squares);

    std::vector<cv::Point> largest_square;
    findLargestSquare(squares, largest_square);

    // Draw circles at the corners
    for (size_t i = 0; i < largest_square.size(); i++)
        cv::circle(src, largest_square[i], 4, cv::Scalar(0, 0, 255), cv::FILLED);
    cv::imwrite("out_corners.jpg", src);
    cv::imshow("Corners", src);

    cv::waitKey(0);
    return 0;
}
instead of "pure" rectangular blobs, try to go for nearly rectangular ones.
1- gaussian blur
2- grayscale and canny edge detection
3- extract all blobs (contours) in your image and filter out small ones. you will use findcontours and contourarea functions for that purpose.
4- using moments, filter out non-rectangular ones. First you need to check out moments of rectangle-like objects. You can do it by yourself or google it. Then list those moments and find similarity between objects, create your filter as such.
Ex: After test, say you found out central moment m30's are similar for rectangle-like objects -> filter out objects having inaccurate m30.
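As a rough sketch of step 4 (the reference rectangle, the area cutoff and the 0.1 threshold are assumptions, and cv::matchShapes' Hu-moment comparison stands in for the hand-built moment filter described above):
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgproc/types_c.h> // for CV_CONTOURS_MATCH_I1
#include <vector>

// Sketch: keep only contours whose shape is close to a rectangle,
// measured by Hu-moment similarity (cv::matchShapes).
std::vector<std::vector<cv::Point> > filterRectangular(
    const std::vector<std::vector<cv::Point> >& contours)
{
    // Reference shape: an ideal rectangle (its size is irrelevant,
    // since Hu moments are scale and rotation invariant).
    std::vector<cv::Point> rect;
    rect.push_back(cv::Point(0, 0));
    rect.push_back(cv::Point(100, 0));
    rect.push_back(cv::Point(100, 60));
    rect.push_back(cv::Point(0, 60));

    std::vector<std::vector<cv::Point> > kept;
    for (size_t i = 0; i < contours.size(); i++)
    {
        if (cv::contourArea(contours[i]) < 1000)
            continue; // step 3: drop small blobs (assumed cutoff)
        // Smaller score = more similar shape; 0.1 is an assumed threshold.
        double score = cv::matchShapes(contours[i], rect, CV_CONTOURS_MATCH_I1, 0);
        if (score < 0.1)
            kept.push_back(contours[i]);
    }
    return kept;
}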
I know it may be too late for this post, but I am posting it in case it might help someone else.
The iOS Core Image framework already has good tools to detect features such as rectangles (since iOS 5), faces, QR codes and even regions containing text in a still image. If you check out the CIDetector class, you'll find what you need. I am using it for an OCR app too; it's super easy and very reliable compared to what you can do with OpenCV (I am not good with OpenCV, but CIDetector gives much better results with 3-5 lines of code).
I don't know if it is an option, but you could have the user define the edges of the card rather than trying to do it programmatically.

compare 2 histograms with Chi-Square

I want to compare two histograms which have two dimensions.
For this I want to use the chi-square metric.
My comparison function looks like this:
double Histogram::compareHistogram(Histogram *hist){
    double result = 0;
    double a = 0;
    double b = 0;
    for (int y = 0; y < bins_1; y++) {
        for (int x = 0; x < bins_2; x++) {
            a = getHistogramValue(x,y) - hist->getHistogramValue(x,y);
            b = getHistogramValue(x,y) + hist->getHistogramValue(x,y);
            if (fabs(b) > 0.0) {
                result += a*a/b;
            }
        }
    }
    return result;
}
I've compared the result with the result of OpenCV's cv::compareHist() function and it is different; I don't know why.
Before comparing the histograms, I normalize them with the MINMAX norm.
I compared my normalized histogram with OpenCV's normalized histogram and they are equal.
So I think the problem is in my compareHist function.
But where?
The relevant section of source code from OpenCV is as follows:
if( method == CV_COMP_CHISQR )
{
    for( j = 0; j < len; j++ )
    {
        double a = h1[j] - h2[j];
        double b = h1[j];
        if( fabs(b) > DBL_EPSILON )
            result += a*a/b;
    }
}
So you can see that the difference in your code is this line
b=getHistogramValue(x,y)+hist->getHistogramValue(x,y);
which should be
b=getHistogramValue(x,y);
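To verify, a small self-contained check (the histogram contents are arbitrary) comparing the hand-rolled sum with b = h1[j] against cv::compareHist:
#include <opencv2/imgproc/imgproc.hpp>
#include <cfloat>
#include <cmath>
#include <iostream>

int main()
{
    // Two small 1D float histograms with arbitrary contents.
    float d1[] = {1.0f, 2.0f, 3.0f, 4.0f};
    float d2[] = {2.0f, 2.0f, 1.0f, 5.0f};
    cv::Mat h1(1, 4, CV_32F, d1);
    cv::Mat h2(1, 4, CV_32F, d2);

    // Hand-rolled chi-square with OpenCV's convention: denominator is h1 only.
    double result = 0;
    for (int j = 0; j < h1.cols; j++)
    {
        double a = h1.at<float>(j) - h2.at<float>(j);
        double b = h1.at<float>(j);
        if (std::fabs(b) > DBL_EPSILON)
            result += a*a/b;
    }
    std::cout << "manual:      " << result << std::endl;
    std::cout << "compareHist: " << cv::compareHist(h1, h2, CV_COMP_CHISQR) << std::endl;
    return 0;
}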

OpenCV squares: filtering output

Here is the output of the square-detection example. My problem is filtering these squares:
The first problem is that it draws more than one line for the same area.
The second one is that I need to detect the object only, not the whole image.
The other problem is that I have to take just the biggest object, excluding the whole image.
Here is the code for detection:
static void findSquares( const Mat& image, vector<vector<Point> >& squares ){
    squares.clear();
    Mat pyr, timg, gray0(image.size(), CV_8U), gray;

    // down-scale and upscale the image to filter out the noise
    pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
    pyrUp(pyr, timg, image.size());
    vector<vector<Point> > contours;

    // find squares in every color plane of the image
    for( int c = 0; c < 3; c++ )
    {
        int ch[] = {c, 0};
        mixChannels(&timg, 1, &gray0, 1, ch, 1);

        // try several threshold levels
        for( int l = 0; l < N; l++ )
        {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if( l == 0 )
            {
                // apply Canny. Take the upper threshold from the slider
                // and set the lower to 0 (which forces edges merging)
                Canny(gray0, gray, 0, thresh, 5);
                // dilate canny output to remove potential
                // holes between edge segments
                dilate(gray, gray, Mat(), Point(-1,-1));
            }
            else
            {
                // apply threshold if l != 0
                gray = gray0 >= (l+1)*255/N;
            }

            // find contours and store them all as a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            vector<Point> approx;
            // test each contour
            for( size_t i = 0; i < contours.size(); i++ )
            {
                approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
                if( approx.size() == 4 &&
                    fabs(contourArea(Mat(approx))) > 1000 &&
                    isContourConvex(Mat(approx)) )
                {
                    double maxCosine = 0;
                    for( int j = 2; j < 5; j++ )
                    {
                        // find the maximum cosine of the angle between joint edges
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }
                    if( maxCosine < 0.3 )
                        squares.push_back(approx);
                }
            }
        }
    }
}
You need to take a look at the flags for findContours(). You can set a flag called CV_RETR_EXTERNAL which will return only the outermost contour (all contours inside of it are thrown away). This will probably return the entire frame, so you'll need to narrow down the search so that it doesn't pick up your frame boundaries. Use the function copyMakeBorder() to accomplish this (a rough sketch follows). I would also recommend removing your dilate function, as it is probably causing duplicate contours on either side of a line (you might not even need the border if you remove the dilate). Here is my output:
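As a rough sketch of that suggestion (the 10-pixel border is an assumed value), padding the binary image and retrieving only the outermost contours:
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>

// Sketch: pad the binary image so the frame boundary is not picked up
// as a contour, then keep only the outermost contours.
std::vector<std::vector<cv::Point> > outerContours(const cv::Mat& binary)
{
    cv::Mat padded;
    // A 10-pixel black border (assumed value) separates the image content
    // from the frame, so CV_RETR_EXTERNAL does not return the frame itself.
    cv::copyMakeBorder(binary, padded, 10, 10, 10, 10,
                       cv::BORDER_CONSTANT, cv::Scalar(0));

    std::vector<std::vector<cv::Point> > contours;
    // CV_RETR_EXTERNAL: inner (nested) contours are discarded.
    cv::findContours(padded, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    // Shift points back to the original coordinate frame.
    for (size_t i = 0; i < contours.size(); i++)
        for (size_t j = 0; j < contours[i].size(); j++)
            contours[i][j] -= cv::Point(10, 10);
    return contours;
}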
