Basically, the task is:
Implement automatic landing/takeoff of a quadcopter that can carry one FlyCam/GoPro camera. Orientation should happen relative to the altitude and the position of the landing platform on the 2D plane, including rotation. That means the drone has a "head" and a "tail" and should land in a specific position and orientation.
The landing platform looks like this:
The corner shapes are for orientation at large distances, and the small repetitive shapes in the center circle are for the exact landing.
What approach would you take to solve this task?
Here is some pseudocode, assuming you already have full access to the motor control API, i.e. you have successfully defined what is needed for changing altitude, rotating left, etc.
loop
{
if(landing board detected)
{
if(circle including the center point detected)
{
find orientation from corner circles' center
change device's orientation accordingly
}
else
{
lose altitude & move towards the center point
}
}
else
{
move around
}
}
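To make the loop above a bit more concrete, here is a minimal C++ skeleton of the same logic. All of the declared functions (grabFrame, detectBoard, detectCenterCircle, setYaw, moveToward, descend, search) are hypothetical placeholders for whatever your camera, vision and flight-control layers actually expose; this is a sketch, not a working implementation.
#include <opencv2/opencv.hpp>

// Hypothetical interfaces -- adapt them to your own flight stack and detector.
cv::Mat grabFrame();                              // read the next camera frame
bool detectBoard(const cv::Mat& frame, cv::Point& boardCenter);
bool detectCenterCircle(const cv::Mat& frame, double& headingError);
void setYaw(double headingError);                 // rotate until head/tail match the platform
void moveToward(const cv::Point& target);         // lateral correction toward the board center
void descend();                                   // lose altitude
void search();                                    // move around until the board is seen

void landingLoop()
{
    for (;;)
    {
        cv::Mat frame = grabFrame();
        cv::Point boardCenter;
        if (detectBoard(frame, boardCenter))
        {
            double headingError;
            if (detectCenterCircle(frame, headingError))
            {
                // find orientation from the corner circles and correct it
                setYaw(headingError);
            }
            else
            {
                // lose altitude & move towards the center point
                descend();
                moveToward(boardCenter);
            }
        }
        else
        {
            search();
        }
    }
}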
Landing board & its center:
Assumption: It is the biggest & nearly perfect square.
1- Threshold
2- Extract contours
3- Apply shape (square) filter to contours
4- Find the biggest contour
5- Find its center
6- Crop the image with the bounding rect of this contour
Mat image = imread("~\\image.jpg");
// scale down for faster processing
pyrDown(image, image);
pyrDown(image, image);
// safe copy
Mat temp = image.clone();
// noise reduction & thresholding
GaussianBlur(image, image, Size(5,5), 3);
cvtColor(image, image, CV_BGR2GRAY);
threshold(image, image, 127, 255, CV_THRESH_OTSU);
// extract all contours
vector<vector<Point> > contours;
findContours(image, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
// define a perfect square
vector<Point> square;
square.push_back(Point(0,0));
square.push_back(Point(0,10));
square.push_back(Point(10,10));
square.push_back(Point(10,0));
// filter out contours that are not square
bool erased;
for(unsigned int i = 0; i<contours.size(); i++)
{
erased = false;
double x = matchShapes(contours[i], square, CV_CONTOURS_MATCH_I2, 0);
if(x > 0.005)
{
contours.erase(contours.begin() + i);
erased = true;
}
if(erased) i--;
}
// area filtering to find the biggest square contour
vector<double> contourAreas(contours.size());
for(unsigned int i = 0; i<contours.size(); i++)
{
contourAreas[i] = contourArea(contours[i]);
}
int ID = max_element(contourAreas.begin(), contourAreas.end()) - contourAreas.begin();
for(unsigned int i = 0; i<contours.size(); i++)
{
erased = false;
if(i != ID)
{
contours.erase(contours.begin() + i);
erased = true;
ID--;
}
if(erased) i--;
}
// find the bounding rect of this contour and crop the image within that rect
vector<Point> total;
for(unsigned int j = 0; j<contours[0].size(); j++)
{
total.push_back(contours[0][j]);
}
Rect rect = boundingRect(total);
Mat t = Mat(temp, rect);
// find the center of the landing board - to move towards it when necessary
Moments m = moments(contours[0], false);
Point center = Point(cvRound(m.m10/m.m00), cvRound(m.m01/m.m00));
Now that we have detected the board, we need to detect the corner circles for orientation.
1- Threshold
2- Extract contours
3- Apply shape (circular) filter to contours
4- Filter out circles close to the center of the board
5- The resulting circles are the corner circles; find the center of the biggest one
// threshold
Mat gray;
cvtColor(t, gray, CV_BGR2GRAY);
threshold(gray, gray, 127, 255, CV_THRESH_OTSU); // threshold value is ignored when Otsu picks it automatically
// extract contours
vector<vector<Point> > conts;
findContours(gray, conts, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
// circularity check
for(unsigned int i = 0; i<conts.size(); i++)
{
erased = false;
if(4*3.14*contourArea(conts[i]) / ((arcLength(conts[i],true) * arcLength(conts[i],true))) < 0.85)
{
conts.erase(conts.begin() + i);
erased = true;
}
if(erased) i--;
}
// position check - filtering out center circle
vector<Moments> mu(conts.size());
vector<Point2f> mc(conts.size());
for(unsigned int i = 0; i<conts.size(); i++ )
{
mu[i] = moments(conts[i], false);
}
for(unsigned int i = 0; i <conts.size(); i++ )
{
mc[i] = Point2f(mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00);
}
for(unsigned int i=0; i<conts.size(); i++)
{
erased = false;
if((((int)mc[i].x > t.cols/3) && ((int)mc[i].x < 2*t.cols/3) && ((int)mc[i].y < 2*t.rows/3) && ((int)mc[i].y > t.rows/3)))
{
mc.erase(mc.begin() + i);
conts.erase(conts.begin() + i);
erased = true;
}
if(erased) i--;
}
// selecting the biggest circle
vector<double> contAreas(conts.size());
for(unsigned int i = 0; i<conts.size(); i++)
{
contAreas[i] = contourArea(conts[i]);
}
ID = max_element(contAreas.begin(), contAreas.end()) - contAreas.begin();
for(unsigned int i = 0; i<conts.size(); i++)
{
erased = false;
if(i != ID)
{
conts.erase(conts.begin() + i);
erased = true;
ID--;
}
if(erased) i--;
}
drawContours(t, conts, -1, Scalar(0,255,255));
// finding its center - this is nothing but current orientation
Moments m2 = moments(conts[0], false);
Point c = Point(cvRound(m2.m10/m2.m00), cvRound(m2.m01/m2.m00));
Input image
Detected biggest square (Mat t)
Detected biggest circle that is not close to the center, inside that biggest square (conts[0])
Circle center and board center respectively, for orientation purposes
EDIT: The board center (center) is a position in the full image, whereas the circle center (c) is a position within the cropped board (t). The only thing left is to find the slope of the line that passes through the board center and the circle center; a short sketch of that computation follows.
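As a rough sketch only, continuing the snippets above (center, c and rect are the variables computed earlier; the remapping by rect.tl() is my assumption, to bring both points into the cropped board's coordinates), the heading can be computed with atan2 instead of a raw slope, which avoids the vertical-line singularity:
// bring the board center into the same coordinate frame as the circle center
Point boardCenterInCrop = center - rect.tl();
double dx = c.x - boardCenterInCrop.x;
double dy = c.y - boardCenterInCrop.y;
// angle of the line from the board center to the corner-circle center
double headingRad = atan2(dy, dx);
double headingDeg = headingRad * 180.0 / CV_PI; // rotate the drone until this matches the desired value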
I need something like OpenCV C++/Obj-C: Detecting a sheet of paper / Square Detection.
My code works like a charm when the background and the foreground are not the same, but if the background is almost the same color as the document it no longer works.
Here is a picture with a beige background and an almost-beige document where it is not working. Can somebody help me fix this code?
https://i.imgur.com/81DrIIK.jpg
and the code is here:
vector<Point> getPoints(Mat image)
{
int width = image.size().width;
int height = image.size().height;
Mat image_proc = image.clone();
vector<vector<Point> > squares;
// blur will enhance edge detection
Mat blurred(image_proc);
medianBlur(image_proc, blurred, 9);
Mat gray0(blurred.size(), CV_8U), gray;
vector<vector<Point> > contours;
// find squares in every color plane of the image
for (int c = 0; c < 3; c++)
{
int ch[] = {c, 0};
mixChannels(&blurred, 1, &gray0, 1, ch, 1);
// try several threshold levels
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++)
{
// Use Canny instead of zero threshold level!
// Canny helps to catch squares with gradient shading
if (l == 0)
{
Canny(gray0, gray, 10, 20, 3); //
// Dilate helps to remove potential holes between edge segments
dilate(gray, gray, Mat(), Point(-1,-1));
}
else
{
gray = gray0 >= (l+1) * 255 / threshold_level;
}
// Find contours and store them in a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Test contours
vector<Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if (approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
squares.push_back(approx);
}
}
}
}
// pick the largest square only after all color planes have been scanned
double largest_area = -1;
int largest_contour_index = 0;
for(int i=0;i<squares.size();i++)
{
double a =contourArea(squares[i],false);
if(a>largest_area)
{
largest_area = a;
largest_contour_index = i;
}
}
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Scanning size() %d", (int)squares.size());
vector<Point> points;
if(squares.size() > 0)
{
points = squares[largest_contour_index];
}
else
{
points.push_back(Point(0, 0));
points.push_back(Point(width, 0));
points.push_back(Point(0, height));
points.push_back(Point(width, height));
}
return points;
}
Thanks
You can do the threshold operation in the S channel of the HSV color space. https://en.wikipedia.org/wiki/HSL_and_HSV#General_approach
I just split the channels of BGR and HSV as follows. More operations are needed.
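A minimal sketch of that idea, assuming a placeholder file name and using Otsu to pick the threshold on the saturation plane (in practice you may prefer a hand-tuned value):
#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat bgr = cv::imread("document.jpg"); // placeholder path
    if (bgr.empty()) return -1;

    cv::Mat hsv;
    cv::cvtColor(bgr, hsv, cv::COLOR_BGR2HSV);

    // Split into H, S, V planes and threshold the saturation channel,
    // where a colored background separates better from a pale document
    // than in grayscale intensity.
    std::vector<cv::Mat> planes;
    cv::split(hsv, planes);
    cv::Mat mask;
    cv::threshold(planes[1], mask, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);

    cv::imwrite("s_channel.jpg", planes[1]);
    cv::imwrite("s_mask.jpg", mask);
    return 0;
}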
I am using an iPhone 5s to do black object tracking, but I often get
Thread 6: EXC_BAD_ACCESS (code=1, address=0x8)
and then my app quits suddenly. Could anyone tell me why this happens?
The error happens at:
template<typename _Tp> inline
_Tp Rect_<_Tp>::area() const
{
return width * height; //Thread 6:EXC_BAD_ACCESS(code 1, dress=0x8)
}
//this method is in types.hpp in the latest OpenCV framework
My colored-object recognition code is below:
#pragma mark - Protocol CvVideoCameraDelegate
#ifdef __cplusplus
- (void)processImage:(cv::Mat &)image{
Mat imageCopy,imageCopy2;
cvtColor(image, imageCopy, COLOR_BGRA2BGR);
cvtColor(imageCopy, imageCopy2, COLOR_BGR2HSV);
//smooth the image
GaussianBlur(imageCopy2, imageCopy, cv::Size(5,5),0, 0);
cv::inRange(imageCopy, cv::Scalar(0,0,0,0), cv::Scalar(180,255,30,0),
imageCopy2);
/*****************************find the contour of the detected area and draw it***********************************/
//2-D point vector to store the contours
std::vector< std::vector<cv::Point>> contour1;
//do opening on the binary thresholded image
int erosionSize = 3;
Mat erodeElement =
getStructuringElement(cv::MORPH_ELLIPSE,cv::Size(2*erosionSize+1,2* erosionSize+1), cv::Point(erosionSize,erosionSize));
erode(imageCopy2, imageCopy2, erodeElement);
dilate(imageCopy2, imageCopy2, erodeElement);
//actual call that finds the contours
cv::findContours(imageCopy2, contour1, RETR_EXTERNAL, CHAIN_APPROX_NONE);
//set the color used to draw the contour
Scalar color1 = Scalar(50,50,50);
//loop the contour to draw the contour
for(int i=0; i< contour1.size(); i++){
drawContours(image, contour1, i, color1);
}
/******END*****/
/****************************Approximate the contour to a polygon && get bounding Rectangle and Circle*************/
std::vector<std::vector<cv::Point>> contours_poly(contour1.size());
std::vector<cv::Rect> boundedRect(contour1.size());
std::vector<cv::Point2f> circleCenter(contour1.size());
std::vector<float> circleRadius(contour1.size());
for (int i=0; i< contour1.size(); i++){
approxPolyDP(Mat(contour1[i]), contours_poly[i], 3, true);
boundedRect[i] = boundingRect(Mat(contours_poly[i]));
minEnclosingCircle((Mat)contours_poly[i], circleCenter[i], circleRadius[i]);
}
/******END*******/
/*****************************draw the rectangle for detected area ***********************************************/
Scalar recColor = Scalar(121,200,60);
Scalar fontColor = Scalar(0,0,225);
//find the largest contour
int largestContourIndex=0;
for (int i=0; i<contour1.size(); i++){
if(boundedRect[i].area()> boundedRect[largestContourIndex].area())
largestContourIndex=i;
}
int j=largestContourIndex;
if(boundedRect[j].area()>40){
rectangle(image, boundedRect[j].tl(), boundedRect[j].br(), recColor);
//show text at tl corner
cv::Point fontPoint = boundedRect[j].tl();
putText(image, "Black", fontPoint, FONT_HERSHEY_COMPLEX, 3, fontColor);
}
// cvtColor(imageCopy, image, COLOR_HLS2BGR);
}
#endif
Finally figured out why:
Just as @SolaWing said, there is an invalid access. For future viewers, I just want to make it clearer:
Problem is at the following code:
if(boundedRect[j].area()>40){
rectangle(image, boundedRect[j].tl(), boundedRect[j].br(), recColor);
//show text at tl corner
cv::Point fontPoint = boundedRect[j].tl();
putText(image, "Black", fontPoint, FONT_HERSHEY_COMPLEX, 3, fontColor);
}
This block of code already assumes that there is always a detected area. But when there is no target in front of the phone camera, contour1.size() is zero, so std::vector<cv::Rect> boundedRect(contour1.size()); creates an empty vector, and if(boundedRect[j].area()>40){} then accesses an element that does not exist. A minimal guard is sketched below.
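A minimal sketch of the fix, reusing the variables from the code above (contour1, boundedRect, largestContourIndex, image, recColor, fontColor):
// Only touch boundedRect when at least one contour was found.
if (!contour1.empty())
{
    int j = largestContourIndex;
    if (boundedRect[j].area() > 40)
    {
        rectangle(image, boundedRect[j].tl(), boundedRect[j].br(), recColor);
        // show text at the top-left corner
        cv::Point fontPoint = boundedRect[j].tl();
        putText(image, "Black", fontPoint, FONT_HERSHEY_COMPLEX, 3, fontColor);
    }
}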
First of all, sorry if this question has been asked before. I am working on an app that can detect the corners of a document. I am currently using OpenCV to detect the edges. I have achieved this using OpenCV, but I am not getting a perfect result.
I have also tried BradLarson's GPUImage, but I do not know how to start with it.
Here is my code; it detects the corners of the document, but not perfectly.
void find_squares(Mat& image, cv::vector<cv::vector<cv::Point>>&squares)
{
// blur will enhance edge detection
Mat blurred(image);
//cv::resize(image, image, cvSize(0.25, 0.25));
Mat gray0(blurred.size(), CV_8U), gray;
//medianBlur(image, blurred, 9); //default 9;
GaussianBlur(image, blurred, cv::Size(9, 9), 2.0, 2.0);
vector<vector<cv::Point> > contours;
// find squares in every color plane of the image
for (int c = 0; c < 3; c++)
{
int ch[] = {c, 0};
mixChannels(&blurred, 1, &gray0, 1, ch, 1);
// try several threshold levels
const int threshold_level = 4;
for (int l = 0; l < threshold_level; l++)
{
// Use Canny instead of zero threshold level!
// Canny helps to catch squares with gradient shading
if (l == 0)
{
Canny(gray0, gray, 10, 20, 3); //
// Dilate helps to remove potential holes between edge segments
dilate(gray, gray, Mat(), cv::Point(-1,-1));
}
else
{
gray = gray0 >= (l+1) * 255 / threshold_level;
}
// Find contours and store them in a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Test contours
vector<cv::Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if (approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
squares.push_back(approx);
}
}
}
}
}
So my questions are:
1) Is there any other library that can do this?
2) Is there any problem in the above code? Should I add some image processing before the detection?
3) Can BradLarson's GPUImage do this? And if it can, are there sample sources for edge detection?
Hi, I am currently working on an OCR reading app where I have successfully been able to capture the card image by using the AVFoundation framework.
For the next step, I need to find the edges of the card so that I can crop the card image from the main captured image and later send it to an OCR engine for processing.
The main problem now is to find the edges of the card, and I am using the code below (taken from another open-source project), which uses OpenCV for this purpose. It works fine if the card is a purely rectangular card or paper, but when I use a card with rounded corners (e.g. a driving license), it fails to detect them. I also do not have much expertise in OpenCV; can anyone help me solve this issue?
- (void)detectEdges
{
cv::Mat original = [MAOpenCV cvMatFromUIImage:_adjustedImage];
CGSize targetSize = _sourceImageView.contentSize;
cv::resize(original, original, cvSize(targetSize.width, targetSize.height));
cv::vector<cv::vector<cv::Point>>squares;
cv::vector<cv::Point> largest_square;
find_squares(original, squares);
find_largest_square(squares, largest_square);
if (largest_square.size() == 4)
{
// Manually sorting points, needs major improvement. Sorry.
NSMutableArray *points = [NSMutableArray array];
NSMutableDictionary *sortedPoints = [NSMutableDictionary dictionary];
for (int i = 0; i < 4; i++)
{
NSDictionary *dict = [NSDictionary dictionaryWithObjectsAndKeys:[NSValue valueWithCGPoint:CGPointMake(largest_square[i].x, largest_square[i].y)], @"point" , [NSNumber numberWithInt:(largest_square[i].x + largest_square[i].y)], @"value", nil];
[points addObject:dict];
}
int min = [[points valueForKeyPath:@"@min.value"] intValue];
int max = [[points valueForKeyPath:@"@max.value"] intValue];
int minIndex;
int maxIndex;
int missingIndexOne;
int missingIndexTwo;
for (int i = 0; i < 4; i++)
{
NSDictionary *dict = [points objectAtIndex:i];
if ([[dict objectForKey:@"value"] intValue] == min)
{
[sortedPoints setObject:[dict objectForKey:@"point"] forKey:@"0"];
minIndex = i;
continue;
}
if ([[dict objectForKey:@"value"] intValue] == max)
{
[sortedPoints setObject:[dict objectForKey:@"point"] forKey:@"2"];
maxIndex = i;
continue;
}
NSLog(@"MISSING %i", i);
missingIndexOne = i;
}
for (int i = 0; i < 4; i++)
{
if (missingIndexOne != i && minIndex != i && maxIndex != i)
{
missingIndexTwo = i;
}
}
if (largest_square[missingIndexOne].x < largest_square[missingIndexTwo].x)
{
//2nd Point Found
[sortedPoints setObject:[[points objectAtIndex:missingIndexOne] objectForKey:@"point"] forKey:@"3"];
[sortedPoints setObject:[[points objectAtIndex:missingIndexTwo] objectForKey:@"point"] forKey:@"1"];
}
else
{
//4th Point Found
[sortedPoints setObject:[[points objectAtIndex:missingIndexOne] objectForKey:@"point"] forKey:@"1"];
[sortedPoints setObject:[[points objectAtIndex:missingIndexTwo] objectForKey:@"point"] forKey:@"3"];
}
[_adjustRect topLeftCornerToCGPoint:[(NSValue *)[sortedPoints objectForKey:@"0"] CGPointValue]];
[_adjustRect topRightCornerToCGPoint:[(NSValue *)[sortedPoints objectForKey:@"1"] CGPointValue]];
[_adjustRect bottomRightCornerToCGPoint:[(NSValue *)[sortedPoints objectForKey:@"2"] CGPointValue]];
[_adjustRect bottomLeftCornerToCGPoint:[(NSValue *)[sortedPoints objectForKey:@"3"] CGPointValue]];
}
original.release();
}
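As an aside, the manual point sorting above (the part marked "needs major improvement") can be replaced by the usual sum/difference trick. The helper below is only a sketch in plain OpenCV C++, independent of the Objective-C wrapper, and the name orderCorners is mine:
#include <opencv2/core/core.hpp>
#include <vector>
#include <algorithm>

// Order 4 unordered corners as TL, TR, BR, BL:
// the top-left corner has the smallest x+y, the bottom-right the largest,
// the top-right the smallest y-x, and the bottom-left the largest y-x.
std::vector<cv::Point> orderCorners(std::vector<cv::Point> pts)
{
    auto sum  = [](const cv::Point& p) { return p.x + p.y; };
    auto diff = [](const cv::Point& p) { return p.y - p.x; };

    std::vector<cv::Point> ordered(4);
    ordered[0] = *std::min_element(pts.begin(), pts.end(),
        [&](const cv::Point& a, const cv::Point& b) { return sum(a) < sum(b); });   // top-left
    ordered[2] = *std::max_element(pts.begin(), pts.end(),
        [&](const cv::Point& a, const cv::Point& b) { return sum(a) < sum(b); });   // bottom-right
    ordered[1] = *std::min_element(pts.begin(), pts.end(),
        [&](const cv::Point& a, const cv::Point& b) { return diff(a) < diff(b); }); // top-right
    ordered[3] = *std::max_element(pts.begin(), pts.end(),
        [&](const cv::Point& a, const cv::Point& b) { return diff(a) < diff(b); }); // bottom-left
    return ordered;
}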
This naive implementation is based on some of the techniques demonstrated in squares.cpp, available in the OpenCV sample directory. The following posts also discuss similar applications:
OpenCV C++/Obj-C: Detecting a sheet of paper / Square Detection
Square detection doesn't find squares
Find corner of papers
@John, the code below has been tested with the sample image you provided and another one I created:
The processing pipeline starts with findSquares(), a simplification of the same function implemented by OpenCV's squares.cpp demo. This function converts the input image to grayscale and applies a blur to improve the detection of the edges (Canny):
The edge detection is good, but a morphological operation (dilation) is needed to join nearby lines:
After that we try to find the contours (edges) and assemble squares out of them. If we tried to draw all the detected squares on the input images, this would be the result:
It looks good, but it's not exactly what we are looking for since there are too many detected squares. However, the largest square is actually the card, so from here on it's pretty simple and we just figure out which of the squares is the largest. That's exactly what findLargestSquare() does.
Once we know the largest square, we simply paint red dots at the corners of the square for debugging purposes:
As you can see, the detection is not perfect but it seems good enough for most uses. This is not a robust solution and I only wanted to share one approach to solve the problem. I'm sure that there are other ways to deal with this that might be more interesting to you. Good luck!
#include <iostream>
#include <cmath>
#include <vector>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgproc/imgproc_c.h>
/* angle: finds a cosine of angle between vectors, from pt0->pt1 and from pt0->pt2
*/
double angle(cv::Point pt1, cv::Point pt2, cv::Point pt0)
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
/* findSquares: returns sequence of squares detected on the image
*/
void findSquares(const cv::Mat& src, std::vector<std::vector<cv::Point> >& squares)
{
cv::Mat src_gray;
cv::cvtColor(src, src_gray, cv::COLOR_BGR2GRAY);
// Blur helps to decrease the amount of detected edges
cv::Mat filtered;
cv::blur(src_gray, filtered, cv::Size(3, 3));
cv::imwrite("out_blur.jpg", filtered);
// Detect edges
cv::Mat edges;
int thresh = 128;
cv::Canny(filtered, edges, thresh, thresh*2, 3);
cv::imwrite("out_edges.jpg", edges);
// Dilate helps to connect nearby line segments
cv::Mat dilated_edges;
cv::dilate(edges, dilated_edges, cv::Mat(), cv::Point(-1, -1), 2, 1, 1); // default 3x3 kernel
cv::imwrite("out_dilated.jpg", dilated_edges);
// Find contours and store them in a list
std::vector<std::vector<cv::Point> > contours;
cv::findContours(dilated_edges, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);
// Test contours and assemble squares out of them
std::vector<cv::Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
// approximate contour with accuracy proportional to the contour perimeter
cv::approxPolyDP(cv::Mat(contours[i]), approx, cv::arcLength(cv::Mat(contours[i]), true)*0.02, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if (approx.size() == 4 && std::fabs(contourArea(cv::Mat(approx))) > 1000 &&
cv::isContourConvex(cv::Mat(approx)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = std::fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
squares.push_back(approx);
}
}
}
/* findLargestSquare: find the largest square within a set of squares
*/
void findLargestSquare(const std::vector<std::vector<cv::Point> >& squares,
std::vector<cv::Point>& biggest_square)
{
if (!squares.size())
{
std::cout << "findLargestSquare !!! No squares detect, nothing to do." << std::endl;
return;
}
int max_width = 0;
int max_height = 0;
int max_square_idx = 0;
for (size_t i = 0; i < squares.size(); i++)
{
// Convert a set of 4 unordered Points into a meaningful cv::Rect structure.
cv::Rect rectangle = cv::boundingRect(cv::Mat(squares[i]));
//std::cout << "find_largest_square: #" << i << " rectangle x:" << rectangle.x << " y:" << rectangle.y << " " << rectangle.width << "x" << rectangle.height << endl;
// Store the index position of the biggest square found
if ((rectangle.width >= max_width) && (rectangle.height >= max_height))
{
max_width = rectangle.width;
max_height = rectangle.height;
max_square_idx = i;
}
}
biggest_square = squares[max_square_idx];
}
int main()
{
cv::Mat src = cv::imread("cc.png");
if (src.empty())
{
std::cout << "!!! Failed to open image" << std::endl;
return -1;
}
std::vector<std::vector<cv::Point> > squares;
findSquares(src, squares);
// Draw all detected squares
cv::Mat src_squares = src.clone();
for (size_t i = 0; i < squares.size(); i++)
{
const cv::Point* p = &squares[i][0];
int n = (int)squares[i].size();
cv::polylines(src_squares, &p, &n, 1, true, cv::Scalar(0, 255, 0), 2, CV_AA);
}
cv::imwrite("out_squares.jpg", src_squares);
cv::imshow("Squares", src_squares);
std::vector<cv::Point> largest_square;
findLargestSquare(squares, largest_square);
// Draw circles at the corners
for (size_t i = 0; i < largest_square.size(); i++ )
cv::circle(src, largest_square[i], 4, cv::Scalar(0, 0, 255), cv::FILLED);
cv::imwrite("out_corners.jpg", src);
cv::imshow("Corners", src);
cv::waitKey(0);
return 0;
}
Instead of "pure" rectangular blobs, try to go for nearly rectangular ones.
1- Gaussian blur
2- Grayscale and Canny edge detection
3- Extract all blobs (contours) in your image and filter out the small ones. You will use the findContours and contourArea functions for that purpose.
4- Using moments, filter out the non-rectangular ones. First you need to check the moments of rectangle-like objects. You can do it yourself or google it. Then list those moments and find the similarity between objects, and build your filter from that (a sketch follows this list).
Example: after testing, say you find that the central moment m30 is similar across rectangle-like objects -> filter out objects whose m30 is far off.
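One way to sketch step 4 is to let OpenCV compare the (Hu) moments for you with matchShapes against a reference rectangle, as the landing-board code earlier in this thread does. The 0.1 threshold and the reference aspect ratio are assumptions to tune on real data (the constant is CV_CONTOURS_MATCH_I2 in the old C API used above, cv::CONTOURS_MATCH_I2 in OpenCV 3+):
#include <opencv2/opencv.hpp>
#include <vector>

// Keep only contours whose shape, via Hu-moment matching, is close to a reference rectangle.
std::vector<std::vector<cv::Point>> filterRectangular(
    const std::vector<std::vector<cv::Point>>& contours,
    double maxDissimilarity = 0.1) // assumed threshold, tune experimentally
{
    // Reference rectangle; pick an aspect ratio roughly matching the objects you expect.
    std::vector<cv::Point> refRect = { {0, 0}, {100, 0}, {100, 60}, {0, 60} };

    std::vector<std::vector<cv::Point>> kept;
    for (const auto& c : contours)
    {
        double d = cv::matchShapes(c, refRect, cv::CONTOURS_MATCH_I2, 0);
        if (d < maxDissimilarity)
            kept.push_back(c);
    }
    return kept;
}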
I know it may be too late for this post, but I am posting this in case it helps someone else.
The iOS Core Image framework already has a good tool to detect features such as rectangles (since iOS 5), faces, QR codes and even regions containing text in a still image. If you check out the CIDetector class you'll find what you need. I am using it for an OCR app too, it's super easy and very reliable compared to what you can do with OpenCV (I am not good with OpenCV, but the CIDetector gives much better results with 3-5 lines of code).
I don't know if it is an option, but you could have the user define the edges of it rather than trying to do it programatically.
Here is the output of the square-detection example. My problem is filtering these squares:
the first problem is that it draws more than one line for the same area;
the second is that I just need to detect the object, not the whole image;
and the third is that I have to take just the biggest object, excluding the whole-image frame.
Here is the detection code:
static void findSquares( const Mat& image, vector<vector<Point> >& squares ){
squares.clear();
Mat pyr, timg, gray0(image.size(), CV_8U), gray;
// down-scale and upscale the image to filter out the noise
pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
pyrUp(pyr, timg, image.size());
vector<vector<Point> > contours;
// find squares in every color plane of the image
for( int c = 0; c < 3; c++ )
{
int ch[] = {c, 0};
mixChannels(&timg, 1, &gray0, 1, ch, 1);
// try several threshold levels
for( int l = 0; l < N; l++ )
{
// hack: use Canny instead of zero threshold level.
// Canny helps to catch squares with gradient shading
if( l == 0 )
{
// apply Canny. Take the upper threshold from slider
// and set the lower to 0 (which forces edges merging)
Canny(gray0, gray, 0, thresh, 5);
// dilate canny output to remove potential
// holes between edge segments
dilate(gray, gray, Mat(), Point(-1,-1));
}
else
{
// apply threshold if l!=0:
gray = gray0 >= (l+1)*255/N;
}
// find contours and store them all as a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
vector<Point> approx;
// test each contour
for( size_t i = 0; i < contours.size(); i++ )
{
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
if( approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)) )
{
double maxCosine = 0;
for( int j = 2; j < 5; j++ )
{
// find the maximum cosine of the angle between joint edges
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
if( maxCosine < 0.3 )
squares.push_back(approx);
}
}
}
}
}
You need to take a look at the flags for findContours(). You can set a flag called CV_RETR_EXTERNAL, which returns only the outermost contours (all contours inside of them are thrown away). This will probably return the entire frame, so you'll need to narrow down the search so that it doesn't check your frame boundaries; use the function copyMakeBorder() to accomplish this. I would also recommend removing your dilate call, as it is probably causing duplicate contours on either side of a line (you might not even need the border if you remove the dilate). Here is my output:
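A minimal sketch of that suggestion under stated assumptions (placeholder file names, arbitrary Canny thresholds, a 10-pixel black border): pad the frame with copyMakeBorder so the image boundary is not picked up, retrieve only external contours with RETR_EXTERNAL, and keep the biggest one.
#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat gray = cv::imread("frame.png", cv::IMREAD_GRAYSCALE); // placeholder input
    if (gray.empty()) return -1;

    // Pad with a black border so contours touching the frame edge do not merge with the image boundary.
    cv::Mat padded;
    cv::copyMakeBorder(gray, padded, 10, 10, 10, 10, cv::BORDER_CONSTANT, cv::Scalar(0));

    cv::Mat edges;
    cv::Canny(padded, edges, 50, 150);

    // RETR_EXTERNAL keeps only the outermost contours, discarding nested duplicates.
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(edges, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);

    // Keep the biggest external contour as the object of interest.
    int best = -1;
    double bestArea = 0;
    for (size_t i = 0; i < contours.size(); i++)
    {
        double a = cv::contourArea(contours[i]);
        if (a > bestArea) { bestArea = a; best = (int)i; }
    }
    if (best >= 0)
    {
        cv::Mat vis;
        cv::cvtColor(padded, vis, cv::COLOR_GRAY2BGR);
        cv::drawContours(vis, contours, best, cv::Scalar(0, 255, 0), 2);
        cv::imwrite("largest_object.png", vis);
    }
    return 0;
}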