Extracting the fingers from the hand - OpenCV

I am using this code to extract the fingers when users show their hands to the camera.
The following error is thrown after inserting these lines to find the convexity defects:
if (contours[i].size() >3 )
{
cout<<"inside"<<endl;
convexityDefects(contours[i], hull[i], defects[i]);
}
The error is:
inside
OpenCV Error: Assertion failed (hull.checkVector(1, CV_32S) > 2) in convexityDefects, file C:/slave/builds/WinInstallerMegaPack/src/opencv/modules/imgproc/src/contours.cpp, line 1971
This application has requested the Runtime to terminate it in an unusual way.
Please contact the application's support team for more information.
terminate called after throwing an instance of 'cv::Exception'
what(): C:/slave/builds/WinInstallerMegaPack/src/opencv/modules/imgproc/src/contours.cpp:1971: error: (-215) hull.checkVector(1, CV_32S) > 2 in function convexityDefects
Can anybody help me resolve this issue?
Code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 147;
int max_thresh = 255;
RNG rng(12345);
/// Function header
void thresh_callback(int, void* );
/** @function main */
int main( int argc, char** argv )
{
src = imread( "D:\\a.jpg", 1 );
/// Convert image to gray and blur it
resize(src,src,Size(640,480),0,0,INTER_LINEAR);
cvtColor( src, src_gray, CV_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
char* source_window = "Knuckle Extractor";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
// createTrackbar( " Threshold:", source_window, &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey(0);
return(0);
}
/** @function thresh_callback */
void thresh_callback(int, void* )
{
Mat src_copy = src.clone();
Mat threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using Threshold
threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY||CV_THRESH_OTSU );
// imshow("Grey",src_gray);
imshow("Threshold",threshold_output);
/// Find contours
// findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
//findContours( threshold_output, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE, Point(0, 0) );
findContours( threshold_output, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/// Find the convex hull object for each contour
vector<vector<Point> >hull( contours.size() );
vector<vector<Vec4i> >defects( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{
convexHull( Mat(contours[i]), hull[i], false );
if (contours[i].size() >3 )
{
cout<<"inside"<<endl;
convexityDefects(contours[i], hull[i], defects[i]);
}
}
/// Draw contours + hull results
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
drawContours( drawing, hull, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
}
/// Show in a window
namedWindow( "Result", CV_WINDOW_AUTOSIZE );
imshow( "Result", drawing );
}

The problem (or at least the main one I can see) is the way you construct your convex hulls. You should use:
vector<vector<int> >hull( contours.size() );
Instead of:
vector<vector<Point> >hull( contours.size() );
This is because the convexityDefects function only works on convex hulls represented as a series of indices into the original contour, rather than as a series of points.
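A minimal sketch of how this could look inside the question's loop, assuming the rest of the code stays the same (the names hullPoints and hullIndices are mine, not from the original post). Keeping a point-based hull as well lets the existing drawContours calls keep working, while the index-based hull is what convexityDefects needs; the assertion hull.checkVector(1, CV_32S) > 2 also means the hull itself must contain more than two indices:
vector<vector<Point> > hullPoints( contours.size() );  // point form, for drawContours
vector<vector<int> >   hullIndices( contours.size() ); // index form, for convexityDefects
vector<vector<Vec4i> > defects( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{
    convexHull( Mat(contours[i]), hullPoints[i], false );  // hull as points
    convexHull( Mat(contours[i]), hullIndices[i], false ); // same hull, as indices into contours[i]
    if( contours[i].size() > 3 && hullIndices[i].size() > 2 )
    {
        convexityDefects( contours[i], hullIndices[i], defects[i] );
    }
}
Unrelated to the assertion: the threshold call in the question combines its flags with logical OR (THRESH_BINARY||CV_THRESH_OTSU evaluates to 1); a bitwise OR, THRESH_BINARY | CV_THRESH_OTSU, is presumably what was intended.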

Related

Detecting edges (connected edges) and finding edge length and connected-component radius of gyration

Original image: https://i.stack.imgur.com/j7brr.jpg
I am trying to detect the clusters of connected boundaries in this image. I need to find the length of these edges and also the radius of gyration of the individual clusters.
I am using OpenCV 2.4.13.
I used the following code to detect the clusters and their mass centers using contours.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
/// Function header
void thresh_callback(int, void* );
/** @function main */
int main( int argc, char** argv )
{
/// Load source image and convert it to gray
src = imread( argv[1], 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, CV_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey(0);
return(0);
}
/** @function thresh_callback */
void thresh_callback(int, void* )
{
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using canny
Canny( src_gray, canny_output, thresh, thresh*2, 3 );
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Get the moments
vector<Moments> mu(contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ mu[i] = moments( contours[i], false ); }
/// Get the mass centers:
vector<Point2f> mc( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); }
/// Draw contours
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
Mat drawing2 = Mat::zeros( canny_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
if( arcLength( contours[i], true ) > 900 )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
circle( drawing, mc[i], 4, color, -1, 8, 0 );
}
}
int length=0;
int j=0;
for( int i = 0; i< contours.size(); i++ )
{
if(arcLength( contours[i], true )>length)
{
length=arcLength( contours[i], true );
j=i;
}
}
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing2, contours, j, color, 2, 8, hierarchy, 0, Point() );
circle( drawing2, mc[j], 4, color, -1, 8, 0 );
/// Show in a window
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
namedWindow( "Contours2", CV_WINDOW_AUTOSIZE );
imshow( "Contours_max", drawing2 );
/// Calculate the area with the moments 00 and compare with the result of the OpenCV function
printf("\t Info: Area and Contour Length \n");
for( int i = 0; i< contours.size(); i++ )
{
if( arcLength( contours[i], true ) > 900 )
{
printf(" * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f \n", i, mu[i].m00, contourArea(contours[i]), arcLength( contours[i], true ) );
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
circle( drawing, mc[i], 4, color, -1, 8, 0 );
}
}
}
The problem is that contours sharing a common edge are detected as separate contours, although logically they should belong to the same cluster. The contour image I get is given below.
Many contours with shared edges are taken separately as different contours; I want them to be treated as part of the same boundary cluster. Please also suggest how to measure the length of the boundaries and the radius of gyration.
Please help.
I am incredibly confused by your question (I would ask for clarification in a comment, but I am too new to be allowed to comment).
My only advice, based on what I see and understand, is that you may not want to be using a Canny filter. To be clear, your original image already has edges; running a Canny filter on it gives you "double edges", which I do not think you want. But again, I am not even sure what you are trying to achieve.
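On the length and radius-of-gyration part of the question, which the answer above does not cover, here is a hedged sketch that is not from the original thread: arcLength already gives the boundary length, and cv::moments of a contour gives area moments from which a radius of gyration about the centroid can be computed as sqrt((mu20 + mu02)/m00). For thin, open Canny edges the polygon-based moments can be close to zero, so this works best once each cluster is a single closed region, for example after thresholding instead of Canny as the answer suggests:
// Sketch only: assumes 'contours' was filled by findContours as in the question.
// Requires <cmath> for std::sqrt.
for( size_t i = 0; i < contours.size(); i++ )
{
    double length = arcLength( contours[i], true );   // boundary length of the (closed) contour
    Moments m = moments( contours[i], false );        // area moments of the enclosed region
    if( m.m00 <= 0 )
        continue;                                     // skip degenerate/open contours
    // The polar second moment about the centroid is mu20 + mu02 and m00 is the area ("mass"),
    // so the radius of gyration about the centroid is:
    double rg = std::sqrt( (m.mu20 + m.mu02) / m.m00 );
    printf( "Contour %d: length = %.2f, radius of gyration = %.2f\n", (int)i, length, rg );
}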

How to get a bounding rectangle on both the source and contour windows?

I am using this link to get a bounding rectangle for a contour.
I want to know how I can get the same rectangle, which is detected in the contour window, drawn on the source window as well.
Thank you very much in advance.
1. Create your polygons and circles in the main( int argc, char** argv ) function.
2. Show the drawing in the source window:
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, drawing );
3. Comment out:
createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
Answered here. Code by sturkmen:
/**
* @function generalContours_demo1.cpp
* @brief Demo code to find contours in an image
* @author OpenCV team
*/
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
/// Function header
void thresh_callback(int, void* );
/**
* @function main
*/
int main( int, char** argv )
{
/// Load source image and convert it to gray
src = imread( argv[1], 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, COLOR_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
const char* source_window = "Source";
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey(0);
return(0);
}
/**
* @function thresh_callback
*/
void thresh_callback(int, void* )
{
Mat threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using Threshold
threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );
/// Find contours
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Approximate contours to polygons + get bounding rects and circles
vector<vector<Point> > contours_poly( contours.size() );
vector<Rect> boundRect( contours.size() );
vector<Point2f>center( contours.size() );
vector<float>radius( contours.size() );
for( size_t i = 0; i < contours.size(); i++ )
{ approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
boundRect[i] = boundingRect( Mat(contours_poly[i]) );
minEnclosingCircle( contours_poly[i], center[i], radius[i] );
}
/// Draw polygonal contour + bonding rects + circles
Mat src2 = src.clone(); //add this line
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( size_t i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours_poly, (int)i, color, 1, 8, vector<Vec4i>(), 0, Point() );
rectangle( drawing, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0 );
circle( drawing, center[i], (int)radius[i], color, 2, 8, 0 );
drawContours( src2, contours_poly, (int)i, color, 1, 8, vector<Vec4i>(), 0, Point() );//add this line
rectangle( src2, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0 );//add this line
circle( src2, center[i], (int)radius[i], color, 2, 8, 0 );//add this line
}
/// Show in a window
namedWindow( "Contours", WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
imshow( "Source", src2 );//add this line
}

OpenCV SURF crash

Is anyone familiar with this error? I tested a SURF descriptor in real time and it worked well, but after a few seconds it crashed and I got this error.
The crash seems related to frames where no points are detected. I ran the code again and kept the detected object in view for more than 2 minutes with no error, but when I removed the object so that there were no points, it crashed again after about 40 seconds.
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/legacy/legacy.hpp"
using namespace cv;
using namespace std;
char key = 'a';
int framecount = 0;
SurfFeatureDetector detector(1000);
SurfDescriptorExtractor extractor;
FlannBasedMatcher matcher;
Mat frame, des_object, image;
Mat des_image, img_matches, H;
std::vector<KeyPoint> kp_object;
std::vector<Point2f> obj_corners(4);
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
int main()
{
//reference image
Mat object = imread("D:/milo.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
//compute detectors and descriptors of reference image
detector.detect( object, kp_object );
extractor.compute( object, kp_object, des_object );
//create video capture object
CvCapture* capture = cvCaptureFromCAM(0);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 270);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 190);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
//while loop for real-time detection
while (key != 27)
{
Mat frame;
frame = cvQueryFrame(capture);
if (framecount < 5)
{
framecount++;
continue;
}
Mat des_image, img_matches;
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;
cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);
int goodMatchesCounter =0;
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if(((int)matches[i].size()<=2 && (int)matches[i].size()>0) && (matches[i][0].distance<0.6*(matches[i][1].distance)))
{
// good_matches.push_back(matches[i][0]);
obj.push_back( kp_object[ matches[i][0].queryIdx ].pt );
scene.push_back( kp_image[ matches[i][0].trainIdx ].pt );
goodMatchesCounter++;
}
}
//Draw only "good" matches
// drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
if (goodMatchesCounter >= 4)
{
H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
//Draw lines between the corners (the mapped object in the scene image )
line( image, scene_corners[0], scene_corners[1], Scalar( 0, 0, 0), 4 );
line( image, scene_corners[1], scene_corners[2], Scalar( 0, 0, 0), 4 );
line( image, scene_corners[2], scene_corners[3], Scalar( 0, 0, 0), 4 );
line( image, scene_corners[3], scene_corners[0], Scalar( 0, 0, 0), 4 );
}
//Show detected matches
imshow( "Good Matches", image );
key = waitKey(1);
}
return 0;
}
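A guess based only on the description above (the crash appears once no points are detected): if SURF finds no keypoints in a frame, des_image is empty, and both knnMatch and the matches[i][1] access in the loop can fail. A minimal guard sketch, using the variable names from the code above; its placement right after extractor.compute(...) is an assumption, not a confirmed fix:
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );

// Guard: skip matching entirely when the frame yields no keypoints/descriptors,
// instead of handing an empty descriptor matrix to knnMatch.
if( kp_image.empty() || des_image.empty() )
{
    imshow( "Good Matches", image );
    key = waitKey(1);
    continue;
}

matcher.knnMatch( des_object, des_image, matches, 2 );
It would also be safer for the inner loop to require matches[i].size() >= 2 before reading matches[i][1].distance, since knnMatch can return fewer than two neighbours for a query.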

Fill Color on ConvexHull OpenCV

Everyone, please help me, I have a problem.
I detect a convex hull from an image, but I am having trouble filling the area within the boundary of the convex hull with a color.
Can someone help me fill the area inside the convex hull boundary, with an implementation in source code?
I am using OpenCV 2.3.
This is the source code:
#include "stdafx.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 240;
int max_thresh = 255;
RNG rng(12345);
/// Function header
void thresh_callback(int, void* );
/** @function main */
int main( int argc, char** argv )
{
/// Load source image and convert it to gray
src = imread( "kubis1.jpg", 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, CV_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey(0);
return(0);
}
/** @function thresh_callback */
void thresh_callback(int, void* )
{
Mat src_copy = src.clone();
Mat threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using Threshold
threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );
/// Find contours
findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Find the convex hull object for each contour
vector<vector<Point> >hull( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ convexHull( Mat(contours[i]), hull[i], false );
}
/// Draw contours + hull results
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
drawContours( drawing, hull, i, color, 1, 8, vector<Vec4i>(), 0, Point() );
}
/// Show in a window
namedWindow( "Hull demo", CV_WINDOW_AUTOSIZE );
imshow( "Hull demo", drawing );
}
You should use the drawContours function with a negative 'thickness' parameter. From the documentation:
The function draws contour outlines in the image if thickness >= 0 or
fills the area bounded by the contours if thickness < 0.
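A minimal sketch of that applied to the drawing loop in the question, assuming the same contours, hull, and drawing variables; CV_FILLED is just -1 and is available in OpenCV 2.3:
// Fill the area bounded by each convex hull instead of only outlining it.
for( int i = 0; i < contours.size(); i++ )
{
    Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
    drawContours( drawing, hull, i, color, CV_FILLED, 8, vector<Vec4i>(), 0, Point() ); // CV_FILLED == -1
}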

OpenCV usage: cvFindDominantPoints

Does anyone know how to use the cvFindDominantPoints API of OpenCV? I basically have a 1-channel, binary image from which I get a set of contours. Judging from the image, I seem to be getting the correct contours. Now, I am selecting one of these contours to get the dominant points of. This contour has about 60 vertices. However, the call to cvFindDominantPoints gives me a sequence of points (about 15) that do not even lie on the contour; they are quite far from it. Any insight?
my usage:
CvSeq *dominantpoints = cvFindDominantPoints(targetSeq, tristorage, CV_DOMINANT_IPAN, 7, 9, 9, 150);
#include "cv.h"
#include "highgui.h"
CvSeq* contours = 0;
CvSeq* dps = 0;
int main( int argc, char** argv )
{
int i, idx;
CvPoint p;
CvMemStorage* storage_ct = cvCreateMemStorage(0);
CvMemStorage* storage_dp = cvCreateMemStorage(0);
IplImage* img = cvLoadImage("contour.bmp", CV_LOAD_IMAGE_GRAYSCALE);
cvNamedWindow( "image" );
cvShowImage( "image", img );
cvFindContours( img, storage_ct, &contours, sizeof(CvContour),
CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE );
dps = cvFindDominantPoints( contours, storage_dp, CV_DOMINANT_IPAN, 7, 20, 9, 150 );
contours = cvApproxPoly( contours, sizeof(CvContour), storage_ct, CV_POLY_APPROX_DP, 3, 1 );
printf("found %d DPs and %d Contours \n", dps->total, contours->total );
for ( i = 0; i < dps->total; i++)
{
idx = *(int *) cvGetSeqElem(dps, i);
p = *(CvPoint *) cvGetSeqElem(contours, idx);
cvDrawCircle( img, p , 1, cvScalarAll(255) );
printf("%d %d %d\n", idx, p.x, p.y);
}
cvDrawContours(img, contours, cvScalarAll(100), cvScalarAll(200), 100 );
cvNamedWindow( "contours" );
cvShowImage( "contours", img );
cvWaitKey(0);
cvReleaseMemStorage( &storage_ct );
cvReleaseMemStorage( &storage_dp );
cvReleaseImage( &img );
return 0;
}
