I am currently working on extracting contour path attributes from a particular image file. I am able to extract contours using the OpenCV function findContours(); the output looks like this:
[98, 81][97, 80][95, 80][94, 79][93, 79][92, 78][91, 78][88, 75][87, 75][85, 73][84, 73][83, 72][82, 72]
But my desired output looks like this:
M 398.7,106.8 c -5.5,-2.7 -20.7,-4.7 -36.1,-4.6 -15.4,0.1
How can I get it?
This is my code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
Mat src_grays;
int threshs = 100;
int max_threshs = 255;
RNG rng(12345);
void thresh_callbacks(int, void* );
int main( )
{
Mat src = imread( "F:/academic/pro4/t/download.jpg" );
imshow("real Image", src);
Mat gray,edge,edges, draw,draws;
Mat samples(src.rows * src.cols, 3, CV_32F);
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
for( int z = 0; z < 3; z++)
samples.at<float>(y + x*src.rows, z) = src.at<Vec3b>(y,x)[z];
int clusterCount = 5;
Mat labels;
int attempts = 10;
Mat centers;
kmeans(samples, clusterCount, labels, TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10000, 0.0001), attempts, KMEANS_PP_CENTERS, centers );
Mat new_image( src.size(), src.type() );
for( int y = 0; y < src.rows; y++ )
for( int x = 0; x < src.cols; x++ )
{
int cluster_idx = labels.at<int>(y + x*src.rows,0);
new_image.at<Vec3b>(y,x)[0] = centers.at<float>(cluster_idx, 0);
new_image.at<Vec3b>(y,x)[1] = centers.at<float>(cluster_idx, 1);
new_image.at<Vec3b>(y,x)[2] = centers.at<float>(cluster_idx, 2);
}
imshow( "clustered image", new_image );
char filename[80];
sprintf(filename,"F:/academic/pro4/t/seg.png");
imwrite(filename, new_image);
cvtColor(src, gray, CV_BGR2GRAY);
Canny( new_image, edges, 50, 150, 3);
edges.convertTo(draws, CV_8U);
namedWindow("imageAfterSegmnetation", CV_WINDOW_AUTOSIZE);
imshow("imagesAfterCluster", draws);
cvtColor( new_image, src_grays, CV_BGR2GRAY );
blur( src_grays, src_grays, Size(3,3) );
char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &threshs, max_threshs, thresh_callbacks );
thresh_callbacks( 0, 0 );
waitKey( 0 );
return 0;
}
void thresh_callbacks(int, void* )
{
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using canny
Canny( src_grays, canny_output, threshs, threshs*2, 3 );
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
for(int i= 0; i < contours.size(); i++)
{
for(int j= 0; j < contours[i].size();j++) // run until j < contours[i].size();
{
int a= contours[i][j].x ;
int b =contours[i][j].y ;
// printf("Point(x,y)=" + a, b);
std::cout << contours[i][j] << std::endl;
}
printf ("%i", i + "\n");
}
/// Draw contours
int a=contours.size();
for( int i = 0; i<contours.size(); i++ )
{
Mat drawing_i = Mat::zeros( canny_output.size(), CV_8UC3 );
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing_i, contours, i, color, 2, 8, hierarchy, 0, Point() );
namedWindow( "Contours_i", CV_WINDOW_AUTOSIZE );
imshow( "Contours_i", drawing_i );
}
}
Note:
I need the contour path, that is, how the contour points are connected. For example, it can be M = moveto, L = lineto, H = horizontal lineto, V = vertical lineto, C = curveto, S = smooth curveto, Q = quadratic Bézier curveto, T = smooth quadratic Bézier curveto, A = elliptical arc, Z = closepath, just like an SVG path.
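As far as I know there is no OpenCV call that emits SVG commands directly, so here is a minimal sketch of what I would try, assuming straight line segments are acceptable (only M, L and Z, no curve fitting): walk each contour returned by findContours and print its points in SVG path syntax.

#include <sstream>
// Hypothetical helper: converts one contour into an SVG path string
// using only M (moveto), L (lineto) and Z (closepath).
std::string contourToSvgPath(const std::vector<cv::Point>& contour)
{
    std::ostringstream path;
    for (size_t j = 0; j < contour.size(); j++)
    {
        path << (j == 0 ? "M " : "L ") << contour[j].x << "," << contour[j].y << " ";
    }
    path << "Z"; // close the path back to the first point
    return path.str();
}

Curve commands such as C or Q would need an extra curve-fitting step (for example, fitting cubic Béziers to runs of points); the sketch above only reproduces the polygonal outline.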
Original image: https://i.stack.imgur.com/j7brr.jpg
I am trying to detect the clusters of connected boundaries in this image. I need to find the length of these edges and also the radius of gyration of the individual clusters.
I am using OpenCV 2.4.13.
I used the following code to detect the mass clusters using contours.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
/// Function header
void thresh_callback(int, void* );
/** @function main */
int main( int argc, char** argv )
{
/// Load source image and convert it to gray
src = imread( argv[1], 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, CV_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey(0);
return(0);
}
/** @function thresh_callback */
void thresh_callback(int, void* )
{
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using canny
Canny( src_gray, canny_output, thresh, thresh*2, 3 );
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Get the moments
vector<Moments> mu(contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ mu[i] = moments( contours[i], false ); }
/// Get the mass centers:
vector<Point2f> mc( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); }
/// Draw contours
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
Mat drawing2 = Mat::zeros( canny_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{if(arcLength( contours[i], true )>900)
{Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
circle( drawing, mc[i], 4, color, -1, 8, 0 );}
}
double length = 0; // arcLength returns a double, so do not truncate it
int j=0;
for( int i = 0; i< contours.size(); i++ )
{
if(arcLength( contours[i], true )>length)
{
length=arcLength( contours[i], true );
j=i;
}
}
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing2, contours, j, color, 2, 8, hierarchy, 0, Point() );
circle( drawing2, mc[j], 4, color, -1, 8, 0 );
/// Show in a window
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
namedWindow( "Contours2", CV_WINDOW_AUTOSIZE );
imshow( "Contours_max", drawing2 );
/// Calculate the area with the moments 00 and compare with the result of the OpenCV function
printf("\t Info: Area and Contour Length \n");
for( int i = 0; i< contours.size(); i++ )
{
if(arcLength( contours[i], true )>900)
{printf(" * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f \n", i, mu[i].m00, contourArea(contours[i]), arcLength( contours[i], true ) );
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
circle( drawing, mc[i], 4, color, -1, 8, 0 );}
}
}
The problem is that contours sharing common edges are detected as separate contours, although logically they should belong to the same cluster. The contour image I am getting is shown below.
We can see that many contours with shared edges are treated as different contours. I want them to be part of the same boundary cluster. Please also suggest how to detect the length of the boundaries and the radius of gyration.
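For the radius of gyration, here is a minimal sketch of the calculation I have in mind, assuming the usual definition (the square root of the mean squared distance of the boundary points from their centroid); the boundary length itself is what arcLength( contours[i], true ) already returns in the code above.

// Hypothetical helper: radius of gyration of one contour about the
// centroid of its points (needs <cmath> for std::sqrt).
double radiusOfGyration(const vector<Point>& pts)
{
if (pts.empty()) return 0;
double sx = 0, sy = 0;
for (size_t k = 0; k < pts.size(); k++)
{
sx += pts[k].x;
sy += pts[k].y;
}
double cx = sx / pts.size(), cy = sy / pts.size(); // centroid
double sumSq = 0; // accumulate squared distances from the centroid
for (size_t k = 0; k < pts.size(); k++)
{
double dx = pts[k].x - cx;
double dy = pts[k].y - cy;
sumSq += dx * dx + dy * dy;
}
return std::sqrt(sumSq / pts.size());
}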
Please help.
I am incredibly confused by your question (I would ask for clarification in a comment, but I am too new to comment).
My only advice, based on what I see and understand, is that you may not want to be using a Canny filter. To be clear, your original image already has edges; running a Canny filter on it gives you "double edges", which I do not think you want. But again, I am not even sure what you are trying to achieve.
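If single boundaries are what you are after, here is a minimal sketch of the alternative I mean, assuming the image is close to binary already (the threshold value of 127 is a guess): binarize and run findContours on the mask instead of on the Canny output, so each blob yields one outer boundary instead of a double edge.

Mat bw;
threshold( src_gray, bw, 127, 255, THRESH_BINARY ); // threshold value is a guess
vector<vector<Point> > contours2;
vector<Vec4i> hierarchy2;
findContours( bw, contours2, hierarchy2, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );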
I have a problem with the execution of my code.
I am trying to make this work:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
int main( )
{
Mat src1,src2;
namedWindow( "corner", CV_WINDOW_AUTOSIZE );
namedWindow( "result", CV_WINDOW_AUTOSIZE );
src1 = imread("RGB_32.png", CV_LOAD_IMAGE_COLOR);
src2 = imread("RGB_33.png", CV_LOAD_IMAGE_COLOR);
//*********************************************************************************
Mat im1,im2;
cvtColor(src1, im1, CV_RGB2GRAY);
cvtColor(src2, im2, CV_RGB2GRAY);
//******************ReduceImageSizeAndDetectCorner**********************************
Mat im2c;
Mat tmp = im2;
pyrDown(tmp,im2c,Size( tmp.cols/2, tmp.rows/2));
Mat dst = Mat::zeros( im2c.size(), CV_32FC1 );
int blockSize = 3;
int apertureSize = 3;
double c = 0.09;
cornerHarris(im2c,dst,blockSize, apertureSize, c, BORDER_DEFAULT);
Mat dst_norm, dst_norm_scaled;
normalize( dst, dst_norm, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
convertScaleAbs( dst_norm, dst_norm_scaled );
int w=23,nb_pt=0,l=0, alpha=143;
/// Drawing a circle around corners
for( int j = 0+w; j < dst_norm.rows-w ; j++ ){
for( int i = 0+w; i < dst_norm.cols-w; i++ ){
if( (int) dst_norm.at<float>(j,i) > alpha )
{
circle( im2c, Point( i, j ), 0.5, Scalar(0,0,255), 4, 8, 0 );
nb_pt ++ ;
}
}
}
Mat C= Mat:: zeros(nb_pt,2,CV_32FC1) ;
for( int j = 0+w; j < dst_norm.rows-w ; j++ ){
for( int i = 0+w; i < dst_norm.cols-w; i++ ){
if( (int) dst_norm.at<float>(j,i) > alpha ){
C.at <float> (l,0)=j;
C.at <float> (l,1)=i;
l++;
}
}
}
C=2*C;
//******************ImplementLucas&KanadeMethod**************************************
Mat1f Cx,Cy;
Mat Ix_m,Iy_m,It_m,It1,It2;
Cx = (Mat_<float>(2,2) << -1,1,-1,1);
Cy= (Mat_<float>(2,2) << -1,-1,1,1);
Mat Ct1 = (Mat_<float>(2,2) << -1,-1,-1,-1);
Mat Ct2 = Mat::ones( 2, 2, CV_8U );
filter2D(im1,Ix_m,-1,Cx,Point(-1,-1),0,BORDER_DEFAULT);
filter2D(im1,Iy_m,-1,Cy,Point(-1,-1),0,BORDER_DEFAULT);
filter2D(im1,It1,-1,Ct1,Point(-1,-1),0,BORDER_DEFAULT);
filter2D(im1,It2,-1,Ct2,Point(-1,-1),0,BORDER_DEFAULT);
add(It1,It2,It_m);
//initialiser le flot
//Mat u = Mat::zeros( 1, nb_pt, CV_32FC1 );
//Mat v = Mat::zeros( 1, nb_pt, CV_32FC1 );
Mat Ix,Ixd,Iy,Iyd,It,Itd,b,nu,A;
int u,v ;
cv::Scalar color(0,0,255);
int size = 10 ;
int thickness = 10 ;
for (int k=0 ; k < nb_pt ; ++k) {
int j= C.at <float> (k,0);
int i= C.at <float> (k,1);
Ix= Ix_m(Range(j-w,j+w),Range(i-w,i+w));
Iy= Iy_m(Range(j-w,j+w),Range(i-w,i+w));
It= It_m(Range(j-w,j+w),Range(i-w,i+w));
redim(Ix,Ixd);
redim(Iy,Iyd);
redim(It,Itd);
multi(Itd,b);
funct(Ixd,Iyd,A);
Mat inv = A.inv(DECOMP_SVD);
nu = inv * b ;
u= nu.at<float> (0,0);
v= nu.at<float> (0,1);
//cout << "u = "<< u << endl ;
//cout << "v = "<< v << endl ;
cvQuiver(src2,i,j,u,v,color, size, thickness);
}
imshow( "result", src2 );*/
waitKey();
return 0;
}
I have a problem with the for loop. When k = 2, I get a "core dumped" error. But when I run the code case by case, that is to say k = 0, then k = 1, then k = 2, and so on, it works. Any help, please?
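One thing I would check first (a guess on my part, not a confirmed diagnosis): C holds corner coordinates found on the pyrDown'ed image and is then multiplied by 2, so for corners near the border Range(j-w, j+w) can reach outside Ix_m, and an out-of-range Range aborts at runtime with exactly this kind of crash. A minimal guard at the top of the loop body would rule that out:

// Skip corners whose window does not fit inside the image;
// Range(a, b) requires 0 <= a and b <= rows/cols (the end is exclusive).
if( j - w < 0 || i - w < 0 || j + w > Ix_m.rows || i + w > Ix_m.cols )
    continue;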
I need to draw rectangles around the 2 largest objects from the webcam. I have already managed to draw the contours of the 2 largest objects, but now I am confused about how to draw the 2 largest rectangles.
Can someone show me the code, please?
//find and draw contours
void showconvex(Mat &thresh,Mat &frame) {
int largestIndex = 0;
int largestContour = 0;
int secondLargestIndex = 0;
int secondLargestContour = 0;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
//find contours
findContours(thresh, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
/// Find the convex hull object for each contour
vector<vector<Point> >hull(contours.size());
vector<vector<int> >inthull(contours.size());
vector<vector<Vec4i> >defects(contours.size());
for (int i = 0; i < contours.size(); i++)
{
convexHull(Mat(contours[i]), hull[i], false);
convexHull(Mat(contours[i]),inthull[i], false);
if (inthull[i].size()>3)
convexityDefects(contours[i], inthull[i], defects[i]);
}
//find 2 largest contour
for( int i = 0; i< contours.size(); i++ )
{
if(contours[i].size() > largestContour)
{
secondLargestContour = largestContour;
secondLargestIndex = largestIndex;
largestContour = contours[i].size();
largestIndex = i;
}
else if(contours[i].size() > secondLargestContour)
{
secondLargestContour = contours[i].size();
secondLargestIndex = i;
}
}
//show contours of 2 biggest and hull as well
if(contours.size()>0)
{
//check for contouraea function if error occur
//draw the 2 largest contour using previously stored index.
drawContours(frame, contours, largestIndex, CV_RGB(0,255,0), 2, 8, hierarchy);
drawContours(frame, contours, secondLargestIndex, CV_RGB(0,255,0), 2, 8, hierarchy);
}
}
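A minimal sketch of the missing piece, assuming the two index variables above have already been filled in by the loop: boundingRect gives an upright bounding box for each stored contour, which rectangle() can then draw onto the frame.

if (contours.size() > 0)
{
    // upright bounding boxes of the two largest contours
    Rect r1 = boundingRect(contours[largestIndex]);
    Rect r2 = boundingRect(contours[secondLargestIndex]);
    rectangle(frame, r1, CV_RGB(255, 0, 0), 2);
    rectangle(frame, r2, CV_RGB(255, 0, 0), 2);
}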
Take a look at the code below. It is based on sorting the contours by their bounding boxes or by their areas.
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
using namespace std;
struct contour_sorter_dsc // sorts contours by their bounding boxes descending
{
bool operator ()( const vector<Point>& a, const vector<Point> & b )
{
Rect ra( boundingRect(a) );
Rect rb( boundingRect(b) );
return ( ( rb.width * rb.height ) < ( ra.width * ra.height ) );
}
};
struct contour_sorter_dsc_area // sorts contours by their areas descending
{
bool operator ()( const vector<Point>& a, const vector<Point> & b )
{
double area_a = contourArea( a );
double area_b = contourArea( b );
return ( area_b < area_a );
}
};
int main( int argc, char** argv )
{
Mat src = imread( argv[1] );
if( src.empty() )
{
return -1;
}
Mat canvas1 = src.clone();
Mat canvas2 = src.clone();
Mat gray;
cvtColor( src, gray, COLOR_BGR2GRAY );
gray = gray > 127; // binarize image
vector<vector<Point> > contours;
findContours( gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE );
sort(contours.begin(), contours.end(), contour_sorter_dsc());
for( size_t i = 0; i< 2; i++ )
{ // checks if the first contour is image boundary
if( contours[0][0] == Point( 1, 1 ) & contours[0][1] == Point( 1, gray.rows -2 )
& contours[0][2] == Point( gray.cols - 2, gray.rows -2 ) & contours[0][3] == Point( gray.cols - 2, 1 ) )
{
contours[0] = contours[1];
contours[1] = contours[2];
}
if( i < contours.size())
{
drawContours( canvas1, contours, i, Scalar( 255,255,0 ) );
Rect minRect = boundingRect( Mat(contours[i]) );
rectangle( canvas1, minRect, Scalar( 0, 0, 255 ) );
}
}
imshow( "result of sorting contours by bounding boxes ", canvas1 );
sort(contours.begin(), contours.end(), contour_sorter_dsc_area());
for( size_t i = 0; i< 2; i++ )
{ // checks if the first contour is image boundary
if( contours[0][0] == Point( 1, 1 ) & contours[0][1] == Point( 1, gray.rows -2 )
& contours[0][2] == Point( gray.cols - 2, gray.rows -2 ) & contours[0][3] == Point( gray.cols - 2, 1 ) )
{
contours[0] = contours[1];
contours[1] = contours[2];
}
if( i < contours.size())
{
drawContours( canvas2, contours, i, Scalar( 255,255,0 ) );
Rect minRect = boundingRect( Mat(contours[i]) );
rectangle( canvas2, minRect, Scalar( 0, 0, 255 ) );
}
}
imshow( "result of sorting contours by areas ", canvas2 );
waitKey();
return 0;
}
Input image
Result images according to sort type
OpenCV 2.4.5, CUDA 5.0
I tried to transfer my SURF matcher from the CPU to the GPU and got a strange result. I use knnMatch and findHomography + perspectiveTransform, together with my own function that checks the corners of the resulting bounding box for extra precision.
GPU part:
const int baseImagesSize = baseImages.size();
SURF_GPU surf(1500);
surf.extended = false;
GpuMat keypoints_test_GPU, descriptors_test_GPU;
surf(frame, GpuMat(), keypoints_test_GPU, descriptors_test_GPU);
vector<float> descriptors_test_CPU;
surf.downloadDescriptors(descriptors_test_GPU, descriptors_test_CPU);
Mat descriptors_test_CPU_Mat(descriptors_test_CPU);
vector<Point2f> objs_corners(4);
BruteForceMatcher_GPU< L2<float> > matcher;
vector<KeyPoint> keypoints_test_CPU;
surf.downloadKeypoints(keypoints_test_GPU, keypoints_test_CPU);
for (int i = 0; i < baseImagesSize; ++i)
{
//Get the corners from the object
objs_corners[0] = cvPoint(0,0);
objs_corners[1] = cvPoint( baseImages[i].cols, 0 );
objs_corners[2] = cvPoint( baseImages[i].cols, baseImages[i].rows );
objs_corners[3] = cvPoint( 0, baseImages[i].rows );
//cout<<endl<<objs_corners[0]<<" "<<objs_corners[1]<<" "<<objs_corners[2]<<" "<<objs_corners[3]<<endl;
GpuMat keypoints_tmp_GPU, descriptors_tmp_GPU;
surf(baseImages[i], GpuMat(), keypoints_tmp_GPU, descriptors_tmp_GPU);
GpuMat trainIdx, distance;
vector< vector<DMatch> > matches;
matcher.knnMatch(descriptors_test_GPU, descriptors_tmp_GPU, matches, 2);
vector<KeyPoint> keypoints_tmp_CPU;
surf.downloadKeypoints(keypoints_tmp_GPU, keypoints_tmp_CPU);
std::vector<DMatch > good_matches;
for(int k = 0; k < min(descriptors_test_CPU_Mat.rows-1,(int) matches.size()); k++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches[k][0].distance < 0.6*(matches[k][1].distance)) && ((int) matches[k].size()<=2 && (int) matches[k].size()>0))
{
good_matches.push_back(matches[k][0]);
}
}
vector<Point2f> obj;
vector<Point2f> scene;
vector<Point2f> scene_corners(4);
Mat H;
Mat img (baseImages[i]), img_matches, frame_cpu (frame);
std::ostringstream o_stream;
o_stream<<"Logo_save/"<<baseImagesNames[i];
try
{
drawMatches( img, keypoints_tmp_CPU, frame_cpu, keypoints_test_CPU, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
imwrite(o_stream.str(),img_matches);
}
catch(...)
{
cout<<"Error in drawMatches name: "<< baseImagesNames[i]<<endl;
}
if (good_matches.size() >= 4)
{
for( int k = 0; k < good_matches.size(); k++ )
{
//Get the keypoints from the good matches
obj.push_back( (keypoints_tmp_CPU)[ good_matches[k].queryIdx ].pt );
scene.push_back( keypoints_test_CPU[ good_matches[k].trainIdx ].pt );
}
cout<<good_matches.size()<<" "<<baseImagesNames[i]<<endl;
H = findHomography( obj, scene, CV_RANSAC);
perspectiveTransform( objs_corners, scene_corners, H);
bool falseDetect = isSmallAngle(scene_corners);
//cout<< falseDetect<< endl;
if(!falseDetect)
{
cout<<"DETECT "<<baseImagesNames[i]<<endl;
}
}
matcher.clear();
}
Bad result on GPU (MIN_HESSIAN==1500):
Bad result on GPU (MIN_HESSIAN==400):
CPU part:
SurfFeatureDetector detector( MIN_HESSIAN );//MIN_HESSIAN==400
const int baseImagesSize = baseImages.size();
vector< vector<KeyPoint> > kp_objects(baseImagesSize);
//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
vector<Mat> des_objects(baseImagesSize);
FlannBasedMatcher matcher;
//namedWindow("SURF feature detector");
vector< vector<Point2f> > objs_corners(baseImagesSize,vector<Point2f>(4));
for (int i = 0; i < baseImagesSize; ++i)
{
detector.detect(baseImages[i], kp_objects[i]);
extractor.compute(baseImages[i], kp_objects[i], des_objects[i]);
//Get the corners from the object
(objs_corners[i])[0] = cvPoint(0,0);
(objs_corners[i])[1] = cvPoint( baseImages[i].cols, 0 );
(objs_corners[i])[2] = cvPoint( baseImages[i].cols, baseImages[i].rows );
(objs_corners[i])[3] = cvPoint( 0, baseImages[i].rows );
}
Mat des_image;
std::vector<KeyPoint> kp_image;
Mat image;
cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
for (int i = 0; i < baseImagesSize; ++i)
{
Mat img_matches;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
matcher.knnMatch(des_objects[i], des_image, matches, 2);
for(int k = 0; k < min(des_image.rows-1,(int) matches.size()); k++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches[k][0].distance < 0.6*(matches[k][4].distance)) && ((int) matches[k].size()<=2 && (int) matches[k].size()>0))
{
good_matches.push_back(matches[k][0]);
}
}
//Draw only "good" matches
std::ostringstream o_stream;
o_stream<<"Logo_save/"<<baseImagesNames[i];
try
{
drawMatches( baseImages[i], kp_objects[i], image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
imwrite(o_stream.str(),img_matches);
}
catch(...)
{
cout<<"Error in drawMatches name: "<< baseImagesNames[i]<<endl;
}
if (good_matches.size() >= 4)
{
for( int k = 0; k < good_matches.size(); k++ )
{
//Get the keypoints from the good matches
obj.push_back( (kp_objects[i])[ good_matches[k].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[k].trainIdx ].pt );
}
H = findHomography( obj, scene, CV_RANSAC);
perspectiveTransform( objs_corners[i], scene_corners, H);
bool falseDetect = isSmallAngle(scene_corners);
if(!falseDetect)
{
cout<<"DETECT "<<baseImagesNames[i]<<endl;
}
}
}
Good result on CPU (MIN_HESSIAN==400):
Problem solved (see the linked topic).
Since knnMatch was called with the test frame descriptors as the query set, queryIdx indexes keypoints_test_CPU and trainIdx indexes keypoints_tmp_CPU, and drawMatches expects the query image first. So swap the parameters in the drawMatches call:
drawMatches(frame_cpu, keypoints_test_CPU, img, keypoints_tmp_CPU, ...);
Likewise, pair each keypoint array with its own index when building the correspondences, so that obj takes points from the base image and scene takes points from the frame:
obj.push_back( keypoints_tmp_CPU[ good_matches[k].trainIdx ].pt );
scene.push_back( keypoints_test_CPU[ good_matches[k].queryIdx ].pt );
Does anyone know how to use the cvFindDominantPoints API of OpenCV? I basically have a 1-channel binary image from which I get a set of contours. Judging from the image, I seem to be getting the correct contours. Now I am selecting one of these contours to get the dominant points of; this contour has about 60 vertices. However, the call to cvFindDominantPoints gives me a sequence of about 15 points that do not even lie on the contour; they are quite far from it. Any insight?
My usage:
CvSeq *dominantpoints = cvFindDominantPoints(targetSeq, tristorage, CV_DOMINANT_IPAN, 7, 9, 9, 150);
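For what it is worth, my reading of the legacy API (worth double-checking against OpenCV's cvdominants.cpp source) is that the sequence returned by CV_DOMINANT_IPAN holds integer indices into the input contour rather than CvPoint structs, so reading its elements as points would land far off the contour. Under that assumption, the result would be read back like this:

for (int i = 0; i < dominantpoints->total; i++)
{
    int idx = *(int *) cvGetSeqElem(dominantpoints, i);   // an index, not a point
    CvPoint p = *(CvPoint *) cvGetSeqElem(targetSeq, idx); // the actual contour point
    printf("%d -> (%d, %d)\n", idx, p.x, p.y);
}

The program below does exactly this.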
#include "cv.h"
#include "highgui.h"
CvSeq* contours = 0;
CvSeq* dps = 0;
int main( int argc, char** argv )
{
int i, idx;
CvPoint p;
CvMemStorage* storage_ct = cvCreateMemStorage(0);
CvMemStorage* storage_dp = cvCreateMemStorage(0);
IplImage* img = cvLoadImage("contour.bmp", CV_LOAD_IMAGE_GRAYSCALE);
cvNamedWindow( "image" );
cvShowImage( "image", img );
cvFindContours( img, storage_ct, &contours, sizeof(CvContour),
CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE );
dps = cvFindDominantPoints( contours, storage_dp, CV_DOMINANT_IPAN, 7, 20, 9, 150 );
// Keep a handle to the original contour: the dominant-point indices
// refer to it, not to the polygon produced by cvApproxPoly below.
CvSeq* raw_contour = contours;
contours = cvApproxPoly( contours, sizeof(CvContour), storage_ct, CV_POLY_APPROX_DP, 3, 1 );
printf("found %d DPs and %d Contours \n", dps->total, contours->total );
for ( i = 0; i < dps->total; i++)
{
idx = *(int *) cvGetSeqElem(dps, i);
p = *(CvPoint *) cvGetSeqElem(raw_contour, idx);
cvDrawCircle( img, p , 1, cvScalarAll(255) );
printf("%d %d %d\n", idx, p.x, p.y);
}
cvDrawContours(img, contours, cvScalarAll(100), cvScalarAll(200), 100 );
cvNamedWindow( "contours" );
cvShowImage( "contours", img );
cvWaitKey(0);
cvReleaseMemStorage( &storage_ct );
cvReleaseMemStorage( &storage_dp );
cvReleaseImage( &img );
return 0;
}