cvFindContours always returns 1 - OpenCV

I am using OpenCV 2.4.6 and Visual C++ 2010. I am trying to track the color RED in the code below. I use cvInRangeS to extract the red object from the image as a binary image, and then cvFindContours(..) to find the contours of the object. But cvFindContours(..) always returns 1. I also use cvBoundingRect(..) to get each contour's position in the image, and the rectangle I get is always (1,1,638,478), which means cvFindContours(..) is treating the whole image as a single contour. How can I get contours for only the RED objects?
int main()
{
    IplImage* mCVImageColor = cvCreateImageHeader(cvSize(640,480), IPL_DEPTH_8U, 3);
    IplImage* imgTracking;
    CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
    namedWindow( "Display image", CV_WINDOW_AUTOSIZE );
    namedWindow( "Display window", CV_WINDOW_AUTOSIZE );
    namedWindow( "Display tracking", CV_WINDOW_AUTOSIZE );
    namedWindow( "Display thresh", CV_WINDOW_AUTOSIZE );
    CvRect rect;
    CvMemStorage *storage = cvCreateMemStorage(0);
    CvSeq *contours = 0;
    while(1)
    {
        IplImage* mCVImageColor = cvQueryFrame( capture );
        cvSmooth(mCVImageColor, mCVImageColor, CV_GAUSSIAN, 3, 3);
        imgTracking = cvCreateImage(cvGetSize(mCVImageColor), IPL_DEPTH_8U, 1);
        IplImage* imgHSV = cvCreateImage(cvGetSize(mCVImageColor), IPL_DEPTH_8U, 3);
        IplImage* imgThresh = cvCreateImage(cvGetSize(mCVImageColor), 8, 1);
        cvCvtColor(mCVImageColor, imgHSV, CV_BGR2HSV);
        cvInRangeS(imgHSV, cvScalar(170,160,60), cvScalar(180,255,256), imgThresh);
        cvAdd(imgThresh, imgTracking, imgTracking);
        cvConvertScale(imgTracking, imgTracking, 1.0, 0.0);
        int x = cvFindContours(imgTracking, storage, &contours, sizeof(CvContour),
                               CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(2, 2));
        printf("%d\n", contours->total);
        for (; contours != 0; contours = contours->h_next)
        {
            rect = cvBoundingRect(contours); //extract bounding box for current contour
            //drawing rectangle
            cvRectangle(mCVImageColor, cvPoint(rect.x, rect.y),
                        cvPoint(rect.x+rect.width, rect.y+rect.height),
                        cvScalar(0, 0, 255), 2, 8, 0);
            printf("%d %d %d %d\n", rect.x, rect.y, rect.width, rect.height);
            if(rect.width*rect.height < 1400)
            {
                CvPoint centroid[1];
                centroid[0].x = cvRound((rect.x+rect.width)/2);
                centroid[0].y = cvRound((rect.y+rect.height)/2);
                cvCircle( mCVImageColor, centroid[0], 5, CV_RGB(255, 0, 0), 10, -1);
            }
        }
        cvShowImage("Display tracking", imgTracking);
        cvShowImage("Display window", imgHSV);
        cvShowImage("Display image", mCVImageColor);
        cvShowImage("Display thresh", imgThresh);
        cvClearMemStorage( storage );
        contours = 0;
        //cvReleaseImage(&imgHSV);
        //cvReleaseImage(&imgThresh);
        int c = waitKey(20);
        if(c == 'q')
            break;
    }
}
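A likely cause, judging only from the code above: imgTracking is allocated with cvCreateImage on every iteration but never cleared, and cvCreateImage does not zero the pixel buffer, so cvAdd mixes the threshold result with uninitialized data and cvFindContours then sees an almost entirely non-zero image, i.e. a single contour covering the whole frame. A minimal sketch of that fix inside the loop (keeping the original variable names) would be:
    // After creating the per-frame images:
    cvZero(imgTracking);                         // clear the uninitialized buffer

    cvCvtColor(mCVImageColor, imgHSV, CV_BGR2HSV);
    cvInRangeS(imgHSV, cvScalar(170, 160, 60), cvScalar(180, 255, 256), imgThresh);

    // Accumulate the binary mask into the now-zeroed tracking image
    // (or simply pass imgThresh to cvFindContours instead).
    cvAdd(imgThresh, imgTracking, imgTracking);

    // Note: cvFindContours modifies its input image in place.
    int n = cvFindContours(imgTracking, storage, &contours, sizeof(CvContour),
                           CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
Releasing imgHSV, imgThresh and imgTracking at the end of each iteration would also avoid the per-frame leak that the commented-out cvReleaseImage calls hint at.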

Related

Detecting edges (connected edges) and finding edge length and connected-component radius of gyration

[original image][1]
[1]: https://i.stack.imgur.com/j7brr.jpg
I am trying to detect the clusters of connected boundaries in this image. I need to find the length of these edges and also the radius of gyration of the individual clusters.
I am using OpenCV 2.4.13.
I used the following code to detect the mass clusters using contours.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
/// Function header
void thresh_callback(int, void* );
/** #function main */
int main( int argc, char** argv )
{
/// Load source image and convert it to gray
src = imread( argv[1], 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, CV_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey(0);
return(0);
}
/** #function thresh_callback */
void thresh_callback(int, void* )
{
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using canny
Canny( src_gray, canny_output, thresh, thresh*2, 3 );
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Get the moments
vector<Moments> mu(contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ mu[i] = moments( contours[i], false ); }
/// Get the mass centers:
vector<Point2f> mc( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); }
/// Draw contours
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
Mat drawing2 = Mat::zeros( canny_output.size(), CV_8UC3 );
for( int i = 0; i< contours.size(); i++ )
{if(arcLength( contours[i], true )>900)
{Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
circle( drawing, mc[i], 4, color, -1, 8, 0 );}
}
int length=0;
int j=0;
for( int i = 0; i< contours.size(); i++ )
{
if(arcLength( contours[i], true )>length)
{
length=arcLength( contours[i], true );
j=i;
}
}
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing2, contours, j, color, 2, 8, hierarchy, 0, Point() );
circle( drawing2, mc[j], 4, color, -1, 8, 0 );
/// Show in a window
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
namedWindow( "Contours2", CV_WINDOW_AUTOSIZE );
imshow( "Contours_max", drawing2 );
/// Calculate the area with the moments 00 and compare with the result of the OpenCV function
printf("\t Info: Area and Contour Length \n");
for( int i = 0; i< contours.size(); i++ )
{
if(arcLength( contours[i], true )>900)
{printf(" * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f \n", i, mu[i].m00, contourArea(contours[i]), arcLength( contours[i], true ) );
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
circle( drawing, mc[i], 4, color, -1, 8, 0 );}
}
}
The problem is that contours sharing common edges come out as separate contours, although logically they should belong to the same cluster. The contour image I get is shown below.
We can see that many contours sharing the same edges are returned separately as different contours; I want them to be part of the same boundary cluster. Please also suggest how to measure the length of the boundaries and the radius of gyration of each cluster.
Please help.
I am incredibly confused by your question (I would ask for clarification in a comment, but I am too much of a noob to comment).
My only advice, based on what I see and understand, is that you may not want to be using a Canny filter. To be clear, your original image already consists of edges; running a Canny filter on that gives you "double edges", which I do not think you want. But again, I am not even sure what you are trying to achieve.
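If the goal is one contour per connected cluster together with its boundary length and radius of gyration, a minimal sketch of that alternative could look like the following. It assumes the input is already a near-binary line drawing, so a plain threshold stands in for Canny (the threshold value is a guess), CV_RETR_EXTERNAL gives one outer contour per cluster, arcLength gives the boundary length, and the radius of gyration of the enclosed region is taken from the central moments as Rg = sqrt((mu20 + mu02) / m00).
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <cmath>
#include <cstdio>
#include <vector>

int main(int argc, char** argv)
{
    cv::Mat src = cv::imread(argv[1], 0);   // load directly as grayscale

    // Threshold instead of Canny so each drawn line stays a single ridge.
    // Use CV_THRESH_BINARY_INV instead if the lines are dark on a light background.
    cv::Mat bw;
    cv::threshold(src, bw, 128, 255, CV_THRESH_BINARY);

    // One outer contour per connected cluster.
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(bw, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    for (int i = 0; i < (int)contours.size(); i++)
    {
        double length = cv::arcLength(contours[i], true);

        // Central moments of the region enclosed by the outer contour:
        // Rg^2 = (mu20 + mu02) / m00.
        cv::Moments m = cv::moments(contours[i]);
        if (m.m00 > 0)
        {
            double rg = std::sqrt((m.mu20 + m.mu02) / m.m00);
            printf("cluster %d: boundary length %.1f, radius of gyration %.1f\n",
                   i, length, rg);
        }
    }
    return 0;
}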

Why does contour area only return 0?

I'm trying to find the contour area, but cvContourArea only returns 0 whatever the contour is. No error is shown.
void CMFC_DEMODlg::OnBnClickedRun()
{
    CString str;
    IplImage* src1 = cvLoadImage("onlyone001.JPG", 1);
    IplImage* src2 = cvLoadImage("b004.JPG", 1);
    IplImage* grey = cvCreateImage( cvGetSize(src2), 8, 1 );
    IplImage* dst  = cvCreateImage( cvGetSize(src2), 8, 3 );
    IplImage* F    = cvCreateImage( cvGetSize(src2), 8, 1 );
    IplImage* W    = cvCreateImage( cvGetSize(src2), 8, 1 );

    cvCvtColor( src2, grey, CV_BGR2GRAY);
    cvThreshold( grey, W, 200, 255, CV_THRESH_BINARY);

    CvSeq* c;
    CvSeq* contour;
    CvSeq* result;
    CvMemStorage* storage = cvCreateMemStorage(0);

    int Nc = cvFindContours(
        W,
        storage,
        &contour,
        sizeof(CvContour),
        CV_RETR_LIST,
        CV_CHAIN_APPROX_SIMPLE,
        cvPoint(0,0));

    double Area;
    result = cvApproxPoly(contour, sizeof(CvContour), storage,
                          CV_POLY_APPROX_DP, cvContourPerimeter(contour)*0.02, 0);
    Area = fabs(cvContourArea(result, CV_WHOLE_SEQ));

    str.Format("Area: %d\n", Area);
    GetDlgItem(IDC_STATIC02)->SetWindowText(str);
    str.Format("Nc: %d\n", Nc);
    GetDlgItem(IDC_STATIC01)->SetWindowText(str);

    m_CvvImage.CopyOf(src2,1);
    m_CvvImage.DrawToHDC(Disp_hDC1,Disp_Rect);
    m_CvvImage.CopyOf(src1,1);
    m_CvvImage.DrawToHDC(Disp_hDC2,Disp_Rect);
    m_CvvImage.CopyOf(W,1);
    m_CvvImage.DrawToHDC(Disp_hDC3,Disp_Rect);
}
I simply want to calculate the area of each contour in my image. What am I doing wrong?
The points should be placed in the vector in clockwise order.
Take a square for example:
corners2.push_back(Point(0,0));
corners2.push_back(Point(src.rows-1,0));
corners2.push_back(Point(src.rows-1,src.cols-1));
corners2.push_back(Point(1,src.cols-1));
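One more thing that may be contributing here, separate from the point ordering: Area is a double, but it is printed with a %d format specifier, so the value shown in the dialog is unreliable even when cvContourArea returns something sensible. A minimal correction for that line:
// Area is a double, so use a floating-point format specifier:
str.Format("Area: %.2f\n", Area);
GetDlgItem(IDC_STATIC02)->SetWindowText(str);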

Read HSV value of pixel in opencv

How would you go about reading a pixel value in HSV format rather than RGB? The code below reads the pixel value at each circle's center in RGB format. Is there much difference when it comes to reading the value in HSV?
int main(int argc, char** argv)
{
    //load image from directory
    IplImage* img = cvLoadImage("C:\\Users\\Nathan\\Desktop\\SnookerPic.png");
    IplImage* gray = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    CvMemStorage* storage = cvCreateMemStorage(0);

    //convert to grayscale
    cvCvtColor(img, gray, CV_BGR2GRAY);

    // This is done so as to prevent a lot of false circles from being detected
    cvSmooth(gray, gray, CV_GAUSSIAN, 7, 7);

    IplImage* canny = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    IplImage* rgbcanny = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    cvCanny(gray, canny, 50, 100, 3);

    //detect circles
    CvSeq* circles = cvHoughCircles(gray, storage, CV_HOUGH_GRADIENT, 1, 35.0, 75, 60, 0, 0);
    cvCvtColor(canny, rgbcanny, CV_GRAY2BGR);

    //draw all detected circles
    for (int i = 0; i < circles->total; i++)
    {
        // round the floats to an int
        float* p = (float*)cvGetSeqElem(circles, i);
        cv::Point center(cvRound(p[0]), cvRound(p[1]));
        int radius = cvRound(p[2]);

        //uchar* ptr;
        //ptr = cvPtr2D(img, center.y, center.x, NULL);
        //printf("B: %d G: %d R: %d\n", ptr[0],ptr[1],ptr[2]);

        CvScalar s;
        s = cvGet2D(img, center.y, center.x); //colour of circle
        printf("B: %f G: %f R: %f\n", s.val[0], s.val[1], s.val[2]);

        // draw the circle center
        cvCircle(img, center, 3, CV_RGB(0,255,0), -1, 8, 0 );
        // draw the circle outline
        cvCircle(img, center, radius+1, CV_RGB(0,0,255), 2, 8, 0 );

        //display coordinates
        printf("x: %d y: %d r: %d\n", center.x, center.y, radius);
    }

    //create window
    //cvNamedWindow("circles", 1);
    cvNamedWindow("SnookerImage", 1);

    //show image in window
    //cvShowImage("circles", rgbcanny);
    cvShowImage("SnookerImage", img);
    cvSaveImage("out.png", img);

    //cvDestroyWindow("SnookerImage");
    //cvDestroyWindow("circles");
    //cvReleaseMemStorage("storage");

    cvWaitKey(0);
    return 0;
}
If you use the C++ interface, you can use
cv::cvtColor(img, img, CV_BGR2HSV);
See the documentation for cvtColor for more information.
Update:
Reading and writing pixels the slow way (assuming that the HSV values are stored as a cv::Vec3b (doc))
cv::Vec3b pixel = image.at<cv::Vec3b>(0,0); // read pixel (0,0) (make copy)
pixel[0] = 0; // H
pixel[1] = 0; // S
pixel[2] = 0; // V
image.at<cv::Vec3b>(0,0) = pixel; // write pixel (0,0) (copy pixel back to image)
Using the image.at<...>(row, col) notation (doc, scroll down a lot) is quite slow if you want to manipulate every pixel. There is an article in the documentation on how to access the pixels faster. You can also apply the iterator method, like this:
cv::MatIterator_<cv::Vec3b> it = image.begin<cv::Vec3b>(),
                            it_end = image.end<cv::Vec3b>();
for(; it != it_end; ++it)
{
    // work with pixel in here, e.g.:
    cv::Vec3b& pixel = *it; // reference to pixel in image
    pixel[0] = 0; // changes pixel in image
}
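Since the code in the question sticks to the C API, a minimal sketch of the same idea with IplImage (using the img and center variables from the question's loop) might look like this:
// Convert the loaded BGR image to HSV once, outside the circle loop...
IplImage* imgHSV = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
cvCvtColor(img, imgHSV, CV_BGR2HSV);

// ...then sample it at each circle center. cvGet2D takes (image, row, column), i.e. (y, x).
CvScalar hsv = cvGet2D(imgHSV, center.y, center.x);
printf("H: %f S: %f V: %f\n", hsv.val[0], hsv.val[1], hsv.val[2]);

cvReleaseImage(&imgHSV);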

Color detection on HoughCircles using OpenCV

I have detected 22 balls and am struggling to find a way to run a color detection algorithm on these circles to get their colors. I am using HoughCircles to detect the circles, but I don't know how to check what color each circle is.
Source Code:
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
#include <math.h>

int main(int argc, char** argv)
{
    //load image from directory
    IplImage* img = cvLoadImage("C:\\Users\\Nathan\\Desktop\\SnookerPic.png");
    IplImage* gray = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    CvMemStorage* storage = cvCreateMemStorage(0);

    //convert to grayscale
    cvCvtColor(img, gray, CV_BGR2GRAY);

    // This is done so as to prevent a lot of false circles from being detected
    cvSmooth(gray, gray, CV_GAUSSIAN, 7, 7);

    IplImage* canny = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    IplImage* rgbcanny = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    cvCanny(gray, canny, 50, 100, 3);

    //detect circles
    CvSeq* circles = cvHoughCircles(gray, storage, CV_HOUGH_GRADIENT, 1, 35.0, 75, 60, 0, 0);
    cvCvtColor(canny, rgbcanny, CV_GRAY2BGR);

    //draw all detected circles
    for (int i = 0; i < circles->total; i++)
    {
        // round the floats to an int
        float* p = (float*)cvGetSeqElem(circles, i);
        cv::Point center(cvRound(p[0]), cvRound(p[1]));
        int radius = cvRound(p[2]);

        cvScalar c = cvGet2D(center.x, center.y);//colour of circle

        // draw the circle center
        cvCircle(img, center, 3, CV_RGB(0,255,0), -1, 8, 0 );
        // draw the circle outline
        cvCircle(img, center, radius+1, CV_RGB(0,0,255), 2, 8, 0 );

        //display coordinates
        printf("x: %d y: %d r: %d\n", center.x, center.y, radius);
    }

    //create window
    cvNamedWindow("circles", 1);
    cvNamedWindow("SnookerImage", 1);

    //show image in window
    cvShowImage("circles", rgbcanny);
    cvShowImage("SnookerImage", img);
    cvSaveImage("out.png", rgbcanny);

    cvWaitKey(0);
    return 0;
}
If the balls each have a uniform color, you can check the color at the center:
CvMemStorage* storage = cvCreateMemStorage(0);
cvSmooth(image, image, CV_GAUSSIAN, 5, 5 );
CvSeq* results = cvHoughCircles(
    image,
    storage,
    CV_HOUGH_GRADIENT,
    2,
    image->width/10
);
for( int i = 0; i < results->total; i++ )
{
    float* p = (float*) cvGetSeqElem( results, i );
    CvPoint center = cvPoint( cvRound( p[0] ), cvRound( p[1] ) );
    CvScalar c = cvGet2D(image, center.y, center.x); //color of the center (row index comes first)
}
Haven't tested the code but it should be ok.
EDIT:
Ooops, I forgot one parameter from the Get2D method, the actual image from which to get the color. Changed to the correct form.
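To turn the sampled center pixel into a ball colour, one option is to convert that single BGR value to HSV and compare the hue against per-colour ranges. This is a rough sketch only: the hue/saturation thresholds below are guesses that would need tuning against the actual snooker image, and img, center and i are the variables from the loop in the question.
// Convert a single sampled BGR pixel to HSV by wrapping it in a 1x1 image.
CvScalar bgr = cvGet2D(img, center.y, center.x);

IplImage* px    = cvCreateImage(cvSize(1, 1), IPL_DEPTH_8U, 3);
IplImage* pxHSV = cvCreateImage(cvSize(1, 1), IPL_DEPTH_8U, 3);
cvSet2D(px, 0, 0, bgr);
cvCvtColor(px, pxHSV, CV_BGR2HSV);
CvScalar hsv = cvGet2D(pxHSV, 0, 0);

double h = hsv.val[0], s = hsv.val[1], v = hsv.val[2];

// Very rough classification; the thresholds are guesses, not measured values.
if (s < 60 && v > 180)
    printf("circle %d looks white-ish\n", i);
else if (h < 10 || h > 170)
    printf("circle %d looks red\n", i);
else if (h >= 35 && h <= 85)
    printf("circle %d looks green\n", i);
else
    printf("circle %d: H=%.0f S=%.0f V=%.0f\n", i, h, s, v);

cvReleaseImage(&px);
cvReleaseImage(&pxHSV);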
We have written our own blob detection library, included in the open source vision framework:
http://www.simplecv.org
The code to do what you want is as easy as:
img = Image("/path/to/image.png")
blobs = img.findBlobs()
circle_blobs = blobs.filter(blobs.isCircle() == True)
list_of_blobs_colors = circle_blobs.meanColor()

openCV usage cvFindDominantPoints

Does anyone know how to use the cvFindDominantPoints API of OpenCV? I basically have a 1-channel, binary image from which I get a set of contours. Judging from the image, I seem to be getting the correct contours. Now I select one of these contours and try to get its dominant points. This contour has about 60 vertices. However, the call to cvFindDominantPoints gives me a sequence of about 15 points that do not even lie on the contour; they are quite far from it. Any insight?
my usage:
CvSeq *dominantpoints = cvFindDominantPoints(targetSeq, tristorage, CV_DOMINANT_IPAN, 7, 9, 9, 150);
#include "cv.h"
#include "highgui.h"
CvSeq* contours = 0;
CvSeq* dps = 0;
int main( int argc, char** argv )
{
int i, idx;
CvPoint p;
CvMemStorage* storage_ct = cvCreateMemStorage(0);
CvMemStorage* storage_dp = cvCreateMemStorage(0);
IplImage* img = cvLoadImage("contour.bmp", CV_LOAD_IMAGE_GRAYSCALE);
cvNamedWindow( "image" );
cvShowImage( "image", img );
cvFindContours( img, storage_ct, &contours, sizeof(CvContour),
CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE );
dps = cvFindDominantPoints( contours, storage_dp, CV_DOMINANT_IPAN, 7, 20, 9, 150 );
contours = cvApproxPoly( contours, sizeof(CvContour), storage_ct, CV_POLY_APPROX_DP, 3, 1 );
printf("found %d DPs and %d Contours \n", dps->total, contours->total );
for ( i = 0; i < dps->total; i++)
{
idx = *(int *) cvGetSeqElem(dps, i);
p = *(CvPoint *) cvGetSeqElem(contours, idx);
cvDrawCircle( img, p , 1, cvScalarAll(255) );
printf("%d %d %d\n", idx, p.x, p.y);
}
cvDrawContours(img, contours, cvScalarAll(100), cvScalarAll(200), 100 );
cvNamedWindow( "contours" );
cvShowImage( "contours", img );
cvWaitKey(0);
cvReleaseMemStorage( &storage_ct );
cvReleaseMemStorage( &storage_dp );
cvReleaseImage( &img );
return 0;
}
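One thing that may be worth checking, based only on the code shown: dps is computed from contours, but contours is then overwritten with the cvApproxPoly result before the indices are looked up, so the loop indexes into a polygon with different (and fewer) vertices. If the dominant-point sequence does hold indices into the original contour, as the cast to int assumes, looking the points up before the approximation should at least put the circles on the contour. A sketch of that rearrangement, under that assumption:
// Keep the original contour around so the dominant-point indices
// refer to the sequence they were computed from.
CvSeq* original = contours;
dps = cvFindDominantPoints( original, storage_dp, CV_DOMINANT_IPAN, 7, 20, 9, 150 );

for ( i = 0; i < dps->total; i++ )
{
    idx = *(int *) cvGetSeqElem( dps, i );
    p = *(CvPoint *) cvGetSeqElem( original, idx );   // look up in the ORIGINAL contour
    cvDrawCircle( img, p, 1, cvScalarAll(255) );
}

// Approximate afterwards (or into a separate variable) if the simplified
// polygon is still needed for drawing.
CvSeq* approx = cvApproxPoly( original, sizeof(CvContour), storage_ct,
                              CV_POLY_APPROX_DP, 3, 1 );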
