Displaying an outline on top of an image - OpenCV

I am trying to draw a face outline and overlay it on top of a webcam image.
But towards the end I think I am using addWeighted the wrong way, because my program crashes.
Could you please help me understand what I am doing wrong with imshow and addWeighted?
int main( int argc, const char** argv )
{
VideoCapture camera;
camera.open(0);
if( !camera.isOpened() )
{
cerr << "Could not access the camera!" << endl;
return 1;
}
while( true )
{
Mat cameraFrame;
camera >> cameraFrame;
if( cameraFrame.empty() )
{
cerr << "Could not grab a camera frame!" << endl;
return 1;
}
Mat gray;
cvtColor( cameraFrame, gray, CV_BGR2GRAY );
Size size = cameraFrame.size();
Mat faceOutline = Mat::zeros( size, CV_8UC3 ); // Draw a black canvas.
Scalar color = CV_RGB( 255, 255, 0 ); // Yellow
int thickness = 4;
ellipse( faceOutline, Point(320, 240), Size(320, 240), 0, 0, 360, color, thickness, CV_AA );
addWeighted( gray, 1.0, faceOutline, 0.7, 0, gray, CV_8UC3 );
imshow( "final image", gray );
char keypress = waitKey(20);
if( keypress == 27 ) break;
}
}

This works fine:
int main( int argc, const char** argv )
{
VideoCapture camera;
camera.open(0);
if( !camera.isOpened() )
{
cerr << "Could not access the camera!" << endl;
return 1;
}
while( true )
{
Mat cameraFrame;
camera >> cameraFrame;
if( cameraFrame.empty() )
{
cerr << "Could not grab a camera frame!" << endl;
return 1;
}
Mat gray;
cvtColor( cameraFrame, gray, cv::COLOR_BGR2GRAY );
Size size = cameraFrame.size();
Mat faceOutline = Mat::zeros( size, CV_8UC3 ); // Draw a black canvas.
Scalar color = Scalar( 255, 255, 0 ); // Yellow
int thickness = 4;
cvtColor( gray, gray, cv::COLOR_GRAY2BGR );
ellipse( faceOutline, Point(320, 240), Size(320, 240), 0, 0, 360, color, thickness );
addWeighted( gray, 1.0, faceOutline, 0.7, 0, gray, CV_8UC3 );
imshow( "final image", gray );
char keypress = waitKey(20);
if( keypress == 27 ) break;
}
}

Why not just draw the ellipse into cameraFrame directly?
ellipse( cameraFrame, Point(320, 240), Size(320, 240), 0, 0, 360, color, thickness, CV_AA );
And if you want to use addWeighted:
the type of both input images has to match (you can't add a color image to a grayscale one),
the factors should sum to 1.0 if you want to keep the overall brightness in range,
the last argument is a depth, not a type (i.e. you could convert the result to float here, but not change the number of channels).
addWeighted( cameraFrame, 0.7, faceOutline, 0.3, 0, cameraFrame );
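To make the depth point concrete - a hedged illustration; the parameter is called dtype and defaults to -1, meaning "same depth as the inputs":
// dtype selects the output depth only; the channel count always follows the inputs.
addWeighted( cameraFrame, 0.7, faceOutline, 0.3, 0, cameraFrame );          // keeps CV_8U
Mat blended32f;
addWeighted( cameraFrame, 0.7, faceOutline, 0.3, 0, blended32f, CV_32F );   // float result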

I suppose your gray image is single-channel and your faceOutline image has 3 channels.
From the documentation:
src2 – second input array of the same size and channel number as src1.
Try mixChannels if you need to copy a single channel into a multi-channel image.
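For completeness, a minimal sketch of the mixChannels route - replicating the single gray plane into a 3-channel image so both addWeighted inputs match (the cvtColor(gray, gray, COLOR_GRAY2BGR) call in the working version above achieves the same thing):
// Copy the one gray channel into all three channels of a BGR image.
Mat gray3( gray.size(), CV_8UC3 );
int fromTo[] = { 0,0, 0,1, 0,2 };              // source plane 0 -> dest channels 0, 1, 2
mixChannels( &gray, 1, &gray3, 1, fromTo, 3 );
addWeighted( gray3, 0.7, faceOutline, 0.3, 0, gray3 );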


Detecting edges (connected edges) and finding edge length and connected-component radius of gyration

Original image: https://i.stack.imgur.com/j7brr.jpg
I am trying to detect the clusters of connected boundaries in this image. I need to find the length of these edges and also the radius of gyration of the individual clusters.
I am using OpenCV 2.4.13.
I used the following code to detect the mass clusters using contours.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 100;
int max_thresh = 255;
RNG rng(12345);
/// Function header
void thresh_callback(int, void* );
/** @function main */
int main( int argc, char** argv )
{
/// Load source image and convert it to gray
src = imread( argv[1], 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, CV_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
const char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey(0);
return(0);
}
/** @function thresh_callback */
void thresh_callback(int, void* )
{
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using canny
Canny( src_gray, canny_output, thresh, thresh*2, 3 );
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Get the moments
vector<Moments> mu(contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ mu[i] = moments( contours[i], false ); }
/// Get the mass centers:
vector<Point2f> mc( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); }
/// Draw contours
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
Mat drawing2 = Mat::zeros( canny_output.size(), CV_8UC3 );
for( int i = 0; i < contours.size(); i++ )
{
if( arcLength( contours[i], true ) > 900 )
{
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
circle( drawing, mc[i], 4, color, -1, 8, 0 );
}
}
double length = 0; // arcLength returns a double
int j=0;
for( int i = 0; i< contours.size(); i++ )
{
if(arcLength( contours[i], true )>length)
{
length=arcLength( contours[i], true );
j=i;
}
}
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
drawContours( drawing2, contours, j, color, 2, 8, hierarchy, 0, Point() );
circle( drawing2, mc[j], 4, color, -1, 8, 0 );
/// Show in a window
namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
imshow( "Contours", drawing );
namedWindow( "Contours2", CV_WINDOW_AUTOSIZE );
imshow( "Contours_max", drawing2 );
/// Calculate the area with the moments 00 and compare with the result of the OpenCV function
printf("\t Info: Area and Contour Length \n");
for( int i = 0; i< contours.size(); i++ )
{
if( arcLength( contours[i], true ) > 900 )
{
printf(" * Contour[%d] - Area (M_00) = %.2f - Area OpenCV: %.2f - Length: %.2f \n", i, mu[i].m00, contourArea(contours[i]), arcLength( contours[i], true ) );
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
drawContours( drawing, contours, i, color, 2, 8, hierarchy, 0, Point() );
circle( drawing, mc[i], 4, color, -1, 8, 0 );
}
}
}
The problem is that contours sharing a common edge are detected as separate contours, although logically they should belong to the same cluster. This can be seen in the contour image I get: many contours with shared edges are treated as different contours. I want them to be part of the same boundary cluster. Please also suggest how to measure the length of the boundaries and the radius of gyration of the individual clusters.
Please help.
I am incredibly confused by your question (I would ask for clarification in a comment, but I am too much of a noob to comment).
My only advice, based on what I see and understand, is that you may not want to use a Canny filter. To be clear, your original image already consists of edges; running a Canny filter on it gives you "double edges", which I do not think you want. But again, I am not even sure what you are trying to achieve.
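If it helps, here is a minimal sketch of that alternative: threshold the image directly (Otsu) so each boundary yields one contour instead of Canny's double edge, then take the length from arcLength and the radius of gyration from the contour moments (about the centroid, Rg = sqrt((mu20 + mu02) / m00)). The threshold polarity and the 900-pixel length filter are assumptions carried over from your code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <cmath>
#include <cstdio>
#include <vector>
using namespace cv;
using namespace std;
int main( int argc, char** argv )
{
// Load as grayscale; assumes dark cluster boundaries on a light background.
Mat gray = imread( argv[1], 0 );
Mat bin;
threshold( gray, bin, 0, 255, THRESH_BINARY_INV | THRESH_OTSU );
vector<vector<Point> > contours;
findContours( bin, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE );
for( size_t i = 0; i < contours.size(); i++ )
{
double length = arcLength( contours[i], true );
if( length < 900 ) continue;                  // same size filter as the original code
Moments m = moments( contours[i], false );
if( m.m00 <= 0 ) continue;
// Radius of gyration of the filled cluster about its centroid.
double rg = sqrt( (m.mu20 + m.mu02) / m.m00 );
printf( "cluster %d: length %.2f, Rg %.2f\n", (int)i, length, rg );
}
return 0;
}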

Regarding a specific Object Detection in OpenCV using WebCam and comparing it with an input Image

I am new to OpenCV and want to develop a program that takes camera input and compares it with a known image of an object, supplied as a .jpg file. If the webcam input matches the fed-in image up to a certain level of accuracy, a message should be displayed saying that the required object has been found.
E.g.: if I hold a computer cable in front of the webcam, it needs to be detected and compared to the image of the computer cable I have fed into the program.
I've tried many techniques and find template matching to be effective, as mentioned in the following link:
Real-time template matching - OpenCV, C++
However, after drawing the rectangle and getting roiImg, I want to compare its likeness with a known image on my disk (in the OpenCV working directory). For this I am trying to convert roiImg and my other images to HSV format and get 4 values according to the histogram comparison methods.
I have tried to combine the two pieces of code, but it doesn't seem to work: roiImg is created at runtime and I cannot compare it with the other two images read using imread.
#include <iostream>
#include "opencv2/opencv.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <sstream>
using namespace cv;
using namespace std;
Point point1, point2; /* vertical points of the bounding box */
int drag = 0;
Rect rect; /* bounding box */
Mat img, roiImg; /* roiImg - the part of the image in the bounding box */
int select_flag = 0;
bool go_fast = false;
Mat mytemplate;
Mat src_base, hsv_base;
Mat src_test1, hsv_test1;
Mat src_test2, hsv_test2;
Mat hsv_half_down;
///------- template matching -----------------------------------------------------------------------------------------------
Mat TplMatch( Mat &img, Mat &mytemplate )
{
Mat result;
matchTemplate( img, mytemplate, result, CV_TM_SQDIFF_NORMED );
normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
return result;
}
///------- Localizing the best match with minMaxLoc ------------------------------------------------------------------------
Point minmax( Mat &result )
{
double minVal, maxVal;
Point minLoc, maxLoc, matchLoc;
minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
matchLoc = minLoc;
return matchLoc;
}
///------- tracking --------------------------------------------------------------------------------------------------------
void track()
{
if (select_flag)
{
//roiImg.copyTo(mytemplate);
// select_flag = false;
go_fast = true;
}
// imshow( "mytemplate", mytemplate ); waitKey(0);
Mat result = TplMatch( img, mytemplate );
Point match = minmax( result );
rectangle( img, match, Point( match.x + mytemplate.cols , match.y + mytemplate.rows ), CV_RGB(255, 255, 255), 0.5 );
std::cout << "match: " << match << endl;
/// latest match is the new template
Rect ROI = cv::Rect( match.x, match.y, mytemplate.cols, mytemplate.rows );
roiImg = img( ROI );
roiImg.copyTo(mytemplate);
imshow( "roiImg", roiImg ); //waitKey(0);
//Compare the roiImg with a know image to calculate resemblence
/*Method Base - Base Base - Half Base - Test 1 Base - Test 2
Correlation 1.000000 0.930766 0.182073 0.120447
Chi-square 0.000000 4.940466 21.184536 49.273437
Intersection 24.391548 14.959809 3.889029 5.775088
Bhattacharyya 0.000000 0.222609 0.646576 0.801869
For the Correlation and Intersection methods, the higher the metric, the more accurate the match. As we can see,
the match base-base is the highest of all as expected. Also we can observe that the match base-half is the second best match (as we predicted).
For the other two metrics, the less the result, the better the match. We can observe that the matches between the test 1 and test 2 with respect
to the base are worse, which again, was expected.)*/
src_base = imread("roiImg");
src_test1 = imread("Samarth.jpg");
src_test2 = imread("Samarth2.jpg");
//double l2_norm = cvNorm( src_base, src_test1 );
/// Convert to HSV
cvtColor( src_base, hsv_base, COLOR_BGR2HSV );
cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV );
cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV );
hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) );
/// Using 50 bins for hue and 60 for saturation
int h_bins = 50; int s_bins = 60;
int histSize[] = { h_bins, s_bins };
// hue varies from 0 to 179, saturation from 0 to 255
float h_ranges[] = { 0, 180 };
float s_ranges[] = { 0, 256 };
const float* ranges[] = { h_ranges, s_ranges };
// Use the 0-th and 1st channels
int channels[] = { 0, 1 };
/// Histograms
MatND hist_base;
MatND hist_half_down;
MatND hist_test1;
MatND hist_test2;
/// Calculate the histograms for the HSV images
calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false );
normalize( hist_base, hist_base, 0, 1, NORM_MINMAX, -1, Mat() );
calcHist( &hsv_half_down, 1, channels, Mat(), hist_half_down, 2, histSize, ranges, true, false );
normalize( hist_half_down, hist_half_down, 0, 1, NORM_MINMAX, -1, Mat() );
calcHist( &hsv_test1, 1, channels, Mat(), hist_test1, 2, histSize, ranges, true, false );
normalize( hist_test1, hist_test1, 0, 1, NORM_MINMAX, -1, Mat() );
calcHist( &hsv_test2, 1, channels, Mat(), hist_test2, 2, histSize, ranges, true, false );
normalize( hist_test2, hist_test2, 0, 1, NORM_MINMAX, -1, Mat() );
/// Apply the histogram comparison methods
for( int i = 0; i < 4; i++ )
{
int compare_method = i;
double base_base = compareHist( hist_base, hist_base, compare_method );
double base_half = compareHist( hist_base, hist_half_down, compare_method );
double base_test1 = compareHist( hist_base, hist_test1, compare_method );
double base_test2 = compareHist( hist_base, hist_test2, compare_method );
printf( " Method [%d] Perfect, Base-Half, Base-Test(1), Base-Test(2) : %f, %f, %f, %f \n", i, base_base, base_half , base_test1, base_test2 );
}
printf( "Done \n" );
}
///------- MouseCallback function ------------------------------------------------------------------------------------------
void mouseHandler(int event, int x, int y, int flags, void *param)
{
if (event == CV_EVENT_LBUTTONDOWN && !drag)
{
/// left button clicked. ROI selection begins
point1 = Point(x, y);
drag = 1;
}
if (event == CV_EVENT_MOUSEMOVE && drag)
{
/// mouse dragged. ROI being selected
Mat img1 = img.clone();
point2 = Point(x, y);
rectangle(img1, point1, point2, CV_RGB(255, 0, 0), 3, 8, 0);
imshow("image", img1);
}
if (event == CV_EVENT_LBUTTONUP && drag)
{
point2 = Point(x, y);
rect = Rect(point1.x, point1.y, x - point1.x, y - point1.y);
drag = 0;
roiImg = img(rect);
roiImg.copyTo(mytemplate);
// imshow("MOUSE roiImg", roiImg); waitKey(0);
}
if (event == CV_EVENT_LBUTTONUP)
{
/// ROI selected
select_flag = 1;
drag = 0;
}
}
///------- Main() ----------------------------------------------------------------------------------------------------------
int main()
{
int k;
///open webcam
VideoCapture cap(0);
if (!cap.isOpened())
return 1;
/* ///open video file
VideoCapture cap;
cap.open( "Wildlife.wmv" );
if ( !cap.isOpened() )
{ cout << "Unable to open video file" << endl; return -1; }*/
/*
/// Set video to 320x240
cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);*/
cap >> img;
GaussianBlur( img, img, Size(7,7), 3.0 );
imshow( "image", img );
while (1)
{
cap >> img;
if ( img.empty() )
break;
// Flip the frame horizontally and add blur
cv::flip( img, img, 1 );
GaussianBlur( img, img, Size(7,7), 3.0 );
if ( rect.width == 0 && rect.height == 0 )
cvSetMouseCallback( "image", mouseHandler, NULL );
else
track();
imshow("image", img);
// waitKey(100); k = waitKey(75);
k = waitKey(go_fast ? 30 : 10000);
if (k == 27)
break;
}
return 0;
}
If you want to detect an object in a live feed, detecting it in each frame is not efficient. You detect it once at the start; after that you track it, so the process involves both detection and tracking.
For detection you have to segment the object from the rest of the scene. OpenCV provides many algorithms for segmenting an object from the background based on color (color-based detection); other than color, you can use the object's shape (shape-based segmentation).
You can use the Lucas-Kanade optical flow algorithm as a starting point for tracking.
Additionally, you can use template matching, CAMShift, the median flow tracker, etc. to obtain quick results. Which of these works best depends on the scale changes of the object and the lighting changes in the feed. OpenCV has sample programs for the above algorithms.
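Separately, the immediate failure in the posted code: imread("roiImg") looks for a file on disk literally named "roiImg", which doesn't exist, so src_base comes back empty and the first cvtColor call aborts. The in-memory Mat can be used directly - a minimal sketch of the change inside track():
// Use the in-memory ROI instead of trying to read a file called "roiImg".
src_base = roiImg.clone();                     // deep copy of the tracked region
src_test1 = imread("Samarth.jpg");
src_test2 = imread("Samarth2.jpg");
if( src_base.empty() || src_test1.empty() || src_test2.empty() )
{
cout << "missing input image" << endl;
return;                                        // bail out instead of crashing in cvtColor
}
cvtColor( src_base, hsv_base, COLOR_BGR2HSV );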

cvFindContours always returns 1

I am using OpenCV 2.4.6 and Visual C++ 2010. I am trying to track the color red in the code below. I'm using cvInRangeS to extract the red object from the image as a binary image, and then with cvFindContours() I want to find the contours of the object. But cvFindContours() always returns 1. I am also using cvBoundingRect() to get the contour's position in the image, and the rectangle position I get is always (1,1,638,478), which means cvFindContours() is treating the whole image as a single contour. How can I get only the red objects as contours?
int main()
{
IplImage* mCVImageColor = cvCreateImageHeader(cvSize(640,480), IPL_DEPTH_8U, 3);
IplImage* imgTracking;
CvCapture* capture = cvCaptureFromCAM( CV_CAP_ANY );
namedWindow( "Display image", CV_WINDOW_AUTOSIZE );
namedWindow( "Display window", CV_WINDOW_AUTOSIZE );
namedWindow( "Display tracking", CV_WINDOW_AUTOSIZE );
namedWindow( "Display thresh", CV_WINDOW_AUTOSIZE );
CvRect rect;
CvMemStorage *storage = cvCreateMemStorage(0);
CvSeq *contours=0;
while(1)
{
IplImage* mCVImageColor = cvQueryFrame( capture );
cvSmooth(mCVImageColor, mCVImageColor, CV_GAUSSIAN,3,3);
imgTracking=cvCreateImage(cvGetSize(mCVImageColor),IPL_DEPTH_8U, 1);
IplImage* imgHSV = cvCreateImage(cvGetSize(mCVImageColor), IPL_DEPTH_8U, 3);
IplImage* imgThresh=cvCreateImage(cvGetSize(mCVImageColor),8, 1);
cvCvtColor(mCVImageColor, imgHSV, CV_BGR2HSV);
cvInRangeS(imgHSV, cvScalar(170,160,60), cvScalar(180,255,256), imgThresh);
cvAdd(imgThresh, imgTracking, imgTracking);
cvConvertScale(imgTracking, imgTracking, 1.0, 0.0);
int x = cvFindContours(imgTracking,storage,&contours,sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(2, 2));
printf("%d\n",contours->total);
for (; contours != 0; contours = contours->h_next)
{
rect = cvBoundingRect(contours); //extract bounding box for current contour
//drawing rectangle
cvRectangle(mCVImageColor,cvPoint(rect.x, rect.y),cvPoint(rect.x+rect.width, rect.y+rect.height),cvScalar(0, 0, 255),2, 8, 0);
printf("%d %d %d %d\n",rect.x, rect.y,rect.width,rect.height);
if(rect.width*rect.height<1400)
{
CvPoint centroid[1];
centroid[0].x = cvRound((rect.x+rect.width)/2);
centroid[0].y = cvRound((rect.y+rect.height)/2);
cvCircle( mCVImageColor, centroid[0], 5, CV_RGB(255, 0, 0),10, -1);
}
}
cvShowImage("Display tracking",imgTracking);
cvShowImage("Display window",imgHSV);
cvShowImage("Display image",mCVImageColor);
cvShowImage("Display thresh",imgThresh);
cvClearMemStorage( storage );
contours = 0;
//cvReleaseImage(&imgHSV);
//cvReleaseImage(&imgThresh);
int c = waitKey(20);
if(c=='q')
break;
}
}
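For what it's worth, a likely culprit in the code above: cvCreateImage does not zero-initialize its buffer, so cvAdd(imgThresh, imgTracking, imgTracking) adds the threshold result onto garbage and the whole frame saturates into one full-frame contour. Note also that contours->total is the number of points in the first contour; the number of contours found is the return value of cvFindContours. A sketch of the change inside the loop:
imgTracking = cvCreateImage(cvGetSize(mCVImageColor), IPL_DEPTH_8U, 1);
cvZero(imgTracking);                                // clear the buffer before accumulating
cvInRangeS(imgHSV, cvScalar(170,160,60), cvScalar(180,255,256), imgThresh);
cvAdd(imgThresh, imgTracking, imgTracking);         // now adds onto zeros, not garbage
int n = cvFindContours(imgTracking, storage, &contours, sizeof(CvContour),
CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0, 0));
printf("contours found: %d\n", n);                  // n, not contours->total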

Real-time face detection in OpenCV

I am trying to write some simple real-time face detection code, but somehow it doesn't work. (I tried the face detection code on a still image and it works, but with the code below I get a grey image on screen and the detection fails.)
Here is the code I have tried (it prints 'face detected!' once to the output window):
CvHaarClassifierCascade *cascade;
CvMemStorage *storage;
char *face_cascade="haarcascade_frontalface_alt2.xml";
CvRect* r;
const CvArr* img_size;
IplImage *grayscale;
void detectFacialFeatures( IplImage *img)
{
grayscale = cvCreateImage(cvGetSize(img), 8, 1);
cvCvtColor(img, grayscale, CV_BGR2GRAY);
CvMemStorage* storage=cvCreateMemStorage(0);
cvClearMemStorage( storage );
cvEqualizeHist(grayscale, grayscale);
cascade = ( CvHaarClassifierCascade* )cvLoad( face_cascade, 0, 0, 0 );
CvSeq* faces = cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize( 50, 50 ) );
if(faces)
{
printf("face detected!");
r = ( CvRect* )cvGetSeqElem( faces, 0 );
cvRectangle( img,cvPoint( r->x, r->y ),cvPoint( r->x + r->width, r->y + r->height ), CV_RGB( 255, 0, 0 ), 1, 8, 0 );
}
}
int _tmain(int argc, _TCHAR* argv[])
{
int c;
IplImage* color_img;
CvCapture* cv_cap = cvCreateCameraCapture(0);
cvSetCaptureProperty(cv_cap, CV_CAP_PROP_FRAME_WIDTH, 640);
cvSetCaptureProperty(cv_cap, CV_CAP_PROP_FRAME_HEIGHT, 480);
cvNamedWindow("Video",1); // create window
for(;;) {
color_img = cvQueryFrame(cv_cap); // get frame
if(color_img==0)
break;
cvFlip(color_img, 0, 1); //mirror image
detectFacialFeatures(color_img);
cvShowImage("Video", color_img); // show frame
c = cvWaitKey(10); // wait 10 ms or for key stroke
if(c == 27)
break; // if ESC, break and quit
}
/* clean up */
cvReleaseCapture( &cv_cap );
cvDestroyWindow("Video");
}
Try without calling cvFlip and cvEqualizeHist.
Look at the result of each operation (just cvShowImage it) - cvFlip, cvCvtColor, cvEqualizeHist - it's possible that the result of one of these operations is the grey image you see.
Also, you don't have to load the Haar classifier each time you try to find a face - load it once at the beginning. Operations on files are slow, so this should make your code faster.
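A minimal sketch of that restructuring, reusing the globals from the question (cvLoad and cvCreateMemStorage move out of detectFacialFeatures, which then only calls cvClearMemStorage):
int _tmain(int argc, _TCHAR* argv[])
{
// Load the classifier and allocate the storage once, before the capture loop.
cascade = (CvHaarClassifierCascade*)cvLoad( face_cascade, 0, 0, 0 );
storage = cvCreateMemStorage(0);
if( !cascade || !storage )
return -1;
CvCapture* cv_cap = cvCreateCameraCapture(0);
cvNamedWindow("Video", 1);
for(;;) {
IplImage* color_img = cvQueryFrame(cv_cap);
if( !color_img )
break;
cvClearMemStorage( storage );          // reuse the storage instead of reallocating
detectFacialFeatures(color_img);       // with its cvLoad/cvCreateMemStorage lines removed
cvShowImage("Video", color_img);
if( cvWaitKey(10) == 27 )
break;
}
cvReleaseCapture(&cv_cap);
cvDestroyWindow("Video");
return 0;
}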

Color detection on HoughCircles using OpenCV

I have detected 22 balls but am struggling to find a way to run a color detection algorithm on these circles to get their colors. I am using HoughCircles to detect the circles, but how do I check what color each detected circle is?
Source Code:
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
#include <math.h>
int main(int argc, char** argv)
{
//load image from directory
IplImage* img = cvLoadImage("C:\\Users\\Nathan\\Desktop\\SnookerPic.png");
IplImage* gray = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
CvMemStorage* storage = cvCreateMemStorage(0);
//covert to grayscale
cvCvtColor(img, gray, CV_BGR2GRAY);
// This is done so as to prevent a lot of false circles from being detected
cvSmooth(gray, gray, CV_GAUSSIAN, 7, 7);
IplImage* canny = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,1);
IplImage* rgbcanny = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,3);
cvCanny(gray, canny, 50, 100, 3);
//detect circles
CvSeq* circles = cvHoughCircles(gray, storage, CV_HOUGH_GRADIENT, 1, 35.0, 75, 60,0,0);
cvCvtColor(canny, rgbcanny, CV_GRAY2BGR);
//draw all detected circles
for (int i = 0; i < circles->total; i++)
{
// round the floats to an int
float* p = (float*)cvGetSeqElem(circles, i);
cv::Point center(cvRound(p[0]), cvRound(p[1]));
int radius = cvRound(p[2]);
cvScalar c = cvGet2D(center.x, center.y);//colour of circle
// draw the circle center
cvCircle(img, center, 3, CV_RGB(0,255,0), -1, 8, 0 );
// draw the circle outline
cvCircle(img, center, radius+1, CV_RGB(0,0,255), 2, 8, 0 );
//display coordinates
printf("x: %d y: %d r: %d\n",center.x,center.y, radius);
}
//create window
cvNamedWindow("circles", 1);
cvNamedWindow("SnookerImage", 1);
//show image in window
cvShowImage("circles", rgbcanny);
cvShowImage("SnookerImage", img);
cvSaveImage("out.png", rgbcanny);
cvWaitKey(0);
return 0;
}
If the balls each have a uniform color, you can check the color at the center:
CvMemStorage* storage = cvCreateMemStorage(0);
cvSmooth(image, image, CV_GAUSSIAN, 5, 5 );
CvSeq* results = cvHoughCircles(
image,
storage,
CV_HOUGH_GRADIENT,
2,
image->width/10
);
for( int i = 0; i < results->total; i++ )
{
float* p = (float*) cvGetSeqElem( results, i );
CvPoint center = cvPoint( cvRound( p[0] ), cvRound( p[1] ) );
CvScalar c = cvGet2D(image, center.y, center.x); // color of the center (cvGet2D takes row, col order)
}
Haven't tested the code but it should be OK.
EDIT:
Oops, I forgot one parameter of the Get2D method - the actual image from which to get the color. Changed to the correct form (note that cvGet2D takes row, column indices, i.e. y before x).
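If a single pixel turns out to be noisy (specular highlights on the balls, for example), averaging a small patch around the center is a cheap alternative - a sketch, assuming the 5x5 patch stays inside the image:
// Average a 5x5 patch around the center instead of reading one pixel.
cvSetImageROI( image, cvRect(center.x - 2, center.y - 2, 5, 5) );
CvScalar c = cvAvg( image, NULL );   // mean BGR over the patch
cvResetImageROI( image );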
We have written our own blob detection library as part of the open source vision framework SimpleCV:
http://www.simplecv.org
The code to do what you want is as easy as:
img = Image("/path/to/image.png")
blobs = img.findBlobs()
circle_blobs = blobs.filter(blobs.isCircle() == True)
list_of_blobs_colors = circle_blobs.meanColor()
