Detecting/Fitting Circles using Hough Transform in OpenCV 2.4.6 - opencv

The objective is to detect the 5 white circles in the image. The test image in which the circles have to be detected is the one shown here (640x480).
Please download the original image here (1280x1024).
I am using different methods to carry out an evaluation of various circle/ellipse detection methods, but somehow I am not able to fix my simple Hough transform code: it does not detect any circles. I am not clear whether the problem is with the pre-processing step or with the parameters of HoughCircles. I have gone through all the similar questions in the forum, but am still not able to fix the issue. This is my code. Please help me in this regard.
Header file
#ifndef IMGPROCESSOR_H
#define IMGPROCESSOR_H
// OpenCV Library
#include <opencv2\opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
class ImgProcessor{
public:
Mat OpImg ;
ImgProcessor();
~ImgProcessor();
//aquire filter methods to image
int Do_Hough(Mat IpImg);
};
#endif /* ImgProcessor_H */
Source file
#include "ImgProcessor.h"
#include <opencv2\opencv.hpp>
#include "opencv2\imgproc\imgproc.hpp"
#include "opencv2\imgproc\imgproc_c.h"
#include <vector>
using namespace cv;
ImgProcessor::ImgProcessor(){
return;
}
ImgProcessor::~ImgProcessor(){
return;
}
//Apply filtering for the input image
int ImgProcessor::Do_Hough(Mat IpImg)
{
//Parameter Initialization________________________________________________________
double sigma_x, sigma_y, thresh=250, max_thresh = 255;
int ksize_w = 5 ;
int ksize_h = 5;
sigma_x = 0.3*((ksize_w-1)*0.5 - 1) + 0.8 ;
sigma_y = 0.3*((ksize_h-1)*0.5 - 1) + 0.8 ;
vector<Vec3f> circles;
//Read the image as a matrix
Mat TempImg;
//resize(IpImg, IpImg ,Size(), 0.5,0.5, INTER_AREA);
//Preprocessing__________________________________________________________
//Perform initial smoothing
GaussianBlur( IpImg, TempImg, Size(ksize_w, ksize_h),2,2);
//perform thresholding
threshold(TempImg,TempImg, thresh,thresh, 0);
//Remove noise by gaussian smoothing
GaussianBlur( TempImg, TempImg, Size(ksize_w, ksize_h),2,2);
/*imshow("Noisefree Image", TempImg);
waitKey(10000);*/
//Obtain edges
Canny(TempImg, TempImg, 255,240 , 3);
imshow("See Edges", TempImg);
waitKey(10000);
//Increase the line thickness
//dilate(TempImg,TempImg,0,Point(-1,-1),3);
//Hough Circle Method______________________________________________________________
// Apply the Hough Transform to find the circles
HoughCircles( TempImg, circles, 3, 1, TempImg.rows/32, 255, 240, 5, 0 );
// Draw the circles detected
for( size_t i = 0; i < circles.size(); i++ )
{
Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
int radius = cvRound(circles[i][2]);
// circle center
circle( IpImg, center, 3, Scalar(0,255,0), -1, 8, 0 );
// circle outline
circle( IpImg, center, radius, Scalar(0,0,255), 3, 8, 0 );
}
// Show your results
namedWindow( "Hough Circle Transform", WINDOW_AUTOSIZE );
imshow( "Hough Circle Transform", IpImg );
// waitKey(0);
return 0;
}
int main(int argc, char** argv)
{
ImgProcessor Iclass;
//char* imageName = argv[1];
string imageName = "D:/Projects/test_2707/test_2707/1.bmp";
Mat IpImg = imread( imageName );
cvtColor(IpImg, IpImg,6,CV_8UC1);
Iclass.Do_Hough(IpImg);
/*Iclass.Do_Contours(IpImg);*/
return 0;
}

The code seems fine, except for:
HoughCircles( TempImg, circles, 3, 1, TempImg.rows/32, 255, 240, 5, 0 );
Does the number 3 in the parameter list correspond to CV_HOUGH_GRADIENT? It is always better to use the named constants instead of magic numbers.
Maybe you should test it first with an image with bigger circles. Once you are sure that the rest of the code is correct, you can tune the parameters of HoughCircles.
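For reference, here is a minimal sketch of what that call could look like with the named constant; the dp, minDist, param1/param2 and radius values below are assumed starting points, not tuned values for the posted image. Note also that with CV_HOUGH_GRADIENT the function runs its own Canny edge detection internally (param1 is its upper threshold), so the smoothed grayscale image is usually passed in rather than a pre-computed edge map.
// Sketch only: CV_HOUGH_GRADIENT is the named constant for method 3 in OpenCV 2.4.
// All numeric parameters here are assumptions that would need tuning.
// TempImg would ideally be the blurred grayscale image (before the Canny step).
HoughCircles( TempImg, circles, CV_HOUGH_GRADIENT,
              1,                 // dp: accumulator has the same resolution as the image
              TempImg.rows / 8,  // minDist between detected circle centres
              200,               // param1: upper threshold of the internal Canny
              20,                // param2: accumulator threshold (lower finds more circles)
              10, 100 );         // assumed min/max radius in pixels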

Related

How can I draw a boundary across a particular colour in OpenCV?

Suppose I have an image. I basically want to draw a boundary across a particular colour that I want. I know the HSV minimum and maximum scalar values of that colour, but I don't know how to proceed further.
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include<stdio.h>
#include<opencv2/opencv.hpp>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
VideoCapture cap(0);
while(true)
{
Mat img;
cap.read(img);
Mat dst;
Mat imghsv;
cvtColor(img, imghsv, COLOR_BGR2HSV);
inRange(imghsv,
Scalar(0, 30, 0),
Scalar(20, 150, 255),
dst
);
imshow("name",dst);
if (waitKey(30) == 27) //wait for 'esc' key press for 30ms
{
cout << "esc key is pressed by user" << endl;
break;
}
}
}
The inRange function works well, but I am not able to draw a boundary across whatever is white (I mean whichever pixels are in the specified range).
You need to first segment the color, and then find the contours of the segmented image.
SEGMENT THE COLOR
Working in HSV is in general a good idea for segmenting colors. Once you have the correct lower and upper boundaries, you can easily segment the color.
A simple approach is to use inRange.
You can find how to use it here for example.
FIND BOUNDARIES
Once you have the binary mask (obtained through segmentation), you can find its boundaries using findContours. You can refer to this or this to know how to use findContours to detect the boundary, and drawContours to draw it.
UPDATE
Here is a working example of how to draw a contour on segmented objects.
I used some morphology to clean the mask, and changed the tracked color to blue, but you can use your favorite color.
#include<opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main(int argc, char** argv)
{
VideoCapture cap(0);
while (true)
{
Mat img;
cap.read(img);
Mat dst;
Mat imghsv;
cvtColor(img, imghsv, COLOR_BGR2HSV);
inRange(imghsv, Scalar(110, 100, 100), Scalar(130, 255, 255), dst); // Detect blue objects
// Remove some noise using morphological operators
Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(7,7));
morphologyEx(dst, dst, MORPH_OPEN, kernel);
// Find contours
vector<vector<Point>> contours;
findContours(dst.clone(), contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Draw all contours (green)
// This
drawContours(img, contours, -1, Scalar(0,255,0));
// If you want to draw a contour for a particular one, say the biggest...
// Find the biggest object
if (!contours.empty())
{
int idx_biggest = 0;
int val_biggest = contours[0].size();
for (int i = 0; i < contours.size(); ++i)
{
if (val_biggest < contours[i].size())
{
val_biggest = contours[i].size();
idx_biggest = i;
}
}
// Draw a single contour (blue)
drawContours(img, contours, idx_biggest, Scalar(255,0,0));
// You want also the rotated rectangle (blue) ?
RotatedRect r = minAreaRect(contours[idx_biggest]);
Point2f pts[4];
r.points(pts);
for (int j = 0; j < 4; ++j)
{
line(img, pts[j], pts[(j + 1) % 4], Scalar(0, 0, 255), 2);
}
}
imshow("name", dst);
imshow("image", img);
if (waitKey(30) == 27) //wait for 'esc' key press for 30ms
{
cout << "esc key is pressed by user" << endl;
break;
}
}
}
If you want a particular hue to be detected then you can create a mask to select only the particular color from your original image.
On the hue channel (img):
cv::Mat mask = cv::Mat::zeros(img.size(),CV_8UC1);
for(int i=0;i<img.rows;i++){
for(int j=0;j<img.cols;j++){
if(img.at<uchar>(i,j)==(uchar)specific_hue){
mask.at<uchar>(i,j)=(uchar)255;
}
}
}
color_img.copyTo(masked_image, mask);
If you want something less rigorous, you can define a range around the color to allow more of the image to pass through the mask.
cv::Mat mask = cv::Mat::zeros(img.size(),CV_8UC1);
int threshold = 5;
for(int i=0;i<img.rows;i++){
for(int j=0;j<img.cols;j++){
if((img.at<uchar>(i,j)>(uchar)(specific_hue - threshold)) && (img.at<uchar>(i,j)<(uchar)(specific_hue + threshold))){
mask.at<uchar>(i,j)=(uchar)255;
}
}
}
color_img.copyTo(masked_image, mask);
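As a side note, the same range mask can usually be built in a single call with inRange instead of the explicit loops; a minimal sketch, assuming img is the single-channel hue plane and specific_hue/threshold are defined as above:
// Sketch: equivalent mask without the per-pixel loops.
// Assumes img is the CV_8UC1 hue channel and specific_hue, threshold are as above.
cv::Mat mask;
cv::inRange(img,
            cv::Scalar(specific_hue - threshold),
            cv::Scalar(specific_hue + threshold),
            mask);                      // 255 where the hue falls inside the range
cv::Mat masked_image;
color_img.copyTo(masked_image, mask);   // keep only the selected pixels
Keep in mind that the 8-bit hue channel wraps around at 180 in OpenCV, so colours near the wrap point (e.g. red) may need two ranges combined with bitwise_or.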

How to get better results with OpenCV face recognition Module

I'm trying to use OpenCV's face recognition module to recognize 2 subjects from a video. I cropped 30 face images of the first subject and 20 face images of the second subject from the video and I use these as my training set.
I've tested all three approaches (Eigenfaces, Fisherfaces and LBP histograms), but I'm not getting good results with any of them. Sometimes the first subject is classified as the second subject and vice versa, sometimes false detections are classified as one of the two subjects, and sometimes other people in the video are classified as one of the two subjects.
How can I improve performance? Would enlarging the training set help improve the results? Are there any other packages I can consider that perform face recognition in C++? I think it should be an easy task, as I'm trying to recognize only two different subjects.
Here is my code (I'm using OpenCV 2.4.7 on Windows 8 with VS2012):
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/contrib/contrib.hpp"
#include <iostream>
#include <stdio.h>
#include <fstream>
#include <sstream>
#define EIGEN 0
#define FISHER 0
#define LBPH 1
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay( Mat frame , int i,Ptr<FaceRecognizer> model);
static Mat toGrayscale(InputArray _src) {
Mat src = _src.getMat();
// only allow one channel
if(src.channels() != 1) {
CV_Error(CV_StsBadArg, "Only Matrices with one channel are supported");
}
// create and return normalized image
Mat dst;
cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
return dst;
}
static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
std::ifstream file(filename.c_str(), ifstream::in);
if (!file) {
string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
}
string line, path, classlabel;
while (getline(file, line)) {
stringstream liness(line);
getline(liness, path, separator);
getline(liness, classlabel);
if(!path.empty() && !classlabel.empty()) {
images.push_back(imread(path, 0));
labels.push_back(atoi(classlabel.c_str()));
}
}
}
/** Global variables */
String face_cascade_name = "C:\\OIM\\code\\OIM2 - face detection\\Debug\\haarcascade_frontalface_alt.xml";
//String face_cascade_name = "C:\\OIM\\code\\OIM2 - face detection\\Debug\\NewCascade.xml";
//String face_cascade_name = "C:\\OIM\\code\\OIM2 - face detection\\Debug\\haarcascade_eye_tree_eyeglasses.xml";
String eyes_cascade_name = "C:\\OIM\\code\\OIM2 - face detection\\Debug\\haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
/** #function main */
int main( int argc, const char** argv )
{
string fn_csv = "C:\\OIM\\faces_org.csv";
// These vectors hold the images and corresponding labels.
vector<Mat> images;
vector<int> labels;
// Read in the data. This can fail if no valid
// input filename is given.
try {
read_csv(fn_csv, images, labels);
} catch (cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);
}
// Quit if there are not enough images for this demo.
if(images.size() <= 1) {
string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
CV_Error(CV_StsError, error_message);
}
// Get the height from the first image. We'll need this
// later in code to reshape the images to their original
// size:
int height = images[0].rows;
// The following lines create an Eigenfaces model for
// face recognition and train it with the images and
// labels read from the given CSV file.
// This here is a full PCA, if you just want to keep
// 10 principal components (read Eigenfaces), then call
// the factory method like this:
//
// cv::createEigenFaceRecognizer(10);
//
// If you want to create a FaceRecognizer with a
// confidence threshold, call it with:
//
// cv::createEigenFaceRecognizer(10, 123.0);
//
//Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
#if EIGEN
Ptr<FaceRecognizer> model = createEigenFaceRecognizer(10,2000000000);
#elif FISHER
Ptr<FaceRecognizer> model = createFisherFaceRecognizer(0, 200000000);
#elif LBPH
Ptr<FaceRecognizer> model =createLBPHFaceRecognizer(1,8,8,8,200000000);
#endif
model->train(images, labels);
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
// Get the frame rate
bool stop(false);
int count=1;
char filename[512];
for (int i=1;i<=517;i++){
sprintf(filename,"C:\\OIM\\original_frames2\\image%d.jpg",i);
Mat frame=imread(filename);
detectAndDisplay(frame,i,model);
waitKey(0);
}
return 0;
}
/** #function detectAndDisplay */
void detectAndDisplay( Mat frame ,int i, Ptr<FaceRecognizer> model)
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
//face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 1, 0|CV_HAAR_SCALE_IMAGE, Size(10, 10) );
for( size_t i = 0; i < faces.size(); i++ )
{
Rect roi = Rect(faces[i].x,faces[i].y,faces[i].width,faces[i].height);
Mat face=frame_gray(roi);
resize(face,face,Size(200,200));
int predictedLabel = -1;
double confidence = 0.0;
model->predict(face, predictedLabel, confidence);
//imshow("gil",face);
//waitKey(0);
#if EIGEN
int M=10000;
#elif FISHER
int M=500;
#elif LBPH
int M=300;
#endif
Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
if ((predictedLabel==1)&& (confidence<M))
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 0, 0, 255 ), 4, 8, 0 );
if ((predictedLabel==0)&& (confidence<M))
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 0), 4, 8, 0 );
if (confidence>M)
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 0, 255, 0), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t j = 0; j < eyes.size(); j++ )
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
//circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
//imshow( window_name, frame );
char filename[512];
sprintf(filename,"C:\\OIM\\FaceRecognitionResults\\image%d.jpg",i);
imwrite(filename,frame);
}
Thanks in advance,
Gil.
First, as commented, increase the number of samples if possible, and include the variations (illumination, slight pose changes, etc.) you expect to see in the video. However, especially for Eigenfaces/Fisherfaces, simply adding many more images will not necessarily improve performance; sadly, the best number of training samples depends on your data.
The more important point is that the difficulty of the problem depends entirely on your video. If your video contains variations in illumination and pose, then you can't expect purely appearance-based methods (e.g. Eigenfaces) or texture descriptors (LBP) to be successful. First, you might want to detect faces. Then:
You might want to estimate the face pose and warp it to frontal; look into Active Appearance Models and Active Shape Models.
Use histogram equalization to attenuate illumination problems.
Fitting an ellipse to the detected face region will help against background noise.
Of course, there are many other methods available in the literature; the steps described above are implemented in OpenCV and commonly known.
Hope it helps.
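As an illustration of the last two suggestions, here is a minimal sketch of how the 200x200 face crop from the question could be normalized before calling model->predict; the helper name preprocessFace, the equalization step and the elliptical mask size are my own assumptions, not part of the original code:
// Sketch only (hypothetical helper): normalize a detected face crop before recognition.
// 'face' is assumed to be the 200x200 CV_8UC1 crop used in the question's code.
Mat preprocessFace(const Mat& face)
{
    Mat eq;
    equalizeHist(face, eq);                          // attenuate illumination differences
    // Suppress background corners with an elliptical mask (assumed size).
    Mat mask = Mat::zeros(eq.size(), CV_8UC1);
    ellipse(mask, Point(eq.cols / 2, eq.rows / 2),
            Size(cvRound(eq.cols * 0.4), cvRound(eq.rows * 0.5)),
            0, 0, 360, Scalar(255), -1);
    Mat out = Mat::zeros(eq.size(), eq.type());
    eq.copyTo(out, mask);
    return out;
}
The same transform would have to be applied consistently to the training images and to every face passed to predict, otherwise the preprocessing hurts more than it helps.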

Finding the count of metal spheres in an image

I need to count the number of metal balls inside a small metal cup.
I tried template matching, but it showed only the single result with the highest probability.
But I need the count of all the metal balls visible.
Since the background is also metallic, I was unable to do color thresholding.
I tried finding the first occurrence using template matching, filling that area with RGB(0,0,0), and then running template matching again on that image, but several false detections occur.
My primary requirement is to find the images that have three balls inside the cup; any quantity other than three should not be detected.
Please see the images of different quantities filled inside the cup.
Use Hough circles - see the OpenCV documentation for how to do this. Then just count the circles that are within some empirically determined radius range.
Here are some results and code that will enable you to do what you want:
#include <iostream> // std::cout
#include <algorithm> // std::sort
#include <vector> // std::vector
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
using namespace std;
using namespace cv;
bool circle_compare (Vec3f i,Vec3f j) { return (i[2]>j[2]); }
int main(int argc, char** argv)
{
/// Read the image
Mat one = imread("one.jpg", 1 );
Mat two = imread("two.jpg", 1 );
Mat three = imread("three.jpg", 1 );
Mat four = imread("four.jpg", 1 );
if(!one.data || !two.data || !three.data || !four.data)
{
return -1;
}
// put all the images into one
Mat src(one.rows * 2, one.cols * 2, one.type());
Rect roi1(0, 0, one.cols, one.rows);
one.copyTo(src(roi1));
Rect roi2(one.cols, 0, one.cols, one.rows);
two.copyTo(src(roi2));
Rect roi3(0, one.rows, one.cols, one.rows);
three.copyTo(src(roi3));
Rect roi4(one.cols, one.rows, one.cols, one.rows);
four.copyTo(src(roi4));
// extract the blue channel because the circles show up better there
vector<cv::Mat> channels;
cv::split(src, channels);
cv::Mat blue;
GaussianBlur( channels[0], blue, Size(7, 7), 4, 4 );
vector<Vec3f> circles;
vector<Vec3f> candidate_circles;
/// Find the circles
HoughCircles( blue, candidate_circles, CV_HOUGH_GRADIENT, 1, 1, 30, 55);//, 0, 200 );
// sort candidate cirles by size, largest first
// so the accepted circles are the largest that meet other criteria
std::sort (candidate_circles.begin(), candidate_circles.end(), circle_compare);
/// Draw the circles detected
for( size_t i = 0; i < candidate_circles.size(); ++i )
{
Point center(cvRound(candidate_circles[i][0]), cvRound(candidate_circles[i][1]));
int radius = cvRound(candidate_circles[i][2]);
// skip over big circles
if(radius > 35)
continue;
// test whether centre of candidate_circle is inside of accepted circle
bool inside = false;
for( size_t j = 0; j < circles.size(); ++j )
{
Point c(cvRound(circles[j][0]), cvRound(circles[j][1]));
int r = cvRound(circles[j][2]);
int d = sqrt((center.x - c.x) * (center.x - c.x) + (center.y - c.y) * (center.y - c.y));
if(d <= r)
{
inside = true; // candidate is inside an existing circle
}
}
if(inside)
continue;
// accept the current candidate circle then draw it
circles.push_back(candidate_circles[i]);
circle( src, center, 3, Scalar(0,255,0), -1, 8, 0 );
circle( src, center, radius, Scalar(0,0,255), 3, 8, 0 );
}
// now fill the circles in the quadrant that has three balls
vector<Vec3f> tl, tr, bl, br;
for( size_t i = 0; i < circles.size(); ++i )
{
Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
int radius = cvRound(circles[i][2]);
if(center.x < one.cols)
{
if(center.y < one.rows)
{
tl.push_back(circles[i]);
}
else
{
bl.push_back(circles[i]);
}
}
else
{
if(center.y < one.rows)
{
tr.push_back(circles[i]);
}
else
{
br.push_back(circles[i]);
}
}
vector<vector<Vec3f>> all;
all.push_back(tl);
all.push_back(tr);
all.push_back(bl);
all.push_back(br);
for( size_t k = 0; k < all.size(); ++k )
{
if(all[k].size() == 3)
{
for( size_t i = 0; i < all[k].size(); ++i )
{
Point center(cvRound(all[k][i][0]), cvRound(all[k][i][1]));
int radius = cvRound(all[k][i][2]);
circle( src, center, radius, Scalar(0,255, 255), -1, 4, 0 );
}
}
}
}
// resize for easier display
resize(src, src, one.size());
/// Save results and display them
imwrite("balls.png", src);
//namedWindow( "Balls", CV_WINDOW_AUTOSIZE );
imshow( "Balls", src );
waitKey(0);
return 0;
}
Maybe you can try the template matching algorithm, but with a twist: don't look for circles (balls), but for the small triangle formed at the center of the 3 balls.
You have to take into account the rotation of the triangle, but simple contour processing should do the job.
define a ROI in the center of the image (the center of the cup)
run some edge detector and contour detection
simplify every suitable contour found
check if a found contour has 3 corners with angles sharp enough to form a triangle (see the sketch below)
To distinguish the case with more than 3 balls, also check the overall intensity of the image. A photo with only 3 balls should have quite low intensity compared to one with more balls.
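A minimal sketch of the middle steps, assuming roi is a grayscale crop of the cup centre; the Canny thresholds, the simplification tolerance and the minimum area are assumptions:
// Sketch: look for a roughly triangular contour in the cup centre.
// 'roi' is assumed to be a CV_8UC1 crop of the centre of the cup.
Mat edges;
Canny(roi, edges, 50, 150);                                // assumed thresholds
vector<vector<Point>> contours;
findContours(edges, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
bool triangle_found = false;
for (size_t i = 0; i < contours.size(); ++i)
{
    vector<Point> approx;
    double eps = 0.04 * arcLength(contours[i], true);      // simplification tolerance
    approxPolyDP(contours[i], approx, eps, true);
    if (approx.size() == 3 && contourArea(approx) > 20.0)  // 3 corners and not tiny
        triangle_found = true;
}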
EDIT:
2013-11-08 6.15PM GMT
For this kind of image, it might actually be helpful to use the watershed segmentation algorithm.
This algorithm is part of OpenCV; I don't know which version was the first to include it, but it seems it's in OpenCV 3.0.0: http://docs.opencv.org/trunk/modules/imgproc/doc/miscellaneous_transformations.html?highlight=watershed#cv2.watershed
Some basics about watershed on Wikipedia: http://en.wikipedia.org/wiki/Watershed_%28image_processing%29
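For completeness, here is a rough sketch of the usual marker-based watershed recipe for separating touching round objects; the binary mask bin, the colour image src, and the 0.6 peak threshold are all assumptions:
// Sketch: split touching blobs with a distance transform + watershed.
// 'bin' is assumed to be a CV_8UC1 binary mask of the balls, 'src' the colour image.
Mat dist;
distanceTransform(bin, dist, CV_DIST_L2, 3);
normalize(dist, dist, 0, 1.0, NORM_MINMAX);
Mat peaks;
threshold(dist, peaks, 0.6, 1.0, THRESH_BINARY);   // keep only the blob centres
peaks.convertTo(peaks, CV_8UC1, 255);
vector<vector<Point>> contours;
findContours(peaks.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
Mat markers = Mat::zeros(bin.size(), CV_32SC1);
for (size_t i = 0; i < contours.size(); ++i)
    drawContours(markers, contours, (int)i, Scalar((int)i + 1), -1);  // one label per seed
watershed(src, markers);                           // each ball region gets its own label
// contours.size() then gives the ball count under these assumptions.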

find shape and color circle

I have detected all the red contours and am struggling to find a way to run a shape detection algorithm on these contours to get just the red circles, but I don't know how to extract only the red circles and eliminate the rest of the undesirable contours. Source code:
#include "stdafx.h"
#include"math.h"
#include"conio.h"
#include"cv.h"
#include"highgui.h"
#include"stdio.h"
#include <math.h>
int main()
{
int i,j,k;
int h,w,seuill,channels;
int seuilr, channelsr;
int temp=0;
uchar *data,*datar;
i=j=k=0;
IplImage *frame=cvLoadImage("Mon_image.jpg",1);
IplImage *result=cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );
IplImage *gray=cvCreateImage( cvGetSize(frame), IPL_DEPTH_8U, 1 );
cvCvtColor(frame, result, CV_BGR2GRAY );
//IplImage* gray;
cvNamedWindow("original",CV_WINDOW_AUTOSIZE);
cvNamedWindow("Result",CV_WINDOW_AUTOSIZE);
h = frame->height;
w = frame->width;
seuill =frame->widthStep;
channels = frame->nChannels;
data = (uchar *)frame->imageData;
seuilr=result->widthStep;
channelsr=result->nChannels;
datar = (uchar *)result->imageData;
for(i=0;i < (h);i++)
for(j=0;j <(w);j++)
{
if(((data[i*seuill+j*channels+2]) >(19+data[i*seuill+j*channels]))&& ((data[i*seuill+j*channels+2]) > (19+data[i*seuill+j*channels+1])))
datar[i*seuilr+j*channelsr]=255;
else
datar[i*seuilr+j*channelsr]=0;
}
cvCanny(result,result, 50, 100, 3);
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* circles = cvHoughCircles(result, storage, CV_HOUGH_GRADIENT, 1, 40.0, 100, 100,0,0);
cvShowImage("original",frame);
cvShowImage("Result",result);
cvSaveImage("result.jpg",result);
cvWaitKey(0);
cvDestroyWindow("original");
cvDestroyWindow("Result");
return 0;
}
I would rather use the RANSAC algorithm to detect circles in your set of contours, but the Hough transform will also do the job.
See here for an explanation of both approaches. A solution in Matlab is given.
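OpenCV 2.4 has no built-in RANSAC circle fitter, so purely as an illustration of the idea, here is a minimal sketch that fits a circle to the points of one contour; the helper names circleFrom3/ransacCircle, the iteration count and the inlier tolerance are my own assumptions:
// Sketch only (hypothetical helpers): RANSAC circle fit on a single contour.
#include <opencv2/core/core.hpp>
#include <cstdlib>   // rand
#include <cmath>     // fabs, sqrt
#include <vector>
using namespace cv;
using namespace std;
// Circle through 3 points via the circumcentre formula.
static bool circleFrom3(Point2f a, Point2f b, Point2f c, Point2f& ctr, float& r)
{
    float d = 2.f * (a.x*(b.y-c.y) + b.x*(c.y-a.y) + c.x*(a.y-b.y));
    if (fabs(d) < 1e-6f) return false;   // collinear points, no circle
    float ux = ((a.x*a.x+a.y*a.y)*(b.y-c.y) + (b.x*b.x+b.y*b.y)*(c.y-a.y) + (c.x*c.x+c.y*c.y)*(a.y-b.y)) / d;
    float uy = ((a.x*a.x+a.y*a.y)*(c.x-b.x) + (b.x*b.x+b.y*b.y)*(a.x-c.x) + (c.x*c.x+c.y*c.y)*(b.x-a.x)) / d;
    ctr = Point2f(ux, uy);
    r = sqrt((a.x-ux)*(a.x-ux) + (a.y-uy)*(a.y-uy));
    return true;
}
// Returns the number of inliers of the best circle found (assumed parameters).
static int ransacCircle(const vector<Point>& pts, Point2f& bestCtr, float& bestR,
                        int iters = 200, float tol = 2.f)
{
    int bestInliers = 0;
    if (pts.size() < 3) return 0;
    for (int it = 0; it < iters; ++it)
    {
        Point2f c; float r;
        Point2f p1 = pts[rand() % pts.size()];
        Point2f p2 = pts[rand() % pts.size()];
        Point2f p3 = pts[rand() % pts.size()];
        if (!circleFrom3(p1, p2, p3, c, r)) continue;
        int inliers = 0;
        for (size_t k = 0; k < pts.size(); ++k)
        {
            float dx = pts[k].x - c.x, dy = pts[k].y - c.y;
            if (fabs(sqrt(dx*dx + dy*dy) - r) < tol) ++inliers;
        }
        if (inliers > bestInliers) { bestInliers = inliers; bestCtr = c; bestR = r; }
    }
    return bestInliers;
}
Running this over each red contour and keeping only the contours where most of the points are inliers (say above 80%) would leave only the roughly circular red blobs.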

Not detecting multiple circles in Image

My code is straightforward. I am trying to detect 22 balls but I am only getting a few. I think it has something to do with the CvSeq* circles = cvHoughCircles line. Can anyone help me, please? Thank you!
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
#include <math.h>
int main(int argc, char** argv)
{
IplImage* img = cvLoadImage("C:\\Users\\Nathan\\Desktop\\SnookerPic.png");
IplImage* gray = cvCreateImage
(cvGetSize(img), IPL_DEPTH_8U, 1);
CvMemStorage* storage = cvCreateMemStorage(0);
cvCvtColor(img, gray, CV_BGR2GRAY);
// This is done so as to prevent a lot of false circles from being detected
cvSmooth(gray, gray, CV_GAUSSIAN, 7, 7);
IplImage* canny = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,1);
IplImage* rgbcanny = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,3);
cvCanny(gray, canny, 50, 100, 3);
CvSeq* circles = cvHoughCircles(gray, storage, CV_HOUGH_GRADIENT, 1, 40.0, 100, 100,0,0);
cvCvtColor(canny, rgbcanny, CV_GRAY2BGR);
for (int i = 0; i < circles->total; i++)
{
// round the floats to an int
float* p = (float*)cvGetSeqElem(circles, i);
cv::Point center(cvRound(p[0]), cvRound(p[1]));
int radius = cvRound(p[2]);
// draw the circle center
cvCircle(img, center, 3, CV_RGB(0,255,0), -1, 8, 0 );
// draw the circle outline
cvCircle(img, center, radius+1, CV_RGB(0,0,255), 2, 8, 0 );
printf("x: %d y: %d r: %d\n",center.x,center.y, radius);
}
cvNamedWindow("circles", 1);
cvNamedWindow("Image", 1);
cvShowImage("circles", rgbcanny);
cvShowImage("Image", img);
cvSaveImage("out.png", rgbcanny);
cvWaitKey(0);
return 0;
}
I believe that the problem comes from your cvHoughCircles parameters:
CvSeq* cvHoughCircles(CvArr* image, CvMemStorage* circleStorage, int method, double dp, double minDist, double param1=100, double param2=100, int minRadius=0, int maxRadius=0 )
minDist – Minimum distance between the centers of the detected circles. If the parameter is too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is too large, some circles may be missed.
You are probably using too large a minDist (in your case at most 2-3 balls would be detected vertically, and probably also horizontally).
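As a sketch of the fix, the same call with a smaller minDist and, as additional assumptions, a lower accumulator threshold and a radius range matching the ball size:
// Sketch only: all numeric values here are assumed starting points for tuning.
CvSeq* circles = cvHoughCircles(gray, storage, CV_HOUGH_GRADIENT,
                                1,      // dp
                                10.0,   // minDist: roughly one ball diameter or less
                                100,    // param1: upper Canny threshold
                                30,     // param2: accumulator threshold (lower finds more circles)
                                5, 25); // assumed min/max ball radius in pixels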