I have got this circle detection working, but it only detects one circle. How would I adjust the code to detect multiple circles (at most 22, since I am using it for snooker)? I presume I would be editing the circle detection method, but I am stuck.
#include <stdio.h>
#include "cv.h"
#include "highgui.h"
#include <iostream>
#include <math.h>
#include <string.h>
#include <conio.h>
using namespace std;
IplImage* img = 0;
CvMemStorage * cstorage;
CvMemStorage * hstorage;
void detectCircle( IplImage *frame );
int main( int argc, char **argv )
{
CvCapture *capture = 0;
IplImage *frame = 0;
int key = 0;
hstorage = cvCreateMemStorage( 0 );
cstorage = cvCreateMemStorage( 0 );
//CvVideoWriter *writer = 0;
//int colour = 1;
//int fps = 25;
//int frameW = 640;
//int frameH = 480;
//writer = cvCreateVideoWriter("test.avi",CV_FOURCC('P', 'I', 'M', '1'),fps,cvSize(frameW,frameH),colour);
//initialise camera
capture = cvCaptureFromCAM( 0 );
//check if camera present
if ( !capture )
{
fprintf( stderr, "cannot open webcam\n");
return 1;
}
//create a window
cvNamedWindow( "Snooker", CV_WINDOW_AUTOSIZE );
while(key !='q')
{
//get frame
frame = cvQueryFrame(capture);
//int nFrames = 50;
//for (int i=0; i<nFrames;i++){
//cvGrabFrame(capture);
//frame = cvRetrieveFrame(capture);
//cvWriteFrame(writer, frame);
//}
//check for frame
if( !frame ) break;
detectCircle(frame);
//display current frame
//cvShowImage ("Snooker", frame );
//exit if Q pressed
key = cvWaitKey( 20 );
}
// free memory
cvDestroyWindow( "Snooker" );
cvReleaseCapture( &capture );
cvReleaseMemStorage( &cstorage);
cvReleaseMemStorage( &hstorage);
//cvReleaseVideoWriter(&writer);
return 0;
}
void detectCircle( IplImage * img )
{
int px;
int py;
int edge_thresh = 1;
IplImage *gray = cvCreateImage( cvSize(img->width,img->height), 8, 1);
IplImage *edge = cvCreateImage( cvSize(img->width,img->height), 8, 1);
cvCvtColor(img, gray, CV_BGR2GRAY);
gray->origin = 1;
// color threshold
cvThreshold(gray,gray,100,255,CV_THRESH_BINARY);
// smooths out image
cvSmooth(gray, gray, CV_GAUSSIAN, 11, 11);
// get edges
cvCanny(gray, edge, (float)edge_thresh, (float)edge_thresh*3, 5);
// detects circle
CvSeq* circle = cvHoughCircles(gray, cstorage, CV_HOUGH_GRADIENT, 1, gray->height/50, 5, 35);
// draws circle and its centerpoint
float* p = (float*)cvGetSeqElem( circle, 0 );
if( p == NULL ) { return; }
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), 3, CV_RGB(255,0,0), -1, 8, 0 );
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), cvRound(p[2]), CV_RGB(200,0,0), 1, 8, 0 );
px=cvRound(p[0]);
py=cvRound(p[1]);
cvShowImage ("Snooker", img );
}
Your code already finds all the circles; you just draw the first one:
// draws circle and its centerpoint
float* p = (float*)cvGetSeqElem( circle, 0 );
if( p == NULL ) { return; }
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), 3, CV_RGB(255,0,0), -1, 8, 0 );
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), cvRound(p[2]), CV_RGB(200,0,0), 1, 8, 0);
px=cvRound(p[0]);
py=cvRound(p[1]);
You should do it in a loop, something like:
for( int i = 0; i < circle->total; i++ )
{
float* p = (float*) cvGetSeqElem( circle, i );
// ... drawing code goes here
}
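For completeness, here is a minimal sketch of how the full drawing loop could look at the end of your detectCircle(), using the same variable names as your listing; the cap of 22 is only an assumption based on your snooker use case, cvHoughCircles itself does not need it:

// draw every detected circle and its centre point (at most 22 for snooker)
int total = circle ? circle->total : 0;
if( total > 22 ) total = 22;                 // assumption: never more than 22 balls in view
for( int i = 0; i < total; i++ )
{
    float* p = (float*)cvGetSeqElem( circle, i );
    if( p == NULL ) continue;                // defensive, as in your existing check
    int cx = cvRound( p[0] );                // centre x
    int cy = cvRound( p[1] );                // centre y
    int r  = cvRound( p[2] );                // radius
    cvCircle( img, cvPoint(cx, cy), 3, CV_RGB(255,0,0), -1, 8, 0 );   // centre dot
    cvCircle( img, cvPoint(cx, cy), r, CV_RGB(200,0,0), 1, 8, 0 );    // circle outline
}

If you still need px and py afterwards, set them inside the loop for whichever circle you care about.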
I want to find subpixel coordinates, and I have researched this topic. I think the subpixel values should look like 152.6, 49.3, and so on.
I found this page in the OpenCV documentation: http://docs.opencv.org/2.4/modules/imgproc/doc/feature_detection.html?highlight=cornersubpix#cornersubpix
And I tried this code:
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src, src_gray;
int maxCorners = 10;
int maxTrackbar = 50;
RNG rng(11111);
char* source_window = "Image";
void goodFeaturesToTrack_Demo( int, void* );
int main( int argc, char** argv )
{
src = imread( "a.png", 1 );
cvtColor( src, src_gray, CV_BGR2GRAY );
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo);
imshow( source_window, src );
goodFeaturesToTrack_Demo( 0, 0 );
waitKey(0);
return(0);
}
void goodFeaturesToTrack_Demo( int, void* )
{
if( maxCorners < 1 )
{ maxCorners = 1; }
vector<Point2f> corners;
double qualityLevel = 0.01;
double minDistance = 10;
int blockSize = 3;
bool useHarrisDetector = false;
double k = 0.04;
Mat copy;
copy = src.clone();
goodFeaturesToTrack( src_gray,corners,maxCorners,qualityLevel,minDistance,Mat(),blockSize,useHarrisDetector,k );
cout<<"** Number of corners detected: "<<corners.size()<<endl;
int r = 4;
for( int i = 0; i < corners.size(); i++ )
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255),rng.uniform(0,255)), -1, 8, 0 ); }
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, copy );
Size winSize = Size( 10, 10 );
Size zeroZone = Size( -1, -1 );
TermCriteria criteria = TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001 );
cornerSubPix( src_gray, corners, winSize, zeroZone, criteria );
for( int i = 0; i < corners.size(); i++ )
{ cout<<" -- Refined Corner ["<<i<<"] ("<<corners[i].x<<","<<corners[i].y<<")"<<endl; }
}
But I got this result:
This code finds only the corners' subpixel positions; I want to find the edges' subpixel positions.
I'm trying to use optical flow, but the optical flow lines are not drawn; only points appear. What is the problem?
Here is the source code of the project. I stepped through it in the debugger: GDB shows that p0.x = p1.x and p0.y = p1.y every time, but why? Sorry for my bad English.
#include "opencv/cv.h"
#include "opencv2/core/core.hpp"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <stdio.h>
std::vector<cv::Point2f> corners;
std::vector<cv::Point2f> corners_b;
double qualityLevel = 0.01;
double minDistance = 10;
int blockSize = 3;
bool useHarrisDetector = false;
double k = 0.04;
int maxCorners = 200;
int maxTrackbar = 100;
void MotionDetection(cv::Mat frame1, cv::Mat frame2)
{
cv::Mat prev, next;
cvtColor(frame1, prev, CV_BGR2GRAY);
cvtColor(frame2, next, CV_BGR2GRAY);
goodFeaturesToTrack( prev,
corners,
maxCorners,
qualityLevel,
minDistance,
cv::Mat(),
blockSize,
useHarrisDetector,
k );
cornerSubPix(prev,
corners,
cvSize( 10, 10 ) ,
cvSize( -1, -1 ),
cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );
std::vector<uchar> features_found;
features_found.reserve(maxCorners);
std::vector<float> feature_errors;
feature_errors.reserve(maxCorners);
calcOpticalFlowPyrLK(prev, next, corners, corners_b, features_found,
feature_errors, cvSize( 10, 10 ), 5, cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0);
IplImage g = next;
for( int i = 0; i < maxCorners; ++i )
{
CvPoint p0 = cvPoint( cvRound( corners[i].x ), cvRound( corners[i].y ) );
CvPoint p1 = cvPoint( cvRound( corners_b[i].x ), cvRound( corners_b[i].y ) );
cvLine( &g, p0, p1, CV_RGB(255,0,0), 3, CV_AA );
}
cv::Mat rs(&g);
imshow( "result window", rs );
int key = cv::waitKey(5);
}
int main(int argc, char* argv[])
{
cv::VideoCapture cap(0);
if(!cap.isOpened())
{
std::cout<<"[!] Error: cant open camera!"<<std::endl;
return -1;
}
cv::Mat edges;
cv::namedWindow("result window", 1);
cv::Mat frame, frame2;
cap >> frame;
while(1)
{
cap >> frame2;
MotionDetection(frame, frame2);
}
return 0;
}
In your main function, frame is effectively a clone of frame2, so both arguments to MotionDetection end up being the same image. I think you need
cap >> frame2;
frame2.copyTo( frame );
instead of
cap >> frame;
That's all.
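A minimal sketch of the capture loop with that change applied; the copy at the end of each iteration is an addition of mine, so that each pass compares consecutive frames rather than always the very first one:

cv::Mat frame, frame2;
cap >> frame2;
frame2.copyTo( frame );              // deep copy instead of cap >> frame
while( true )
{
    cap >> frame2;                   // grab the next frame
    if( frame2.empty() ) break;
    MotionDetection( frame, frame2 );
    frame2.copyTo( frame );          // keep this frame as "previous" for the next iteration
}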
Hey, I tried to make a histogram that shows frame subtraction. The code runs, but I get a gray window with no result.
The message in the command window is:
Compiler did not align stack variables. Libavcodec has been miscompiled
and may be very slow or crash. This is not a bug in libavcodec,
but in the compiler. You may try recompiling using gcc >= 4.2.
Do not report crashes to FFmpeg developers.
OpenCV Error: Assertion failed (images[j].channels() == 1) in unknown function,
file ........\ocv\opencv\src\cv\cvhistogram.cpp, line 137
Here is the code. Does someone have an idea? Thanks for the help.
int main()
{
int key = 0;
CvCapture* capture = cvCaptureFromAVI( "macroblock.mpg" );
IplImage* frame = cvQueryFrame( capture );
IplImage* currframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
IplImage* destframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
IplImage* imgHistogram = 0;
CvHistogram* hist;
if ( !capture )
{
fprintf( stderr, "Cannot open AVI!\n" );
return 1;
}
int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
cvNamedWindow( "dest", CV_WINDOW_AUTOSIZE );
cvNamedWindow( "imgHistogram", CV_WINDOW_AUTOSIZE );
while( key != 'x' )
{
frame = cvQueryFrame( capture );
currframe = cvCloneImage( frame );
frame = cvQueryFrame( capture );
cvSub(frame,currframe,destframe);
int bins = 256;
int hsize[] = {bins};
float max_value = 0, min_value = 0;
float value;
int normalized;
float xranges[] = {0, 256};
float* ranges[] = {xranges};
IplImage* planes[] = {destframe};
hist = cvCreateHist(1, hsize, CV_HIST_ARRAY, ranges,1);
cvCalcHist(planes, hist, 0, NULL);
cvGetMinMaxHistValue(hist, &min_value, &max_value);
// printf("Minimum Histogram Value: %f, Maximum Histogram Value: %f\n", min_value, max_value);
imgHistogram = cvCreateImage(cvSize(bins, 50),IPL_DEPTH_8U,3);
cvRectangle(imgHistogram, cvPoint(0,0), cvPoint(256,50), CV_RGB(255,255,255),-1);
for(int i=0; i < bins; i++){
value = cvQueryHistValue_1D(hist, i);
normalized = cvRound(value*50/max_value);
cvLine(imgHistogram,cvPoint(i,50), cvPoint(i,50-normalized), CV_RGB(0,0,0));
}
if(key==27 )break;
cvShowImage( "dest",destframe);
cvShowImage( "imgHistogram",imgHistogram);
key = cvWaitKey( 1000 / 10 );
}
cvDestroyWindow( "dest" );
cvReleaseCapture( &capture );
return 0;
}
Since you are trying to show a 1D histogram, the histogram plane needs to be in grayscale. So, you need to convert the resulting image from cvSub() to grayscale first. Try
IplImage *gray = NULL;
gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
while(key != 'x') {
...
cvSub(frame, currframe, destframe);
cvCvtColor(destframe, gray, CV_BGR2GRAY);
...
IplImage* planes[] = {gray};
..
}
Let me know if it works for you.
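If it helps, here is a minimal sketch of that idea factored into a helper function (makeDiffHist is a hypothetical name, not part of your code), assuming frame, currframe and destframe are 3-channel BGR images and gray is a pre-allocated 1-channel image of the same size:

// Builds a 1D histogram of the grayscale difference between two BGR frames.
CvHistogram* makeDiffHist( IplImage* frame, IplImage* currframe,
                           IplImage* destframe, IplImage* gray )
{
    int bins = 256;
    int hsize[] = { bins };
    float xranges[] = { 0, 256 };
    float* ranges[] = { xranges };

    cvSub( frame, currframe, destframe );         // frame difference (still 3-channel)
    cvCvtColor( destframe, gray, CV_BGR2GRAY );   // cvCalcHist needs 1-channel planes
    IplImage* planes[] = { gray };

    CvHistogram* hist = cvCreateHist( 1, hsize, CV_HIST_ARRAY, ranges, 1 );
    cvCalcHist( planes, hist, 0, NULL );
    return hist;                                  // caller releases with cvReleaseHist
}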
I am using the code available on this website to do face detection: http://nashruddin.com/OpenCV_Face_Detection
I would like to increase the size of the detected face region, but I am not sure how to do it. I need some help with it.
The code I am using is this:
//
#include "stdafx.h"
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
CvHaarClassifierCascade *cascade;
CvMemStorage *storage;
void detectFaces( IplImage *img );
int main( int argc, char** argv )
{
CvCapture *capture;
IplImage *frame;
int key = 0;
char *filename = "C:/OpenCV2.1/data/haarcascades/haarcascade_frontalface_alt.xml";
cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 );
storage = cvCreateMemStorage( 0 );
capture = cvCaptureFromCAM( 0 );
assert( cascade && storage && capture );
cvNamedWindow( "video", 1 );
while( key != 'q' ) {
frame = cvQueryFrame( capture );
if( !frame ) {
fprintf( stderr, "Cannot query frame!\n" );
break;
}
cvFlip( frame, frame, -1 );
frame->origin = 0;
detectFaces( frame );
key = cvWaitKey( 10 );
}
cvReleaseCapture( &capture );
cvDestroyWindow( "video" );
cvReleaseHaarClassifierCascade( &cascade );
cvReleaseMemStorage( &storage );
return 0;
}
void detectFaces( IplImage *img )
{
int i;
CvSeq *faces = cvHaarDetectObjects(
img,
cascade,
storage,
1.1,
3,
0 /*CV_HAAR_DO_CANNY_PRUNNING*/,
cvSize( 40, 40 ) );
for( i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
CvRect *r = ( CvRect* )cvGetSeqElem( faces, i );
cvRectangle( img,
cvPoint( r->x, r->y ),
cvPoint( r->x + r->width, r->y + r->height ),
CV_RGB( 255, 0, 0 ), 1, 8, 0 );
}
cvShowImage( "video", img );
}
This increases the size of the rectangle around the face. If you meant increasing the haar detector's window size, please update your question.
int padding_width = 30; // pixels
int padding_height = 30; // pixels
for( i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
CvRect *r = ( CvRect* )cvGetSeqElem( faces, i );
// Yes yes, all of this could be written much more compactly.
// It was written like this for clarity.
int topleft_x = r->x - (padding_width / 2);
int topleft_y = r->y - (padding_height / 2);
if (topleft_x < 0)
topleft_x = 0;
if (topleft_y < 0)
topleft_y = 0;
int bottomright_x = r->x + r->width + (padding_width / 2);
int bottomright_y = r->y + r->height + (padding_height / 2);
if (bottomright_x >= img->width)
bottomright_x = img->width - 1;
if (bottomright_y >= img->height)
bottomright_y = img->height - 1;
cvRectangle( img,
cvPoint(topleft_x, topleft_y),
cvPoint(bottomright_x, bottomright_y),
CV_RGB( 255, 0, 0 ), 1, 8, 0 );
}
I'm trying to display video from my webcam (which was working grand), and now I'm trying to implement circle detection on this video stream. I get the following error:
Unhandled exception at 0x001a1a4d in test.exe: 0xC0000005: Access violation reading location 0x00000004.
The error points to this line of code:
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), 3, CV_RGB(255,0,0), -1, 8, 0 );
Can anyone help, please?
#include <stdio.h>
#include "cv.h"
#include "highgui.h"
#include <iostream>
#include <math.h>
#include <string.h>
#include <conio.h>
using namespace std;
IplImage* img = 0;
CvMemStorage * cstorage;
CvMemStorage * hstorage;
void detectCircle( IplImage *frame );
int main( int argc, char **argv )
{
CvCapture *capture = 0;
IplImage *frame = 0;
int key = 0;
hstorage = cvCreateMemStorage( 0 );
cstorage = cvCreateMemStorage( 0 );
//CvVideoWriter *writer = 0;
//int colour = 1;
//int fps = 25;
//int frameW = 640;
//int frameH = 480;
//writer = cvCreateVideoWriter("test.avi",CV_FOURCC('P', 'I', 'M', '1'),fps,cvSize(frameW,frameH),colour);
//initialise camera
capture = cvCaptureFromCAM( 0 );
//check if camera present
if ( !capture )
{
fprintf( stderr, "cannot open webcam\n");
return 1;
}
//create a window
cvNamedWindow( "Snooker", CV_WINDOW_AUTOSIZE );
while(key !='q')
{
//get frame
frame = cvQueryFrame(capture);
//int nFrames = 50;
//for (int i=0; i<nFrames;i++){
//cvGrabFrame(capture);
//frame = cvRetrieveFrame(capture);
//cvWriteFrame(writer, frame);
//}
//check for frame
if( !frame ) break;
detectCircle(frame);
//display current frame
//cvShowImage ("Snooker", frame );
//exit if Q pressed
key = cvWaitKey( 20 );
}
// free memory
cvDestroyWindow( "Snooker" );
cvReleaseCapture( &capture );
cvReleaseMemStorage( &cstorage);
cvReleaseMemStorage( &hstorage);
//cvReleaseVideoWriter(&writer);
return 0;
}
void detectCircle( IplImage * img )
{
int px;
int py;
int edge_thresh = 1;
IplImage *gray = cvCreateImage( cvSize(img->width,img->height), 8, 1);
IplImage *edge = cvCreateImage( cvSize(img->width,img->height), 8, 1);
cvCvtColor(img, gray, CV_BGR2GRAY);
gray->origin = 1;
// color threshold
cvThreshold(gray,gray,100,255,CV_THRESH_BINARY);
// smooths out image
cvSmooth(gray, gray, CV_GAUSSIAN, 11, 11);
// get edges
cvCanny(gray, edge, (float)edge_thresh, (float)edge_thresh*3, 5);
// detects circle
CvSeq* circle = cvHoughCircles(gray, cstorage, CV_HOUGH_GRADIENT, 1, gray->height/50, 5, 35);
// draws circle and its centerpoint
float* p = (float*)cvGetSeqElem( circle, 0 );
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), 3, CV_RGB(255,0,0), -1, 8, 0 );
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), cvRound(p[2]), CV_RGB(200,0,0), 1, 8, 0 );
px=cvRound(p[0]);
py=cvRound(p[1]);
cvShowImage ("Snooker", img );
}
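For what it's worth, an access violation reading location 0x00000004 is consistent with p being NULL here (p[1] sits 4 bytes past a null pointer), and the earlier listing in this thread already guards against cvGetSeqElem() returning NULL when cvHoughCircles() finds no circle. A minimal sketch of that guard, using the same variable names:

CvSeq* circle = cvHoughCircles( gray, cstorage, CV_HOUGH_GRADIENT, 1, gray->height/50, 5, 35 );
float* p = (float*)cvGetSeqElem( circle, 0 );
if( p == NULL ) { return; }   // no circle found in this frame, skip the drawing calls
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), 3, CV_RGB(255,0,0), -1, 8, 0 );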