Display a video from my webcam while trying to implement circle detection - OpenCV

I'm trying to display video from my webcam (which was working fine), and now I'm trying to add circle detection to this video stream. I get the following error:
Unhandled exception at 0x001a1a4d in test.exe: 0xC0000005: Access violation reading location 0x00000004.
The error is linked to this line of code:
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), 3, CV_RGB(255,0,0), -1, 8, 0 );
Can anyone help, please?
#include <stdio.h>
#include "cv.h"
#include "highgui.h"
#include <iostream>
#include <math.h>
#include <string.h>
#include <conio.h>
using namespace std;
IplImage* img = 0;
CvMemStorage * cstorage;
CvMemStorage * hstorage;
void detectCircle( IplImage *frame );
int main( int argc, char **argv )
{
CvCapture *capture = 0;
IplImage *frame = 0;
int key = 0;
hstorage = cvCreateMemStorage( 0 );
cstorage = cvCreateMemStorage( 0 );
//CvVideoWriter *writer = 0;
//int colour = 1;
//int fps = 25;
//int frameW = 640;
//int frameH = 480;
//writer = cvCreateVideoWriter("test.avi",CV_FOURCC('P', 'I', 'M', '1'),fps,cvSize(frameW,frameH),colour);
//initialise camera
capture = cvCaptureFromCAM( 0 );
//check if camera present
if ( !capture )
{
fprintf( stderr, "cannot open webcam\n");
return 1;
}
//create a window
cvNamedWindow( "Snooker", CV_WINDOW_AUTOSIZE );
while(key !='q')
{
//get frame
frame = cvQueryFrame(capture);
//int nFrames = 50;
//for (int i=0; i<nFrames;i++){
//cvGrabFrame(capture);
//frame = cvRetrieveFrame(capture);
//cvWriteFrame(writer, frame);
//}
//check for frame
if( !frame ) break;
detectCircle(frame);
//display current frame
//cvShowImage ("Snooker", frame );
//exit if Q pressed
key = cvWaitKey( 20 );
}
// free memory
cvDestroyWindow( "Snooker" );
cvReleaseCapture( &capture );
cvReleaseMemStorage( &cstorage);
cvReleaseMemStorage( &hstorage);
//cvReleaseVideoWriter(&writer);
return 0;
}
void detectCircle( IplImage * img )
{
int px;
int py;
int edge_thresh = 1;
IplImage *gray = cvCreateImage( cvSize(img->width,img->height), 8, 1);
IplImage *edge = cvCreateImage( cvSize(img->width,img->height), 8, 1);
cvCvtColor(img, gray, CV_BGR2GRAY);
gray->origin = 1;
// color threshold
cvThreshold(gray,gray,100,255,CV_THRESH_BINARY);
// smooths out image
cvSmooth(gray, gray, CV_GAUSSIAN, 11, 11);
// get edges
cvCanny(gray, edge, (float)edge_thresh, (float)edge_thresh*3, 5);
// detects circle
CvSeq* circle = cvHoughCircles(gray, cstorage, CV_HOUGH_GRADIENT, 1, gray->height/50, 5, 35);
// draws circle and its centerpoint
float* p = (float*)cvGetSeqElem( circle, 0 );
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), 3, CV_RGB(255,0,0), -1, 8, 0 );
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), cvRound(p[2]), CV_RGB(200,0,0), 1, 8, 0 );
px=cvRound(p[0]);
py=cvRound(p[1]);
cvShowImage ("Snooker", img );
}
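A likely cause (an assumption based on the faulting address 0x00000004, i.e. a read at offset 4 from a null pointer) is that cvHoughCircles finds no circles in a frame, so cvGetSeqElem returns NULL and p[1] dereferences it. A minimal guard inside detectCircle, as a sketch:

// detects circles
CvSeq* circle = cvHoughCircles(gray, cstorage, CV_HOUGH_GRADIENT, 1, gray->height/50, 5, 35);
// bail out if nothing was detected in this frame; otherwise p would be NULL
if( circle == NULL || circle->total == 0 )
{
    cvShowImage( "Snooker", img );
    return;
}
float* p = (float*)cvGetSeqElem( circle, 0 );

(Separately, detectCircle never releases gray and edge, so it leaks two images per frame, but that is not what causes the access violation.)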

Related

Accurate subpixel edge location in C (OPENCV)

I want to find subpixel edge locations, and I have researched this topic. I think subpixel coordinates should be values such as 152.6, 49.3, ...
I found this page in the OpenCV documentation: http://docs.opencv.org/2.4/modules/imgproc/doc/feature_detection.html?highlight=cornersubpix#cornersubpix
and I tried this code:
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src, src_gray;
int maxCorners = 10;
int maxTrackbar = 50;
RNG rng(11111);
char* source_window = "Image";
void goodFeaturesToTrack_Demo( int, void* );
int main( int argc, char** argv )
{
src = imread( "a.png", 1 );
cvtColor( src, src_gray, CV_BGR2GRAY );
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo);
imshow( source_window, src );
goodFeaturesToTrack_Demo( 0, 0 );
waitKey(0);
return(0);
}
void goodFeaturesToTrack_Demo( int, void* )
{
if( maxCorners < 1 )
{ maxCorners = 1; }
vector<Point2f> corners;
double qualityLevel = 0.01;
double minDistance = 10;
int blockSize = 3;
bool useHarrisDetector = false;
double k = 0.04;
Mat copy;
copy = src.clone();
goodFeaturesToTrack( src_gray,corners,maxCorners,qualityLevel,minDistance,Mat(),blockSize,useHarrisDetector,k );
cout<<"** Number of corners detected: "<<corners.size()<<endl;
int r = 4;
for( int i = 0; i < corners.size(); i++ )
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255),rng.uniform(0,255)), -1, 8, 0 ); }
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, copy );
Size winSize = Size( 10, 10 );
Size zeroZone = Size( -1, -1 );
TermCriteria criteria = TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001 );
cornerSubPix( src_gray, corners, winSize, zeroZone, criteria );
for( int i = 0; i < corners.size(); i++ )
{ cout<<" -- Refined Corner ["<<i<<"] ("<<corners[i].x<<","<<corners[i].y<<")"<<endl; }
}
But this code only finds the corners' subpixel locations; I want to find the edges' subpixel locations.

Why are the optical flow lines not drawn in my code

I'm trying to use optical flow, but the flow lines are not drawn; only points appear. What's the problem?
Here is the source code of the project. I looked at it in the debugger: GDB shows that p0.x = p1.x and p0.y = p1.y in every case, but why? Sorry for my bad English.
#include "opencv/cv.h"
#include "opencv2/core/core.hpp"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <stdio.h>
std::vector<cv::Point2f> corners;
std::vector<cv::Point2f> corners_b;
double qualityLevel = 0.01;
double minDistance = 10;
int blockSize = 3;
bool useHarrisDetector = false;
double k = 0.04;
int maxCorners = 200;
int maxTrackbar = 100;
void MotionDetection(cv::Mat frame1, cv::Mat frame2)
{
cv::Mat prev, next;
cvtColor(frame1, prev, CV_BGR2GRAY);
cvtColor(frame2, next, CV_BGR2GRAY);
goodFeaturesToTrack( prev,
corners,
maxCorners,
qualityLevel,
minDistance,
cv::Mat(),
blockSize,
useHarrisDetector,
k );
cornerSubPix(prev,
corners,
cvSize( 10, 10 ) ,
cvSize( -1, -1 ),
cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );
std::vector<uchar> features_found;
features_found.reserve(maxCorners);
std::vector<float> feature_errors;
feature_errors.reserve(maxCorners);
calcOpticalFlowPyrLK(prev, next, corners, corners_b, features_found,
feature_errors, cvSize( 10, 10 ), 5, cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0);
IplImage g = next;
for( int i = 0; i < maxCorners; ++i )
{
CvPoint p0 = cvPoint( cvRound( corners[i].x ), cvRound( corners[i].y ) );
CvPoint p1 = cvPoint( cvRound( corners_b[i].x ), cvRound( corners_b[i].y ) );
cvLine( &g, p0, p1, CV_RGB(255,0,0), 3, CV_AA );
}
cv::Mat rs(&g);
imshow( "result window", rs );
int key = cv::waitKey(5);
}
int main(int argc, char* argv[])
{
cv::VideoCapture cap(0);
if(!cap.isOpened())
{
std::cout<<"[!] Error: cant open camera!"<<std::endl;
return -1;
}
cv::Mat edges;
cv::namedWindow("result window", 1);
cv::Mat frame, frame2;
cap >> frame;
while(1)
{
cap >> frame2;
MotionDetection(frame, frame2);
}
return 0;
}
In your main function, frame ends up as a clone of frame2 (both end up holding the same image data). I think you should use
cap >> frame2;
frame2.copyTo( frame );
instead of
cap >> frame;
That's all.
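Spelling that suggestion out, a minimal sketch of the corrected capture loop (assuming, as the answer does, that the problem is frame and frame2 sharing the capture's internal buffer):

cv::Mat frame, frame2;
cap >> frame2;
frame2.copyTo( frame );               // independent copy of the first frame
while( true )
{
    cap >> frame2;                    // newest frame
    if( frame2.empty() ) break;
    MotionDetection( frame, frame2 ); // compare previous frame against current one
    frame2.copyTo( frame );           // deep copy, so prev and next differ on the next iteration
}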

histogram on opencv

Hey, I tried to make a histogram that shows frame subtraction. The code runs, but I get a gray window with no result.
The message in the command window is:
Compiler did not align stack variables. Libavcodec has been miscompiled
and may be very slow or crash. This is not a bug in libavcodec,
but in the compiler. You may try recompiling using gcc >= 4.2.
Do not report crashes to FFmpeg developers.
OpenCV Error: Assertion failed (images[j].channels() == 1) in unknown function,
file ........\ocv\opencv\src\cv\cvhistogram.cpp, line 137
Here is the code. Does anyone have an idea? Thanks for the help.
int main()
{
int key = 0;
CvCapture* capture = cvCaptureFromAVI( "macroblock.mpg" );
IplImage* frame = cvQueryFrame( capture );
IplImage* currframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
IplImage* destframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
IplImage* imgHistogram = 0;
CvHistogram* hist;
if ( !capture )
{
fprintf( stderr, "Cannot open AVI!\n" );
return 1;
}
int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
cvNamedWindow( "dest", CV_WINDOW_AUTOSIZE );
cvNamedWindow( "imgHistogram", CV_WINDOW_AUTOSIZE );
while( key != 'x' )
{
frame = cvQueryFrame( capture );
currframe = cvCloneImage( frame );
frame = cvQueryFrame( capture );
cvSub(frame,currframe,destframe);
int bins = 256;
int hsize[] = {bins};
float max_value = 0, min_value = 0;
float value;
int normalized;
float xranges[] = {0, 256};
float* ranges[] = {xranges};
IplImage* planes[] = {destframe};
hist = cvCreateHist(1, hsize, CV_HIST_ARRAY, ranges,1);
cvCalcHist(planes, hist, 0, NULL);
cvGetMinMaxHistValue(hist, &min_value, &max_value);
// printf("Minimum Histogram Value: %f, Maximum Histogram Value: %f\n", min_value, max_value);
imgHistogram = cvCreateImage(cvSize(bins, 50),IPL_DEPTH_8U,3);
cvRectangle(imgHistogram, cvPoint(0,0), cvPoint(256,50), CV_RGB(255,255,255),-1);
for(int i=0; i < bins; i++){
value = cvQueryHistValue_1D(hist, i);
normalized = cvRound(value*50/max_value);
cvLine(imgHistogram,cvPoint(i,50), cvPoint(i,50-normalized), CV_RGB(0,0,0));
}
if(key==27 )break;
cvShowImage( "dest",destframe);
cvShowImage( "imgHistogram",imgHistogram);
key = cvWaitKey( 1000 / 10 );
}
cvDestroyWindow( "dest" );
cvReleaseCapture( &capture );
return 0;
}
Since you are trying to show a 1D histogram, the histogram plane needs to be in grayscale. So, you need to convert the resulting image from cvSub() to grayscale first. Try
IplImage *gray = NULL;
gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
while(key != 'x') {
...
cvSub(frame, currframe, destframe);
cvCvtColor(destframe, gray, CV_BGR2GRAY);
...
IplImage* planes[] = {gray};
..
}
Let me know if it works for you.

Adjust code to detect multiple circles instead of just 1 in OPENCV

I have got this circle detection working, but it only detects one circle. How would I adjust the code to detect multiple circles (the maximum that will be detected is 22, since I'm using it for snooker)? I presume I would be editing the circle detection method, but I am stuck :(
#include <stdio.h>
#include "cv.h"
#include "highgui.h"
#include <iostream>
#include <math.h>
#include <string.h>
#include <conio.h>
using namespace std;
IplImage* img = 0;
CvMemStorage * cstorage;
CvMemStorage * hstorage;
void detectCircle( IplImage *frame );
int main( int argc, char **argv )
{
CvCapture *capture = 0;
IplImage *frame = 0;
int key = 0;
hstorage = cvCreateMemStorage( 0 );
cstorage = cvCreateMemStorage( 0 );
//CvVideoWriter *writer = 0;
//int colour = 1;
//int fps = 25;
//int frameW = 640;
//int frameH = 480;
//writer = cvCreateVideoWriter("test.avi",CV_FOURCC('P', 'I', 'M', '1'),fps,cvSize(frameW,frameH),colour);
//initialise camera
capture = cvCaptureFromCAM( 0 );
//check if camera present
if ( !capture )
{
fprintf( stderr, "cannot open webcam\n");
return 1;
}
//create a window
cvNamedWindow( "Snooker", CV_WINDOW_AUTOSIZE );
while(key !='q')
{
//get frame
frame = cvQueryFrame(capture);
//int nFrames = 50;
//for (int i=0; i<nFrames;i++){
//cvGrabFrame(capture);
//frame = cvRetrieveFrame(capture);
//cvWriteFrame(writer, frame);
//}
//check for frame
if( !frame ) break;
detectCircle(frame);
//display current frame
//cvShowImage ("Snooker", frame );
//exit if Q pressed
key = cvWaitKey( 20 );
}
// free memory
cvDestroyWindow( "Snooker" );
cvReleaseCapture( &capture );
cvReleaseMemStorage( &cstorage);
cvReleaseMemStorage( &hstorage);
//cvReleaseVideoWriter(&writer);
return 0;
}
void detectCircle( IplImage * img )
{
int px;
int py;
int edge_thresh = 1;
IplImage *gray = cvCreateImage( cvSize(img->width,img->height), 8, 1);
IplImage *edge = cvCreateImage( cvSize(img->width,img->height), 8, 1);
cvCvtColor(img, gray, CV_BGR2GRAY);
gray->origin = 1;
// color threshold
cvThreshold(gray,gray,100,255,CV_THRESH_BINARY);
// smooths out image
cvSmooth(gray, gray, CV_GAUSSIAN, 11, 11);
// get edges
cvCanny(gray, edge, (float)edge_thresh, (float)edge_thresh*3, 5);
// detects circle
CvSeq* circle = cvHoughCircles(gray, cstorage, CV_HOUGH_GRADIENT, 1, gray->height/50, 5, 35);
// draws circle and its centerpoint
float* p = (float*)cvGetSeqElem( circle, 0 );
if( p==NULL ){ return;}
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), 3, CV_RGB(255,0,0), -1, 8, 0 );
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), cvRound(p[2]), CV_RGB(200,0,0), 1, 8, 0 );
px=cvRound(p[0]);
py=cvRound(p[1]);
cvShowImage ("Snooker", img );
}
Your code finds all circles - you just draw one:
// draws circle and its centerpoint
float* p = (float*)cvGetSeqElem( circle, 0 );
if( p==NULL ){ return;}
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), 3, CV_RGB(255,0,0), -1, 8, 0 );
cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), cvRound(p[2]), CV_RGB(200,0,0), 1, 8, 0);
px=cvRound(p[0]);
py=cvRound(p[1]);
You should do it in a loop, something like:
for( int i = 0; i < circle->total; i++ )
{
    float* p = (float*) cvGetSeqElem( circle, i );
    // ... draw the circle and its center point here
}
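Spelled out with the two cvCircle calls from the question (a sketch; the parameter values are unchanged from the original code):

for( int i = 0; i < circle->total; i++ )
{
    float* p = (float*) cvGetSeqElem( circle, i );
    // filled center point
    cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), 3, CV_RGB(255,0,0), -1, 8, 0 );
    // outline with the detected radius
    cvCircle( img, cvPoint(cvRound(p[0]),cvRound(p[1])), cvRound(p[2]), CV_RGB(200,0,0), 1, 8, 0 );
}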

OpenCV 2: How to save a ROI

I am new to OpenCV. Currently, I am trying to load and save a defined ROI of an image.
For OpenCV 1.x, I got it working with the following function...
#include <cv.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
void SaveROI(const CStringA& inputFile, const CStringA& outputFile)
{
if (ATLPath::FileExists(inputFile))
{
CvRect rect;
rect.x = 8;
rect.y = 90;
rect.width = 26;
rect.height = 46;
IplImage* imgInput = cvLoadImage(inputFile.GetString(), 1);
IplImage* imgRoi = cvCloneImage(imgInput);
cvSetImageROI(imgRoi, rect);
cvSaveImage(outputFile.GetString(), imgRoi);
cvReleaseImage(&imgInput);
cvReleaseImage(&imgRoi);
}
}
How can this be done with OpenCV 2 and the C++ API? I tried the following without success; the whole image is saved.
void SaveROICPP(const CStringA& inputFile, const CStringA& outputFile)
{
if (ATLPath::FileExists(inputFile))
{
cv::Mat imgInput = cv::imread(inputFile.GetString());
if (imgInput.data != NULL)
{
cv::Mat imgRoi = imgInput(cv::Rect(8, 90, 26, 46));
imgInput.copyTo(imgRoi);
cv::imwrite(outputFile.GetString(), imgRoi);
}
}
}
Any help or suggestion?
You just don't need to call copyTo:
void SaveROICPP(const CStringA& inputFile, const CStringA& outputFile)
{
if (ATLPath::FileExists(inputFile))
{
cv::Mat imgInput = cv::imread(inputFile.GetString());
if (imgInput.data != NULL)
{
cv::Mat imgRoi = imgInput(cv::Rect(8, 90, 26, 46));
cv::imwrite(outputFile.GetString(), imgRoi);
}
}
}
In your version copyTo sees that imgInput is bigger than imgRoi and reallocates a new full-size matrix to make the copy. imgRoi is already a sub-image, and you can simply pass it to any OpenCV function.
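If you later need the ROI as an independent copy (for example, to modify it without affecting imgInput), one option is cv::Mat::clone(); a minimal sketch:

cv::Mat imgRoi = imgInput(cv::Rect(8, 90, 26, 46)).clone(); // deep copy, detached from imgInput
cv::imwrite(outputFile.GetString(), imgRoi);

For simply writing the ROI to disk, as above, the clone is unnecessary.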
Here is some tested code for blending, cropping and saving new images.
You crop and then save that region in a new file.
#include <cv.h>
#include <highgui.h>
#include <math.h>
// alphablend <imageA> <image B> <x> <y> <width> <height>
// <alpha> <beta>
IplImage* crop( IplImage* src, CvRect roi){
// Must have dimensions of output image
IplImage* cropped = cvCreateImage( cvSize(roi.width,roi.height), src->depth, src->nChannels );
// Say what the source region is
cvSetImageROI( src, roi );
// Do the copy
cvCopy( src, cropped );
cvResetImageROI( src );
cvNamedWindow( "check", 1 );
cvShowImage( "check", cropped );
cvSaveImage ("style.jpg" , cropped);
return cropped;
}
int main(int argc, char** argv){
IplImage *src1, *src2;
CvRect myRect;
// IplImage* cropped ;
src1=cvLoadImage(argv[1],1);
src2=cvLoadImage(argv[2],1);
{
int x = atoi(argv[3]);
int y = atoi(argv[4]);
int width = atoi(argv[5]);
int height = atoi(argv[6]);
double alpha = (double)atof(argv[7]);
double beta = (double)atof(argv[8]);
cvSetImageROI(src1, cvRect(x,y,width,height));
cvSetImageROI(src2, cvRect(100,200,width,height));
myRect = cvRect(x,y,width,height) ;
cvAddWeighted(src1, alpha, src2, beta,0.0,src1);
cvResetImageROI(src1);
crop (src1 , myRect);
cvNamedWindow( "Alpha_blend", 1 );
cvShowImage( "Alpha_blend", src1 );
cvWaitKey(0);
}
return 0;
}
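For reference, the program takes the arguments listed in the comment at the top; an example invocation (the file names here are placeholders):

alphablend imageA.jpg imageB.jpg 50 60 200 150 0.6 0.4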
