OpenCV Object detection tutorial - opencv

I'm new to the OpenCV platform. I've installed it and successfully run some of the built-in tutorial codes, but I'm facing issues with the codes involving Haar cascades. The code builds perfectly and the webcam light comes on and appears to work, but the program only shows a blank window (see the attached image).
Can anyone suggest how to rectify the problem?
(I'm using OpenCV version 3.0.0 and Visual Studio 2013.)
#include "stdafx.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay( Mat frame );
/** Global variables */
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
String window_name = "Capture - Face detection";
/** #function main */
int main( void )
{
VideoCapture capture;
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; };
//-- 2. Read the video stream
capture.open( -1 );
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
while ( capture.read(frame) )
{
if( frame.empty() )
{
printf(" --(!) No captured frame -- Break!");
break;
}
//-- 3. Apply the classifier to the frame
detectAndDisplay( frame );
int c = waitKey(10);
if( (char)c == 27 ) { break; } // escape
}
return 0;
}
/** #function detectAndDisplay */
void detectAndDisplay( Mat frame )
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
for ( size_t i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
for ( size_t j = 0; j < eyes.size(); j++ )
{
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
imshow( window_name, frame );
}

I think it is a known bug. Could you try my updated code? (I pointed out the changes with comments.)
#include "opencv2/objdetect.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <opencv2/core/ocl.hpp> // additional header ************************
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay( Mat frame );
/** Global variables */
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
String window_name = "Capture - Face detection";
/** #function main */
int main( void )
{
ocl::setUseOpenCL(false); // disable OpenCL *******************
VideoCapture capture;
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; };
//-- 2. Read the video stream
capture.open( 0 ); // Open webcam 0 ***************************
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
while ( capture.read(frame) )
{
if( frame.empty() )
{
printf(" --(!) No captured frame -- Break!");
break;
}
//-- 3. Apply the classifier to the frame
detectAndDisplay( frame );
int c = waitKey(10);
if( (char)c == 27 ) { break; } // escape
}
return 0;
}
/** #function detectAndDisplay */
void detectAndDisplay( Mat frame )
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
for ( size_t i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
for ( size_t j = 0; j < eyes.size(); j++ )
{
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
imshow( window_name, frame );
}

Related

Accurate subpixel edge location in C (OPENCV)

I want to find subpixel edge locations, and I have researched this topic. I think the subpixel coordinates should be values such as 152.6, 49.3, ...
I found this page in the OpenCV documentation: http://docs.opencv.org/2.4/modules/imgproc/doc/feature_detection.html?highlight=cornersubpix#cornersubpix
And I tried this code:
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src, src_gray;
int maxCorners = 10;
int maxTrackbar = 50;
RNG rng(11111);
char* source_window = "Image";
void goodFeaturesToTrack_Demo( int, void* );
int main( int argc, char** argv )
{
src = imread( "a.png", 1 );
cvtColor( src, src_gray, CV_BGR2GRAY );
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo);
imshow( source_window, src );
goodFeaturesToTrack_Demo( 0, 0 );
waitKey(0);
return(0);
}
void goodFeaturesToTrack_Demo( int, void* )
{
if( maxCorners < 1 )
{ maxCorners = 1; }
vector<Point2f> corners;
double qualityLevel = 0.01;
double minDistance = 10;
int blockSize = 3;
bool useHarrisDetector = false;
double k = 0.04;
Mat copy;
copy = src.clone();
goodFeaturesToTrack( src_gray,corners,maxCorners,qualityLevel,minDistance,Mat(),blockSize,useHarrisDetector,k );
cout<<"** Number of corners detected: "<<corners.size()<<endl;
int r = 4;
for( int i = 0; i < corners.size(); i++ )
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255),rng.uniform(0,255)), -1, 8, 0 ); }
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, copy );
Size winSize = Size( 10, 10 );
Size zeroZone = Size( -1, -1 );
TermCriteria criteria = TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001 );
cornerSubPix( src_gray, corners, winSize, zeroZone, criteria );
for( int i = 0; i < corners.size(); i++ )
{ cout<<" -- Refined Corner ["<<i<<"] ("<<corners[i].x<<","<<corners[i].y<<")"<<endl; }
}
But I have this result:
The code finds only the corners' subpixel locations; I want to find the edges' subpixel locations.
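This question does not have an answer in the thread, but one common approach (my own sketch, not from the thread) is to take the pixel-accurate edge map from Canny and refine each edge pixel by fitting a parabola to the gradient magnitude sampled one step behind and one step ahead of it along the gradient direction; the parabola's peak gives the fractional offset. A minimal sketch, assuming a grayscale test image a.png:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <cmath>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;
// Bilinear sample of a CV_32F image at a non-integer location.
static float sampleBilinear( const Mat& img, Point2f p )
{
    int x = (int)p.x, y = (int)p.y;
    float dx = p.x - x, dy = p.y - y;
    return img.at<float>(y, x)         * (1 - dx) * (1 - dy)
         + img.at<float>(y, x + 1)     * dx       * (1 - dy)
         + img.at<float>(y + 1, x)     * (1 - dx) * dy
         + img.at<float>(y + 1, x + 1) * dx       * dy;
}
int main()
{
    Mat gray = imread( "a.png", 0 );                  // hypothetical grayscale input
    if( gray.empty() ) { cout << "Error reading image" << endl; return -1; }
    Mat dx, dy, mag, edges;
    Sobel( gray, dx, CV_32F, 1, 0, 3 );
    Sobel( gray, dy, CV_32F, 0, 1, 3 );
    magnitude( dx, dy, mag );
    Canny( gray, edges, 50, 150 );                    // pixel-accurate edge map
    vector<Point2f> subpix;
    for( int y = 2; y < gray.rows - 2; y++ )
        for( int x = 2; x < gray.cols - 2; x++ )
        {
            if( !edges.at<uchar>(y, x) ) continue;
            float gx = dx.at<float>(y, x), gy = dy.at<float>(y, x);
            float g  = mag.at<float>(y, x);
            if( g < 1e-3f ) continue;
            Point2f n( gx / g, gy / g );              // unit gradient direction
            float gm = sampleBilinear( mag, Point2f(x - n.x, y - n.y) );
            float gp = sampleBilinear( mag, Point2f(x + n.x, y + n.y) );
            float denom = gm - 2 * g + gp;
            if( fabs(denom) < 1e-6f ) continue;
            float offset = 0.5f * (gm - gp) / denom;  // parabola peak, roughly in [-0.5, 0.5]
            subpix.push_back( Point2f(x + offset * n.x, y + offset * n.y) );
        }
    cout << "** Number of subpixel edge points: " << subpix.size() << endl;
    for( size_t i = 0; i < subpix.size() && i < 5; i++ )
        cout << " -- Edge [" << i << "] (" << subpix[i].x << "," << subpix[i].y << ")" << endl;
    return 0;
}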

opencv-face detection-save the video

I have code for detecting a face. How do I save the video that is being captured? I'm posting the code for face detection below.
Where do I insert the code which saves the video?
This code works for detecting a face on Ubuntu with OpenCV. Please do help!
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
VideoCapture cap;
cv::VideoWriter output_cap("/home/unifyadmin/Documents/MyVideo.avi",
input_cap.get(CV_CAP_PROP_FOURCC),
input_cap.get(CV_CAP_PROP_FPS),
cv::Size(input_cap.get(CV_CAP_PROP_FRAME_WIDTH),
input_cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
if (!output_cap.isOpened())
{
std::cout << "!!! Output video could not be opened" << std::endl;
return;
}
// This part would be similar to your imshow part of your code
while (true)
{
if (!input_cap.read(frame))
break;
output_cap.write(frame);
}
/** Function Headers */
void detectAndDisplay( Mat frame );
/** Global variables */
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
/** #function main */
int main( int argc, const char** argv )
{
CvCapture* capture;
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( 0 );
if( capture )
{
while( true )
{
frame = cvQueryFrame( capture );
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
{ detectAndDisplay( frame ); }
else
{ printf(" --(!) No captured frame -- Break!"); break; }
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
}
}
return 0;
}
/** #function detectAndDisplay */
void detectAndDisplay( Mat frame )
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t j = 0; j < eyes.size(); j++ )
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
imshow( window_name, frame );
}
There are three library methods that you require:
VideoWriter::VideoWriter(const string& filename, int fourcc, double fps, Size frameSize, bool isColor=true)
VideoWriter::open(const string& filename, int fourcc, double fps, Size frameSize, bool isColor=true)
void VideoWriter::write(const Mat& image)
Here is a short example on how to use them:
// Do this near the start of your code
cv::VideoWriter output_cap("C:/MyVideo.avi",
input_cap.get(CV_CAP_PROP_FOURCC),
input_cap.get(CV_CAP_PROP_FPS),
cv::Size(input_cap.get(CV_CAP_PROP_FRAME_WIDTH),
input_cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
if (!output_cap.isOpened())
{
std::cout << "!!! Output video could not be opened" << std::endl;
return;
}
// This part would be similar to your imshow part of your code
while (true)
{
if (!input_cap.read(frame))
break;
output_cap.write(frame);
}
Edit - my last contribution to this question:
You need to move the creation of these objects into the main function, including the VideoWriter.
I'm not sure how any of this code even works:
VideoCapture cap;
cv::VideoWriter output_cap("/home/unifyadmin/Documents/MyVideo.avi",
input_cap.get(CV_CAP_PROP_FOURCC),
input_cap.get(CV_CAP_PROP_FPS),
cv::Size(input_cap.get(CV_CAP_PROP_FRAME_WIDTH),
input_cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
if (!output_cap.isOpened())
{
std::cout << "!!! Output video could not be opened" << std::endl;
return;
}
// This part would be similar to your imshow part of your code
while (true)
{
if (!input_cap.read(frame))
break;
output_cap.write(frame);
}
You need to think about what you are actually coding and where you are writing code. Don't just take examples from the internet and try and stick them together.
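For reference, here is a minimal sketch of that restructuring, with both the VideoCapture and the VideoWriter created inside main. The output path is the one from the question; the MJPG codec and the 25 fps fallback are my own assumptions, since webcams commonly report 0 for CV_CAP_PROP_FPS and for the input FOURCC:
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
int main()
{
    cv::VideoCapture input_cap(0);                    // webcam 0
    if( !input_cap.isOpened() )
    { std::cout << "!!! Input video could not be opened" << std::endl; return -1; }
    double fps = input_cap.get(CV_CAP_PROP_FPS);
    if( fps <= 0 ) fps = 25.0;                        // assumption: fall back to 25 fps
    cv::VideoWriter output_cap("/home/unifyadmin/Documents/MyVideo.avi",
                               CV_FOURCC('M','J','P','G'),   // assumed codec
                               fps,
                               cv::Size((int)input_cap.get(CV_CAP_PROP_FRAME_WIDTH),
                                        (int)input_cap.get(CV_CAP_PROP_FRAME_HEIGHT)));
    if( !output_cap.isOpened() )
    { std::cout << "!!! Output video could not be opened" << std::endl; return -1; }
    cv::Mat frame;
    while( input_cap.read(frame) )
    {
        // detectAndDisplay(frame) from the question would be called here,
        // so the saved frames include the drawn ellipses and circles.
        output_cap.write(frame);
        cv::imshow("Capture - Face detection", frame);
        if( (char)cv::waitKey(10) == 27 ) break;      // ESC to stop recording
    }
    return 0;
}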

Opencv Surf Bug error

Is anyone familiar with this error? I tested a SURF descriptor in real time and it worked well, but after a few seconds it crashes and I get this error.
It seems related to frames where no points are detected. I ran my code again with the detected object in view for more than 2 minutes and there was still no error, but when I removed the object so that no points were detected, it crashed again after about 40 seconds.
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/legacy/legacy.hpp"
using namespace cv;
using namespace std;
char key = 'a';
int framecount = 0;
SurfFeatureDetector detector(1000);
SurfDescriptorExtractor extractor;
FlannBasedMatcher matcher;
Mat frame, des_object, image;
Mat des_image, img_matches, H;
std::vector<KeyPoint> kp_object;
std::vector<Point2f> obj_corners(4);
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
int main()
{
//reference image
Mat object = imread("D:/milo.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
//compute detectors and descriptors of reference image
detector.detect( object, kp_object );
extractor.compute( object, kp_object, des_object );
//create video capture object
CvCapture* capture = cvCaptureFromCAM(0);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 270);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 190);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
//while loop for real time detection
while (key != 27)
{
Mat frame;
frame = cvQueryFrame(capture);
if (framecount < 5)
{
framecount++;
continue;
}
Mat des_image, img_matches;
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;
cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);
int goodMatchesCounter =0;
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if(((int)matches[i].size()<=2 && (int)matches[i].size()>0) && (matches[i][0].distance<0.6*(matches[i][1].distance)))
{
// good_matches.push_back(matches[i][0]);
obj.push_back( kp_object[ matches[i][0].queryIdx ].pt );
scene.push_back( kp_image[ matches[i][0].trainIdx ].pt );
goodMatchesCounter++;
}
}
//Draw only "good" matches
// drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
if (goodMatchesCounter >= 4)
{
H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
//Draw lines between the corners (the mapped object in the scene image )
line( image, scene_corners[0], scene_corners[1], Scalar( 0, 0, 0), 4 );
line( image, scene_corners[1], scene_corners[2], Scalar( 0, 0, 0), 4 );
line( image, scene_corners[2], scene_corners[3], Scalar( 0, 0, 0), 4 );
line( image, scene_corners[3], scene_corners[0], Scalar( 0, 0, 0), 4 );
}
//Show detected matches
imshow( "Good Matches", image );
key = waitKey(1);
}
return 0;
}
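No answer is recorded in this thread, but since the crash coincides with frames where nothing is detected, a plausible suspect is calling knnMatch (with k=2) on an empty or single-row descriptor matrix for the frame. A small self-contained sketch of the guard I would add inside the capture loop; this is my own suggestion, not a confirmed fix:
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;
int main()
{
    Mat des_object(10, 64, CV_32F);                  // stand-in for the reference descriptors
    randu(des_object, Scalar::all(0), Scalar::all(1));
    Mat des_image;                                   // empty: the frame produced no keypoints
    FlannBasedMatcher matcher;
    vector<vector<DMatch> > matches;
    // Guard: skip matching (and everything that depends on it) when the frame
    // has fewer than 2 descriptors, instead of letting knnMatch blow up.
    if( des_image.empty() || des_image.rows < 2 )
    {
        cout << "No descriptors in this frame -- skipping matching" << endl;
    }
    else
    {
        matcher.knnMatch(des_object, des_image, matches, 2);
        cout << "matches: " << matches.size() << endl;
    }
    return 0;
}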

XML files for Haar Cascades not loading in Visual Studio 2010 using OpenCV 2.4.3

I am trying to run this simple face detection program that uses Haar cascades in Visual Studio 2010 using OpenCV 2.4.3. Everything compiles fine, but it seems that the XML files for the Haar cascades are not loading. I did a lot of searching on Google but couldn't find a solution that works. I have tried every possible way to make them load: I gave absolute paths, I copied them into the project's working directory, and I used double backslashes as separators in the absolute paths, but they just don't load. I even downloaded them from other sources, thinking the ones included with the library might be invalid or corrupt, but no success. I know I am making some very basic mistake here. Here is the code I am using. Please help me out. Thank you.
#include "stdio.h"
#include "cv.h"
#include "highgui.h"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
void detectAndDisplay( Mat frame );
/** Global variables */
String face_cascade_name = "E:\Downloads\IDM Downloads\opencv\data\haarcascades\haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "E:\Downloads\IDM Downloads\opencv\data\haarcascades\haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
CascadeClassifier face_cascade;
/** #function main */
int main( int argc, const char** argv )
{
// printf();
CvCapture* capture;
Mat frame;
// face_cascade
//-- 1. Load the cascades
if(!face_cascade.load( face_cascade_name ) ){
printf("--(!)Error loading\n");
return -1;
}
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; }
//-- 2. Read the video stream
capture = cvCaptureFromCAM( 0 );
if( capture )
{
while( true )
{
frame = cvQueryFrame( capture );
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
{ detectAndDisplay( frame ); }
else
{ printf(" --(!) No captured frame -- Break!"); break; }
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
}
}
return 0;
}
/** #function detectAndDisplay */
void detectAndDisplay( Mat frame )
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( int i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( int j = 0; j < eyes.size(); j++ )
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
imshow( window_name, frame );
}
The output command line window just prints:
--(!)Error loading
Press any key to continue . . .
Which means the xml files are not loading.
Here is my project directory listing. I have put the XML files in every folder but still no success.
OpenCVTest_2
|___Debug
|___ipch
|___opencvtest_2-365b6930
|___OpenCVTest_2
|___Debug
|___Release
|___Release
It looks like you forgot to escape your backslashes:
/** Global variables */
String face_cascade_name = "E:\\Downloads\\IDM Downloads\\opencv\\data\\haarcascades\\haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "E:\\Downloads\\IDM Downloads\\opencv\\data\\haarcascades\\haarcascade_eye_tree_eyeglasses.xml";
EDIT - sorry, I missed that in your original question - in any case the backslashes do need to be escaped, but it may be that the path is too long or must not contain spaces. So copy the xml files to the same folder as your .exe (and maybe your project folder too) and then use this:
/** Global variables */
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
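A quick way to tell whether the problem is the path or the file itself is to check that the XML can be opened from the program's working directory before handing it to CascadeClassifier::load. This is my own addition, not part of the original answer:
#include "opencv2/objdetect/objdetect.hpp"
#include <fstream>
#include <iostream>
int main()
{
    std::string face_cascade_name = "haarcascade_frontalface_alt.xml";
    // If this probe fails, the file is not in the current working directory.
    // Note: when launched from the Visual Studio debugger the working directory
    // is typically the project folder, not the Debug/Release folder with the .exe.
    std::ifstream probe(face_cascade_name.c_str());
    if( !probe.good() )
    {
        std::cout << "Cannot open " << face_cascade_name << " from the working directory" << std::endl;
        return -1;
    }
    cv::CascadeClassifier face_cascade;
    if( !face_cascade.load(face_cascade_name) )
    {
        std::cout << "File found but the cascade failed to load (corrupt xml?)" << std::endl;
        return -1;
    }
    std::cout << "Cascade loaded successfully" << std::endl;
    return 0;
}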
Probably it is a little bit late for the answer, but maybe someone will search for it and find it (as I have just done...).
I was having the same problem as Cheeta.
Trying to load with
String xmlFaceFilename = "haarcascade_frontalface_alt2.xml";
//or giving the exact path
String xmlFaceFilename = "C:\\...\\haarcascade_frontalface_alt2.xml";
doesn't work for me either.
I could make it work with:
String xmlFaceFilename = "..\\Debug\\haarcascade_frontalface_alt2.xml";
with the xml file copied into the Debug folder (where it can be found the .exe file).
Hope it could help someone! :)
Probably try this
/** Global variables */
String face_cascade_name ="E:/Downloads/IDMDownloads/opencv/data/haarcascades/haarcascade_frontalface_alt.xml";
String eyes_cascade_name ="E:/Downloads/IDMDownloads/opencv/data/haarcascades/haarcascade_eye_tree_eyeglasses.xml";

How to increase haar detector's window size in OpenCV

I am using the code available on this website: http://nashruddin.com/OpenCV_Face_Detection to do face detection.
I would like to increase the size of the detected face region, but I am not sure how to do it. I need some help with it.
The code I am using is this:
//
#include "stdafx.h"
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
CvHaarClassifierCascade *cascade;
CvMemStorage *storage;
void detectFaces( IplImage *img );
int main( int argc, char** argv )
{
CvCapture *capture;
IplImage *frame;
int key = 0;
char *filename = "C:/OpenCV2.1/data/haarcascades/haarcascade_frontalface_alt.xml";
cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 );
storage = cvCreateMemStorage( 0 );
capture = cvCaptureFromCAM( 0 );
assert( cascade && storage && capture );
cvNamedWindow( "video", 1 );
while( key != 'q' ) {
frame = cvQueryFrame( capture );
if( !frame ) {
fprintf( stderr, "Cannot query frame!\n" );
break;
}
cvFlip( frame, frame, -1 );
frame->origin = 0;
detectFaces( frame );
key = cvWaitKey( 10 );
}
cvReleaseCapture( &capture );
cvDestroyWindow( "video" );
cvReleaseHaarClassifierCascade( &cascade );
cvReleaseMemStorage( &storage );
return 0;
}
void detectFaces( IplImage *img )
{
int i;
CvSeq *faces = cvHaarDetectObjects(
img,
cascade,
storage,
1.1,
3,
0 /*CV_HAAR_DO_CANNY_PRUNNING*/,
cvSize( 40, 40 ) );
for( i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
CvRect *r = ( CvRect* )cvGetSeqElem( faces, i );
cvRectangle( img,
cvPoint( r->x, r->y ),
cvPoint( r->x + r->width, r->y + r->height ),
CV_RGB( 255, 0, 0 ), 1, 8, 0 );
}
cvShowImage( "video", img );
}
This increases the size of the rectangle around the face. If you meant increasing the haar detector's window size, please update your question.
int padding_width = 30; // pixels
int padding_height = 30; // pixels
for( i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
CvRect *r = ( CvRect* )cvGetSeqElem( faces, i );
// Yes yes, all of this could be written much more compactly.
// It was written like this for clarity.
int topleft_x = r->x - (padding_width / 2);
int topleft_y = r->y - (padding_height / 2);
if (topleft_x < 0)
topleft_x = 0;
if (topleft_y < 0)
topleft_y = 0;
int bottomright_x = r->x + r->width + (padding_width / 2);
int bottomright_y = r->y + r->height + (padding_height / 2);
if (bottomright_x >= img->width)
bottomright_x = img->width - 1;
if (bottomright_y >= img->height)
bottomright_y = img->height - 1;
cvRectangle( img,
cvPoint(topleft_x, topleft_y),
cvPoint(bottomright_x, bottomright_y),
CV_RGB( 255, 0, 0 ), 1, 8, 0 );
}
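If the question was really about the detector's window size rather than the drawn rectangle, the usual knob in this old C API is the minSize argument already passed to cvHaarDetectObjects (cvSize(40, 40) in the question's code): raising it makes the detector ignore faces smaller than that window. A sketch of just that call, with placeholder file names:
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
int main()
{
    // Placeholder paths: same cascade as the question, hypothetical still image.
    CvHaarClassifierCascade *cascade = (CvHaarClassifierCascade*)cvLoad(
        "C:/OpenCV2.1/data/haarcascades/haarcascade_frontalface_alt.xml", 0, 0, 0 );
    CvMemStorage *storage = cvCreateMemStorage( 0 );
    IplImage *img = cvLoadImage( "face.jpg", 1 );
    if( !cascade || !storage || !img ) { fprintf( stderr, "Load failed\n" ); return -1; }
    CvSeq *faces = cvHaarDetectObjects(
        img, cascade, storage,
        1.1,                 /* scale factor between detection window sizes */
        3,                   /* minimum neighbors */
        0,                   /* flags */
        cvSize( 80, 80 ) );  /* minimum window: was cvSize( 40, 40 ) in the question */
    printf( "Faces found: %d\n", faces ? faces->total : 0 );
    cvReleaseImage( &img );
    cvReleaseHaarClassifierCascade( &cascade );
    cvReleaseMemStorage( &storage );
    return 0;
}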

Resources