Real-time face detection in OpenCV

I am trying to write some simple real-time face detection code, but somehow it doesn't work. (I tried the face detection code on a still image and it works, but with the code below I get a grey image on screen and the code fails.)
Here is the code I have tried (it prints 'face detected!' once to the output window):
CvHaarClassifierCascade *cascade;
CvMemStorage *storage;
char *face_cascade = "haarcascade_frontalface_alt2.xml";
CvRect *r;
const CvArr *img_size;
IplImage *grayscale;

void detectFacialFeatures( IplImage *img )
{
    grayscale = cvCreateImage(cvGetSize(img), 8, 1);
    cvCvtColor(img, grayscale, CV_BGR2GRAY);
    CvMemStorage* storage = cvCreateMemStorage(0);
    cvClearMemStorage( storage );
    cvEqualizeHist(grayscale, grayscale);
    cascade = ( CvHaarClassifierCascade* )cvLoad( face_cascade, 0, 0, 0 );
    CvSeq* faces = cvHaarDetectObjects(grayscale, cascade, storage, 1.1, 3, CV_HAAR_DO_CANNY_PRUNING, cvSize( 50, 50 ) );
    if(faces)
    {
        printf("face detected!");
        r = ( CvRect* )cvGetSeqElem( faces, 0 );
        cvRectangle( img, cvPoint( r->x, r->y ), cvPoint( r->x + r->width, r->y + r->height ), CV_RGB( 255, 0, 0 ), 1, 8, 0 );
    }
}

int _tmain(int argc, _TCHAR* argv[])
{
    int c;
    IplImage* color_img;
    CvCapture* cv_cap = cvCreateCameraCapture(0);
    cvSetCaptureProperty(cv_cap, CV_CAP_PROP_FRAME_WIDTH, 640);
    cvSetCaptureProperty(cv_cap, CV_CAP_PROP_FRAME_HEIGHT, 480);
    cvNamedWindow("Video", 1); // create window
    for(;;) {
        color_img = cvQueryFrame(cv_cap); // get frame
        if(color_img == 0)
            break;
        cvFlip(color_img, 0, 1); // mirror image
        detectFacialFeatures(color_img);
        cvShowImage("Video", color_img); // show frame
        c = cvWaitKey(10); // wait 10 ms or for key stroke
        if(c == 27)
            break; // if ESC, break and quit
    }
    /* clean up */
    cvReleaseCapture( &cv_cap );
    cvDestroyWindow("Video");
}

Try without calling cvFlip and cvEqualizeHist.
Look at the result of each operation (cvFlip, cvCvtColor, cvEqualizeHist) by passing it to cvShowImage; it's possible that one of these operations is what produces the grey image.
You also don't have to load the Haar classifier each time you try to find a face; load it once at the beginning. Operations on files are slow, so this should make your code faster.
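A minimal sketch of that restructuring, keeping the question's C API (the faces->total check is an extra precaution not mentioned above, since cvHaarDetectObjects normally returns an empty sequence rather than NULL when nothing is found):

CvHaarClassifierCascade *cascade;
CvMemStorage *storage;

void detectFacialFeatures( IplImage *img )
{
    IplImage *grayscale = cvCreateImage( cvGetSize( img ), 8, 1 );
    cvCvtColor( img, grayscale, CV_BGR2GRAY );
    cvEqualizeHist( grayscale, grayscale );
    cvClearMemStorage( storage );  /* reuse the storage, don't reallocate it */
    CvSeq *faces = cvHaarDetectObjects( grayscale, cascade, storage, 1.1, 3,
                                        CV_HAAR_DO_CANNY_PRUNING, cvSize( 50, 50 ) );
    if( faces && faces->total > 0 )
    {
        CvRect *r = ( CvRect* )cvGetSeqElem( faces, 0 );
        cvRectangle( img, cvPoint( r->x, r->y ),
                     cvPoint( r->x + r->width, r->y + r->height ),
                     CV_RGB( 255, 0, 0 ), 1, 8, 0 );
    }
    cvReleaseImage( &grayscale );  /* the original leaked this image every frame */
}

/* In main, once, before the capture loop: */
cascade = ( CvHaarClassifierCascade* )cvLoad( "haarcascade_frontalface_alt2.xml", 0, 0, 0 );
storage = cvCreateMemStorage( 0 );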

Related

Displaying an outline on top of an image

I am trying to draw a face outline and overlay it on top of a webcam image.
But towards the end, I think I am using addWeighted the wrong way, because my program crashes.
Could you please help me understand what I am doing wrong with imshow and addWeighted?
int main( int argc, const char** argv )
{
    VideoCapture camera;
    camera.open(0);
    if( !camera.isOpened() )
    {
        cerr << "Could not access the camera!" << endl;
        return 1;
    }
    while( true )
    {
        Mat cameraFrame;
        camera >> cameraFrame;
        if( cameraFrame.empty() )
        {
            cerr << "Could not grab a camera frame!" << endl;
            return 1;
        }
        Mat gray;
        cvtColor( cameraFrame, gray, CV_BGR2GRAY );
        Size size = cameraFrame.size();
        Mat faceOutline = Mat::zeros( size, CV_8UC3 ); // Draw a black canvas.
        Scalar color = CV_RGB( 255, 255, 0 ); // Yellow
        int thickness = 4;
        ellipse( faceOutline, Point(320, 240), Size(320, 240), 0, 0, 360, color, thickness, CV_AA );
        addWeighted( gray, 1.0, faceOutline, 0.7, 0, gray, CV_8UC3 );
        imshow( "final image", gray );
        char keypress = waitKey(20);
        if( keypress == 27 ) break;
    }
}
This works fine:
int main( int argc, const char** argv )
{
    VideoCapture camera;
    camera.open(0);
    if( !camera.isOpened() )
    {
        cerr << "Could not access the camera!" << endl;
        return 1;
    }
    while( true )
    {
        Mat cameraFrame;
        camera >> cameraFrame;
        if( cameraFrame.empty() )
        {
            cerr << "Could not grab a camera frame!" << endl;
            return 1;
        }
        Mat gray;
        cvtColor( cameraFrame, gray, cv::COLOR_BGR2GRAY );
        Size size = cameraFrame.size();
        Mat faceOutline = Mat::zeros( size, CV_8UC3 ); // Draw a black canvas.
        Scalar color = Scalar( 255, 255, 0 ); // Yellow
        int thickness = 4;
        cvtColor( gray, gray, cv::COLOR_GRAY2BGR );
        ellipse( faceOutline, Point(320, 240), Size(320, 240), 0, 0, 360, color, thickness );
        addWeighted( gray, 1.0, faceOutline, 0.7, 0, gray, CV_8UC3 );
        imshow( "final image", gray );
        char keypress = waitKey(20);
        if( keypress == 27 ) break;
    }
}
Why not just draw the ellipse into cameraFrame directly?
ellipse( cameraFrame, Point(320, 240), Size(320, 240), 0, 0, 360, color, thickness, CV_AA );
And if you want to use addWeighted, note that:
the type of both input images has to match (you can't add a color image to a grayscale one);
the weights have to sum to 1.0;
the last argument is a depth, not a type (i.e. you could convert to float here, but you can't change the number of channels).
addWeighted( cameraFrame, 0.7, faceOutline, 0.3, 0, cameraFrame );
I suppose your gray image is single-channel while your faceOutline image has 3 channels. From the documentation:
src2 – second input array of the same size and channel number as src1.
Try mixChannels to copy a single channel into a multichannel image.
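For reference, here is a minimal self-contained sketch of the mixChannels suggestion (the image size and the choice of destination channel are just illustrative): it copies a single-channel image into one channel of a 3-channel image, so that both inputs to addWeighted have the same size and channel count.

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat gray(480, 640, CV_8UC1, cv::Scalar(128));       // stand-in for the grayscale frame
    cv::Mat bgr(gray.size(), CV_8UC3, cv::Scalar::all(0));  // 3-channel destination
    int from_to[] = { 0, 1 };                               // source channel 0 -> destination channel 1 (green)
    cv::mixChannels(&gray, 1, &bgr, 1, from_to, 1);

    cv::Mat faceOutline = cv::Mat::zeros(gray.size(), CV_8UC3);
    cv::ellipse(faceOutline, cv::Point(320, 240), cv::Size(320, 240),
                0, 0, 360, cv::Scalar(0, 255, 255), 4);
    cv::addWeighted(bgr, 0.7, faceOutline, 0.3, 0, bgr);    // channel counts now match
    cv::imshow("blend", bgr);
    cv::waitKey(0);
    return 0;
}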

How to get better results with the OpenCV face recognition module

I'm trying to use OpenCV's face recognition module to recognize two subjects from a video. I cropped 30 face images of the first subject and 20 face images of the second subject from the video, and I use these as my training set.
I've tested all three approaches (Eigenfaces, Fisherfaces and LBP histograms), but I'm not getting good results with any of them. Sometimes the first subject is classified as the second subject and vice versa, sometimes false detections are classified as one of the two subjects, and sometimes other people in the video are classified as one of the two subjects.
How can I improve performance? Would enlarging the training set help? Are there any other packages I can consider that perform face recognition in C++? I think it should be an easy task, as I'm trying to recognize only two different subjects.
Here is my code (I'm using OpenCV 2.4.7 on Windows 8 with VS2012):
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/contrib/contrib.hpp"
#include <iostream>
#include <stdio.h>
#include <fstream>
#include <sstream>
#define EIGEN 0
#define FISHER 0
#define LBPH 1;
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay( Mat frame , int i,Ptr<FaceRecognizer> model);
static Mat toGrayscale(InputArray _src) {
Mat src = _src.getMat();
// only allow one channel
if(src.channels() != 1) {
CV_Error(CV_StsBadArg, "Only Matrices with one channel are supported");
}
// create and return normalized image
Mat dst;
cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
return dst;
}
static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
std::ifstream file(filename.c_str(), ifstream::in);
if (!file) {
string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
}
string line, path, classlabel;
while (getline(file, line)) {
stringstream liness(line);
getline(liness, path, separator);
getline(liness, classlabel);
if(!path.empty() && !classlabel.empty()) {
images.push_back(imread(path, 0));
labels.push_back(atoi(classlabel.c_str()));
}
}
}
/** Global variables */
String face_cascade_name = "C:\\OIM\\code\\OIM2 - face detection\\Debug\\haarcascade_frontalface_alt.xml";
//String face_cascade_name = "C:\\OIM\\code\\OIM2 - face detection\\Debug\\NewCascade.xml";
//String face_cascade_name = "C:\\OIM\\code\\OIM2 - face detection\\Debug\\haarcascade_eye_tree_eyeglasses.xml";
String eyes_cascade_name = "C:\\OIM\\code\\OIM2 - face detection\\Debug\\haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
/** #function main */
int main( int argc, const char** argv )
{
string fn_csv = "C:\\OIM\\faces_org.csv";
// These vectors hold the images and corresponding labels.
vector<Mat> images;
vector<int> labels;
// Read in the data. This can fail if no valid
// input filename is given.
try {
read_csv(fn_csv, images, labels);
} catch (cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);
}
// Quit if there are not enough images for this demo.
if(images.size() <= 1) {
string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
CV_Error(CV_StsError, error_message);
}
// Get the height from the first image. We'll need this
// later in code to reshape the images to their original
// size:
int height = images[0].rows;
// The following lines create an Eigenfaces model for
// face recognition and train it with the images and
// labels read from the given CSV file.
// This here is a full PCA, if you just want to keep
// 10 principal components (read Eigenfaces), then call
// the factory method like this:
//
// cv::createEigenFaceRecognizer(10);
//
// If you want to create a FaceRecognizer with a
// confidennce threshold, call it with:
//
// cv::createEigenFaceRecognizer(10, 123.0);
//
//Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
#if EIGEN
Ptr<FaceRecognizer> model = createEigenFaceRecognizer(10,2000000000);
#elif FISHER
Ptr<FaceRecognizer> model = createFisherFaceRecognizer(0, 200000000);
#elif LBPH
Ptr<FaceRecognizer> model =createLBPHFaceRecognizer(1,8,8,8,200000000);
#endif
model->train(images, labels);
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
// Get the frame rate
bool stop(false);
int count=1;
char filename[512];
for (int i=1;i<=517;i++){
sprintf(filename,"C:\\OIM\\original_frames2\\image%d.jpg",i);
Mat frame=imread(filename);
detectAndDisplay(frame,i,model);
waitKey(0);
}
return 0;
}
/** #function detectAndDisplay */
void detectAndDisplay( Mat frame ,int i, Ptr<FaceRecognizer> model)
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
//face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 1, 0|CV_HAAR_SCALE_IMAGE, Size(10, 10) );
for( size_t i = 0; i < faces.size(); i++ )
{
Rect roi = Rect(faces[i].x,faces[i].y,faces[i].width,faces[i].height);
Mat face=frame_gray(roi);
resize(face,face,Size(200,200));
int predictedLabel = -1;
double confidence = 0.0;
model->predict(face, predictedLabel, confidence);
//imshow("gil",face);
//waitKey(0);
#if EIGEN
int M=10000;
#elif FISHER
int M=500;
#elif LBPH
int M=300;
#endif
Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
if ((predictedLabel==1)&& (confidence<M))
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 0, 0, 255 ), 4, 8, 0 );
if ((predictedLabel==0)&& (confidence<M))
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 0), 4, 8, 0 );
if (confidence>M)
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 0, 255, 0), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t j = 0; j < eyes.size(); j++ )
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
//circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
//imshow( window_name, frame );
char filename[512];
sprintf(filename,"C:\\OIM\\FaceRecognitionResults\\image%d.jpg",i);
imwrite(filename,frame);
}
Thanks in advance,
Gil.
First, as commented, increase the number of samples if possible, and include the variations (illumination, slight pose changes, etc.) that you expect to appear in the video. However, especially for Eigenfaces/Fisherfaces, piling on images will not by itself improve performance; sadly, the best number of training samples depends on your data.
The more important point is that the difficulty of the problem depends entirely on your video. If your video contains variations in illumination or pose, then you can't expect purely appearance-based methods (e.g. Eigenfaces) or a texture descriptor (LBP) to succeed. First, you should detect faces. Then:
You might want to estimate the face pose and warp it to frontal; look at Active Appearance Models and Active Shape Models.
Use histogram equalization to attenuate the illumination problem.
Fitting an ellipse to the detected face region will help against background noise.
Of course, many other methods are available in the literature; the steps above are implemented in OpenCV and commonly known.
Hope it helps.
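A minimal sketch of the equalization and ellipse-masking steps (preprocessFace is a hypothetical helper; the 200x200 crop size comes from the question's code, and the ellipse axes are illustrative, not the answerer's exact recipe):

#include <opencv2/opencv.hpp>

// Normalize a detected face crop before training/prediction.
// 'face' is assumed to be a grayscale ROI returned by the face detector.
cv::Mat preprocessFace(const cv::Mat& face)
{
    cv::Mat out;
    cv::resize(face, out, cv::Size(200, 200));   // same size for every sample
    cv::equalizeHist(out, out);                  // attenuate illumination changes

    // Keep only an elliptical region so background corners don't pollute the model.
    cv::Mat mask = cv::Mat::zeros(out.size(), CV_8UC1);
    cv::ellipse(mask, cv::Point(100, 100), cv::Size(85, 100),
                0, 0, 360, cv::Scalar(255), -1); // thickness -1 = filled
    cv::Mat masked = cv::Mat::zeros(out.size(), out.type());
    out.copyTo(masked, mask);
    return masked;
}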

using a custom haar cascade classifier

I have created a Haar cascade classifier for detecting a hand, with 1000 positive images and 2000 negative images. The XML file was created using convert_cascade.c from the OpenCV samples. Now I am using the following code for detection, but the assert statement gives the error shown below:
"assertion failed: cascade && storage && capture, line 21", which is the assertion call itself. I know that an assertion fails when the expression evaluates to zero. So, any idea what could be wrong with the classifier? Storage and capture should be working fine anyway.
#include <stdio.h>
#include "opencv/cv.h"
#include "opencv/highgui.h"

CvHaarClassifierCascade *cascade;
CvMemStorage *storage;

void detect( IplImage *img );

int main( )
{
    CvCapture *capture;
    IplImage *frame;
    int key;
    char *filename = "haar3.xml"; // name of my classifier

    cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 );
    storage = cvCreateMemStorage(0);
    capture = cvCaptureFromCAM(0);
    assert( cascade && storage && capture );

    cvNamedWindow("video", 1);
    while(1) {
        frame = cvQueryFrame( capture );
        detect(frame);
        key = cvWaitKey(50);
    }
    cvReleaseImage(&frame);
    cvReleaseCapture(&capture);
    cvDestroyWindow("video");
    cvReleaseHaarClassifierCascade(&cascade);
    cvReleaseMemStorage(&storage);
    return 0;
}

void detect(IplImage *img)
{
    int i;
    CvSeq *object = cvHaarDetectObjects(
        img,
        cascade,
        storage,
        1.5,              //-------------------SCALE FACTOR
        2,                //------------------MIN NEIGHBOURS
        1,                //----------------------
        // CV_HAAR_DO_CANNY_PRUNING,
        cvSize( 24, 24 ), // ------MINSIZE
        cvSize(640, 480) );//---------MAXSIZE
    for( i = 0; i < ( object ? object->total : 0 ); i++ )
    {
        CvRect *r = ( CvRect* )cvGetSeqElem( object, i );
        cvRectangle( img,
                     cvPoint( r->x, r->y ),
                     cvPoint( r->x + r->width, r->y + r->height ),
                     CV_RGB( 255, 0, 0 ), 2, 8, 0 );
        //printf("%d,%d\nnumber =%d\n", r->x, r->y, object->total);
    }
    cvShowImage( "video", img );
}
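Since the assertion combines three conditions, a first diagnostic step (just a sketch following the question's own reasoning, not a confirmed fix) is to test each pointer separately, so you can tell whether it is the cascade load, the storage allocation, or the camera that returns NULL:

cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 );
if( !cascade )
    fprintf( stderr, "cvLoad failed: check the path and format of %s\n", filename );
storage = cvCreateMemStorage(0);
if( !storage )
    fprintf( stderr, "cvCreateMemStorage failed\n" );
capture = cvCaptureFromCAM(0);
if( !capture )
    fprintf( stderr, "cvCaptureFromCAM failed: is a camera present at index 0?\n" );

If cvLoad is the call that fails, the most likely culprit is the classifier file itself (a wrong path, or an XML layout that cvLoad does not accept).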

opencv- vehicle tracking using optical flow

I have implemented optical flow to track vehicles on the road, and it turned out to be very slow.
my code uses the functions:
cvGoodFeaturesToTrack
cvFindCornerSubPix
cvCalcOpticalFlowPyrLK
How do I make this tracking fast and efficient?
My code is:
#include "highgui.h"
#include "cv.h"
#include "cxcore.h"
#include <iostream>
using namespace std;
const int MAX_CORNERS = 500;
int main()
{
CvCapture* capture=cvCreateFileCapture("E:\cam1.avi");
IplImage* img_A;// = cvLoadImage("image0.png", CV_LOAD_IMAGE_GRAYSCALE);
IplImage* img_B;// = cvLoadImage("image1.png", CV_LOAD_IMAGE_GRAYSCALE);
img_A=cvQueryFrame(capture);
IplImage* imgA = cvCreateImage( cvGetSize(img_A), 8, 1 );
IplImage* imgB = cvCreateImage( cvGetSize(img_A), 8, 1 );
cvNamedWindow( "ImageA", CV_WINDOW_AUTOSIZE );
cvNamedWindow( "ImageB", CV_WINDOW_AUTOSIZE );
cvNamedWindow( "LKpyr_OpticalFlow", CV_WINDOW_AUTOSIZE );
while(1)
{
int couter=0;
for(int k=0;k<20;k++)
{
img_B=cvQueryFrame(capture);
}
//cvCvtColor(imgA,imgA,CV_BGR2GRAY);
//cvCvtColor(imgB,imgB,CV_BGR2GRAY);
// Load two images and allocate other structures
/*IplImage* imgA = cvLoadImage("image0.png", CV_LOAD_IMAGE_GRAYSCALE);
IplImage* imgB = cvLoadImage("image1.png", CV_LOAD_IMAGE_GRAYSCALE);*/
CvSize img_sz = cvGetSize( img_A );
int win_size = 10;
IplImage* imgC = cvCreateImage( cvGetSize(img_A), 8, 1 );
cvZero(imgC);
// Get the features for tracking
IplImage* eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
IplImage* tmp_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
int corner_count = MAX_CORNERS;
CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
cvCvtColor(img_A,imgA,CV_BGR2GRAY);
cvCvtColor(img_B,imgB,CV_BGR2GRAY);
cvGoodFeaturesToTrack( imgA, eig_image, tmp_image, cornersA, &corner_count ,0.05, 5.0, 0, 3, 0, 0.04 );
cvFindCornerSubPix( imgA, cornersA, corner_count, cvSize( win_size, win_size ) ,cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03 ) );
// Call Lucas Kanade algorithm
char features_found[ MAX_CORNERS ];
float feature_errors[ MAX_CORNERS ];
CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );
IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
CvPoint2D32f* cornersB = new CvPoint2D32f[ MAX_CORNERS ];
/*int jk=0;
for(int i=0;i<imgA->width;i+=10)
{
for(int j=0;j<imgA->height;j+=10)
{
cornersA[jk].x=i;
cornersA[jk].y=j;
++jk;
}
}
*/
cvCalcOpticalFlowPyrLK( imgA, imgB, pyrA, pyrB, cornersA, cornersB, corner_count,
cvSize( win_size, win_size ), 5, features_found, feature_errors,
cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0 );
// Make an image of the results
for( int i=0; i < corner_count; i++ )
{
if( features_found[i]==0|| feature_errors[i]>550 )
{
//printf("Error is %f/n",feature_errors[i]);
continue;
}
//printf("Got it/n");
CvPoint p0 = cvPoint( cvRound( cornersA[i].x ), cvRound( cornersA[i].y ) );
CvPoint p1 = cvPoint( cvRound( cornersB[i].x ), cvRound( cornersB[i].y ) );
cvLine( imgC, p0, p1, CV_RGB(255,0,0), 2 );
cout<<p0.x<<" "<<p0.y<<endl;
}
cvShowImage( "LKpyr_OpticalFlow", imgC );
cvShowImage( "ImageA", imgA );
cvShowImage( "ImageB", imgB );
//cvCopyImage(imgB,imgA);
delete[] cornersA;
delete[] cornersB;
cvWaitKey(33);
}
return 0;
}
I might be going a bit over the line here, but I would suggest you check out OpenTLD. OpenTLD (aka Predator) is one of the most efficient tracking algorithms. Zdenek Kalal implemented OpenTLD in MATLAB, and George Nebehay has made a very efficient C++ OpenCV port of it.
It's very easy to install, and the tracking is really efficient.
OpenTLD uses a Median Flow tracker for tracking and implements a P-N learning algorithm. In this YouTube video, Zdenek Kalal shows the use of OpenTLD.
If you just want to implement a Median Flow tracker, follow this link: https://github.com/gnebehay/OpenTLD/tree/master/src/mftracker
If you want to use it in Python, I have made a Median Flow tracker and also a Python port of OpenTLD, but the Python port isn't as efficient.
First of all, to track a car you have to detect it somehow (using color segmentation or background subtraction, for example). Once the car is detected, you have to track it (track some points on it) using cvCalcOpticalFlowPyrLK. I didn't find any code in your snippet that is responsible for car detection.
Take a look at this and this article; your idea should be the same.
Your code is also a bit wrong. For example, why do you call cvGoodFeaturesToTrack in the main loop? You should call it once, before the loop, to detect good features to track; a sketch follows below. Note that this will also detect non-cars.
Take a look at the stock OpenCV example: OpenCV/samples/cpp/lkdemo.cpp.
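A rough sketch of that restructuring, reusing the names from the question's code (grab_next_frame is a hypothetical placeholder for the cvQueryFrame/cvCvtColor steps; this is an outline of intent, not a tested pipeline):

/* Allocate work buffers once, before the loop. */
IplImage* eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
IplImage* tmp_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
CvPoint2D32f* cornersB = new CvPoint2D32f[ MAX_CORNERS ];
int corner_count = MAX_CORNERS;

/* Detect features once, on the first grayscale frame only. */
cvGoodFeaturesToTrack( imgA, eig_image, tmp_image, cornersA, &corner_count,
                       0.05, 5.0, 0, 3, 0, 0.04 );

while( grab_next_frame() )  /* hypothetical: fills imgB with the next gray frame */
{
    /* Only the pyramidal LK step runs per frame. */
    cvCalcOpticalFlowPyrLK( imgA, imgB, pyrA, pyrB, cornersA, cornersB, corner_count,
                            cvSize( win_size, win_size ), 5, features_found, feature_errors,
                            cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0 );
    /* ... draw the tracks, then swap imgA/imgB and copy cornersB into cornersA ... */
}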

openCV usage cvFindDominantPoints

Does anyone know how to use the cvFindDominantPoints API of OpenCV? I basically have a 1-channel, binary image from which I get a set of contours. Judging from the image, I seem to be getting the correct contours. Now I am selecting one of these contours to get the dominant points of. This contour has about 60 vertices. However, the API call to cvFindDominantPoints gives me a sequence of about 15 points that do not even lie on the contour; they are quite far from it. Any insight?
My usage:
CvSeq *dominantpoints = cvFindDominantPoints(targetSeq, tristorage, CV_DOMINANT_IPAN, 7, 9, 9, 150);
#include "cv.h"
#include "highgui.h"
CvSeq* contours = 0;
CvSeq* dps = 0;
int main( int argc, char** argv )
{
int i, idx;
CvPoint p;
CvMemStorage* storage_ct = cvCreateMemStorage(0);
CvMemStorage* storage_dp = cvCreateMemStorage(0);
IplImage* img = cvLoadImage("contour.bmp", CV_LOAD_IMAGE_GRAYSCALE);
cvNamedWindow( "image" );
cvShowImage( "image", img );
cvFindContours( img, storage_ct, &contours, sizeof(CvContour),
CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE );
dps = cvFindDominantPoints( contours, storage_dp, CV_DOMINANT_IPAN, 7, 20, 9, 150 );
contours = cvApproxPoly( contours, sizeof(CvContour), storage_ct, CV_POLY_APPROX_DP, 3, 1 );
printf("found %d DPs and %d Contours \n", dps->total, contours->total );
for ( i = 0; i < dps->total; i++)
{
idx = *(int *) cvGetSeqElem(dps, i);
p = *(CvPoint *) cvGetSeqElem(contours, idx);
cvDrawCircle( img, p , 1, cvScalarAll(255) );
printf("%d %d %d\n", idx, p.x, p.y);
}
cvDrawContours(img, contours, cvScalarAll(100), cvScalarAll(200), 100 );
cvNamedWindow( "contours" );
cvShowImage( "contours", img );
cvWaitKey(0);
cvReleaseMemStorage( &storage_ct );
cvReleaseMemStorage( &storage_dp );
cvReleaseImage( &img );
return 0;
}
