How can I detect human body outline using OpenCV? - ios

I am developing a small demo with OpenCV that detects the body from the real-time camera feed. Currently my app detects the face successfully. My question is: how can I detect the human body in the same way as the face detection?
Here is my code (which is called on every frame):
///1. Convert input UIImage to Mat
std::vector<cv::Rect> faces;
CGImageRef image = CGImageCreateCopy(source.CGImage);
CGFloat cols = CGImageGetWidth(image);
CGFloat rows = CGImageGetHeight(image);
cv::Mat frame(rows, cols, CV_8UC4);
CGBitmapInfo bitmapFlags = kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault;
size_t bitsPerComponent = 8;
size_t bytesPerRow = frame.step[0];
CGColorSpaceRef colorSpace = CGImageGetColorSpace(image);
CGContextRef context = CGBitmapContextCreate(frame.data, cols, rows, bitsPerComponent, bytesPerRow, colorSpace, bitmapFlags);
CGContextDrawImage(context, CGRectMake(0.0f, 0.0f, cols, rows), image);
CGContextRelease(context);
cv::Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
///2. detection
NSString *eyes_cascade_name = [[NSBundle mainBundle] pathForResource:@"haarcascade_eye" ofType:@"xml"];
NSString *face_cascade_name = [[NSBundle mainBundle] pathForResource:@"haarcascade_frontalface_default" ofType:@"xml"];
if(!cascade_loaded){
    std::cout << "loading ..";
    if( !eyes_cascade.load( std::string([eyes_cascade_name UTF8String]) ) ){ printf("--(!)Error loading\n"); return source; };
    if( !face_cascade.load( std::string([face_cascade_name UTF8String]) ) ){ printf("--(!)Error loading\n"); return source; };
    cascade_loaded = true;
}
face_cascade.detectMultiScale(frame_gray, faces, 1.3, 5, CV_HAAR_SCALE_IMAGE);
for( size_t i = 0; i < faces.size(); i++ )
{
    cv::Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
    ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar( 0, 100, 255 ), 4, 8, 0 );
    cv::Mat faceROI = frame_gray( faces[i] );
    std::vector<cv::Rect> eyes;
    //-- In each face, detect eyes
    eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
    for( size_t j = 0; j < eyes.size(); j++ )
    {
        cv::Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
        int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
        circle( frame, center, radius, cv::Scalar( 5, 255, 0 ), 2, 8, 0 );
    }
}
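The same pipeline can be reused for body detection by loading one of the body cascades that ship with OpenCV (for example haarcascade_fullbody.xml or haarcascade_upperbody.xml). A minimal sketch, assuming a cv::CascadeClassifier body_cascade is declared alongside face_cascade and the XML file has been added to the app bundle (both the member and the bundled file are assumptions, not part of the code above):
// Load once, alongside the face/eye cascades (hypothetical body_cascade member).
NSString *body_cascade_name = [[NSBundle mainBundle] pathForResource:@"haarcascade_fullbody" ofType:@"xml"];
if( !body_cascade.load( std::string([body_cascade_name UTF8String]) ) ){ printf("--(!)Error loading body cascade\n"); return source; }

// Detect bodies on the same equalized grayscale frame used for faces.
// Full bodies are much taller than wide, so a taller minimum size is sensible.
std::vector<cv::Rect> bodies;
body_cascade.detectMultiScale( frame_gray, bodies, 1.1, 3, CV_HAAR_SCALE_IMAGE, cv::Size(40, 100) );
for( size_t k = 0; k < bodies.size(); k++ )
    cv::rectangle( frame, bodies[k], cv::Scalar( 255, 0, 0 ), 4 );
Note that a cascade only returns a bounding rectangle, not an outline; to get an actual body outline you would still have to segment the region inside the returned rectangle.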

Related

How to make the area outside of a CGRect black on a UIImage?

I am using OpenCV to detect a face in an image; however, my question is not really about OpenCV, I think it only concerns Core Graphics.
Here is my code, in a UIImageView category, for detecting a face and drawing a rectangle over it.
- (void)detectFeature:(NSString *)feature
{
NSUInteger scale;
IplImage * image;
IplImage * smallImage;
NSString * xmlPath;
CvHaarClassifierCascade * cascade;
CvMemStorage * storage;
CvSeq * faces;
UIAlertView * alert;
CGImageRef imageRef;
CGColorSpaceRef colorSpaceRef;
CGContextRef context;
CvRect rect;
CGRect faceRect;
scale = 2;
cvSetErrMode( CV_ErrModeParent );
xmlPath = [ [ NSBundle mainBundle ] pathForResource: feature ofType: @"xml" ];
if (xmlPath == nil) {
NSLog(@"We don't have an xml file for this feature, so it cannot be detected.");
}
else {
[self scaleAndRotateImage:self.image];
image = [ self createIplImage: self.image ];
smallImage = cvCreateImage( cvSize( image->width / scale, image->height / scale ), IPL_DEPTH_8U, 3 );
cvPyrDown( image, smallImage, CV_GAUSSIAN_5x5 );
cascade = ( CvHaarClassifierCascade * )cvLoad( [ xmlPath cStringUsingEncoding: NSASCIIStringEncoding ], NULL, NULL, NULL );
storage = cvCreateMemStorage( 0 );
faces = cvHaarDetectObjects( smallImage, cascade, storage, ( float )1.2, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize( 20, 20 ) );
cvReleaseImage( &smallImage );
imageRef = self.image.CGImage;
colorSpaceRef = CGColorSpaceCreateDeviceRGB();
context = CGBitmapContextCreate
(
NULL,
self.image.size.width,
self.image.size.height,
8,
self.image.size.width * 4,
colorSpaceRef,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrderDefault
);
CGContextDrawImage
(
context,
CGRectMake( 0, 0, self.image.size.width, self.image.size.height ),
imageRef
);
CGContextSetLineWidth( context, 1 );
CGContextSetRGBStrokeColor( context, ( CGFloat )0, ( CGFloat )0, ( CGFloat )0, ( CGFloat )0.5 );
CGContextSetRGBFillColor( context, ( CGFloat )1, ( CGFloat )1, ( CGFloat )1, ( CGFloat )0.5 );
if( faces->total == 0 )
{
alert = [ [ UIAlertView alloc ] initWithTitle: @"No Feature" message: @"No features were detected in the picture. Please try with another one." delegate: NULL cancelButtonTitle: @"OK" otherButtonTitles: nil ];
[ alert show ];
}
else
{
for( int i = 0; i < faces->total; i++ )
{
rect = *( CvRect * )cvGetSeqElem( faces, i );
faceRect = CGContextConvertRectToDeviceSpace( context, CGRectMake( rect.x * scale, rect.y * scale, rect.width * scale, rect.height * scale ) );
CGContextFillRect( context, faceRect );
CGContextStrokeRect( context, faceRect );
}
self.image = [ UIImage imageWithCGImage: CGBitmapContextCreateImage( context ) ];
}
CGContextRelease( context );
CGColorSpaceRelease( colorSpaceRef );
cvReleaseMemStorage( &storage );
cvReleaseHaarClassifierCascade( &cascade );
cvReleaseImage( &smallImage );
}
}
What this code does is draw a rectangle over my UIImage and leave the rest of the image as it is. I would like to change the whole area of the UIImage to black except the detected rectangle.
As of right now, I have managed to crop the image, but I don't want to do that: the size of the image should stay the same, I just want to black out the rest of the image.
I have broken my answer into steps; hope it helps.
After detecting all the faces, draw the filled rect (rather than just the path) into a new image. Then draw a black image of the original image's size.
Next, create a new context, draw the black image, then draw the path image (the image with the rect) with the kCGBlendModeDestinationOut blend mode. Save the result as result1.
Again create a new context, draw the original image, then draw the path image with the kCGBlendModeDestinationIn blend mode. Save the result as result2.
Finally, take a new context and draw both result1 and result2 with kCGBlendModeNormal, as in the sketch below.
Happy coding!
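A minimal sketch of those steps using UIKit drawing, assuming a single faceRect already expressed in the image's coordinate space (the method and variable names here are made up for illustration, not taken from the question's code):
- (UIImage *)maskImage:(UIImage *)original keepingRect:(CGRect)faceRect
{
    CGSize size = original.size;
    CGRect full = CGRectMake(0, 0, size.width, size.height);

    // Path image: an opaque filled rect on a transparent background.
    UIGraphicsBeginImageContextWithOptions(size, NO, original.scale);
    [[UIColor whiteColor] setFill];
    UIRectFill(faceRect);
    UIImage *maskImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    // result1: black everywhere, with a hole punched where the rect is.
    UIGraphicsBeginImageContextWithOptions(size, NO, original.scale);
    [[UIColor blackColor] setFill];
    UIRectFill(full);
    [maskImage drawInRect:full blendMode:kCGBlendModeDestinationOut alpha:1.0];
    UIImage *result1 = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    // result2: only the rect area of the original survives.
    UIGraphicsBeginImageContextWithOptions(size, NO, original.scale);
    [original drawInRect:full];
    [maskImage drawInRect:full blendMode:kCGBlendModeDestinationIn alpha:1.0];
    UIImage *result2 = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    // Composite the two halves normally: black frame plus the kept rect.
    UIGraphicsBeginImageContextWithOptions(size, NO, original.scale);
    [result1 drawInRect:full];
    [result2 drawInRect:full blendMode:kCGBlendModeNormal alpha:1.0];
    UIImage *composited = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    return composited;
}
The key point is that the image size never changes; only the pixels outside faceRect are replaced by black.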

OpenCV - Creating Bounding boxes and circles for contours - Xcode [Objective-C++]

A similar question to mine has already been posted, but part of it still does not fit my current problem. I use Xcode and Objective-C++ to work with OpenCV.
What I want to do is create bounding boxes and circles around human eyes in photos. I have used the following code, but it does not work:
-(void)detectEye {
cv::Mat src_gray;
int thresh = 100;
RNG rng(12345);
NSString *path = @"/Users/NazarMedeiros/Desktop/image.jpg";
cv::Mat src = cv::imread("/Users/NazarMedeiros/Downloads/face.jpg");
if (src.empty())
return;
cv::cvtColor(src, src_gray, CV_BGR2GRAY );
cv::blur(src_gray, src_gray, cv::Size(3,3) );
cv::Mat threshold_output;
std::vector<std::vector<cv::Point> > contours;
std::vector<Vec4i> hierarchy;
/// Detect edges using Threshold
cv::threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY );
/// Find contours
cv::findContours(threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
/// Approximate contours to polygons + get bounding rects and circles
std::vector<std::vector<cv::Point> > contours_poly( contours.size() );
std::vector<cv::Rect> boundRect( contours.size() );
std::vector<Point2f>center( contours.size() );
std::vector<float>radius( contours.size() );
for(int i = 0; i < contours.size(); i++ ) {
    cv::approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 3, true );
    boundRect[i] = cv::boundingRect(cv::Mat(contours_poly[i]) );
    cv::minEnclosingCircle( (cv::Mat)contours_poly[i], center[i], radius[i] );
}
/// Draw polygonal contour + bounding rects + circles
Mat drawing = Mat::zeros( threshold_output.size(), CV_8UC3 );
for(int i = 0; i < contours.size(); i++ ) {
    cv::Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255) );
    cv::drawContours( drawing, contours_poly, i, color, 1, 8, std::vector<Vec4i>(), 0, cv::Point() );
    cv::circle( drawing, center[i], (int)radius[i], color, 2, 8, 0 );
}
imwrite([path UTF8String], src);
}
The imwrite function does not save my image.
Can anyone help me, please?
Best regards,
Nazar Medeiros
Try converting "drawing" to a UIImage first:
- (UIImage *)UIImageFromCVMat:(cv::Mat)cvMat
{
NSData *data = [NSData dataWithBytes:cvMat.data length:cvMat.elemSize()*cvMat.total()];
CGColorSpaceRef colorSpace;
if (cvMat.elemSize() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
}
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
CGImageRef imageRef = CGImageCreate(cvMat.cols, //width
cvMat.rows, //height
8, //bits per component
8 * cvMat.elemSize(), //bits per pixel
cvMat.step[0], //bytesPerRow
colorSpace, //colorspace
kCGImageAlphaNone|kCGBitmapByteOrderDefault,// bitmap info
provider, //CGDataProviderRef
NULL, //decode
false, //should interpolate
kCGRenderingIntentDefault //intent
);
// Getting UIImage from CGImage
UIImage *finalImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpace);
return finalImage;
}
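As a side note, the last line of the detectEye code saves src rather than drawing, so even a successful write would not contain the drawn contours. A hedged usage sketch with the helper above, replacing that imwrite call (path is the string declared in the question; the channel-order remark is an assumption about drawing holding BGR data):
// drawing is a BGR cv::Mat while the CGImage above is interpreted as RGB,
// so a cv::cvtColor(drawing, drawing, CV_BGR2RGB) beforehand may be needed.
UIImage *result = [self UIImageFromCVMat:drawing];
NSData *jpegData = UIImageJPEGRepresentation(result, 0.9f);
[jpegData writeToFile:path atomically:YES];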

Matching a template image (scaled) to a main/larger image

I want to find/check for a sub-image/template image in a main image and get its coordinates. I have used the code given at the following link to implement it:
Check presence of subimage in image in iOS
It works fine if the template image is exactly the same size as the matching part of the larger image, but it does not give proper results if the sub-image is scaled down or up relative to the matching part of the larger image.
Use OpenCV feature detection; it is more accurate than template matching and copes with scale changes.
Please try this code:
-(void)featureDetection:(UIImage*)largerImage withImage:(UIImage*)subImage
{
cv::Mat tempMat1 = [largerImage CVMat];
cv::Mat tempMat2 = [subImage CVMat];
cv::cvtColor(tempMat1, tempMat1, CV_RGB2GRAY);
cv::cvtColor(tempMat2, tempMat2, CV_RGB2GRAY);
if( !tempMat1.data || !tempMat2.data ) {
return;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 25;
cv::SurfFeatureDetector detector( minHessian ); // More accurate but takes more time.
//cv::FastFeatureDetector detector( minHessian ); // Less accurate but takes less time.
std::vector<cv::KeyPoint> keypoints_1, keypoints_2;
detector.detect( tempMat1, keypoints_1 );
detector.detect( tempMat2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
cv::SurfDescriptorExtractor extractor;
cv::Mat descriptors_1, descriptors_2;
extractor.compute( tempMat1, keypoints_1, descriptors_1 );
extractor.compute( tempMat2, keypoints_2, descriptors_2 );
std::vector<cv::Point2f> obj_corners(4);
//Get the corners from the object
obj_corners[0] = (cvPoint(0,0));
obj_corners[1] = (cvPoint(tempMat2.cols,0));
obj_corners[2] = (cvPoint(tempMat2.cols,tempMat2.rows));
obj_corners[3] = (cvPoint(0, tempMat2.rows));
//-- Step 3: Matching descriptor vectors with a brute force matcher
//cv::BruteForceMatcher < cv::L2<float> > matcher;
cv::FlannBasedMatcher matcher;
//std::vector< cv::DMatch > matches;
std::vector<std::vector<cv::DMatch> > matches;
std::vector<cv::DMatch > good_matches;
std::vector<cv::Point2f> obj;
std::vector<cv::Point2f> scene;
std::vector<cv::Point2f> scene_corners(4);
cv::Mat H;
matcher.knnMatch( descriptors_2, descriptors_1, matches,2);
for(int i = 0; i < cv::min(tempMat1.rows-1,(int) matches.size()); i++) {
if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0)) {
good_matches.push_back(matches[i][0]);
}
}
cv::Mat img_matches;
drawMatches( tempMat2, keypoints_2, tempMat1, keypoints_1, good_matches, img_matches );
NSLog(@"good matches %lu", good_matches.size());
if (good_matches.size() >= 4) {
for( int i = 0; i < good_matches.size(); i++ ) {
//Get the keypoints from the good matches
obj.push_back( keypoints_2[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_1[ good_matches[i].trainIdx ].pt );
}
H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
NSLog(@"%f %f", scene_corners[0].x, scene_corners[0].y);
NSLog(@"%f %f", scene_corners[1].x, scene_corners[1].y);
NSLog(@"%f %f", scene_corners[2].x, scene_corners[2].y);
NSLog(@"%f %f", scene_corners[3].x, scene_corners[3].y);
//Draw lines between the corners (the mapped object in the scene image )
line( tempMat1, scene_corners[0], scene_corners[1], cvScalar(0, 255, 0), 4 );
line( tempMat1, scene_corners[1], scene_corners[2], cvScalar( 0, 255, 0), 4 );
line( tempMat1, scene_corners[2], scene_corners[3], cvScalar( 0, 255, 0), 4 );
line( tempMat1, scene_corners[3], scene_corners[0], cvScalar( 0, 255, 0), 4 );
}
// View matching..
UIImage *resultimage = [UIImage imageWithCVMat:img_matches];
UIImageView *imageview = [[UIImageView alloc] initWithImage:resultimage];
imageview.frame = CGRectMake(0, 0, 320, 240);
[self.view addSubview:imageview];
// View Result
UIImage *resultimage2 = [UIImage imageWithCVMat:tempMat1];
UIImageView *imageview2 = [[UIImageView alloc] initWithImage:resultimage2];
imageview2.frame = CGRectMake(0, 240, 320, 240);
[self.view addSubview:imageview2];
}
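A hypothetical call site, assuming the UIImage+OpenCV category already used by the snippet (CVMat / imageWithCVMat:) is in the project and the two images are bundled resources (the file names here are placeholders):
// e.g. in viewDidLoad:
[self featureDetection:[UIImage imageNamed:@"scene.jpg"]
             withImage:[UIImage imageNamed:@"template.jpg"]];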

Understanding V&J (haar.cpp) sliding window

I'm going over the code of haar.cpp to understand the sliding window approach. Here is the code:
for( factor = 1; ; factor *= scaleFactor )
{
CvSize winSize = { cvRound(winSize0.width*factor),
cvRound(winSize0.height*factor) };
CvSize sz = { cvRound( img->cols/factor ), cvRound( img->rows/factor ) };
CvSize sz1 = { sz.width - winSize0.width + 1, sz.height - winSize0.height + 1 };
CvRect equRect = { icv_object_win_border, icv_object_win_border,
winSize0.width - icv_object_win_border*2,
winSize0.height - icv_object_win_border*2 };
CvMat img1, sum1, sqsum1, norm1, tilted1, mask1;
CvMat* _tilted = 0;
if( sz1.width <= 0 || sz1.height <= 0 )
break;
if( winSize.width > maxSize.width || winSize.height > maxSize.height )
break;
if( winSize.width < minSize.width || winSize.height < minSize.height )
continue;
img1 = cvMat( sz.height, sz.width, CV_8UC1, imgSmall->data.ptr );
sum1 = cvMat( sz.height+1, sz.width+1, CV_32SC1, sum->data.ptr );
sqsum1 = cvMat( sz.height+1, sz.width+1, CV_64FC1, sqsum->data.ptr );
if( tilted )
{
tilted1 = cvMat( sz.height+1, sz.width+1, CV_32SC1, tilted->data.ptr );
_tilted = &tilted1;
}
norm1 = cvMat( sz1.height, sz1.width, CV_32FC1, normImg ? normImg->data.ptr : 0 );
mask1 = cvMat( sz1.height, sz1.width, CV_8UC1, temp->data.ptr );
cvResize( img, &img1, CV_INTER_LINEAR );
cvIntegral( &img1, &sum1, &sqsum1, _tilted );
int ystep = factor > 2 ? 1 : 2;
const int LOCS_PER_THREAD = 1000;
int stripCount = ((sz1.width/ystep)*(sz1.height + ystep-1)/ystep + LOCS_PER_THREAD/2)/LOCS_PER_THREAD;
stripCount = std::min(std::max(stripCount, 1), 100);
#ifdef HAVE_IPP
if( use_ipp )
{
cv::Mat fsum(sum1.rows, sum1.cols, CV_32F, sum1.data.ptr, sum1.step);
cv::Mat(&sum1).convertTo(fsum, CV_32F, 1, -(1<<24));
}
else
#endif
cvSetImagesForHaarClassifierCascade( cascade, &sum1, &sqsum1, _tilted, 1. );
cv::Mat _norm1(&norm1), _mask1(&mask1);
cv::parallel_for_(cv::Range(0, stripCount),
cv::HaarDetectObjects_ScaleImage_Invoker(cascade,
(((sz1.height + stripCount - 1)/stripCount + ystep-1)/ystep)*ystep,
factor, cv::Mat(&sum1), cv::Mat(&sqsum1), &_norm1, &_mask1,
cv::Rect(equRect), allCandidates, rejectLevels, levelWeights, outputRejectLevels, &mtx));
}
}
Now, I want to make sure I got everything right. As I understand it, we loop over the scales, and at each scale we subsample the image and try to find objects at a fixed size (20x20 for faces), going over all x and y locations.
The pseudo-code is:
for scale = 1:ScaleMax
    for X = 1:width
        for Y = 1:height
            try to detect a face at position (X, Y) with a fixed size of 20x20
Is that precise or did I get something wrong?
Thanks,
Gil.
While that understanding is accurate, it is not precise.
For better precision you should read the original paper by Viola and Jones, since all the magic is in the step "try to detect a face at position (x, y) with a fixed size of 20x20". One detail the code above does make clear is that the 20x20 window itself never grows: at each iteration the image is resized by factor (the cvResize into img1), so the fixed-size window effectively covers ever larger regions of the original image.
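To make that "fixed window, shrinking image" point concrete, here is a rough, simplified illustration of how the scales and positions are walked. This is not the actual haar.cpp code; the function and variable names are made up, and the cascade evaluation is left as a comment:
// Simplified illustration only: the window stays at winSize0 (e.g. 20x20),
// while the image is downscaled by `factor` on every iteration.
void scanAllScales( const cv::Mat& img, cv::Size winSize0, double scaleFactor )
{
    for( double factor = 1.0; ; factor *= scaleFactor )
    {
        cv::Size sz( cvRound(img.cols / factor), cvRound(img.rows / factor) );
        if( sz.width < winSize0.width || sz.height < winSize0.height )
            break;                              // the window no longer fits

        cv::Mat small;
        cv::resize( img, small, sz );           // shrink the image, not the window

        int ystep = factor > 2. ? 1 : 2;        // coarser stride at low factors
        for( int y = 0; y <= sz.height - winSize0.height; y += ystep )
            for( int x = 0; x <= sz.width - winSize0.width; x += ystep )
            {
                // Evaluate the 20x20 cascade at (x, y) in `small`; a hit here
                // corresponds to a region of size winSize0*factor located at
                // roughly (x*factor, y*factor) in the original image.
            }
    }
}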

How to increase haar detector's window size in OpenCV

I am using the code available on this website to do face detection: http://nashruddin.com/OpenCV_Face_Detection
I would like to increase the size of the detected face region, but I am not sure how to do it and need some help with it.
The code I am using is this:
//
#include "stdafx.h"
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
CvHaarClassifierCascade *cascade;
CvMemStorage *storage;
void detectFaces( IplImage *img );
int main( int argc, char** argv )
{
CvCapture *capture;
IplImage *frame;
int key = 0;
char *filename = "C:/OpenCV2.1/data/haarcascades/haarcascade_frontalface_alt.xml";
cascade = ( CvHaarClassifierCascade* )cvLoad( filename, 0, 0, 0 );
storage = cvCreateMemStorage( 0 );
capture = cvCaptureFromCAM( 0 );
assert( cascade && storage && capture );
cvNamedWindow( "video", 1 );
while( key != 'q' ) {
frame = cvQueryFrame( capture );
if( !frame ) {
fprintf( stderr, "Cannot query frame!\n" );
break;
}
cvFlip( frame, frame, -1 );
frame->origin = 0;
detectFaces( frame );
key = cvWaitKey( 10 );
}
cvReleaseCapture( &capture );
cvDestroyWindow( "video" );
cvReleaseHaarClassifierCascade( &cascade );
cvReleaseMemStorage( &storage );
return 0;
}
void detectFaces( IplImage *img )
{
int i;
CvSeq *faces = cvHaarDetectObjects(
img,
cascade,
storage,
1.1,
3,
0 /*CV_HAAR_DO_CANNY_PRUNNING*/,
cvSize( 40, 40 ) );
for( i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
CvRect *r = ( CvRect* )cvGetSeqElem( faces, i );
cvRectangle( img,
cvPoint( r->x, r->y ),
cvPoint( r->x + r->width, r->y + r->height ),
CV_RGB( 255, 0, 0 ), 1, 8, 0 );
}
cvShowImage( "video", img );
}
This increases the size of the rectangle around the face. If you meant increasing the haar detector's window size, please update your question.
int padding_width = 30; // pixels
int padding_height = 30; // pixels
for( i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
    CvRect *r = ( CvRect* )cvGetSeqElem( faces, i );
    // Yes yes, all of this could be written much more compactly.
    // It was written like this for clarity.
    int topleft_x = r->x - (padding_width / 2);
    int topleft_y = r->y - (padding_height / 2);
    if (topleft_x < 0)
        topleft_x = 0;
    if (topleft_y < 0)
        topleft_y = 0;
    int bottomright_x = r->x + r->width + (padding_width / 2);
    int bottomright_y = r->y + r->height + (padding_height / 2);
    if (bottomright_x >= img->width)
        bottomright_x = img->width - 1;
    if (bottomright_y >= img->height)
        bottomright_y = img->height - 1;
    cvRectangle( img,
                 cvPoint(topleft_x, topleft_y),
                 cvPoint(bottomright_x, bottomright_y),
                 CV_RGB( 255, 0, 0 ), 1, 8, 0 );
}
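If what was actually meant is making the detector itself look only for bigger faces, the simplest knob in this C API is the min_size argument of cvHaarDetectObjects, which is the last parameter (cvSize( 40, 40 ) in the code above). A hedged tweak, with 80x80 chosen purely as an example value:
/* Only faces of at least 80x80 pixels will be reported; the detector still
   scans progressively larger windows as it scales up from this minimum. */
CvSeq *faces = cvHaarDetectObjects(
    img,
    cascade,
    storage,
    1.1,                       /* scale factor between pyramid levels */
    3,                         /* min neighbors */
    0 /*CV_HAAR_DO_CANNY_PRUNING*/,
    cvSize( 80, 80 ) );        /* minimum detection window */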
