I want to calculate the perimeter of a white blob in a 512×512 binary image. The image will contain only one blob. I used the following code earlier with OpenCV 3, but it no longer works in OpenCV 4.2: IplImage
is deprecated in the latest version, and I cannot pass a Mat object directly to the cvFindContours function. I am new to OpenCV and don't know how it works. Other related questions regarding the perimeter are still unanswered.
To summarize, the following works in OpenCV 3 but does not work in the current OpenCV version (4.2):
int getPerimeter(unsigned char* inImagePtr, int inW, int inH)
{
int sumEven = 0; int sumOdd = 0;
int sumCorner = 0; int prevCode = 0;
//create a mat input Image
cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);
//create four connected structuring element
cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
element.data[1] = 1; element.data[3] = 1;
element.data[4] = 1; element.data[5] = 1;
element.data[7] = 1;
//erode input image
cv::Mat erodeImage;
erode(inImage, erodeImage, element);
//Invert eroded Image
cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);
//multiply with original binary Image to get the edge Image
cv::Mat edge = erodeImage.mul(inImage);
//Get chain code of the blob
CvChain* chain = 0;
CvMemStorage* storage = 0;
storage = cvCreateMemStorage(0);
auto temp = new IplImage(edge);
cvFindContours(temp, storage, (CvSeq**)(&chain), sizeof(*chain), CV_RETR_EXTERNAL, CV_CHAIN_CODE);
delete temp;
for (; chain != NULL; chain = (CvChain*)chain->h_next)
{
CvSeqReader reader;
int i, total = chain->total;
cvStartReadSeq((CvSeq*)chain, &reader, 0);
for (i = 0; i < total; i++)
{
char code;
CV_READ_SEQ_ELEM(code, reader);
if (code % 2 == 0)
sumEven++;
else
sumOdd++;
if (i > 0) {
if (code != prevCode)
sumCorner++;
}
prevCode = code;
}
}
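// These coefficients match the Vossepoel & Smeulders corner-corrected
// chain-code perimeter estimate (even steps, odd steps, corner count).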
float perimeter = (float)sumEven*0.980 + (float)sumOdd*1.406 - (float)sumCorner*0.091;
return (roundf(perimeter));
}
This worked just fine for me!
int getPerimeter(unsigned char* inImagePtr, int inW, int inH) {
// create a mat input Image
cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);
// create four connected structuring element
cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
element.data[1] = 1;
element.data[3] = 1;
element.data[4] = 1;
element.data[5] = 1;
element.data[7] = 1;
// erode input image
cv::Mat erodeImage;
erode(inImage, erodeImage, element);
// Invert eroded Image
cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);
// multiply with original binary Image to get the edge Image
cv::Mat edge = erodeImage.mul(inImage);
vector<vector<Point>> contours;
findContours(edge, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE); // retrieve only the external contour
if (contours.empty())
    return 0; // no blob found
int preValue[2];
int nextValue[2];
int sumEven = 0;
int sumOdd = 0;
//vector<Point>::iterator itr;
for (int ii = 0; ii < contours[0].size(); ii++) {
Point pt = contours[0].at(ii);
preValue[0] = pt.x;
preValue[1] = pt.y;
if (ii != contours[0].size() - 1) {
Point pt_next = contours[0].at(ii + 1);
nextValue[0] = pt_next.x;
nextValue[1] = pt_next.y;
} else {
Point pt_next = contours[0].at(0);
nextValue[0] = pt_next.x;
nextValue[1] = pt_next.y;
}
if ((preValue[0] == nextValue[0]) || (preValue[1] == nextValue[1])) {
sumEven = sumEven + abs(nextValue[0] - preValue[0]) + abs(nextValue[1] - preValue[1]);
} else {
sumOdd = sumOdd + abs(nextValue[0] - preValue[0]);
}
}
int sumCorner = (int)contours[0].size() - 1;
float perimeter = sumEven * 0.980f + sumOdd * 1.406f - sumCorner * 0.091f;
return (int)roundf(perimeter); // round once, at the end
}
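For a quick sanity check, here is a minimal sketch that compares the result against OpenCV's built-in arc-length estimate (the test blob is hypothetical, and cv::arcLength uses a different length model, so the two values will be close but not identical):

#include <opencv2/opencv.hpp>
#include <cstdio>
#include <vector>

int main()
{
    // Hypothetical 512x512 binary image containing one white blob.
    cv::Mat blob = cv::Mat::zeros(512, 512, CV_8UC1);
    cv::circle(blob, cv::Point(256, 256), 100, cv::Scalar(255), cv::FILLED);

    int p = getPerimeter(blob.data, blob.cols, blob.rows);

    // Reference value from the full (unapproximated) outer contour.
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(blob, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
    double ref = cv::arcLength(contours[0], true);

    std::printf("corrected: %d  arcLength: %.1f\n", p, ref);
    return 0;
}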
In OpenCV, I want to apply k-means to a region of an image that is not a square or a rectangle. For example, the source image is:
now I select a custom mask:
and apply k-means with K = 3:
obviously without considering the bounds (white).
Instead, what I can do with OpenCV is k-means that takes the bounds into account:
And that messes up my final image, because black is considered one colour.
Do you have any clue?
Thank you in advance.
A quick and dirty solution: run k-means only on the pixels inside the mask, remembering their coordinates so the cluster colours can be written back to the right positions afterwards (this assumes a BGR image src and a single-channel mask).
vector<Vec3b> points;
vector<Point> locations;
for( int y = 0; y < src.rows; y++) {
for( int x = 0; x < src.cols; x++) {
if ( (int)mask.at<unsigned char>(y,x) != 0 ) {
points.push_back(src.at<Vec3b>(y,x));
locations.push_back(Point(x,y));
}
}
}
Mat kmeanPoints(points.size(), 3, CV_32F);
for( int y = 0; y < points.size(); y++ ) {
for( int z = 0; z < 3; z++) {
kmeanPoints.at<float>(y, z) = points[y][z];
}
}
Mat labels;
Mat centers;
kmeans(kmeanPoints, 4, labels, TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 0.1), 10, cv::KMEANS_PP_CENTERS, centers);
Mat final = Mat::zeros( src.size(), src.type() );
Vec3b tempColor;
for(int i = 0; i<locations.size(); i++) {
int cluster_idx = labels.at<int>(i,0);
tempColor[0] = centers.at<float>(cluster_idx, 0);
tempColor[1] = centers.at<float>(cluster_idx, 1);
tempColor[2] = centers.at<float>(cluster_idx, 2);
final.at<Vec3b>(locations[i]) = tempColor;
}
Assuming that you have an input RGB image called img (here) and a one-channel mask called mask (here), here is the snippet to prepare your k-means computation:
int nbClasses = 3; // or whatever you want
cv::TermCriteria myCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 10, 1.0);
cv::Mat data, labels, centers, result;
img.convertTo(data, CV_32F);
// reshape into 3 columns (one per channel, in BGR order) and as many rows as the total number of pixels in img
data = data.reshape(1, data.total());
If you want to apply a normal k-means (without a mask):
// apply k-means
cv::kmeans(data, nbClasses, labels, myCriteria, 3, cv::KMEANS_PP_CENTERS, centers);
// reshape both to a single column of Vec3f pixels
centers = centers.reshape(3, centers.rows);
data = data.reshape(3, data.rows);
// replace pixel values with their center value
cv::Vec3f *p = data.ptr<cv::Vec3f>();
for (size_t i = 0; i < data.rows; i++)
{
int center_id = labels.at<int>(i);
p[i] = centers.at<cv::Vec3f>(center_id);
}
// back to 2D image
data = data.reshape(3, img.rows);
// optional conversion to uchar
data.convertTo(result, CV_8U);
The result is here.
But if you want instead to apply a masked k-means:
int nbWhitePixels = cv::countNonZero(mask);
cv::Mat dataMasked = cv::Mat(nbWhitePixels, 3, CV_32F, cv::Scalar(0));
cv::Mat maskFlatten = mask.reshape(1, mask.total());
// filter data by the mask
int idx = 0;
for (int k = 0; k < mask.total(); k++)
{
int val = maskFlatten.at<uchar>(k, 0);
if (val != 0)
{
float val0 = data.at<float>(k, 0);
float val1 = data.at<float>(k, 1);
float val2 = data.at<float>(k, 2);
dataMasked.at<float>(idx,0) = val0;
dataMasked.at<float>(idx,1) = val1;
dataMasked.at<float>(idx,2) = val2;
idx++;
}
}
// apply k-means
cv::kmeans(dataMasked, nbClasses, labels, myCriteria, 3, cv::KMEANS_PP_CENTERS, centers);
// reshape to a single column of Vec3f pixels
centers = centers.reshape(3, centers.rows);
dataMasked = dataMasked.reshape(3, dataMasked.rows);
data = data.reshape(3, data.rows);
// replace pixel values with their center value, only for pixels in mask
cv::Vec3f *p = data.ptr<cv::Vec3f>();
idx = 0;
for (size_t i = 0; i < data.rows; i++)
{
if (maskFlatten.at<uchar>(i, 0) != 0)
{
int center_id = labels.at<int>(idx);
p[i] = centers.at<cv::Vec3f>(center_id);
idx++;
}
//else
// p[i] = cv::Vec3f(0, 0, 0);
}
// back to 2d, and uchar
data = data.reshape(3, img.rows);
data.convertTo(result, CV_8U);
You will now have this result.
If you leave the else part commented out, you will keep the original pixels outside the mask, whereas if you uncomment it, you will convert them into black pixels, like here.
Good day. I'm new to OpenCV, and right now I'm trying to do fingertip detection using colour tracking and background subtraction methods. I got the colour tracking part working, but I have no idea how to subtract the background and leave only the fingertips.
Here is my code.
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
using namespace std;
IplImage* GetThresholdedImage(IplImage* img, CvScalar& lowerBound, CvScalar& upperBound)
{
// Convert the image into an HSV image
IplImage* imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
cvCvtColor(img, imgHSV, CV_BGR2HSV);
IplImage* imgThreshed = cvCreateImage(cvGetSize(img), 8, 1);
cvInRangeS(imgHSV, lowerBound, upperBound, imgThreshed);
cvReleaseImage(&imgHSV);
return imgThreshed;
}
int main()
{
int lineThickness = 2;
CvScalar lowerBound = cvScalar(20, 100, 100);
CvScalar upperBound = cvScalar(30, 255, 255);
int b,g,r;
lowerBound = cvScalar(0,58,89);
upperBound = cvScalar(25,173,229);
CvCapture* capture = 0;
capture = cvCaptureFromCAM(1);
if(!capture)
{
printf("Could not initialize capturing...\n");
return -1;
}
cvNamedWindow("video");
cvNamedWindow("thresh");
// This image holds the "scribble" data...
// the tracked positions of the object
IplImage* imgScribble = NULL;
while(true)
{
IplImage* frame = 0;
frame = cvQueryFrame(capture);
if(!frame)
break;
// If this is the first frame, we need to initialize it
if(imgScribble == NULL)
{
imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
}
// Holds the thresholded image (tracked color -> white, the rest -> black)
IplImage* imgThresh = GetThresholdedImage(frame,lowerBound,upperBound);
// Calculate the moments to estimate the position of the object
CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
cvMoments(imgThresh, moments, 1);
// The actual moment values
double moment10 = cvGetSpatialMoment(moments, 1, 0);
double moment01 = cvGetSpatialMoment(moments, 0, 1);
double area = cvGetCentralMoment(moments, 0, 0);
// Holding the last and current positions
static int posX = 0;
static int posY = 0;
int lastX = posX;
int lastY = posY;
if (area > 0) { // avoid division by zero when nothing is tracked
    posX = moment10/area;
    posY = moment01/area;
}
cout << "position = " << posX << " " << posY << endl;
// We want to draw a line only if its a valid position
if(lastX>0 && lastY>0 && posX>0 && posY>0)
{
// Draw a yellow line from the previous point to the current point
cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), upperBound, lineThickness);
}
// Add the scribbling image and the frame...
cvAdd(frame, imgScribble, frame);
cvShowImage("thresh", imgThresh);
cvShowImage("video", frame);
int c = cvWaitKey(10);
if(c==27) //ESC key
{
break;
}
cvReleaseImage(&imgThresh);
free(moments); // allocated with malloc above, so release with free, not delete
}
cvReleaseCapture(&capture);
return 0;
}
I don't know if I understand you right, but I think you need to add the following:
cvErode(imgThreshed, imgThreshed, NULL, 1);
cvDilate(imgThreshed, imgThreshed, NULL, 1);
in GetThresholdedImage to get less noise! But after all, I think it would be better for you to use the cv::Mat API of OpenCV ;)
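For illustration, here is a minimal cv::Mat sketch of the same thresholding step with the noise cleanup built in (a sketch under the assumption of BGR input; the function name is just a placeholder):

#include <opencv2/opencv.hpp>

// Hypothetical cv::Mat counterpart of GetThresholdedImage: convert to HSV,
// keep the pixels inside [lower, upper], then erode/dilate once each to
// suppress small noise blobs.
cv::Mat getThresholdedImage(const cv::Mat& bgr,
                            const cv::Scalar& lower,
                            const cv::Scalar& upper)
{
    cv::Mat hsv, thresh;
    cv::cvtColor(bgr, hsv, cv::COLOR_BGR2HSV);
    cv::inRange(hsv, lower, upper, thresh);
    cv::erode(thresh, thresh, cv::Mat(), cv::Point(-1, -1), 1);
    cv::dilate(thresh, thresh, cv::Mat(), cv::Point(-1, -1), 1);
    return thresh;
}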
Try the BGS library; I used it before and liked it. You can get it here: http://code.google.com/p/bgslibrary/
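If an external library is not an option, OpenCV 3 and later also ship background subtractors out of the box. A minimal sketch with BackgroundSubtractorMOG2 (camera index and window name are placeholders):

#include <opencv2/opencv.hpp>

int main()
{
    cv::VideoCapture cap(0);
    cv::Ptr<cv::BackgroundSubtractor> bg = cv::createBackgroundSubtractorMOG2();
    cv::Mat frame, fgMask;
    while (cap.read(frame))
    {
        bg->apply(frame, fgMask);         // foreground mask: the moving hand
        cv::imshow("foreground", fgMask); // combine with the colour threshold
        if (cv::waitKey(10) == 27)        // ESC to quit
            break;
    }
    return 0;
}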
Hey, I tried to make a histogram that shows frame subtraction. The code runs, but I get a gray window with no result.
The message in the command window is:
Compiler did not align stack variables. Libavcodec has been miscompiled
and may be very slow or crash. This is not a bug in libavcodec,
but in the compiler. You may try recompiling using gcc >= 4.2.
Do not report crashes to FFmpeg developers.
OpenCV Error: Assertion failed (images[j].channels() == 1) in unknown function,
file ........\ocv\opencv\src\cv\cvhistogram.cpp, line 137
Here is the code. Does someone have an idea? Thanks for the help.
int main()
{
int key = 0;
CvCapture* capture = cvCaptureFromAVI( "macroblock.mpg" );
IplImage* frame = cvQueryFrame( capture );
IplImage* currframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
IplImage* destframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
IplImage* imgHistogram = 0;
CvHistogram* hist;
if ( !capture )
{
fprintf( stderr, "Cannot open AVI!\n" );
return 1;
}
int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
cvNamedWindow( "dest", CV_WINDOW_AUTOSIZE );
cvNamedWindow( "imgHistogram", CV_WINDOW_AUTOSIZE );
while( key != 'x' )
{
frame = cvQueryFrame( capture );
currframe = cvCloneImage( frame );
frame = cvQueryFrame( capture );
cvSub(frame,currframe,destframe);
int bins = 256;
int hsize[] = {bins};
float max_value = 0, min_value = 0;
float value;
int normalized;
float xranges[] = {0, 256};
float* ranges[] = {xranges};
IplImage* planes[] = {destframe};
hist = cvCreateHist(1, hsize, CV_HIST_ARRAY, ranges,1);
cvCalcHist(planes, hist, 0, NULL);
cvGetMinMaxHistValue(hist, &min_value, &max_value);
// printf("Minimum Histogram Value: %f, Maximum Histogram Value: %f\n", min_value, max_value);
imgHistogram = cvCreateImage(cvSize(bins, 50),IPL_DEPTH_8U,3);
cvRectangle(imgHistogram, cvPoint(0,0), cvPoint(256,50), CV_RGB(255,255,255),-1);
for(int i=0; i < bins; i++){
value = cvQueryHistValue_1D(hist, i);
normalized = cvRound(value*50/max_value);
cvLine(imgHistogram,cvPoint(i,50), cvPoint(i,50-normalized), CV_RGB(0,0,0));
}
if(key==27 )break;
cvShowImage( "dest",destframe);
cvShowImage( "imgHistogram",imgHistogram);
key = cvWaitKey( 1000 / 10 );
// these are re-created every iteration, so release them before looping
cvReleaseImage( &currframe );
cvReleaseImage( &imgHistogram );
}
cvDestroyWindow( "dest" );
cvReleaseCapture( &capture );
return 0;
}
Since you are trying to show a 1D histogram, the histogram plane needs to be in grayscale; the assertion images[j].channels() == 1 fails precisely because cvCalcHist is given the 3-channel result of cvSub(). So you need to convert that image to grayscale first. Try
IplImage *gray = NULL;
gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
while(key != 'x') {
...
cvSub(frame, currframe, destframe);
cvCvtColor(destframe, gray, CV_BGR2GRAY);
...
IplImage* planes[] = {gray};
..
}
Let me know if it works for you.
I am trying to implement online face recognition using the webcam. I am using these two websites as references:
shervinemami.co.cc
cognotics.com
I have few questions:
In face recognition, there are 6 steps:
Grab a frame from the camera
Detect a face within the image
Crop the frame to show just the face
Convert the frame to greyscale
Preprocess the image
Recognize the person in the image.
I am able to do the first five steps. The last step I am not able to do; I am not sure how to link step 5 to step 6.
I have already created the train.txt and test.txt files, which contain the information about the training and testing images. I have already added the functions such as learn() and doPCA() to the code...
But the point is how to use these functions in main to recognize an image that has already been preprocessed.
I need some help with it...
I have attached the code below:
// Real-time.cpp : Defines the entry point for the console application.
#include "stdafx.h"
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include <cvaux.h>
IplImage ** faceImgArr = 0; // array of face images
CvMat * personNumTruthMat = 0; // array of person numbers
int nTrainFaces = 0; // the number of training images
int nEigens = 0; // the number of eigenvalues
IplImage * pAvgTrainImg = 0; // the average image
IplImage ** eigenVectArr = 0; // eigenvectors
CvMat * eigenValMat = 0; // eigenvalues
CvMat * projectedTrainFaceMat = 0; // projected training faces
IplImage* getCameraFrame(CvCapture* &camera);
IplImage* detectFaces( IplImage *img ,CvHaarClassifierCascade* facecascade,CvMemStorage* storage );
CvRect detectFaceInImage(IplImage *inputImg, CvHaarClassifierCascade* cascade);
IplImage* preprocess( IplImage* inputImg);
IplImage* resizeImage(const IplImage *origImg, int newWidth,
int newHeight, bool keepAspectRatio);
void learn();
void recognize();
void doPCA();
void storeTrainingData();
int loadTrainingData(CvMat ** pTrainPersonNumMat);
int findNearestNeighbor(float * projectedTestFace);
int loadFaceImgArray(char * filename);
int _tmain(int argc, _TCHAR* argv[])
{
CvCapture* camera = 0; // The camera device.
CvMemStorage *storage;
cvNamedWindow( "Realtime:", CV_WINDOW_AUTOSIZE);
char *faceCascadeFilename = "C:/OpenCV2.1/data/haarcascades/haarcascade_frontalface_alt.xml";
CvHaarClassifierCascade* faceCascade;
faceCascade = (CvHaarClassifierCascade*)cvLoad(faceCascadeFilename, 0, 0, 0);
storage = cvCreateMemStorage( 0 );
learn();
while ( cvWaitKey(10) != 27 ) // Quit on "Escape" key
{
IplImage *frame = getCameraFrame(camera);
//IplImage* resized=cvCreateImage(cvSize(420,240),frame->depth,3);
//cvResizeWindow( "Image:", 640, 480);
//cvResize(frame,resized);
//cvShowImage( "Realtime:", resized );
IplImage *imgA = resizeImage(frame, 420,240, true);
IplImage *frame1 = detectFaces(imgA,faceCascade,storage);
frame1 = preprocess(frame1);
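// NOTE: recognize() is declared above but never called; this is the point
// where the missing step 6 (projecting frame1 onto the PCA subspace and
// finding the nearest neighbour) would go, as the answers below describe.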
}
// Free the camera.
cvReleaseCapture( &camera );
cvReleaseMemStorage( &storage );
return 0;
}
IplImage* getCameraFrame(CvCapture* &camera)
{
IplImage *frame;
int w, h;
// If the camera hasn't been initialized, then open it.
if (!camera) {
printf("Acessing the camera ...\n");
camera = cvCreateCameraCapture( 0 );
if (!camera) {
printf("Couldn't access the camera.\n");
exit(1);
}
// Try to set the camera resolution to 320 x 240.
cvSetCaptureProperty(camera, CV_CAP_PROP_FRAME_WIDTH, 320);
cvSetCaptureProperty(camera, CV_CAP_PROP_FRAME_HEIGHT, 240);
// Get the first frame, to make sure the camera is initialized.
frame = cvQueryFrame( camera );
if (frame) {
w = frame->width;
h = frame->height;
printf("Got the camera at %dx%d resolution.\n", w, h);
}
// Wait a little, so that the camera can auto-adjust its brightness.
Sleep(1000); // (in milliseconds)
}
// Wait until the next camera frame is ready, then grab it.
frame = cvQueryFrame( camera );
if (!frame) {
printf("Couldn't grab a camera frame.\n");
exit(1);
}
return frame;
}
CvRect detectFaceInImage(IplImage *inputImg, CvHaarClassifierCascade* cascade)
{
// Smallest face size.
CvSize minFeatureSize = cvSize(20, 20);
// Only search for 1 face.
int flags = CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_DO_ROUGH_SEARCH;
// How detailed should the search be.
float search_scale_factor = 1.1f;
IplImage *detectImg;
IplImage *greyImg = 0;
CvMemStorage* storage;
CvRect rc;
double t;
CvSeq* rects;
CvSize size;
int i, ms, nFaces;
storage = cvCreateMemStorage(0);
cvClearMemStorage( storage );
// If the image is color, use a greyscale copy of the image.
detectImg = (IplImage*)inputImg;
if (inputImg->nChannels > 1) {
size = cvSize(inputImg->width, inputImg->height);
greyImg = cvCreateImage(size, IPL_DEPTH_8U, 1 );
cvCvtColor( inputImg, greyImg, CV_BGR2GRAY );
detectImg = greyImg; // Use the greyscale image.
}
// Detect all the faces in the greyscale image.
t = (double)cvGetTickCount();
rects = cvHaarDetectObjects( detectImg, cascade, storage,
search_scale_factor, 3, flags, minFeatureSize);
t = (double)cvGetTickCount() - t;
ms = cvRound( t / ((double)cvGetTickFrequency() * 1000.0) );
nFaces = rects->total;
printf("Face Detection took %d ms and found %d objects\n", ms, nFaces);
// Get the first detected face (the biggest).
if (nFaces > 0)
rc = *(CvRect*)cvGetSeqElem( rects, 0 );
else
rc = cvRect(-1,-1,-1,-1); // Couldn't find the face.
if (greyImg)
cvReleaseImage( &greyImg );
cvReleaseMemStorage( &storage );
//cvReleaseHaarClassifierCascade( &cascade );
return rc; // Return the biggest face found, or (-1,-1,-1,-1).
}
IplImage* detectFaces( IplImage *img ,CvHaarClassifierCascade* facecascade,CvMemStorage* storage )
{
int i;
CvRect *r;
CvSeq *faces = cvHaarDetectObjects(
img,
facecascade,
storage,
1.1,
3,
0 /*CV_HAAR_DO_CANNY_PRUNNING*/,
cvSize( 40, 40 ) );
int padding_width = 30; // pixels
int padding_height = 30; // pixels
for( i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
r = ( CvRect* )cvGetSeqElem( faces, i );
cvRectangle( img,
cvPoint( r->x, r->y ),
cvPoint( r->x + r->width, r->y + r->height ),
CV_RGB( 255, 0, 0 ), 1, 8, 0 );
}
cvShowImage( "Realtime:", img );
// cropping the face (guard against the case where no face was detected,
// otherwise r is uninitialized here)
if( faces && faces->total > 0 ) {
    r = ( CvRect* )cvGetSeqElem( faces, 0 );
    cvSetImageROI(img, cvRect(r->x, r->y, r->width, r->height));
    IplImage *img2 = cvCreateImage(cvGetSize(img),
        img->depth,
        img->nChannels);
    cvCopy(img, img2, NULL);
    cvResetImageROI(img);
    cvReleaseImage(&img2); // the cropped copy is never used here; release it to avoid a leak
}
return img;
}
IplImage* preprocess( IplImage* inputImg){
IplImage *detectImg, *greyImg = 0;
IplImage *imageProcessed;
CvSize size;
detectImg = (IplImage*)inputImg;
if (inputImg->nChannels > 1) {
size = cvSize(inputImg->width, inputImg->height);
greyImg = cvCreateImage(size, IPL_DEPTH_8U, 1 );
cvCvtColor( inputImg, greyImg, CV_BGR2GRAY );
detectImg = greyImg; // Use the greyscale image.
}
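// NOTE: for the later PCA/eigenface step all faces must end up the same
// size; as written, this keeps the input's own size (the resize is a no-op).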
imageProcessed = cvCreateImage(cvSize(inputImg->width, inputImg->height), IPL_DEPTH_8U, 1);
cvResize(detectImg, imageProcessed, CV_INTER_LINEAR);
cvEqualizeHist(imageProcessed, imageProcessed);
return imageProcessed;
}
IplImage* resizeImage(const IplImage *origImg, int newWidth,
int newHeight, bool keepAspectRatio)
{
IplImage *outImg = 0;
int origWidth;
int origHeight;
if (origImg) {
origWidth = origImg->width;
origHeight = origImg->height;
}
if (newWidth <= 0 || newHeight <= 0 || origImg == 0
|| origWidth <= 0 || origHeight <= 0) {
//cerr << "ERROR: Bad desired image size of " << newWidth
// << "x" << newHeight << " in resizeImage().\n";
exit(1);
}
if (keepAspectRatio) {
// Resize the image without changing its aspect ratio,
// by cropping off the edges and enlarging the middle section.
CvRect r;
// input aspect ratio
float origAspect = (origWidth / (float)origHeight);
// output aspect ratio
float newAspect = (newWidth / (float)newHeight);
// crop width to be origHeight * newAspect
if (origAspect > newAspect) {
int tw = (origHeight * newWidth) / newHeight;
r = cvRect((origWidth - tw)/2, 0, tw, origHeight);
}
else { // crop height to be origWidth / newAspect
int th = (origWidth * newHeight) / newWidth;
r = cvRect(0, (origHeight - th)/2, origWidth, th);
}
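// NOTE: cropImage() is not defined in this listing; it comes from the
// referenced tutorial code (shervinemami.co.cc).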
IplImage *croppedImg = cropImage(origImg, r);
// Call this function again, with the new aspect ratio image.
// Will do a scaled image resize with the correct aspect ratio.
outImg = resizeImage(croppedImg, newWidth, newHeight, false);
cvReleaseImage( &croppedImg );
}
else {
// Scale the image to the new dimensions,
// even if the aspect ratio will be changed.
outImg = cvCreateImage(cvSize(newWidth, newHeight),
origImg->depth, origImg->nChannels);
if (newWidth > origImg->width && newHeight > origImg->height) {
// Make the image larger
cvResetImageROI((IplImage*)origImg);
// CV_INTER_LINEAR: good at enlarging.
// CV_INTER_CUBIC: good at enlarging.
cvResize(origImg, outImg, CV_INTER_LINEAR);
}
else {
// Make the image smaller
cvResetImageROI((IplImage*)origImg);
// CV_INTER_AREA: good at shrinking (decimation) only.
cvResize(origImg, outImg, CV_INTER_AREA);
}
}
return outImg;
}
void learn()
{
int i, offset;
// load training data
nTrainFaces = loadFaceImgArray("C:/Users/HP/Desktop/OpenCV/50_images_of_15_people.txt");
if( nTrainFaces < 2 )
{
fprintf(stderr,
"Need 2 or more training faces\n"
"Input file contains only %d\n", nTrainFaces);
return;
}
// do PCA on the training faces
doPCA();
// project the training images onto the PCA subspace
projectedTrainFaceMat = cvCreateMat( nTrainFaces, nEigens, CV_32FC1 );
offset = projectedTrainFaceMat->step / sizeof(float);
for(i=0; i<nTrainFaces; i++)
{
//int offset = i * nEigens;
cvEigenDecomposite(
faceImgArr[i],
nEigens,
eigenVectArr,
0, 0,
pAvgTrainImg,
//projectedTrainFaceMat->data.fl + i*nEigens);
projectedTrainFaceMat->data.fl + i*offset);
}
// store the recognition data as an xml file
storeTrainingData();
}
void recognize()
{
int i, nTestFaces = 0; // the number of test images
CvMat * trainPersonNumMat = 0; // the person numbers during training
float * projectedTestFace = 0;
// load test images and ground truth for person number
nTestFaces = loadFaceImgArray("C:/Users/HP/Desktop/OpenCV/test.txt");
printf("%d test faces loaded\n", nTestFaces);
// load the saved training data
if( !loadTrainingData( &trainPersonNumMat ) ) return;
// project the test images onto the PCA subspace
projectedTestFace = (float *)cvAlloc( nEigens*sizeof(float) );
for(i=0; i<nTestFaces; i++)
{
int iNearest, nearest, truth;
// project the test image onto the PCA subspace
cvEigenDecomposite(
faceImgArr[i],
nEigens,
eigenVectArr,
0, 0,
pAvgTrainImg,
projectedTestFace);
iNearest = findNearestNeighbor(projectedTestFace);
truth = personNumTruthMat->data.i[i];
nearest = trainPersonNumMat->data.i[iNearest];
printf("nearest = %d, Truth = %d\n", nearest, truth);
}
}
int loadTrainingData(CvMat ** pTrainPersonNumMat)
{
CvFileStorage * fileStorage;
int i;
// create a file-storage interface
fileStorage = cvOpenFileStorage( "facedata.xml", 0, CV_STORAGE_READ );
if( !fileStorage )
{
fprintf(stderr, "Can't open facedata.xml\n");
return 0;
}
nEigens = cvReadIntByName(fileStorage, 0, "nEigens", 0);
nTrainFaces = cvReadIntByName(fileStorage, 0, "nTrainFaces", 0);
*pTrainPersonNumMat = (CvMat *)cvReadByName(fileStorage, 0, "trainPersonNumMat", 0);
eigenValMat = (CvMat *)cvReadByName(fileStorage, 0, "eigenValMat", 0);
projectedTrainFaceMat = (CvMat *)cvReadByName(fileStorage, 0, "projectedTrainFaceMat", 0);
pAvgTrainImg = (IplImage *)cvReadByName(fileStorage, 0, "avgTrainImg", 0);
eigenVectArr = (IplImage **)cvAlloc(nTrainFaces*sizeof(IplImage *));
for(i=0; i<nEigens; i++)
{
char varname[200];
sprintf( varname, "eigenVect_%d", i );
eigenVectArr[i] = (IplImage *)cvReadByName(fileStorage, 0, varname, 0);
}
// release the file-storage interface
cvReleaseFileStorage( &fileStorage );
return 1;
}
void storeTrainingData()
{
CvFileStorage * fileStorage;
int i;
// create a file-storage interface
fileStorage = cvOpenFileStorage( "facedata.xml", 0, CV_STORAGE_WRITE );
// store all the data
cvWriteInt( fileStorage, "nEigens", nEigens );
cvWriteInt( fileStorage, "nTrainFaces", nTrainFaces );
cvWrite(fileStorage, "trainPersonNumMat", personNumTruthMat, cvAttrList(0,0));
cvWrite(fileStorage, "eigenValMat", eigenValMat, cvAttrList(0,0));
cvWrite(fileStorage, "projectedTrainFaceMat", projectedTrainFaceMat, cvAttrList(0,0));
cvWrite(fileStorage, "avgTrainImg", pAvgTrainImg, cvAttrList(0,0));
for(i=0; i<nEigens; i++)
{
char varname[200];
sprintf( varname, "eigenVect_%d", i );
cvWrite(fileStorage, varname, eigenVectArr[i], cvAttrList(0,0));
}
// release the file-storage interface
cvReleaseFileStorage( &fileStorage );
}
int findNearestNeighbor(float * projectedTestFace)
{
//double leastDistSq = 1e12;
double leastDistSq = DBL_MAX;
int i, iTrain, iNearest = 0;
for(iTrain=0; iTrain<nTrainFaces; iTrain++)
{
double distSq=0;
for(i=0; i<nEigens; i++)
{
float d_i =
projectedTestFace[i] -
projectedTrainFaceMat->data.fl[iTrain*nEigens + i];
//distSq += d_i*d_i / eigenValMat->data.fl[i]; // Mahalanobis
distSq += d_i*d_i; // Euclidean
}
if(distSq < leastDistSq)
{
leastDistSq = distSq;
iNearest = iTrain;
}
}
return iNearest;
}
void doPCA()
{
int i;
CvTermCriteria calcLimit;
CvSize faceImgSize;
// set the number of eigenvalues to use
nEigens = nTrainFaces-1;
// allocate the eigenvector images
faceImgSize.width = faceImgArr[0]->width;
faceImgSize.height = faceImgArr[0]->height;
eigenVectArr = (IplImage**)cvAlloc(sizeof(IplImage*) * nEigens);
for(i=0; i<nEigens; i++)
eigenVectArr[i] = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);
// allocate the eigenvalue array
eigenValMat = cvCreateMat( 1, nEigens, CV_32FC1 );
// allocate the averaged image
pAvgTrainImg = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);
// set the PCA termination criterion
calcLimit = cvTermCriteria( CV_TERMCRIT_ITER, nEigens, 1);
// compute average image, eigenvalues, and eigenvectors
cvCalcEigenObjects(
nTrainFaces,
(void*)faceImgArr,
(void*)eigenVectArr,
CV_EIGOBJ_NO_CALLBACK,
0,
0,
&calcLimit,
pAvgTrainImg,
eigenValMat->data.fl);
cvNormalize(eigenValMat, eigenValMat, 1, 0, CV_L1, 0);
}
int loadFaceImgArray(char * filename)
{
FILE * imgListFile = 0;
char imgFilename[512];
int iFace, nFaces=0;
// open the input file
if( !(imgListFile = fopen(filename, "r")) )
{
fprintf(stderr, "Can\'t open file %s\n", filename);
return 0;
}
// count the number of faces
while( fgets(imgFilename, 512, imgListFile) ) ++nFaces;
rewind(imgListFile);
// allocate the face-image array and person number matrix
faceImgArr = (IplImage **)cvAlloc( nFaces*sizeof(IplImage *) );
personNumTruthMat = cvCreateMat( 1, nFaces, CV_32SC1 );
// store the face images in an array
for(iFace=0; iFace<nFaces; iFace++)
{
// read person number and name of image file
fscanf(imgListFile,
"%d %s", personNumTruthMat->data.i+iFace, imgFilename);
// load the face image
faceImgArr[iFace] = cvLoadImage(imgFilename, CV_LOAD_IMAGE_GRAYSCALE);
if( !faceImgArr[iFace] )
{
fprintf(stderr, "Can\'t load image from %s\n", imgFilename);
return 0;
}
}
fclose(imgListFile);
return nFaces;
}
My answer may come late, but it might be useful for others, so I'll answer it. I am working on a similar project and I faced the same problem. I solved it by writing a function that saves the detected, cropped, and preprocessed images to the hard disk of my computer (using imwrite), and feeding the paths of the saved images to the recognition part of the code. It has made my life easier. It has been a bit harder for me to pass the parameters of the rect of the region of interest. If you or someone else has done this, it would be great to share the code with us.
You can use the following code to save the images after resizing them to a constant size using the resizeImage function in your code.
void saveCroppedFaces(CvSeq* tempon,IplImage* DetectedImage)
{
char* name;
int nFaces;
CvRect rect;
nFaces=tempon->total;
name = new char[nFaces + 12]; // extra room for the digits itoa() writes
IplImage* cropped = 0;
IplImage* croppedResized=0;
Mat croped;
for(int k=0;k<nFaces;k++)
{
itoa(k,(name+k),10);
rect = *(CvRect*)cvGetSeqElem( tempon, k );
cropped= cropImage(DetectedImage,rect);
//i can resize the cropped faces in to a fixed size here
//i can write a function to save images and call it so
//that it will save it in to hard drive
//cvNamedWindow((name+k),CV_WINDOW_AUTOSIZE);
//cvShowImage((name+k),cropped);
croppedResized=resizeImage(cropped,60,60);
croped=IplToMatConverter(croppedResized);
saveROI(croped,itoa(k,(name+k),10));
cvReleaseImage(&cropped);
}
delete[] name;
name = NULL;
}
void saveROI(Mat mat,String outputFileName)
{
string store_path("C://Users/sizusuzu/Desktop/Images/FaceDetection2/" + outputFileName + ".jpg");
bool write_success = imwrite(store_path,mat);
}
After this you can convert the IplImage* to a Mat using
Mat IplToMatConverter(IplImage* imageToMat)
{
Mat mat = cvarrToMat(imageToMat);
return mat;
}
And use the Mat in the FaceRecognizer API, or just do it the other/harder way.
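For reference, a minimal sketch of that FaceRecognizer route (this assumes the OpenCV 2.4 contrib module; the training vectors are placeholders you would fill from your train.txt):

#include <opencv2/opencv.hpp>
#include <opencv2/contrib/contrib.hpp> // cv::FaceRecognizer in OpenCV 2.4

// Train an eigenfaces model (matching the PCA approach above) and predict
// the person label of one preprocessed test face.
int recognizeFace(const std::vector<cv::Mat>& trainImages, // grayscale, equal sizes
                  const std::vector<int>& trainLabels,     // person number per image
                  const cv::Mat& testFace)                 // grayscale, same size
{
    cv::Ptr<cv::FaceRecognizer> model = cv::createEigenFaceRecognizer();
    model->train(trainImages, trainLabels);
    return model->predict(testFace);
}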
Thanks
I just read
int _tmain(int argc, _TCHAR* argv[])
{
.......
}
part of your code. This code is used for detecting the face in the image. Let's say it is Face_x. Now extract features from Face_x; call them F_x. In your database, you should store the features {F_1, F_2, ..., F_N} extracted from N different faces {Face_1, Face_2, ..., Face_N}.
A simple algorithm to recognize Face_x is to calculate the Euclidean distances between F_x and the N stored features. The minimum distance (below a threshold) gives the corresponding face. If the minimum distance is not below the threshold, then Face_x is a new face: add the feature F_x to the database. This way you can grow your database. You can begin the algorithm with no features in the database; with each new face, the database grows.
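As an illustration, here is a minimal sketch of that nearest-neighbour rule in plain C++ (the feature layout and the threshold value are placeholders, not part of the original code):

#include <cmath>
#include <cstddef>
#include <vector>

// Returns the index of the database face closest to 'query', or -1 if even
// the best match is above 'threshold', i.e. Face_x is a new face.
int matchFace(const std::vector<float>& query,
              const std::vector<std::vector<float>>& database,
              float threshold)
{
    int best = -1;
    float bestDist = threshold;
    for (std::size_t i = 0; i < database.size(); ++i)
    {
        float distSq = 0.0f;
        for (std::size_t j = 0; j < query.size(); ++j)
        {
            float d = query[j] - database[i][j];
            distSq += d * d;
        }
        float dist = std::sqrt(distSq);
        if (dist < bestDist)
        {
            bestDist = dist;
            best = (int)i;
        }
    }
    return best; // -1 means: add F_x to the database as a new person
}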
I hope the method I suggested will lead you to the solution.