Calculate white area pixels in contours using OpenCV and JavaCV - opencv

I have developed a program to detect motion using JavaCV. Up to now I have completed cvFindContours on the processed image. The source code is given below:
public class MotionDetect {
public static void main(String args[]) throws Exception, InterruptedException {
//FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(new File("D:/pool.avi"));
OpenCVFrameGrabber grabber = new OpenCVFrameGrabber("D:/2.avi");
final CanvasFrame canvas = new CanvasFrame("My Image");
final CanvasFrame canvas2 = new CanvasFrame("ROI");
canvas.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE);
grabber.start();
IplImage frame = grabber.grab();
CvSize imgsize = cvGetSize(frame);
IplImage grayImage = cvCreateImage(imgsize, IPL_DEPTH_8U, 1);
IplImage ROIFrame = cvCreateImage(cvSize((265 - 72), (214 - 148)), IPL_DEPTH_8U, 1);
IplImage colorImage;
IplImage movingAvg = cvCreateImage(imgsize, IPL_DEPTH_32F, 3);
IplImage difference = null;
IplImage temp = null;
IplImage motionHistory = cvCreateImage(imgsize, IPL_DEPTH_8U, 3);
CvRect bndRect = cvRect(0, 0, 0, 0);
CvPoint pt1 = new CvPoint(), pt2 = new CvPoint();
CvFont font = null;
//Capture the movie frame by frame.
int prevX = 0;
int numPeople = 0;
char[] wow = new char[65];
int avgX = 0;
//Indicates whether this is the first time in the loop of frames.
boolean first = true;
//Indicates the contour which was closest to the left boundary before the object
//entered the region between the buildings.
int closestToLeft = 0;
//Same as above, but for the right.
int closestToRight = 320;
while (true) {
colorImage = grabber.grab();
if (colorImage != null) {
if (first) {
difference = cvCloneImage(colorImage);
temp = cvCloneImage(colorImage);
cvConvertScale(colorImage, movingAvg, 1.0, 0.0);
first = false;
//cvShowImage("My Window1", difference);
} //else, make a running average of the motion.
else {
cvRunningAvg(colorImage, movingAvg, 0.020, null);
}
//Convert the scale of the moving average.
cvConvertScale(movingAvg, temp, 1.0, 0.0);
//Subtract the current frame from the moving average.
cvAbsDiff(colorImage, temp, difference);
//Convert the image to grayscale.
cvCvtColor(difference, grayImage, CV_RGB2GRAY);
//canvas.showImage(grayImage);
//Convert the image to black and white.
cvThreshold(grayImage, grayImage, 70, 255, CV_THRESH_BINARY);
//Dilate and erode to get people blobs
cvDilate(grayImage, grayImage, null, 18);
cvErode(grayImage, grayImage, null, 10);
canvas.showImage(grayImage);
ROIFrame = cvCloneImage(grayImage);
cvSetImageROI(ROIFrame, cvRect(72, 148, (265 - 72), (214 - 148)));
//cvOr(outFrame, tempFrame, outFrame);
cvShowImage("ROI Frame", ROIFrame);
cvRectangle(colorImage, /* the dest image */
cvPoint(72, 148), /* top left point */
cvPoint(265, 214), /* bottom right point */
cvScalar(255, 0, 0, 0), /* the color; blue */
1, 8, 0);
CvMemStorage storage = cvCreateMemStorage(0);
CvSeq contour = new CvSeq(null);
cvFindContours(grayImage, storage, contour, Loader.sizeof(CvContour.class), CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
}
//Show the frame.
cvShowImage("My Window", colorImage);
//Wait for the user to see it.
cvWaitKey(10);
}
//If this is the first time, initialize the images.
//Thread.sleep(50);
}
}
}
In this code, for ROIFrame I need to calculate the white contour areas or the number of white pixels. Is there any way I can proceed with this?

Use the function cvContourArea() (see its documentation).
In your code, after your cvFindContours call, loop over all of your contours like this:
CvSeq* curr_contour = contour;
double area = 0;
while (curr_contour != NULL) {
area = fabs(cvContourArea(curr_contour, CV_WHOLE_SEQ, 0));
curr_contour = curr_contour->h_next;
}
Don't forget to store the area somewhere.
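If what you actually need is the number of white pixels inside the ROI rather than per-contour areas, cvCountNonZero() on the thresholded image may be enough, since it only counts inside the image's ROI. A minimal C sketch against the question's variables (JavaCV mirrors these calls in its opencv_core bindings):
// Count white (non-zero) pixels inside the region of interest.
// Assumes grayImage is the binary image produced by cvThreshold().
cvSetImageROI(grayImage, cvRect(72, 148, 265 - 72, 214 - 148));
int whitePixels = cvCountNonZero(grayImage); // respects the ROI
cvResetImageROI(grayImage);
printf("white pixels in ROI: %d\n", whitePixels);
To get a total contour area instead, sum fabs(cvContourArea(...)) over the h_next chain in the loop above.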

Related

Detecting intersection between 2 lines in webcam feed opencv

I'm trying to detect the intersection between two lines in a webcam feed. Here's a screenshot of what I already have.
I am trying to find the intersection between my red and green lines.
And here's the code of what I already have:
#include "stdafx.h"
#include <cv.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <highgui.h>
IplImage* imgTracking;
int lastX = -1;
int lastY = -1;
//This function threshold the HSV image and create a binary image
IplImage* GetThresholdedImage(IplImage* imgHSV){
IplImage* imgThresh=cvCreateImage(cvGetSize(imgHSV),IPL_DEPTH_8U, 1);
cvInRangeS(imgHSV, cvScalar(170,160,60), cvScalar(180,256,256), imgThresh);
return imgThresh;
}
void trackObject(IplImage* imgThresh){
// Calculate the moments of 'imgThresh'
CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
cvMoments(imgThresh, moments, 1);
double moment10 = cvGetSpatialMoment(moments, 1, 0);
double moment01 = cvGetSpatialMoment(moments, 0, 1);
double area = cvGetCentralMoment(moments, 0, 0);
// if the area < 1000, I consider that there is no object in the image; the area is non-zero only because of noise
if(area>1000){
// calculate the position of the ball
int posX = moment10/area;
int posY = moment01/area;
if(lastX>=0 && lastY>=0 && posX>=0 && posY>=0)
{
// Draw a red line (BGR 0,0,255) from the previous point to the current point
cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,0,255), 4);
}
lastX = posX;
lastY = posY;
}
cvLine(imgTracking,cv::Point(100,300) , cv::Point(600,300),cv::Scalar(0,200,0),2,8);
free(moments);
}
bool intersection(cv::Point lastX, cv::Point lastY, cv::Point, cv::Point)
{
// TODO: never implemented; see the UPDATE below for the approach that worked
return false;
}
/*
void imaginaryline()
{
cv::Mat img = cv::Mat::zeros(500, 500, CV_8UC3);
cv::line(img, cv::Point(100,200) , cv::Point(400,100),cv::Scalar(0,200,0),2,8);
}*/
int main(){
CvCapture* capture =0;
capture = cvCaptureFromCAM(0);
if(!capture){
printf("Capture failure\n");
return -1;
}
IplImage* frame=0;
frame = cvQueryFrame(capture);
if(!frame) return -1;
//create a blank image, assigned to 'imgTracking', with the same size as the original video
imgTracking=cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U, 3);
cvZero(imgTracking); //convert the image 'imgTracking' to black
cvNamedWindow("Video");
cvNamedWindow("Ball");
//iterate through each frames of the video
while(true){
frame = cvQueryFrame(capture);
if(!frame) break;
frame=cvCloneImage(frame);
cvSmooth(frame, frame, CV_GAUSSIAN,3,3); //smooth the original image using Gaussian kernel
IplImage* imgHSV = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
cvCvtColor(frame, imgHSV, CV_BGR2HSV); //Change the color format from BGR to HSV
IplImage* imgThresh = GetThresholdedImage(imgHSV);
cvSmooth(imgThresh, imgThresh, CV_GAUSSIAN,3,3); //smooth the binary image using Gaussian kernel
//track the position of the ball
trackObject(imgThresh);
printf("Pos X = %d\n", lastX);
printf("Pos Y = %d\n", lastY);
// Add the tracking image and the frame
cvAdd(frame, imgTracking, frame);
cvShowImage("Ball", imgThresh);
cvShowImage("Video", frame);
//Clean up used images
cvReleaseImage(&imgHSV);
cvReleaseImage(&imgThresh);
cvReleaseImage(&frame);
//Wait 10mS
int c = cvWaitKey(10);
//If 'ESC' is pressed, break the loop
if((char)c==27 ) break;
}
cvDestroyAllWindows() ;
cvReleaseImage(&imgTracking);
cvReleaseCapture(&capture);
return 0;
}
Thank you for your attention, guys; I am waiting for your responses.
UPDATE:
Thanks to Sebastian Schmitz, I solved it. Here's my code:
void checkIntersection(int line, int lastY, int y)
{
if(lastY < line && y >= line || lastY > line && y <= line)
{
printf("intersection detected");
}
}
void trackObject(IplImage* imgThresh){
// Calculate the moments of 'imgThresh'
CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
cvMoments(imgThresh, moments, 1);
double moment10 = cvGetSpatialMoment(moments, 1, 0);
double moment01 = cvGetSpatialMoment(moments, 0, 1);
double area = cvGetCentralMoment(moments, 0, 0);
// if the area < 1000, I consider that there is no object in the image; the area is non-zero only because of noise
if(area>1000){
// calculate the position of the ball
int posX = moment10/area;
int posY = moment01/area;
if(lastX>=0 && lastY>=0 && posX>=0 && posY>=0)
{
// Draw a red line (BGR 0,0,255) from the previous point to the current point
cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX, lastY), cvScalar(0,0,255), 4);
}
checkIntersection(300, lastY, posY);
lastX = posX;
lastY = posY;
}
cvLine(imgTracking,cv::Point(100,300) , cv::Point(600,300),cv::Scalar(0,200,0),2,8);
cvRectangle(imgTracking,cv::Point(400,400), cv::Point(450,450),cv::Scalar(0,200,0),2,8);
free(moments);
}
I put the call to the checkIntersection function inside the trackObject function so that I don't have to make the variable posY global, which led to many errors.
Thank you all for your responses.
If the line is always perfectly horizontal, it is enough to test whether the y coordinate of your last point is on one side of the line and your current point is on the other:
//Pseudocode:
int line = 20; //horizontal line on y-coordinate "20"
while(tracking == true){
int lastY = posY;
int y = getCoordinate().getY(); //call the y-coordinate of your current point
checkIntersection(line, lastY, y)
}
checkIntersection(int line, int lastY, int y){
if(lastY < line && y >= line ||
lastY > line && y <= line ){
print("intersection detected")
//optional additional check if point is between endpoint of line if you have to
}
}
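If the line is not perfectly horizontal, a general segment-segment test works: check which side of each segment the endpoints of the other lie on. A minimal C++ sketch (the helper names are mine, not from the post; it deliberately ignores the collinear/touching case):
#include <opencv2/core/core.hpp>
// Sign of the cross product (b-a) x (c-a): which side of line ab the point c is on.
static int orientation(cv::Point a, cv::Point b, cv::Point c)
{
long v = (long)(b.x - a.x) * (c.y - a.y) - (long)(b.y - a.y) * (c.x - a.x);
return (v > 0) - (v < 0);
}
// True if segment p1-p2 properly crosses segment p3-p4.
bool segmentsIntersect(cv::Point p1, cv::Point p2, cv::Point p3, cv::Point p4)
{
return orientation(p1, p2, p3) != orientation(p1, p2, p4) &&
orientation(p3, p4, p1) != orientation(p3, p4, p2);
}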

OpenCV Fingertip detection

Good day. I'm new to OpenCV and right now I'm trying to do fingertip detection using colour tracking and background subtraction methods. I got the colour tracking part working, but I have no idea how to subtract the background and leave only the fingertips.
Here is my code.
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
using namespace std;
IplImage* GetThresholdedImage(IplImage* img, CvScalar& lowerBound, CvScalar& upperBound)
{
// Convert the image into an HSV image
IplImage* imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
cvCvtColor(img, imgHSV, CV_BGR2HSV);
IplImage* imgThreshed = cvCreateImage(cvGetSize(img), 8, 1);
cvInRangeS(imgHSV, lowerBound, upperBound, imgThreshed);
cvReleaseImage(&imgHSV);
return imgThreshed;
}
int main()
{
int lineThickness = 2;
CvScalar lowerBound = cvScalar(20, 100, 100);
CvScalar upperBound = cvScalar(30, 255, 255);
int b,g,r;
lowerBound = cvScalar(0,58,89);
upperBound = cvScalar(25,173,229);
CvCapture* capture = 0;
capture = cvCaptureFromCAM(1);
if(!capture)
{
printf("Could not initialize capturing...\n");
return -1;
}
cvNamedWindow("video");
cvNamedWindow("thresh");
// This image holds the "scribble" data...
// the tracked positions of the object
IplImage* imgScribble = NULL;
while(true)
{
IplImage* frame = 0;
frame = cvQueryFrame(capture);
if(!frame)
break;
// If this is the first frame, we need to initialize it
if(imgScribble == NULL)
{
imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
}
// Holds the thresholded image (tracked color -> white, the rest -> black)
IplImage* imgThresh = GetThresholdedImage(frame,lowerBound,upperBound);
// Calculate the moments to estimate the position of the object
CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
cvMoments(imgThresh, moments, 1);
// The actual moment values
double moment10 = cvGetSpatialMoment(moments, 1, 0);
double moment01 = cvGetSpatialMoment(moments, 0, 1);
double area = cvGetCentralMoment(moments, 0, 0);
// Holding the last and current positions
static int posX = 0;
static int posY = 0;
int lastX = posX;
int lastY = posY;
posX = moment10/area;
posY = moment01/area;
cout << "position = " << posX << " " << posY << endl;
// We want to draw a line only if its a valid position
if(lastX>0 && lastY>0 && posX>0 && posY>0)
{
// Draw a yellow line from the previous point to the current point
cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), upperBound, lineThickness);
}
// Add the scribbling image and the frame...
cvAdd(frame, imgScribble, frame);
cvShowImage("thresh", imgThresh);
cvShowImage("video", frame);
int c = cvWaitKey(10);
if(c==27) //ESC key
{
break;
}
cvReleaseImage(&imgThresh);
free(moments); // allocated with malloc above, so free (not delete)
}
cvReleaseCapture(&capture);
return 0;
}
I don't know if I understand you right, but I think you need to add the following:
cvErode(imgThreshed, imgThreshed, NULL, 1);
cvDilate(imgThreshed, imgThreshed, NULL, 1);
in GetThresholdedImage to get less noise! But after all, I think it would be better for you to use the cv::Mat object of OpenCV ;)
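For reference, a minimal sketch of the same idea (HSV threshold, then erode/dilate to clean up noise) written against the cv::Mat interface suggested above; the bounds are the ones from the question:
#include <opencv2/opencv.hpp>
cv::Mat getThresholded(const cv::Mat& bgr)
{
cv::Mat hsv, mask;
cv::cvtColor(bgr, hsv, CV_BGR2HSV); // BGR -> HSV
cv::inRange(hsv, cv::Scalar(0, 58, 89), cv::Scalar(25, 173, 229), mask); // skin-colour range
cv::erode(mask, mask, cv::Mat()); // remove small noise specks (default 3x3 kernel)
cv::dilate(mask, mask, cv::Mat()); // grow the surviving blobs back
return mask;
}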
Try the BGS library; I used it before and liked it. You can get it here: http://code.google.com/p/bgslibrary/
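Alternatively, OpenCV itself ships a background subtractor. A minimal sketch using BackgroundSubtractorMOG2 from the OpenCV 2.4 C++ API (an alternative to the BGS library, not part of it):
#include <opencv2/opencv.hpp>
int main()
{
cv::VideoCapture cap(0);
if (!cap.isOpened()) return -1;
cv::BackgroundSubtractorMOG2 mog2; // learns the background over time
cv::Mat frame, fgMask;
while (cap.read(frame))
{
mog2(frame, fgMask); // update the model and get the foreground mask
cv::imshow("foreground", fgMask);
if (cv::waitKey(10) == 27) break; // ESC quits
}
return 0;
}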

Online Face Recognition using OpenCV

I am trying to implement online face recognition using the webcam. I am using these two websites as references:
shervinemami.co.cc
cognotics.com
I have a few questions:
In face recognition, there are 6 steps:
Grab a frame from the camera
Detect a face within the image
Crop the frame to show just the face
Convert the frame to greyscale
Preprocess the image
Recognize the person in the image.
I am able to do the first five steps. The last step I am not able to do; I am not sure how to link step 5 to step 6.
I have already created the train.txt and test.txt files, which contain the information about the training and testing images. I have already added functions such as learn() and doPCA() to the code...
But the point is how to use these functions in main to recognize an image that has already been preprocessed.
I need some help with it...
Attached the code below:
// Real-time.cpp : Defines the entry point for the console application.
#include "stdafx.h"
#include <cv.h>
#include <cxcore.h>
#include <highgui.h>
#include <cvaux.h>
IplImage ** faceImgArr = 0; // array of face images
CvMat * personNumTruthMat = 0; // array of person numbers
int nTrainFaces = 0; // the number of training images
int nEigens = 0; // the number of eigenvalues
IplImage * pAvgTrainImg = 0; // the average image
IplImage ** eigenVectArr = 0; // eigenvectors
CvMat * eigenValMat = 0; // eigenvalues
CvMat * projectedTrainFaceMat = 0; // projected training faces
IplImage* getCameraFrame(CvCapture* &camera);
IplImage* detectFaces( IplImage *img ,CvHaarClassifierCascade* facecascade,CvMemStorage* storage );
CvRect detectFaceInImage(IplImage *inputImg, CvHaarClassifierCascade* cascade);
IplImage* preprocess( IplImage* inputImg);
IplImage* resizeImage(const IplImage *origImg, int newWidth,
int newHeight, bool keepAspectRatio);
void learn();
void recognize();
void doPCA();
void storeTrainingData();
int loadTrainingData(CvMat ** pTrainPersonNumMat);
int findNearestNeighbor(float * projectedTestFace);
int loadFaceImgArray(char * filename);
int _tmain(int argc, _TCHAR* argv[])
{
CvCapture* camera = 0; // The camera device.
CvMemStorage *storage;
cvNamedWindow( "Realtime:", CV_WINDOW_AUTOSIZE);
char *faceCascadeFilename = "C:/OpenCV2.1/data/haarcascades/haarcascade_frontalface_alt.xml";
CvHaarClassifierCascade* faceCascade;
faceCascade = (CvHaarClassifierCascade*)cvLoad(faceCascadeFilename, 0, 0, 0);
storage = cvCreateMemStorage( 0 );
learn();
while ( cvWaitKey(10) != 27 ) // Quit on "Escape" key
{
IplImage *frame = getCameraFrame(camera);
//IplImage* resized=cvCreateImage(cvSize(420,240),frame->depth,3);
//cvResizeWindow( "Image:", 640, 480);
//cvResize(frame,resized);
//cvShowImage( "Realtime:", resized );
IplImage *imgA = resizeImage(frame, 420,240, true);
IplImage *frame1 = detectFaces(imgA,faceCascade,storage);
frame1 = preprocess(frame1);
}
// Free the camera.
cvReleaseCapture( &camera );
cvReleaseMemStorage( &storage );
return 0;
}
IplImage* getCameraFrame(CvCapture* &camera)
{
IplImage *frame;
int w, h;
// If the camera hasn't been initialized, then open it.
if (!camera) {
printf("Acessing the camera ...\n");
camera = cvCreateCameraCapture( 0 );
if (!camera) {
printf("Couldn't access the camera.\n");
exit(1);
}
// Try to set the camera resolution to 320 x 240.
cvSetCaptureProperty(camera, CV_CAP_PROP_FRAME_WIDTH, 320);
cvSetCaptureProperty(camera, CV_CAP_PROP_FRAME_HEIGHT, 240);
// Get the first frame, to make sure the camera is initialized.
frame = cvQueryFrame( camera );
if (frame) {
w = frame->width;
h = frame->height;
printf("Got the camera at %dx%d resolution.\n", w, h);
}
// Wait a little, so that the camera can auto-adjust its brightness.
Sleep(1000); // (in milliseconds)
}
// Wait until the next camera frame is ready, then grab it.
frame = cvQueryFrame( camera );
if (!frame) {
printf("Couldn't grab a camera frame.\n");
exit(1);
}
return frame;
}
CvRect detectFaceInImage(IplImage *inputImg, CvHaarClassifierCascade* cascade)
{
// Smallest face size.
CvSize minFeatureSize = cvSize(20, 20);
// Only search for 1 face.
int flags = CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_DO_ROUGH_SEARCH;
// How detailed should the search be.
float search_scale_factor = 1.1f;
IplImage *detectImg;
IplImage *greyImg = 0;
CvMemStorage* storage;
CvRect rc;
double t;
CvSeq* rects;
CvSize size;
int i, ms, nFaces;
storage = cvCreateMemStorage(0);
cvClearMemStorage( storage );
// If the image is color, use a greyscale copy of the image.
detectImg = (IplImage*)inputImg;
if (inputImg->nChannels > 1) {
size = cvSize(inputImg->width, inputImg->height);
greyImg = cvCreateImage(size, IPL_DEPTH_8U, 1 );
cvCvtColor( inputImg, greyImg, CV_BGR2GRAY );
detectImg = greyImg; // Use the greyscale image.
}
// Detect all the faces in the greyscale image.
t = (double)cvGetTickCount();
rects = cvHaarDetectObjects( detectImg, cascade, storage,
search_scale_factor, 3, flags, minFeatureSize);
t = (double)cvGetTickCount() - t;
ms = cvRound( t / ((double)cvGetTickFrequency() * 1000.0) );
nFaces = rects->total;
printf("Face Detection took %d ms and found %d objects\n", ms, nFaces);
// Get the first detected face (the biggest).
if (nFaces > 0)
rc = *(CvRect*)cvGetSeqElem( rects, 0 );
else
rc = cvRect(-1,-1,-1,-1); // Couldn't find the face.
if (greyImg)
cvReleaseImage( &greyImg );
cvReleaseMemStorage( &storage );
//cvReleaseHaarClassifierCascade( &cascade );
return rc; // Return the biggest face found, or (-1,-1,-1,-1).
}
IplImage* detectFaces( IplImage *img ,CvHaarClassifierCascade* facecascade,CvMemStorage* storage )
{
int i;
CvRect *r;
CvSeq *faces = cvHaarDetectObjects(
img,
facecascade,
storage,
1.1,
3,
0 /*CV_HAAR_DO_CANNY_PRUNING*/,
cvSize( 40, 40 ) );
int padding_width = 30; // pixels
int padding_height = 30; // pixels
for( i = 0 ; i < ( faces ? faces->total : 0 ) ; i++ ) {
r = ( CvRect* )cvGetSeqElem( faces, i );
cvRectangle( img,
cvPoint( r->x, r->y ),
cvPoint( r->x + r->width, r->y + r->height ),
CV_RGB( 255, 0, 0 ), 1, 8, 0 );
}
cvShowImage( "Realtime:", img );
//cropping the face (guard: r is only valid if at least one face was found)
if( faces && faces->total > 0 ) {
cvSetImageROI(img, cvRect(r->x,r->y,r->width,r->height));
IplImage *img2 = cvCreateImage(cvGetSize(img),
img->depth,
img->nChannels);
cvCopy(img, img2, NULL);
cvResetImageROI(img);
}
return img;
}
IplImage* preprocess( IplImage* inputImg){
IplImage *detectImg, *greyImg = 0;
IplImage *imageProcessed;
CvSize size;
detectImg = (IplImage*)inputImg;
if (inputImg->nChannels > 1) {
size = cvSize(inputImg->width, inputImg->height);
greyImg = cvCreateImage(size, IPL_DEPTH_8U, 1 );
cvCvtColor( inputImg, greyImg, CV_BGR2GRAY );
detectImg = greyImg; // Use the greyscale image.
}
imageProcessed = cvCreateImage(cvSize(inputImg->width, inputImg->height), IPL_DEPTH_8U, 1);
cvResize(detectImg, imageProcessed, CV_INTER_LINEAR);
cvEqualizeHist(imageProcessed, imageProcessed);
return imageProcessed;
}
IplImage* resizeImage(const IplImage *origImg, int newWidth,
int newHeight, bool keepAspectRatio)
{
IplImage *outImg = 0;
int origWidth;
int origHeight;
if (origImg) {
origWidth = origImg->width;
origHeight = origImg->height;
}
if (newWidth <= 0 || newHeight <= 0 || origImg == 0
|| origWidth <= 0 || origHeight <= 0) {
//cerr << "ERROR: Bad desired image size of " << newWidth
// << "x" << newHeight << " in resizeImage().\n";
exit(1);
}
if (keepAspectRatio) {
// Resize the image without changing its aspect ratio,
// by cropping off the edges and enlarging the middle section.
CvRect r;
// input aspect ratio
float origAspect = (origWidth / (float)origHeight);
// output aspect ratio
float newAspect = (newWidth / (float)newHeight);
// crop width to be origHeight * newAspect
if (origAspect > newAspect) {
int tw = (origHeight * newWidth) / newHeight;
r = cvRect((origWidth - tw)/2, 0, tw, origHeight);
}
else { // crop height to be origWidth / newAspect
int th = (origWidth * newHeight) / newWidth;
r = cvRect(0, (origHeight - th)/2, origWidth, th);
}
IplImage *croppedImg = cropImage(origImg, r);
// Call this function again, with the new aspect ratio image.
// Will do a scaled image resize with the correct aspect ratio.
outImg = resizeImage(croppedImg, newWidth, newHeight, false);
cvReleaseImage( &croppedImg );
}
else {
// Scale the image to the new dimensions,
// even if the aspect ratio will be changed.
outImg = cvCreateImage(cvSize(newWidth, newHeight),
origImg->depth, origImg->nChannels);
if (newWidth > origImg->width && newHeight > origImg->height) {
// Make the image larger
cvResetImageROI((IplImage*)origImg);
// CV_INTER_LINEAR: good at enlarging.
// CV_INTER_CUBIC: good at enlarging.
cvResize(origImg, outImg, CV_INTER_LINEAR);
}
else {
// Make the image smaller
cvResetImageROI((IplImage*)origImg);
// CV_INTER_AREA: good at shrinking (decimation) only.
cvResize(origImg, outImg, CV_INTER_AREA);
}
}
return outImg;
}
void learn()
{
int i, offset;
// load training data
nTrainFaces = loadFaceImgArray("C:/Users/HP/Desktop/OpenCV/50_images_of_15_people.txt");
if( nTrainFaces < 2 )
{
fprintf(stderr,
"Need 2 or more training faces\n"
"Input file contains only %d\n", nTrainFaces);
return;
}
// do PCA on the training faces
doPCA();
// project the training images onto the PCA subspace
projectedTrainFaceMat = cvCreateMat( nTrainFaces, nEigens, CV_32FC1 );
offset = projectedTrainFaceMat->step / sizeof(float);
for(i=0; i<nTrainFaces; i++)
{
//int offset = i * nEigens;
cvEigenDecomposite(
faceImgArr[i],
nEigens,
eigenVectArr,
0, 0,
pAvgTrainImg,
//projectedTrainFaceMat->data.fl + i*nEigens);
projectedTrainFaceMat->data.fl + i*offset);
}
// store the recognition data as an xml file
storeTrainingData();
}
void recognize()
{
int i, nTestFaces = 0; // the number of test images
CvMat * trainPersonNumMat = 0; // the person numbers during training
float * projectedTestFace = 0;
// load test images and ground truth for person number
nTestFaces = loadFaceImgArray("C:/Users/HP/Desktop/OpenCV/test.txt");
printf("%d test faces loaded\n", nTestFaces);
// load the saved training data
if( !loadTrainingData( &trainPersonNumMat ) ) return;
// project the test images onto the PCA subspace
projectedTestFace = (float *)cvAlloc( nEigens*sizeof(float) );
for(i=0; i<nTestFaces; i++)
{
int iNearest, nearest, truth;
// project the test image onto the PCA subspace
cvEigenDecomposite(
faceImgArr[i],
nEigens,
eigenVectArr,
0, 0,
pAvgTrainImg,
projectedTestFace);
iNearest = findNearestNeighbor(projectedTestFace);
truth = personNumTruthMat->data.i[i];
nearest = trainPersonNumMat->data.i[iNearest];
printf("nearest = %d, Truth = %d\n", nearest, truth);
}
}
int loadTrainingData(CvMat ** pTrainPersonNumMat)
{
CvFileStorage * fileStorage;
int i;
// create a file-storage interface
fileStorage = cvOpenFileStorage( "facedata.xml", 0, CV_STORAGE_READ );
if( !fileStorage )
{
fprintf(stderr, "Can't open facedata.xml\n");
return 0;
}
nEigens = cvReadIntByName(fileStorage, 0, "nEigens", 0);
nTrainFaces = cvReadIntByName(fileStorage, 0, "nTrainFaces", 0);
*pTrainPersonNumMat = (CvMat *)cvReadByName(fileStorage, 0, "trainPersonNumMat", 0);
eigenValMat = (CvMat *)cvReadByName(fileStorage, 0, "eigenValMat", 0);
projectedTrainFaceMat = (CvMat *)cvReadByName(fileStorage, 0, "projectedTrainFaceMat", 0);
pAvgTrainImg = (IplImage *)cvReadByName(fileStorage, 0, "avgTrainImg", 0);
eigenVectArr = (IplImage **)cvAlloc(nTrainFaces*sizeof(IplImage *));
for(i=0; i<nEigens; i++)
{
char varname[200];
sprintf( varname, "eigenVect_%d", i );
eigenVectArr[i] = (IplImage *)cvReadByName(fileStorage, 0, varname, 0);
}
// release the file-storage interface
cvReleaseFileStorage( &fileStorage );
return 1;
}
void storeTrainingData()
{
CvFileStorage * fileStorage;
int i;
// create a file-storage interface
fileStorage = cvOpenFileStorage( "facedata.xml", 0, CV_STORAGE_WRITE );
// store all the data
cvWriteInt( fileStorage, "nEigens", nEigens );
cvWriteInt( fileStorage, "nTrainFaces", nTrainFaces );
cvWrite(fileStorage, "trainPersonNumMat", personNumTruthMat, cvAttrList(0,0));
cvWrite(fileStorage, "eigenValMat", eigenValMat, cvAttrList(0,0));
cvWrite(fileStorage, "projectedTrainFaceMat", projectedTrainFaceMat, cvAttrList(0,0));
cvWrite(fileStorage, "avgTrainImg", pAvgTrainImg, cvAttrList(0,0));
for(i=0; i<nEigens; i++)
{
char varname[200];
sprintf( varname, "eigenVect_%d", i );
cvWrite(fileStorage, varname, eigenVectArr[i], cvAttrList(0,0));
}
// release the file-storage interface
cvReleaseFileStorage( &fileStorage );
}
int findNearestNeighbor(float * projectedTestFace)
{
//double leastDistSq = 1e12;
double leastDistSq = DBL_MAX;
int i, iTrain, iNearest = 0;
for(iTrain=0; iTrain<nTrainFaces; iTrain++)
{
double distSq=0;
for(i=0; i<nEigens; i++)
{
float d_i =
projectedTestFace[i] -
projectedTrainFaceMat->data.fl[iTrain*nEigens + i];
//distSq += d_i*d_i / eigenValMat->data.fl[i]; // Mahalanobis
distSq += d_i*d_i; // Euclidean
}
if(distSq < leastDistSq)
{
leastDistSq = distSq;
iNearest = iTrain;
}
}
return iNearest;
}
void doPCA()
{
int i;
CvTermCriteria calcLimit;
CvSize faceImgSize;
// set the number of eigenvalues to use
nEigens = nTrainFaces-1;
// allocate the eigenvector images
faceImgSize.width = faceImgArr[0]->width;
faceImgSize.height = faceImgArr[0]->height;
eigenVectArr = (IplImage**)cvAlloc(sizeof(IplImage*) * nEigens);
for(i=0; i<nEigens; i++)
eigenVectArr[i] = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);
// allocate the eigenvalue array
eigenValMat = cvCreateMat( 1, nEigens, CV_32FC1 );
// allocate the averaged image
pAvgTrainImg = cvCreateImage(faceImgSize, IPL_DEPTH_32F, 1);
// set the PCA termination criterion
calcLimit = cvTermCriteria( CV_TERMCRIT_ITER, nEigens, 1);
// compute average image, eigenvalues, and eigenvectors
cvCalcEigenObjects(
nTrainFaces,
(void*)faceImgArr,
(void*)eigenVectArr,
CV_EIGOBJ_NO_CALLBACK,
0,
0,
&calcLimit,
pAvgTrainImg,
eigenValMat->data.fl);
cvNormalize(eigenValMat, eigenValMat, 1, 0, CV_L1, 0);
}
int loadFaceImgArray(char * filename)
{
FILE * imgListFile = 0;
char imgFilename[512];
int iFace, nFaces=0;
// open the input file
if( !(imgListFile = fopen(filename, "r")) )
{
fprintf(stderr, "Can\'t open file %s\n", filename);
return 0;
}
// count the number of faces
while( fgets(imgFilename, 512, imgListFile) ) ++nFaces;
rewind(imgListFile);
// allocate the face-image array and person number matrix
faceImgArr = (IplImage **)cvAlloc( nFaces*sizeof(IplImage *) );
personNumTruthMat = cvCreateMat( 1, nFaces, CV_32SC1 );
// store the face images in an array
for(iFace=0; iFace<nFaces; iFace++)
{
// read person number and name of image file
fscanf(imgListFile,
"%d %s", personNumTruthMat->data.i+iFace, imgFilename);
// load the face image
faceImgArr[iFace] = cvLoadImage(imgFilename, CV_LOAD_IMAGE_GRAYSCALE);
if( !faceImgArr[iFace] )
{
fprintf(stderr, "Can\'t load image from %s\n", imgFilename);
return 0;
}
}
fclose(imgListFile);
return nFaces;
}
My answer may come late, but it might be useful for pals if I answer it. I am working on a similar project and I faced the same problem. I solved it by writing a function that saves the detected, cropped and preprocessed images to the hard disk of my computer (using imwrite), and feeding the paths of the saved images to the recognition part of the code. It has made my life easier. It has been a bit harder for me to pass the parameters of the rect of the region of interest. If you or someone else has done this, it would be great to share the code with us.
You can use the following code to save the image after resizing it to a constant size using the resizeImage function in your code.
void saveCroppedFaces(CvSeq* tempon,IplImage* DetectedImage)
{
char* name;
int nFaces;
CvRect rect;
nFaces=tempon->total;
name =new char[nFaces];
IplImage* cropped = 0;
IplImage* croppedResized=0;
Mat croped;
for(int k=0;k<nFaces;k++)
{
itoa(k,(name+k),10);
rect = *(CvRect*)cvGetSeqElem( tempon, k );
cropped= cropImage(DetectedImage,rect);
//i can resize the cropped faces in to a fixed size here
//i can write a function to save images and call it so
//that it will save it in to hard drive
//cvNamedWindow((name+k),CV_WINDOW_AUTOSIZE);
//cvShowImage((name+k),cropped);
croppedResized=resizeImage(cropped,60,60);
croped=IplToMatConverter(croppedResized);
saveROI(croped,itoa(k,(name+k),10));
cvReleaseImage(&cropped);
}
delete[] name;
name = NULL;
}
void saveROI(Mat mat,String outputFileName)
{
string store_path("C://Users/sizusuzu/Desktop/Images/FaceDetection2
/"+outputFileName+".jpg");
bool write_success = imwrite(store_path,mat);
}
After this you can change the IplImage* to Mat using
Mat IplToMatConverter(IplImage* imageToMat)
{
Mat mat = cvarrToMat(imageToMat);
return mat;
}
And use the Mat in the FaceRecognizer API. Or just do it the other/harder way.
Thanks
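For what it's worth, a minimal sketch of that FaceRecognizer route, using the Eigenfaces model from OpenCV 2.4's contrib module (the function and variable names here are illustrative, not from the answer above):
#include <cstdio>
#include <vector>
#include <opencv2/contrib/contrib.hpp>
// images: preprocessed greyscale faces, all the same size; labels: person ids.
void trainAndPredict(const std::vector<cv::Mat>& images,
const std::vector<int>& labels, const cv::Mat& testFace)
{
cv::Ptr<cv::FaceRecognizer> model = cv::createEigenFaceRecognizer();
model->train(images, labels); // builds the PCA subspace internally
int predicted = model->predict(testFace); // id of the nearest training person
printf("predicted person: %d\n", predicted);
}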
I just read
int _tmain(int argc, _TCHAR* argv[])
{
.......
}
part of your code. This code is used for detecting the face in the image. Let's say it is Face_x. Now extract features from Face_x; call them F_x. In your database, you should store the features {F_1, F_2, ..., F_N} extracted from N different faces {Face_1, Face_2, ..., Face_N}.
A simple algorithm to recognize Face_x is to calculate the Euclidean distance between F_x and each of the N stored features. The minimum distance (below a threshold) gives the corresponding face. If the minimum distance is not below the threshold, then Face_x is a new face; add the feature F_x to the database. This way you can grow your database, and you can even begin the algorithm with no features in the database: with each new face, the database grows.
I hope the method I suggested leads you to the solution.
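To make the step 5 -> step 6 link concrete with the functions already in the question: after preprocess() returns the face, project it with cvEigenDecomposite() and pass the result to findNearestNeighbor(). A sketch (it assumes the preprocessed face has the same size as the training images, and reuses the question's globals nEigens, eigenVectArr and pAvgTrainImg):
CvMat* trainPersonNumMat = 0;
loadTrainingData(&trainPersonNumMat); // load facedata.xml once, at startup
IplImage* face = preprocess(frame1); // step 5, as in the question's main loop
float* projectedTestFace = (float*)cvAlloc(nEigens * sizeof(float));
cvEigenDecomposite(face, nEigens, eigenVectArr, 0, 0,
pAvgTrainImg, projectedTestFace); // project onto the PCA subspace
int iNearest = findNearestNeighbor(projectedTestFace); // step 6
printf("Recognised person number: %d\n", trainPersonNumMat->data.i[iNearest]);
cvFree(&projectedTestFace);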

Read HSV value of pixel in opencv

How would you go about reading the pixel value in HSV format rather than RGB? The code below reads the pixel value at the circles' centers in RGB format. Is there much difference when it comes to reading the value in HSV?
int main(int argc, char** argv)
{
//load image from directory
IplImage* img = cvLoadImage("C:\\Users\\Nathan\\Desktop\\SnookerPic.png");
IplImage* gray = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
CvMemStorage* storage = cvCreateMemStorage(0);
//convert to grayscale
cvCvtColor(img, gray, CV_BGR2GRAY);
// This is done so as to prevent a lot of false circles from being detected
cvSmooth(gray, gray, CV_GAUSSIAN, 7, 7);
IplImage* canny = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,1);
IplImage* rgbcanny = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,3);
cvCanny(gray, canny, 50, 100, 3);
//detect circles
CvSeq* circles = cvHoughCircles(gray, storage, CV_HOUGH_GRADIENT, 1, 35.0, 75, 60,0,0);
cvCvtColor(canny, rgbcanny, CV_GRAY2BGR);
//draw all detected circles
for (int i = 0; i < circles->total; i++)
{
// round the floats to an int
float* p = (float*)cvGetSeqElem(circles, i);
cv::Point center(cvRound(p[0]), cvRound(p[1]));
int radius = cvRound(p[2]);
//uchar* ptr;
//ptr = cvPtr2D(img, center.y, center.x, NULL);
//printf("B: %d G: %d R: %d\n", ptr[0],ptr[1],ptr[2]);
CvScalar s;
s = cvGet2D(img,center.y, center.x);//colour of circle
printf("B: %f G: %f R: %f\n",s.val[0],s.val[1],s.val[2]);
// draw the circle center
cvCircle(img, center, 3, CV_RGB(0,255,0), -1, 8, 0 );
// draw the circle outline
cvCircle(img, center, radius+1, CV_RGB(0,0,255), 2, 8, 0 );
//display coordinates
printf("x: %d y: %d r: %d\n",center.x,center.y, radius);
}
//create window
//cvNamedWindow("circles", 1);
cvNamedWindow("SnookerImage", 1);
//show image in window
//cvShowImage("circles", rgbcanny);
cvShowImage("SnookerImage", img);
cvSaveImage("out.png", img);
//cvDestroyWindow("SnookerImage");
//cvDestroyWindow("circles");
//cvReleaseMemStorage(&storage);
cvWaitKey(0);
return 0;
}
If you use the C++ interface, you can use
cv::cvtColor(img, img, CV_BGR2HSV);
See the documentation for cvtColor for more information.
Update:
Reading and writing pixels the slow way (assuming that the HSV values are stored as a cv::Vec3b (doc))
cv::Vec3b pixel = image.at<cv::Vec3b>(0,0); // read pixel (0,0) (make copy)
pixel[0] = 0; // H
pixel[1] = 0; // S
pixel[2] = 0; // V
image.at<cv::Vec3b>(0,0) = pixel; // write pixel (0,0) (copy pixel back to image)
Using the image.at<...>(x, y) notation (doc, scroll down a lot) is quite slow if you want to manipulate every pixel. There is an article in the documentation on how to access the pixels faster. You can apply the iterator method like this:
cv::MatIterator_<cv::Vec3b> it = image.begin<cv::Vec3b>(),
it_end = image.end<cv::Vec3b>();
for(; it != it_end; ++it)
{
// work with pixel in here, e.g.:
cv::Vec3b& pixel = *it; // reference to pixel in image
pixel[0] = 0; // changes pixel in image
}
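For the C interface the question uses, the equivalent is to convert a copy of the frame to HSV and read the centre pixel with cvGet2D(). A minimal sketch reusing img and center from the posted code:
IplImage* hsv = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
cvCvtColor(img, hsv, CV_BGR2HSV); // keep img in BGR for display
CvScalar p = cvGet2D(hsv, center.y, center.x); // pixel at the circle centre
printf("H: %f S: %f V: %f\n", p.val[0], p.val[1], p.val[2]);
cvReleaseImage(&hsv);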

Feature tracking using Lucas Kanade

I am in the process of implementing the Lucas-Kanade algorithm using OpenCV. Even though my intention is to track facial features, as a first cut I am getting all the good features using the cvGoodFeaturesToTrack API and, using these points as the input, I am trying to track them with the Lucas-Kanade algorithm.
Now the scenario: if I start moving the objects captured by the camera near the edges of the frame (not out of the frame), what I observe is that the LK algorithm starts giving me points that are either outside the frame or have negative values.
Please let me know whether my implementation is wrong or whether this behavior is expected from the LK tracking method. I am also attaching my code at the end of this post for reference.
Regards,
Sujil C
IplImage *image = 0,
*grey = 0,
*prev_grey = 0,
*pyramid = 0,
*prev_pyramid = 0,
*swap_temp = 0,
*velx = 0,
*vely = 0;
const int MAX_COUNT = 20;
CvPoint2D32f* points[2] = {0,0}, *swap_points;
char* status = 0;
int lkcount = 0;
int detectGoodFeature = 0;
int flags = 0;
CvPoint pt;
CvSize currentFrameSize;
CvPoint2D32f* processLKFrame(IplImage* frame, int &pointCount)
{
int win_size = 15;
int level = 5;
int i, k;
// If frame size has changed, release all resources (they will be reallocated further on)
if ( (grey && ((currentFrameSize.width != cvGetSize(frame).width) || (currentFrameSize.height != cvGetSize(frame).height))))
{
// Release images
cvReleaseImage(&grey);
cvReleaseImage(&prev_grey);
cvReleaseImage(&pyramid);
cvReleaseImage(&prev_pyramid);
cvReleaseImage(&velx);
cvReleaseImage(&vely);
// Release buffers
cvFree(&(points[0]));
cvFree(&(points[1]));
cvFree(&status);
// Zerofiy grey so initialization will occur
grey = NULL;
}
// Initialize
if( !grey )
{
/* allocate all the buffers */
currentFrameSize = cvGetSize(frame);
grey = cvCreateImage( currentFrameSize, 8, 1 );
prev_grey = cvCreateImage( currentFrameSize, 8, 1 );
pyramid = cvCreateImage( currentFrameSize, 8, 1 );
prev_pyramid = cvCreateImage( currentFrameSize, 8, 1 );
velx = cvCreateImage(currentFrameSize, 32, 1);
vely = cvCreateImage(currentFrameSize, 32, 1);
points[0] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
points[1] = (CvPoint2D32f*)cvAlloc(MAX_COUNT*sizeof(points[0][0]));
status = (char*)cvAlloc(MAX_COUNT);
flags = 0;
}
printf("Current Frame Size : Width:%d Height:%d\n", currentFrameSize.width, currentFrameSize.height );
cvCopy( frame, grey, 0 );
if (detectGoodFeature) {
/* automatic initialization */
IplImage* eig = cvCreateImage( cvGetSize(grey), 32, 1 );
IplImage* temp = cvCreateImage( cvGetSize(grey), 32, 1 );
double quality = 0.01;
double min_distance = 10;
lkcount = MAX_COUNT;
cvGoodFeaturesToTrack( grey, eig, temp, points[1], &lkcount,
quality, min_distance, 0, 3, 0, 0.04 );
cvFindCornerSubPix( grey, points[1], lkcount,
cvSize(win_size,win_size), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03));
cvReleaseImage( &eig );
cvReleaseImage( &temp );
}
else if( lkcount > 0 )
{
//For debugging points
printf("==============================================================================================================\n");
printf("Input Points:");
for (int i = 0; i < lkcount; i++) {
printf("(%f, %f)", points[0][i].x, points[0][i].y);
}
printf("\n");
// Calc movement of tracked points
cvCalcOpticalFlowPyrLK( prev_grey, grey, prev_pyramid, pyramid,
points[0], points[1], lkcount, cvSize(win_size,win_size), level, status, 0,
cvTermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03), 0 );
//For debugging points
printf("Tracked Points:");
for (int i = 0; i < lkcount; i++) {
printf("(%f, %f),", points[1][i].x, points[1][i].y);
}
printf("\n");
printf("==============================================================================================================\n");
}
CV_SWAP( prev_grey, grey, swap_temp );
CV_SWAP( prev_pyramid, pyramid, swap_temp );
CV_SWAP( points[0], points[1], swap_points );
detectGoodFeature = 0;
pointCount = lkcount;
return points[0];
}
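One detail worth checking in the code above: the status array filled by cvCalcOpticalFlowPyrLK() is never inspected. When the tracker loses a point it can report coordinates outside the frame or negative values, which matches the behaviour described. A sketch of filtering on status and the frame bounds, using the variables from processLKFrame():
// Keep only points that were tracked successfully and are still inside the frame.
int valid = 0;
for (int i = 0; i < lkcount; i++) {
if (!status[i]) continue; // tracker lost this point
if (points[1][i].x < 0 || points[1][i].y < 0 ||
points[1][i].x >= currentFrameSize.width ||
points[1][i].y >= currentFrameSize.height) continue; // drifted out of the frame
points[1][valid++] = points[1][i];
}
lkcount = valid;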
