I am trying to detect ears in a profile (side-view) image of a face. I tried the Haar cascades provided with OpenCV (haarcascade_mcs_rightear, haarcascade_mcs_leftear, left_ear.xml, right_ear.xml). I am able to detect the profile face, and I specify a region of interest for the ear using the profile-face detection, but I am not able to detect ears with any of the cascades. I cannot find out where I am going wrong; please help me with that. A code snippet for the same would be of great help. Thanks in advance. The code, with comments, follows.
#include <stdio.h>
#include <conio.h>
#include <assert.h>
#include <iostream> // needed for cout
#include "cv.h"
#include "highgui.h"

using namespace std;
using namespace cv;

CvMemStorage *storage;

int detectFeature(int, char *imname, IplImage *image, CvRect featureROI, Rect *feature_box);

const char *file_profileface = "haarcascade_profileface.xml";
const char *ear_profileface = "left_ear.xml"; // ear cascade name

CvRect profile_face;
CvRect ear;

int main()
{
    int flagFaceDetect;
    storage = cvCreateMemStorage(0);
    assert(storage);

    Rect faceRect, leftEar, rightEar;
    char myimage1[50];
    sprintf(myimage1, "profile%d.jpg", 1);
    IplImage *img = cvLoadImage(myimage1, CV_LOAD_IMAGE_COLOR);

    /* first detect the profile face, then detect ears inside it */
    Rect *rectptr = &faceRect; // rectangle for the profile face
    CvRect face_roi = cvGetImageROI(img);
    flagFaceDetect = detectFeature(0, myimage1, img, face_roi, rectptr);

    rectptr = &leftEar;
    // set the ROI for the ear with respect to the profile-face rectangle
    CvRect leftear_roi = cvRect(faceRect.x + faceRect.x * 3 / 4,
                                faceRect.y + faceRect.height / 4,
                                faceRect.x + faceRect.width + faceRect.width / 10,
                                faceRect.y + faceRect.height - faceRect.height / 4);
    flagFaceDetect = detectFeature(1, myimage1, img, leftear_roi, rectptr);

    getch();
    return 0;
}
// general function to locate a feature
int detectFeature(int feature_index, char *imname, IplImage *image, CvRect featureROI, Rect *feature_box)
{
    cvSetImageROI(image, featureROI);
    CvSeq *feature;
    CvHaarClassifierCascade *featureCascade;
    if (feature_index == 0) // cascade for the profile face
        featureCascade = (CvHaarClassifierCascade *)cvLoad(file_profileface, 0, 0, 0);
    if (feature_index == 1) // cascade for the ear
        featureCascade = (CvHaarClassifierCascade *)cvLoad(ear_profileface, 0, 0, 0);

    // feature = cvHaarDetectObjects(image, featureCascade, storage, 1.1, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(50, 50));
    feature = cvHaarDetectObjects(image, featureCascade, storage, 1.2, 3, 0, cvSize(18, 12));
    cvResetImageROI(image);

    IplImage *displayImage = cvLoadImage(imname, CV_LOAD_IMAGE_COLOR);
    if (feature->total == 0) {
        cout << "no feature detected" << endl;
        return 0;
    }
    else {
        CvRect *fture = (CvRect *)cvGetSeqElem(feature, 0);
        feature_box->x = fture->x;
        feature_box->y = fture->y;
        feature_box->height = fture->height;
        feature_box->width = fture->width;
        /* draw a rectangle around the feature (CV_RGB(0, 0, 255) is blue) */
        cvRectangle(displayImage,
                    cvPoint(fture->x + fture->x * 3 / 4, fture->y + fture->height / 4),
                    cvPoint(fture->x + fture->width + fture->width / 10, fture->y + fture->height - fture->height / 4),
                    CV_RGB(0, 0, 255),
                    1, 8, 0);
        cvShowImage("frame", displayImage);
        cvWaitKey(0);
        return 1;
    }
}
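Two things may be worth checking here (observations on the code above, not a verified fix). First, cvRect takes (x, y, width, height), but the third and fourth arguments computed for leftear_roi look like absolute corner coordinates; the x offset also uses faceRect.x * 3 / 4 where faceRect.width * 3 / 4 may have been intended. Second, cvHaarDetectObjects operates on the ROI as if it were the whole image, so the returned rectangles are relative to the ROI origin and need to be offset back before drawing on the full frame. A minimal sketch, assuming fture and featureROI as in detectFeature above:

// Sketch: map an ROI-relative detection back to full-image coordinates.
// Assumes fture came from cvHaarDetectObjects on an image whose ROI
// had been set to featureROI, as in detectFeature above.
CvPoint topLeft = cvPoint(featureROI.x + fture->x,
                          featureROI.y + fture->y);
CvPoint bottomRight = cvPoint(featureROI.x + fture->x + fture->width,
                              featureROI.y + fture->y + fture->height);
cvRectangle(displayImage, topLeft, bottomRight, CV_RGB(255, 0, 0), 1, 8, 0);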
Hello and thanks for your help.
I would like to test the use of shapes for matching in OpenCV and have managed to do the matching part.
To locate the rotated shape, I thought the AffineTransformer class would be the right choice. As I don't know how the matching works internally, it would be nice if someone had a link where the procedure is described.
As shawshank mentioned, my code below throws an assertion-failed error because the variable matches is empty when it is passed to the estimateTransformation function. Does anybody know how to use this function the right way, and what it really does?
#include<opencv2/opencv.hpp>
#include<algorithm>
#include<iostream>
#include<string>
#include<opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;
bool rotateImage(Mat src, Mat &dst, double angle)
{
    // get rotation matrix for rotating the image around its center
    cv::Point2f center(src.cols / 2.0, src.rows / 2.0);
    cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
    // determine the bounding rectangle of the rotated image
    cv::Rect bbox = cv::RotatedRect(center, src.size(), angle).boundingRect();
    // adjust the transformation matrix so the result stays inside the bounds
    rot.at<double>(0, 2) += bbox.width / 2.0 - center.x;
    rot.at<double>(1, 2) += bbox.height / 2.0 - center.y;
    cv::warpAffine(src, dst, rot, bbox.size());
    return true;
}
static vector<Point> sampleContour(const Mat& image, int n = 300)
{
    vector<vector<Point>> contours;
    vector<Point> all_points;
    findContours(image, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
    for (size_t i = 0; i < contours.size(); i++)
        for (size_t j = 0; j < contours[i].size(); j++)
            all_points.push_back(contours[i][j]);

    // if fewer than n points were found, duplicate existing ones until n is reached
    int dummy = 0;
    for (int add = (int)all_points.size(); add < n; add++)
        all_points.push_back(all_points[dummy++]);

    // shuffle, then keep the first n points
    random_shuffle(all_points.begin(), all_points.end());
    vector<Point> sampled(all_points.begin(), all_points.begin() + n);
    return sampled;
}
int main(void)
{
    Mat img1, img2;
    vector<Point> img1Points, img2Points;
    float distSC, distHD;

    // read the image and build a rotated copy
    string img1Path = "testimage.jpg";
    img1 = imread(img1Path, IMREAD_GRAYSCALE);
    rotateImage(img1, img2, 45);
    imshow("original", img1);
    imshow("transformed", img2);
    waitKey();

    // contours
    img1Points = sampleContour(img1);
    img2Points = sampleContour(img2);

    // calculate distances
    Ptr<ShapeContextDistanceExtractor> mysc = createShapeContextDistanceExtractor();
    Ptr<HausdorffDistanceExtractor> myhd = createHausdorffDistanceExtractor();
    distSC = mysc->computeDistance(img1Points, img2Points);
    distHD = myhd->computeDistance(img1Points, img2Points);
    cout << distSC << endl << distHD << endl;

    vector<DMatch> matches;
    Ptr<AffineTransformer> transformerHD = createAffineTransformer(0);
    transformerHD->estimateTransformation(img1Points, img2Points, matches); // asserts: matches is empty here
    return 0;
}
I have used the AffineTransformer class on a 2D image. Below is basic code that will give you an idea of what it does. Note that it builds the matches vector before calling estimateTransformation; passing an empty matches is what triggers the assertion in your code.
// My OpenCv AffineTransformer demo code
// I have tested this on a 500 x 500 resolution image
#include <iostream>
#include "opencv2/opencv.hpp"
#include <vector>
using namespace cv;
using namespace std;
int arrSize = 10;
int sourcePx[]={154,155,159,167,182,209,238,265,295,316};
int sourcePy[]={190,222,252,285,314,338,344,340,321,290};
int tgtPx[]={120,127,137,150,188,230,258,285,305,313};
int tgtPy[]={207,245,275,305,336,345,342,332,305,274};
int main()
{
    // prepare 'vector of points' from the hard-coded points above
    int sInd = 0, eInd = arrSize;
    vector<Point2f> sourceP, tgtP;
    for (int i = sInd; i < eInd; i++) sourceP.push_back(Point2f(sourcePx[i], sourcePy[i]));
    for (int i = sInd; i < eInd; i++) tgtP.push_back(Point2f(tgtPx[i], tgtPy[i]));

    // create an object of AffineTransformer
    bool fullAffine = true; // change its value and see the difference in the result
    auto aft = cv::createAffineTransformer(fullAffine);

    // prepare vector<cv::DMatch> - this is just a mapping of corresponding point indices
    std::vector<cv::DMatch> matches;
    for (size_t i = 0; i < sourceP.size(); ++i) matches.push_back(cv::DMatch((int)i, (int)i, 0));

    // read image
    Mat srcImg = imread("image1.jpg");
    Mat tgtImg;

    // estimate the points transformation, then apply it
    aft->estimateTransformation(sourceP, tgtP, matches);
    aft->applyTransformation(sourceP, tgtP);

    // warp the image and show the generated output
    aft->warpImage(srcImg, tgtImg);
    imshow("warped output", tgtImg);
    waitKey(0);
    return 0;
}
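For reference (this is the general meaning of the fullAffine flag in OpenCV, not something specific to this demo): with fullAffine = true the transformer estimates a full 6-parameter affine transform, while with false it is restricted to a partial affine (rotation, uniform scale and translation), which is why the comment above suggests toggling the value to see the difference in the result.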
Why is webcam image processing very slow in Xcode for this OpenCV project, and why is only one of the three windows working (the HSV and "similar spaces" windows are not turning up)? How can I increase the program's execution speed?
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace cv;
using namespace std;

Mat img, hsv, res;
const char *win1 = "RGB";
const char *win2 = "HSV";
const char *win3 = "similar spaces";
uchar thresh = 5;

void setColor(uchar hval){
    // mark every pixel whose hue is within thresh of hval
    for (int i = 0; i < res.rows; ++i){
        for (int j = 0; j < res.cols; ++j){
            if (hsv.at<Vec3b>(i,j)[0] <= hval + thresh
                && hsv.at<Vec3b>(i,j)[0] >= hval - thresh)
                res.at<uchar>(i,j) = 255;
            else
                res.at<uchar>(i,j) = 0;
        }
    }
    imshow(win3, res);
}

void MouseCallBackFunc(int event, int x, int y, int flags, void* userdata){
    if (event == EVENT_LBUTTONDOWN){
        cout << "\t x,y : " << x << ',' << y << endl;
        cout << '\t' << img.at<Vec3b>(y,x) << endl;
        setColor(hsv.at<Vec3b>(y,x)[0]);
    }
}

int main()
{
    img = imread("/usr/share/opencv/samples/cpp/stuff.jpg", CV_LOAD_IMAGE_COLOR);
    hsv = Mat::zeros(img.size(), CV_8UC3);
    res = Mat::zeros(img.size(), CV_8UC1);
    char c;
    namedWindow(win2, CV_WINDOW_NORMAL);
    namedWindow(win3, CV_WINDOW_NORMAL);
    cvtColor(img, hsv, CV_BGR2HSV); // imread loads BGR, so convert from BGR, not RGB
    imshow(win1, img);
    imshow(win2, hsv);
    imshow(win3, res);
    setMouseCallback(win1, MouseCallBackFunc, NULL);

    // VideoCapture stream(0);   // 0 is the id of the video device (0 if you have only one camera)
    // if (!stream.isOpened()) { // check if the video device has been initialised
    //     cout << "cannot open camera";
    // }
    // while (true) {
    //     Mat cameraFrame;
    //     stream.read(cameraFrame);
    //     imshow("test", cameraFrame);
    //     c = waitKey(30);
    //     if (c == 27)
    //         break;
    // }

    while ((c = waitKey(300)) != 27) {}
    return 0;
}
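On the speed question, one likely cost (an observation, not a profiled result) is the per-pixel double loop in setColor; cv::inRange performs the same hue-band test in optimized code. A minimal sketch, assuming the same hsv, res, thresh and win3 globals as above (setColorFast is just a name for this sketch):

// Sketch: vectorized replacement for setColor's per-pixel loop.
// inRange tests H in [hval - thresh, hval + thresh] and leaves S and V
// effectively unconstrained, matching the original test on channel 0 only.
void setColorFast(uchar hval){
    inRange(hsv,
            Scalar(hval - thresh, 0, 0),
            Scalar(hval + thresh, 255, 255),
            res); // res becomes a CV_8UC1 mask: 255 inside the band, 0 outside
    imshow(win3, res);
}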
I have a doubt in OpenCV: I'm trying to implement the SURF algorithm, but when I build the code I get the following errors.

error LNK2019: unresolved external symbol _cvExtractSURF referenced in function _main
1>SAMPLE.obj : error LNK2019: unresolved external symbol _cvSURFParams referenced in function _main

I have gone through all the posts related to my topic in this forum, but couldn't figure out the problem with my code. Please help me resolve it.

Code:
#include <stdio.h>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/legacy/legacy.hpp>
#include <opencv2/legacy/compat.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv/opensurf/surf.h>

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    cvNamedWindow("Image", 1);
    int key = 0;
    static CvScalar red_color[] = {0, 0, 255};
    IplImage* capture = cvLoadImage("testface.jpg");
    CvMat* prevgray = 0, *image = 0, *gray = 0;
    while (key != 'q')
    {
        int firstFrame = gray == 0;
        IplImage* frame = capture;
        if (!frame)
            break;
        if (!gray)
        {
            image = cvCreateMat(frame->height, frame->width, CV_8UC1);
        }
        // convert the loaded BGR image into grayscale
        cvCvtColor(frame, image, CV_BGR2GRAY);

        // sequences for storing SURF keypoints and descriptors
        CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
        int i;

        // extract SURF points, initializing the parameters
        CvSURFParams params = cvSURFParams(500, 1);
        cvExtractSURF(image, 0, &imageKeypoints, &imageDescriptors, storage, params);
        printf("Image Descriptors: %d\n", imageDescriptors->total);

        // draw the keypoints on the frame
        for (i = 0; i < imageKeypoints->total; i++)
        {
            CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem(imageKeypoints, i);
            CvPoint center;
            int radius;
            center.x = cvRound(r->pt.x);
            center.y = cvRound(r->pt.y);
            radius = cvRound(r->size * 1.2 / 9. * 2);
            cvCircle(frame, center, radius, red_color[0], 1, 8, 0);
        }
        cvShowImage("Image", frame);
        key = cvWaitKey(0); // store the pressed key so 'q' can exit the loop
    }
    cvDestroyWindow("Image");
    return 0;
}
Thank you,
Sreelakshmi Priya
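One note on the linker errors themselves (an observation, since the project setup isn't shown): LNK2019 on _cvExtractSURF and _cvSURFParams usually means the OpenCV module that defines them is not in the linker input. In OpenCV 2.4.x these C-API SURF functions live in the legacy/nonfree modules, so adding the corresponding .lib files to the Visual Studio linker settings, or via pragmas, is the usual fix. A sketch with hypothetical library names (adjust the version suffix and the debug 'd' to your build):

// Sketch: tell the MSVC linker to pull in the modules that define the SURF C API.
// Library names are hypothetical for an OpenCV 2.4.9 debug build; match your install.
#pragma comment(lib, "opencv_legacy249d.lib")
#pragma comment(lib, "opencv_nonfree249d.lib")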
Good day. I'm new to OpenCV, and right now I'm trying to do fingertip detection using colour tracking and background subtraction. I got the colour tracking part working, but I have no idea how to subtract the background and leave only the fingertips.
Here is my code.
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include <iostream>
using namespace std;
IplImage* GetThresholdedImage(IplImage* img, CvScalar& lowerBound, CvScalar& upperBound)
{
    // convert the image into an HSV image
    IplImage* imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
    cvCvtColor(img, imgHSV, CV_BGR2HSV);
    IplImage* imgThreshed = cvCreateImage(cvGetSize(img), 8, 1);
    cvInRangeS(imgHSV, lowerBound, upperBound, imgThreshed);
    cvReleaseImage(&imgHSV);
    return imgThreshed;
}
int main()
{
    int lineThickness = 2;
    // HSV bounds for the tracked colour
    CvScalar lowerBound = cvScalar(0, 58, 89);
    CvScalar upperBound = cvScalar(25, 173, 229);

    CvCapture* capture = cvCaptureFromCAM(1);
    if (!capture)
    {
        printf("Could not initialize capturing...\n");
        return -1;
    }
    cvNamedWindow("video");
    cvNamedWindow("thresh");

    // this image holds the "scribble" data:
    // the tracked positions of the object
    IplImage* imgScribble = NULL;

    while (true)
    {
        IplImage* frame = cvQueryFrame(capture);
        if (!frame)
            break;

        // if this is the first frame, we need to initialize the scribble image
        if (imgScribble == NULL)
        {
            imgScribble = cvCreateImage(cvGetSize(frame), 8, 3);
        }

        // holds the thresholded image (tracked colour -> white, the rest -> black)
        IplImage* imgThresh = GetThresholdedImage(frame, lowerBound, upperBound);

        // calculate the moments to estimate the position of the object
        CvMoments *moments = (CvMoments*)malloc(sizeof(CvMoments));
        cvMoments(imgThresh, moments, 1);

        // the actual moment values
        double moment10 = cvGetSpatialMoment(moments, 1, 0);
        double moment01 = cvGetSpatialMoment(moments, 0, 1);
        double area = cvGetCentralMoment(moments, 0, 0);

        // holding the last and current positions
        static int posX = 0;
        static int posY = 0;
        int lastX = posX;
        int lastY = posY;
        posX = moment10 / area;
        posY = moment01 / area;
        cout << "position = " << posX << " " << posY << endl;

        // we want to draw a line only if it is a valid position
        if (lastX > 0 && lastY > 0 && posX > 0 && posY > 0)
        {
            // draw a line from the previous point to the current point
            cvLine(imgScribble, cvPoint(posX, posY), cvPoint(lastX, lastY), upperBound, lineThickness);
        }

        // add the scribbling image and the frame
        cvAdd(frame, imgScribble, frame);
        cvShowImage("thresh", imgThresh);
        cvShowImage("video", frame);

        int c = cvWaitKey(10);
        if (c == 27) // ESC key
        {
            break;
        }

        cvReleaseImage(&imgThresh);
        free(moments); // allocated with malloc, so release with free, not delete
    }
    cvReleaseCapture(&capture);
    return 0;
}
I don't know if I understand you right, but I think you need to add the following in GetThresholdedImage to get less noise:

cvErode(imgThreshed, imgThreshed, NULL, 1);
cvDilate(imgThreshed, imgThreshed, NULL, 1);

After all, though, I think it would be better for you to use OpenCV's cv::Mat object ;)
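For illustration, a minimal sketch of the same threshold-and-clean step with the C++ API (the bounds are the ones hard-coded in the question; getThresholdedMat is just a name for this sketch):

// Sketch: C++-API equivalent of GetThresholdedImage plus the
// erode/dilate cleanup suggested above.
cv::Mat getThresholdedMat(const cv::Mat& bgr)
{
    cv::Mat hsv, mask;
    cv::cvtColor(bgr, hsv, CV_BGR2HSV);
    cv::inRange(hsv, cv::Scalar(0, 58, 89), cv::Scalar(25, 173, 229), mask);
    cv::erode(mask, mask, cv::Mat());  // remove small speckles
    cv::dilate(mask, mask, cv::Mat()); // restore the surviving blobs
    return mask;
}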
Try the BGS library; I have used it before and like it. You can get it here: http://code.google.com/p/bgslibrary/
I am new to OpenCV and am currently trying to load and save a defined ROI of an image.
For OpenCV 1.x, I got it working with the following function...
#include <cv.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
void SaveROI(const CStringA& inputFile, const CStringA& outputFile)
{
    if (ATLPath::FileExists(inputFile))
    {
        CvRect rect;
        rect.x = 8;
        rect.y = 90;
        rect.width = 26;
        rect.height = 46;
        IplImage* imgInput = cvLoadImage(inputFile.GetString(), 1);
        IplImage* imgRoi = cvCloneImage(imgInput);
        cvSetImageROI(imgRoi, rect);
        cvSaveImage(outputFile.GetString(), imgRoi);
        cvReleaseImage(&imgInput);
        cvReleaseImage(&imgRoi);
    }
}
How can this be done with OpenCV 2 and the C++ API? I tried the following without success; the whole image is saved.
void SaveROICPP(const CStringA& inputFile, const CStringA& outputFile)
{
    if (ATLPath::FileExists(inputFile))
    {
        cv::Mat imgInput = cv::imread(inputFile.GetString());
        if (imgInput.data != NULL)
        {
            cv::Mat imgRoi = imgInput(cv::Rect(8, 90, 26, 46));
            imgInput.copyTo(imgRoi);
            cv::imwrite(outputFile.GetString(), imgRoi);
        }
    }
}
Any help or suggestion?
You just don't need to call copyTo:
void SaveROICPP(const CStringA& inputFile, const CStringA& outputFile)
{
    if (ATLPath::FileExists(inputFile))
    {
        cv::Mat imgInput = cv::imread(inputFile.GetString());
        if (imgInput.data != NULL)
        {
            cv::Mat imgRoi = imgInput(cv::Rect(8, 90, 26, 46));
            cv::imwrite(outputFile.GetString(), imgRoi);
        }
    }
}
In your version, copyTo sees that imgInput is bigger than imgRoi and reallocates a new full-size matrix to make the copy. imgRoi is already a sub-image (it shares imgInput's pixel data), and you can simply pass it to any OpenCV function.
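If you do want the crop to own its pixels (for example, so it survives later changes to imgInput), clone() is the standard way; a small sketch using the same names as above:

// Sketch: clone() gives the ROI its own contiguous buffer,
// decoupling it from imgInput's data.
cv::Mat imgRoiCopy = imgInput(cv::Rect(8, 90, 26, 46)).clone();
cv::imwrite(outputFile.GetString(), imgRoiCopy);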
Here is some tested code for blending, cropping, and saving new images: you crop and then save that region to a new file.
#include <cv.h>
#include <highgui.h>
#include <math.h>

// usage: alphablend <imageA> <imageB> <x> <y> <width> <height> <alpha> <beta>

IplImage* crop(IplImage* src, CvRect roi)
{
    // must have the dimensions of the output image
    IplImage* cropped = cvCreateImage(cvSize(roi.width, roi.height), src->depth, src->nChannels);
    // say what the source region is
    cvSetImageROI(src, roi);
    // do the copy
    cvCopy(src, cropped);
    cvResetImageROI(src);
    cvNamedWindow("check", 1);
    cvShowImage("check", cropped);
    cvSaveImage("style.jpg", cropped);
    return cropped;
}

int main(int argc, char** argv)
{
    IplImage *src1, *src2;
    CvRect myRect;
    src1 = cvLoadImage(argv[1], 1);
    src2 = cvLoadImage(argv[2], 1);
    {
        int x = atoi(argv[3]);
        int y = atoi(argv[4]);
        int width = atoi(argv[5]);
        int height = atoi(argv[6]);
        double alpha = (double)atof(argv[7]);
        double beta = (double)atof(argv[8]);
        cvSetImageROI(src1, cvRect(x, y, width, height));
        cvSetImageROI(src2, cvRect(100, 200, width, height));
        myRect = cvRect(x, y, width, height);
        cvAddWeighted(src1, alpha, src2, beta, 0.0, src1);
        cvResetImageROI(src1);
        crop(src1, myRect);
        cvNamedWindow("Alpha_blend", 1);
        cvShowImage("Alpha_blend", src1);
        cvWaitKey(0);
    }
    return 0;
}