I have a question about OpenCV. I'm trying to implement the SURF algorithm, but when I build the code I get the following error:
error LNK2019: unresolved external symbol _cvExtractSURF referenced in function _main
1>SAMPLE.obj : error LNK2019: unresolved external symbol _cvSURFParams referenced in function _main
I have gone through all the posts related to my topic on this forum, but I couldn't figure out the problem with my code. Please help me resolve it.
Code:
#include <stdio.h>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/legacy/legacy.hpp>
#include <opencv2/legacy/compat.hpp>
#include <opencv2/nonfree/nonfree.hpp>
#include <opencv/opensurf/surf.h>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
    CvMemStorage* storage = cvCreateMemStorage(0);
    cvNamedWindow("Image", 1);
    int key = 0;
    static CvScalar red_color[] = {0, 0, 255};
    IplImage* capture = cvLoadImage("testface.jpg");
    CvMat* prevgray = 0, *image = 0, *gray = 0;
    while (key != 'q')
    {
        int firstFrame = gray == 0;
        IplImage* frame = capture;
        if (!frame)
            break;
        if (!gray)
        {
            image = cvCreateMat(frame->height, frame->width, CV_8UC1);
        }
        // Convert the RGB image obtained from the camera into grayscale
        cvCvtColor(frame, image, CV_BGR2GRAY);
        // Define sequences for storing SURF keypoints and descriptors
        CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
        int i;
        // Extract SURF points by initializing parameters
        CvSURFParams params = cvSURFParams(500, 1);
        cvExtractSURF(image, 0, &imageKeypoints, &imageDescriptors, storage, params);
        printf("Image Descriptors: %d\n", imageDescriptors->total);
        // Draw the keypoints on the captured frame
        for (i = 0; i < imageKeypoints->total; i++)
        {
            CvSURFPoint* r = (CvSURFPoint*)cvGetSeqElem(imageKeypoints, i);
            CvPoint center;
            int radius;
            center.x = cvRound(r->pt.x);
            center.y = cvRound(r->pt.y);
            radius = cvRound(r->size * 1.2 / 9. * 2);
            cvCircle(frame, center, radius, red_color[0], 1, 8, 0);
        }
        cvShowImage("Image", frame);
        cvWaitKey(0);
    }
    cvDestroyWindow("Image");
    return 0;
}
Thank you,
Sreelakshmi Priya
Related
Hi. I have the above image and use the "findContours" function.
Then I use the "convexityDefects" function to find the corner points.
The result is as follows.
The problem with this code is that it cannot find the rounded corners. It cannot find a point like the following.
This is my code:
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include <iostream>
#include <sstream>
#include <fstream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
    cv::Mat image = cv::imread("find_Contours.png");
    // Prepare the image for findContours
    cv::cvtColor(image, image, CV_BGR2GRAY);
    cv::threshold(image, image, 128, 255, CV_THRESH_BINARY);
    // Find the contours. Use the contourOutput Mat so the original image doesn't get overwritten
    std::vector<std::vector<cv::Point> > contours;
    cv::Mat contourOutput = image.clone();
    cv::findContours(contourOutput, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
    // convexityDefects
    vector<vector<Point> > hull(contours.size());
    vector<vector<int> > hullsI(contours.size());   // indices to contour points
    vector<vector<Vec4i> > defects(contours.size());
    for (int i = 0; i < contours.size(); i++)
    {
        convexHull(contours[i], hull[i], false);
        convexHull(contours[i], hullsI[i], false);
        if (hullsI[i].size() > 3)                   // you need more than 3 indices
        {
            convexityDefects(contours[i], hullsI[i], defects[i]);
        }
    }
    // Draw convexityDefects
    for (int i = 0; i < contours.size(); ++i)
    {
        for (const Vec4i& v : defects[i])
        {
            float depth = v[3] / 256.0f;            // defect depth is stored as fixed-point with 8 fractional bits
            if (depth >= 0)                         // filter defects by depth, e.g. more than 10
            {
                int startidx = v[0]; Point ptStart(contours[i][startidx]);
                int endidx = v[1]; Point ptEnd(contours[i][endidx]);
                int faridx = v[2]; Point ptFar(contours[i][faridx]);
                circle(image, ptFar, 4, Scalar(255, 255, 255), 2);
                cout << ptFar << endl;
            }
        }
    }
    cv::imshow("Input Image", image);
    cvMoveWindow("Input Image", 0, 0);
    waitKey(0);
}
Can someone fix the code so that it finds the red dots? Please help.
Now I want to find the convexity defects from the inside, not the outside, like in this image:
Can someone help me?
It is very important to use
convexHull(contours[i], hullsI[i], true);
That is, with the last argument set to "true" for indices. I'm almost certain this is the reason it cannot find all the defects. Before fixing this, there is not much sense in trying to find other bugs (if any).
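For clarity, a minimal sketch of how that change fits into the loop from the question (everything else stays as posted; only the index-based convexHull call changes):

for (int i = 0; i < contours.size(); i++)
{
    convexHull(contours[i], hull[i], false);
    convexHull(contours[i], hullsI[i], true);   // true here, as suggested above
    if (hullsI[i].size() > 3)                   // convexityDefects needs more than 3 indices
    {
        convexityDefects(contours[i], hullsI[i], defects[i]);
    }
}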
I have recently used this piece of code to save frame data from a webcam
#include <opencv\cv.h>
#include <opencv\highgui.h>
#include <opencv2/opencv.hpp>
using namespace cv;
#include <fstream>
using namespace std;
int main(int argc, char** argv)
{
    VideoCapture cap(0); // open the default camera
    if (!cap.isOpened()) // check if we succeeded
        return -1;
    cap.set(CV_CAP_PROP_FPS, 15);
    Mat edges;
    namedWindow("image", 1);
    std::vector<cv::Mat> images(100);
    for (int i = 0; i < 100; ++i) {
        // this is optional, preallocation so there's no allocation
        // during capture
        images[i].create(480, 640, CV_8UC3);
    }
    for (int i = 0; i < 100; ++i)
    {
        Mat frame;
        cap >> frame; // get a new frame from camera
        frame.copyTo(images[i]);
    }
    cap.release();
    for (int i = 0; i < 100; ++i)
    {
        imshow("image", images[i]);
        if (waitKey(30) >= 0) break;
    }
    return 0;
}
After this, I want to use imread to analyse the newly captured frames. However, I cannot think of a way to accomplish this.
I tried: Mat colorImage = imread(images[i]);
However, it leads to:
error C2664: 'cv::Mat cv::imread(const cv::String &,int)': cannot convert argument 1 from 'std::vector<cv::Mat,std::allocator<_Ty>>' to 'const cv::String &'
with
[
_Ty=cv::Mat
]
Thanks a lot in advance :)
The imread function is used to load an image from disk.
You already have a vector of images, so you can just do:
Mat colorImage = images[i];
And by the way, there is no need for this:
for (int i = 0; i < 100; ++i) {
    // this is optional, preallocation so there's no allocation
    // during capture
    images[i].create(480, 640, CV_8UC3);
}
because you are allocating new space anyway, unless you capture the frames directly like this:
cap >> images[i];
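For example, a minimal sketch of analysing the captured frames straight from the vector (the grayscale conversion is only a placeholder for whatever processing you actually want to run):

for (int i = 0; i < 100; ++i)
{
    Mat colorImage = images[i];                 // header copy; shares the frame's pixel data
    Mat gray;
    cvtColor(colorImage, gray, CV_BGR2GRAY);    // example processing step
    imshow("image", gray);
    if (waitKey(30) >= 0) break;
}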
I am trying to detect ears in a profile (side view) image of a face. I tried using the Haar cascades provided with OpenCV (haarcascade_mcs_rightear, haarcascade_mcs_leftear, left_ear.xml, right_ear.xml). I am able to detect the profile face, but I am not able to detect ears with any of the cascades. I am specifying the region of interest using the profile face detector. I cannot find out where I am going wrong. Please help me with that; a code snippet would be of great help. Thanks in advance. The code, with comments, follows.
#include <stdio.h>
#include<conio.h>
#include "cv.h"
#include "highgui.h"
using namespace std;
using namespace cv;
CvMemStorage *storage;
int detectFeature(int,char *imname,IplImage* image,CvRect featureROI, Rect* feature_box);
const char *file_profileface = "haarcascade_profileface.xml";
const char *ear_profileface = "left_ear.xml";//cascade name
CvRect profile_face;
CvRect ear;
int main()
{
    int flagFaceDetect;
    storage = cvCreateMemStorage(0);
    assert(storage);
    Rect faceRect; Rect leftEar; Rect rightEar;
    char myimage1[50];
    sprintf(myimage1, "profile%d.jpg", 1);
    IplImage* img = cvLoadImage(myimage1, CV_LOAD_IMAGE_COLOR);
    /* first detect the profile face and then detect the ears */
    Rect* rectptr = &faceRect; // rectangle for the profile face
    CvRect face_roi = cvGetImageROI(img);
    flagFaceDetect = detectFeature(0, myimage1, img, face_roi, rectptr);
    rectptr = &leftEar;
    // set the ROI for the ear with respect to the profile face rectangle
    CvRect leftear_roi = cvRect(faceRect.x + faceRect.x*3/4, faceRect.y + faceRect.height/4, faceRect.x + faceRect.width + faceRect.width/10, faceRect.y + faceRect.height - faceRect.height/4);
    flagFaceDetect = detectFeature(1, myimage1, img, leftear_roi, rectptr);
    getch();
    return 0;
}
// general function to locate a feature
int detectFeature(int feature_index, char *imname, IplImage* image, CvRect featureROI, Rect* feature_box)
{
    cvSetImageROI(image, featureROI);
    CvSeq* feature;
    CvHaarClassifierCascade* featureCascade;
    if (feature_index == 0) // cascade for the profile face
        featureCascade = (CvHaarClassifierCascade*) cvLoad(file_profileface, 0, 0, 0);
    if (feature_index == 1) // cascade for the ear
        featureCascade = (CvHaarClassifierCascade*) cvLoad(ear_profileface, 0, 0, 0);
    // feature = cvHaarDetectObjects(image, featureCascade, storage, 1.1, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(50,50));
    feature = cvHaarDetectObjects(image, featureCascade, storage, 1.2, 3, 0, cvSize(18, 12));
    cvResetImageROI(image);
    IplImage* displayImage = cvLoadImage(imname, CV_LOAD_IMAGE_COLOR);
    CvRect* r;
    int index_max_area;
    int x1, x2, y1, y2; // opposite vertices of the rectangle
    if (feature->total == 0) {
        cout << "here";
        return 0;
    }
    else {
        CvRect *fture = (CvRect*)cvGetSeqElem(feature, 0);
        feature_box->x = fture->x;
        feature_box->y = fture->y;
        feature_box->height = fture->height;
        feature_box->width = fture->width;
        /* draw a rectangle around the feature */
        cvRectangle(displayImage,
                    cvPoint(fture->x + fture->x*3/4, fture->y + fture->height/4),
                    cvPoint(fture->x + fture->width + fture->width/10, fture->y + fture->height - fture->height/4),
                    CV_RGB(0, 0, 255),
                    1, 8, 0);
        cvShowImage("frame", displayImage);
        cvWaitKey(0);
        return 1;
    }
}
Hi, I am doing a project on 3D image reconstruction. I am at the phase of calibrating the camera, which is taking a long time. When I compile the code and show the checkerboard in front of the camera, it goes straight to an unhandled exception error.
When the checkerboard is not in the frame there is no error; as soon as it gets into the frame, the unhandled error occurs, and I don't know why.
I have asked a lot of people, and nobody can seem to help.
Here is my code:
#include <cv.h>
#include <highgui.h>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
using namespace cv;
using namespace std;
int main()
{
    int numBoards = 0;
    int numCornersHor;
    int numCornersVer;
    printf("Enter number of corners along width: ");
    scanf("%d", &numCornersHor);
    printf("Enter number of corners along height: ");
    scanf("%d", &numCornersVer);
    printf("Enter number of boards: ");
    scanf("%d", &numBoards);
    int numSquares = numCornersHor * numCornersVer;
    Size board_sz = Size(numCornersHor, numCornersVer);
    VideoCapture capture = VideoCapture(0);
    vector<vector<Point3d>> object_points;
    vector<vector<Point2d>> image_points;
    vector<Point2d> corners;
    int successes = 0;
    Mat image;
    Mat gray_image;
    capture >> image;
    vector<Point3d> obj;
    for (int j = 0; j < numSquares; j++)
        obj.push_back(Point3d(j/numCornersHor, j%numCornersHor, 0.0f));
    while (successes < numBoards)
    {
        cvtColor(image, gray_image, CV_BGR2GRAY);
        bool found = findChessboardCorners(image, board_sz, corners, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
        if (found)
        {
            cornerSubPix(gray_image, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1));
            drawChessboardCorners(gray_image, board_sz, corners, found);
        }
        imshow("win1", image);
        imshow("win2", gray_image);
        capture >> image;
        int key = waitKey(1);
        if (key == 27)
            return 0;
        if (key == ' ' && found != 0)
        {
            image_points.push_back(corners);
            object_points.push_back(obj);
            printf("Snap stored!\n");
            successes++;
            if (successes >= numBoards)
                break;
        }
    }
    Mat intrinsic = Mat(3, 3, CV_32FC1);
    Mat distCoeffs;
    vector<Mat> rvecs;
    vector<Mat> tvecs;
    intrinsic.ptr<float>(0)[0] = 1;
    intrinsic.ptr<float>(1)[1] = 1;
    calibrateCamera(object_points, image_points, image.size(), intrinsic, distCoeffs, rvecs, tvecs);
    Mat imageUndistorted;
    while (1)
    {
        capture >> image;
        undistort(image, imageUndistorted, intrinsic, distCoeffs);
        imshow("win1", image);
        imshow("win2", imageUndistorted);
        waitKey(1);
    }
    capture.release();
    return 0;
}
The error I get on the console is:
OpenCV ERROR: Assertion failed (ncorners >=0 && corners.depth() == CV_32F) in unknown function file , file .....\src\opencv\modules\imgproc\src\cornersubpix.cpp, line 257.
and the error dialog says
Unhandled exception at 0x769afc16 in basiccalibration.exe: Microsoft C++ exception: cv::Exception at memory location 0x0021f51c..
Help would be appreciated.
Thanks
Use Point2f and Point3f instead of Point2d and Point3d. Please read the assertion text: it demands a structure with CV_32F depth.
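In other words, the declarations in the posted code would become something like this (a sketch; only the point types change):

vector<vector<Point3f> > object_points;   // was Point3d
vector<vector<Point2f> > image_points;    // was Point2d
vector<Point2f> corners;                  // cornerSubPix asserts corners.depth() == CV_32F

vector<Point3f> obj;
for (int j = 0; j < numSquares; j++)
    obj.push_back(Point3f(j / numCornersHor, j % numCornersHor, 0.0f));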
I am new to OpenCV. Currently, I am trying to load and save a defined ROI of an image.
For OpenCV 1.x, I got it working with the following function...
#include <cv.h>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
void SaveROI(const CStringA& inputFile, const CStringA& outputFile)
{
    if (ATLPath::FileExists(inputFile))
    {
        CvRect rect;
        rect.x = 8;
        rect.y = 90;
        rect.width = 26;
        rect.height = 46;
        IplImage* imgInput = cvLoadImage(inputFile.GetString(), 1);
        IplImage* imgRoi = cvCloneImage(imgInput);
        cvSetImageROI(imgRoi, rect);
        cvSaveImage(outputFile.GetString(), imgRoi);
        cvReleaseImage(&imgInput);
        cvReleaseImage(&imgRoi);
    }
}
How can this be done with OpenCV 2 or the C++ API? I tried the following without success; the whole image is saved.
void SaveROICPP(const CStringA& inputFile, const CStringA& outputFile)
{
    if (ATLPath::FileExists(inputFile))
    {
        cv::Mat imgInput = cv::imread(inputFile.GetString());
        if (imgInput.data != NULL)
        {
            cv::Mat imgRoi = imgInput(cv::Rect(8, 90, 26, 46));
            imgInput.copyTo(imgRoi);
            cv::imwrite(outputFile.GetString(), imgRoi);
        }
    }
}
Any help or suggestion?
You just don't need to call copyTo:
void SaveROICPP(const CStringA& inputFile, const CStringA& outputFile)
{
    if (ATLPath::FileExists(inputFile))
    {
        cv::Mat imgInput = cv::imread(inputFile.GetString());
        if (imgInput.data != NULL)
        {
            cv::Mat imgRoi = imgInput(cv::Rect(8, 90, 26, 46));
            cv::imwrite(outputFile.GetString(), imgRoi);
        }
    }
}
In your version, copyTo sees that imgInput is bigger than imgRoi and reallocates a new full-size matrix to make the copy. imgRoi is already a sub-image, and you can simply pass it to any OpenCV function.
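If you ever need the ROI as an independent copy with its own pixel buffer (for example, to keep it around after imgInput goes away), a sketch of that would be:

cv::Mat imgRoiCopy = imgInput(cv::Rect(8, 90, 26, 46)).clone();  // deep copy of just the ROI
cv::imwrite(outputFile.GetString(), imgRoiCopy);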
Here is some tested code for blending, cropping and saving new images.
You crop and then save that region in a new file.
#include <cv.h>
#include <highgui.h>
#include <math.h>
// alphablend <imageA> <image B> <x> <y> <width> <height>
// <alpha> <beta>
IplImage* crop(IplImage* src, CvRect roi)
{
    // Must have dimensions of output image
    IplImage* cropped = cvCreateImage(cvSize(roi.width, roi.height), src->depth, src->nChannels);
    // Say what the source region is
    cvSetImageROI(src, roi);
    // Do the copy
    cvCopy(src, cropped);
    cvResetImageROI(src);
    cvNamedWindow("check", 1);
    cvShowImage("check", cropped);
    cvSaveImage("style.jpg", cropped);
    return cropped;
}

int main(int argc, char** argv)
{
    IplImage *src1, *src2;
    CvRect myRect;
    // IplImage* cropped;
    src1 = cvLoadImage(argv[1], 1);
    src2 = cvLoadImage(argv[2], 1);
    {
        int x = atoi(argv[3]);
        int y = atoi(argv[4]);
        int width = atoi(argv[5]);
        int height = atoi(argv[6]);
        double alpha = (double)atof(argv[7]);
        double beta = (double)atof(argv[8]);
        cvSetImageROI(src1, cvRect(x, y, width, height));
        cvSetImageROI(src2, cvRect(100, 200, width, height));
        myRect = cvRect(x, y, width, height);
        cvAddWeighted(src1, alpha, src2, beta, 0.0, src1);
        cvResetImageROI(src1);
        crop(src1, myRect);
        cvNamedWindow("Alpha_blend", 1);
        cvShowImage("Alpha_blend", src1);
        cvWaitKey(0);
    }
    return 0;
}