Insufficient memory when using SIFT::detect - opencv

I am using SIFT to detect keypoints in two 3264x2466 images. My code follows, but I get an OpenCV error: insufficient memory. Is there anything wrong?
Here is the image: http://img42.imageshack.us/img42/6963/v839.jpg. I am running the program on Windows 7 x86 with OpenCV 2.4.7.
#include <opencv/cv.h>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <string>
#include <vector>
#include <cmath>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
using namespace std;
using namespace cv;
int main(int argc, char **argv)
{
cv::initModule_nonfree();
Mat image1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat image2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE );
//1. compute the keypoints
int minHessian = 400;
Ptr<FeatureDetector> detector = FeatureDetector::create("SIFT");
vector<KeyPoint> keypoints1,keypoints2;
detector->detect(image1, keypoints1);
detector->detect(image2, keypoints2);
//2. compute the descriptor
Ptr<DescriptorExtractor> extractor = DescriptorExtractor::create("SIFT");
Mat descriptors1, descriptors2;
extractor->compute( image1, keypoints1, descriptors1);
extractor->compute( image2, keypoints2, descriptors2);
//3. match
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
std::vector< DMatch > matches;
matcher->match( descriptors1, descriptors2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors1.rows; i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors1.rows; i++ )
{
if( matches[i].distance <= 2*min_dist ) {
good_matches.push_back( matches[i]);
}
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( image1, keypoints1, image2, keypoints2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imwrite("C:\\Users\\flex\\Desktop\\output2.jpg", img_matches);
//imshow( "Good Matches", img_matches );
//waitKey(0);
return 0;
}
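A note on the memory error: on a 32-bit process (win7 x86) SIFT can easily exhaust the address space on 3264x2466 inputs, because it builds a multi-octave floating-point Gaussian and DoG pyramid for each image. One common workaround, sketched below on the assumption that full-resolution keypoints are not strictly required, is to downscale the images before detection (the 0.5 factor is only an example):
// Sketch (assumption: detection on a reduced image is acceptable for your use case).
// resize with INTER_AREA keeps the downscaled image clean for feature detection.
Mat small1, small2;
resize(image1, small1, Size(), 0.5, 0.5, INTER_AREA);
resize(image2, small2, Size(), 0.5, 0.5, INTER_AREA);
detector->detect(small1, keypoints1);
detector->detect(small2, keypoints2);
// If coordinates at the original resolution are needed, scale each keypoint
// back up afterwards (multiply each keypoints1[i].pt by 2).
Processing the two images one after the other, and releasing the first before loading the second, also lowers the peak memory footprint.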

Related

opencv 3.0, about using Ptr<SURF> ( surf algorithm)

This code is an example of the SURF algorithm, but when I build it I get an error message:
error : error LNK2019: "public: static struct cv::Ptr __cdecl
cv::xfeatures2d::SURF::create(double,int,int,bool,bool)"
(?create@SURF@xfeatures2d@cv@@SA?AU?$Ptr@VSURF@xfeatures2d@cv@@@3@NHH_N0@Z)
Why do I get this error? I have to use the SURF algorithm. What is the solution?
#include <stdio.h>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
using namespace cv::xfeatures2d;
void readme();
int main()
{
Mat img_1 = imread( "a.jpg", IMREAD_GRAYSCALE );
Mat img_2 = imread( "b.jpg", IMREAD_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
Ptr<SURF> detector = SURF::create( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector->detect( img_1, keypoints_1 );
detector->detect( img_2, keypoints_2 );
//-- Draw keypoints
Mat img_keypoints_1; Mat img_keypoints_2;
drawKeypoints( img_1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
drawKeypoints( img_2, keypoints_2, img_keypoints_2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
//-- Show detected (drawn) keypoints
imshow("Keypoints 1", img_keypoints_1 );
imshow("Keypoints 2", img_keypoints_2 );
waitKey(0);
return 0;
}
/** @function readme */
void readme()
{ std::cout << " Usage: ./SURF_detector <img1> <img2>" << std::endl; }
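LNK2019 on cv::xfeatures2d::SURF::create usually means the executable is not linked against the xfeatures2d module from opencv_contrib; the code itself is fine. A hedged, MSVC-only sketch of declaring the link dependencies directly in source is shown below; the exact library names, including the version suffix and the trailing "d" for debug builds, depend on your OpenCV build and are assumptions here:
// Assumption: OpenCV 3.0.0 built with opencv_contrib, libraries named opencv_*300.lib.
// Adjust the names (and add the "d" suffix for debug configurations) to match your install.
#ifdef _MSC_VER
#pragma comment(lib, "opencv_core300.lib")
#pragma comment(lib, "opencv_highgui300.lib")
#pragma comment(lib, "opencv_features2d300.lib")
#pragma comment(lib, "opencv_xfeatures2d300.lib")
#endif
Adding the same libraries under Project Properties -> Linker -> Input -> Additional Dependencies achieves the same thing without touching the source.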

Accurate subpixel edge location in C (OPENCV)

I want to find subpixel locations, and I have researched this topic. I think subpixel coordinates should look like 152.6, 49.3, and so on.
I found this document in the OpenCV docs: http://docs.opencv.org/2.4/modules/imgproc/doc/feature_detection.html?highlight=cornersubpix#cornersubpix
And I tried this code:
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src, src_gray;
int maxCorners = 10;
int maxTrackbar = 50;
RNG rng(11111);
const char* source_window = "Image";
void goodFeaturesToTrack_Demo( int, void* );
int main( int argc, char** argv )
{
src = imread( "a.png", 1 );
cvtColor( src, src_gray, CV_BGR2GRAY );
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo);
imshow( source_window, src );
goodFeaturesToTrack_Demo( 0, 0 );
waitKey(0);
return(0);
}
void goodFeaturesToTrack_Demo( int, void* )
{
if( maxCorners < 1 )
{ maxCorners = 1; }
vector<Point2f> corners;
double qualityLevel = 0.01;
double minDistance = 10;
int blockSize = 3;
bool useHarrisDetector = false;
double k = 0.04;
Mat copy;
copy = src.clone();
goodFeaturesToTrack( src_gray,corners,maxCorners,qualityLevel,minDistance,Mat(),blockSize,useHarrisDetector,k );
cout<<"** Number of corners detected: "<<corners.size()<<endl;
int r = 4;
for( int i = 0; i < corners.size(); i++ )
{ circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255),rng.uniform(0,255)), -1, 8, 0 ); }
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, copy );
Size winSize = Size( 10, 10 );
Size zeroZone = Size( -1, -1 );
TermCriteria criteria = TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001 );
cornerSubPix( src_gray, corners, winSize, zeroZone, criteria );
for( int i = 0; i < corners.size(); i++ )
{ cout<<" -- Refined Corner ["<<i<<"] ("<<corners[i].x<<","<<corners[i].y<<")"<<endl; }
}
But I get this result: the code finds only the corners' subpixel positions, whereas I want the edges' subpixel positions.
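There is no direct cornerSubPix counterpart for edges, but a common approach is to take the image gradient across the edge and refine the location of its maximum with a three-point parabola fit. The sketch below is only an illustration; the function name and the assumption of a roughly vertical edge scanned row by row are assumptions, not from the question:
// Sketch: per-row subpixel x-position of a roughly vertical edge.
// Assumes 'gray' is a single-channel 8-bit image.
#include <opencv2/imgproc/imgproc.hpp>
#include <vector>
std::vector<cv::Point2f> subpixelEdgePerRow(const cv::Mat& gray)
{
    cv::Mat gx;
    cv::Sobel(gray, gx, CV_32F, 1, 0, 3);        // horizontal gradient
    gx = cv::abs(gx);
    std::vector<cv::Point2f> edge;
    for (int y = 0; y < gx.rows; ++y)
    {
        const float* row = gx.ptr<float>(y);
        int xm = 1;
        for (int x = 2; x < gx.cols - 1; ++x)    // column with the strongest gradient
            if (row[x] > row[xm]) xm = x;
        float l = row[xm - 1], c = row[xm], r = row[xm + 1];
        float denom = l - 2.f * c + r;
        float dx = (denom != 0.f) ? 0.5f * (l - r) / denom : 0.f;  // parabola vertex offset
        edge.push_back(cv::Point2f(xm + dx, (float)y));
    }
    return edge;
}
The same idea works for arbitrary edge directions by sampling the gradient magnitude along the edge normal instead of along image rows.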

Panoramic image Stitching Algorithm

Maybe the problem is that I'm not good at English.
I am new to OpenCV. I want to know which area the stitcher merges.
If you know the order in which your images were taken, you can follow this code to stitch them together. If the order is not known, the problem becomes more complex. Also, this code is designed for images of the same size; if your camera is shifted it may produce erroneous results, so implement some checks of your own. You may refer to this article, http://ramsrigoutham.com/2012/11/22/panorama-image-stitching-in-opencv/, for a fuller explanation of the stitching function that is called twice in main.
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
void stitching( cv::Mat&,cv::Mat& ,cv::Mat& );
int main()
{
Mat image1= imread("image1.jpg");
Mat image2= imread("image2.jpg");
Mat image3= imread("image3.jpg");
Mat gray_image1;
Mat gray_image2;
Mat gray_image3;
Mat result1,result2;
// Convert to Grayscale
cvtColor( image1, gray_image1, CV_RGB2GRAY );
cvtColor( image2, gray_image2, CV_RGB2GRAY );
cvtColor( image3, gray_image3, CV_RGB2GRAY );
stitching(gray_image1,gray_image2,result1);
stitching(result1,gray_image3,result2);
cv::imshow("stitched image"result2);
cv::WaitKey(0);
}
void stitching( cv::Mat& im1,cv::Mat& im2,cv::Mat& stitch_im)
{
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector< KeyPoint > keypoints_object, keypoints_scene;
detector.detect(im1, keypoints_object );
detector.detect(im2, keypoints_scene );
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( im1, keypoints_object, descriptors_object );
extractor.compute( im2, keypoints_scene, descriptors_scene );
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
double max_dist = 0; double min_dist = 100;
for( int i = 0; i < descriptors_object.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
std::vector< Point2f > obj;
std::vector< Point2f > scene;
for( int i = 0; i < good_matches.size(); i++ )
{
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, CV_RANSAC );
// warp im1 into im2's frame, then copy im2 into the left part so both images appear in the result
warpPerspective(im1,stitch_im,H,cv::Size(im1.cols+im2.cols,im1.rows));
cv::Mat half(stitch_im,cv::Rect(0,0,im2.cols,im2.rows));
im2.copyTo(half);
}
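As for the original question about which area gets merged: once H is known inside stitching(), you can map im1's corners into im2's frame and intersect their bounding box with im2's rectangle; the intersection is the shared (overlapping) region. A rough sketch, added here for illustration rather than taken from the original answer, placed right after findHomography():
// Sketch: estimate the overlapping region, given H mapping im1 coordinates into im2's frame.
std::vector<cv::Point2f> corners(4), warped(4);
corners[0] = cv::Point2f(0.f, 0.f);
corners[1] = cv::Point2f((float)im1.cols, 0.f);
corners[2] = cv::Point2f((float)im1.cols, (float)im1.rows);
corners[3] = cv::Point2f(0.f, (float)im1.rows);
cv::perspectiveTransform(corners, warped, H);
cv::Rect overlap = cv::boundingRect(warped) & cv::Rect(0, 0, im2.cols, im2.rows);
std::cout << "overlap: x=" << overlap.x << " y=" << overlap.y
          << " w=" << overlap.width << " h=" << overlap.height << std::endl;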

OPENCV (Unsupported format or combination formats) SURF

Is anyone familiar with this error? I am testing a SURF descriptor in real time; I want to use these features to recognize different species of fish. Sometimes the program runs fine, but sometimes it fails with this error. The build succeeds; the error appears at run time.
#include <opencv2/imgproc/imgproc_c.h>
#include <stdio.h>
#include <math.h>
#include <opencv/highgui.h>
#include <opencv/cv.h>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>
using namespace cv;
#define nimg 3
int main()
{
Mat object = imread( "D:/galunggong.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat object1 = imread( "D:/sapsap.jpg", CV_LOAD_IMAGE_GRAYSCALE );
Mat object2 = imread( "D:/bisugo.jpg",CV_LOAD_IMAGE_GRAYSCALE );
// Mat object3 = imread( "4.jpg", CV_LOAD_IMAGE_GRAYSCALE );
//Mat object4 = imread( "5.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
if( !object1.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
if( !object2.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
//Detect the keypoints using SURF Detector
int minHessian = 1000;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> kp_object;
std::vector<KeyPoint> kp_object1;
std::vector<KeyPoint> kp_object2;
std::vector<KeyPoint> kp_object3;
std::vector<KeyPoint> kp_object4;
detector.detect( object, kp_object );
detector.detect( object1, kp_object1 );
detector.detect( object2, kp_object2 );
//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat des_object;
Mat des_object1;
Mat des_object2;
extractor.compute( object, kp_object, des_object );
extractor.compute( object1, kp_object1, des_object1 );
extractor.compute( object2, kp_object2, des_object2 );
FlannBasedMatcher matcher;
CvCapture* cap = cvCreateCameraCapture(0);
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, 320);
cvSetCaptureProperty( cap, CV_CAP_PROP_FRAME_HEIGHT, 240 );
namedWindow("Good Matches");
std::vector<Point2f> obj_corners(4);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
char key = 'a';
int framecount = 0;
Mat frame;
Mat des_image, img_matches;
char vect[nimg];
char contor;
char ok, ko;
while (1)
{
frame = cvQueryFrame(cap);
if (framecount < 5)
{
framecount++;
continue;
}
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<vector<DMatch > > matches1;
std::vector<vector<DMatch > > matches2;
std::vector<DMatch > good_matches;
std::vector<DMatch > good_matches1;
std::vector<DMatch > good_matches2;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;
cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);
// printf("d \n");
//////////////////////////////////////////////////////
contor=0;
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
{
good_matches.push_back(matches[i][0]);
}
}
vect[contor]=good_matches.size();
/////////////////////////////////////////////////////
contor=1;
matcher.knnMatch(des_object1, des_image, matches1, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches1.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches1[i][0].distance < 0.6*(matches1[i][1].distance)) && ((int) matches1[i].size()<=2 && (int) matches1[i].size()>0))
{
good_matches1.push_back(matches1[i][0]);
}
}
vect[contor]=good_matches1.size();
/////////////////////////////////////////////////////
contor=2;
matcher.knnMatch(des_object2, des_image, matches2, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches2.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches2[i][0].distance < 0.6*(matches2[i][1].distance)) && ((int) matches2[i].size()<=2 && (int) matches2[i].size()>0))
{
good_matches2.push_back(matches2[i][0]);
}
}
vect[contor]=good_matches2.size();
////////////////////////////////////////////////////
/*
contor =3;
matcher.knnMatch(des_object, des_image, matches3, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches3.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches3[i][0].distance < 0.6*(matches3[i][1].distance)) && ((int) matches3[i].size()<=2 && (int) matches3[i].size()>0))
{
good_matches3.push_back(matches[i][0]);
}
}
vect[contor]=good_matches3.size();
//////////////////////////////////////////////////
contor=4;
matcher.knnMatch(des_object, des_image, matches4, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches4.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches4[i][0].distance < 0.6*(matches4[i][1].distance)) && ((int) matches4[i].size()<=2 && (int) matches4[i].size()>0))
{
good_matches4.push_back(matches[i][0]);
}
}
vect[contor]=good_matches4.size();
*/
printf("%d %d %d \n ",vect[0],vect[1],vect[2]);
ok=0;
for (contor=1;contor<nimg;contor++)
if (vect[contor]>vect[contor-1])
ok=contor;
for (ko=10;ko>3;ko++)
{
if (ok==0 && vect[ok]>ko)
{printf("Forward \n");
ko=2;}
else if (ok==1 && vect[ok]>ko)
{printf("Turn Left \n");
ko=2;}
else if (ok==2 && vect[ok]>ko)
{printf("Turn Right \n");
ko=2;}
}
//Show detected matches
imshow( "Good Matches",frame /*img_matches*/ );
key = waitKey(1);
}
}
Sometimes SURF cannot find any feature points, so it may return an empty descriptor matrix.
You should check that the descriptors contain data:
if(des_object.empty()||des_object1.empty()||des_object2.empty())
{
std::cout<<"Empty descriptor(s)."<<std::endl;
return -1;
}
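The same check matters inside the capture loop: a camera frame with no keypoints leaves des_image empty, and knnMatch on it raises this error as well. A small guard of this kind (a sketch to be placed right after extractor.compute() on the frame, not part of the original answer) avoids it:
// Sketch: skip frames that produced no (or too few) descriptors before matching with k = 2.
if( des_image.empty() || kp_image.size() < 2 )
{
imshow( "Good Matches", frame );
key = waitKey(1);
continue;
}
matcher.knnMatch(des_object, des_image, matches, 2);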

Opencv Surf Bug error

Is anyone familiar with this error? I tested a SURF descriptor in real time and it worked well, but after a few seconds it crashes with this error.
It seems to be related to frames in which no points are detected. I ran the code again with the detected object in view for more than 2 minutes and there was still no error, but when I removed the object, so that no points were detected, it crashed again after about 40 seconds.
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/legacy/legacy.hpp"
using namespace cv;
using namespace std;
char key = 'a';
int framecount = 0;
SurfFeatureDetector detector(1000);
SurfDescriptorExtractor extractor;
FlannBasedMatcher matcher;
Mat frame, des_object, image;
Mat des_image, img_matches, H;
std::vector<KeyPoint> kp_object;
std::vector<Point2f> obj_corners(4);
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
int main()
{
//reference image
Mat object = imread("D:/milo.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
//compute detectors and descriptors of reference image
detector.detect( object, kp_object );
extractor.compute( object, kp_object, des_object );
//create video capture object
CvCapture* capture = cvCaptureFromCAM(0);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 270);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 190);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
//wile loop for real time detection
while (key != 27)
{
Mat frame;
frame = cvQueryFrame(capture);
if (framecount < 5)
{
framecount++;
continue;
}
Mat des_image, img_matches;
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;
cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);
int goodMatchesCounter =0;
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if(((int)matches[i].size()<=2 && (int)matches[i].size()>0) && (matches[i][0].distance<0.6*(matches[i][1].distance)))
{
// good_matches.push_back(matches[i][0]);
obj.push_back( kp_object[ matches[i][0].queryIdx ].pt );
scene.push_back( kp_image[ matches[i][0].trainIdx ].pt );
goodMatchesCounter++;
}
}
//Draw only "good" matches
// drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
if (goodMatchesCounter >= 4)
{
H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
//Draw lines between the corners (the mapped object in the scene image )
line( image, scene_corners[0], scene_corners[1], Scalar( 0, 0, 0), 4 );
line( image, scene_corners[1], scene_corners[2], Scalar( 0, 0, 0), 4 );
line( image, scene_corners[2], scene_corners[3], Scalar( 0, 0, 0), 4 );
line( image, scene_corners[3], scene_corners[0], Scalar( 0, 0, 0), 4 );
}
//Show detected matches
imshow( "Good Matches", image );
key = waitKey(1);
}
return 0;
}
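Since the crash appears exactly when no points are detected, one possible guard (only a sketch, not from the original post) is to skip frames whose des_image comes back empty and, as a safety net, keep the loop alive even if matching still throws; this would sit just after extractor.compute( image, kp_image, des_image ):
// Sketch: guard the per-frame matching in the while loop.
if (des_image.empty())
{
imshow("Good Matches", image);
key = waitKey(1);
continue;              // nothing to match in this frame
}
try
{
matcher.knnMatch(des_object, des_image, matches, 2);
}
catch (const cv::Exception& e)
{
std::cerr << "matching failed on this frame: " << e.what() << std::endl;
continue;
}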
