Error in implementing realtime camera based GPU_SURF in opencv

As the CPU-based SURF in OpenCV was too slow for a realtime application, we decided to use GPU_SURF. After setting up opencv_gpu, we wrote the following code:
#include <iostream>
#include <iomanip>
#include <windows.h>
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/gpu/gpu.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/core/types_c.h"
using namespace std;
using namespace cv;
using namespace cv::gpu;
void help()
{
cout << "\nThis program demonstrates using SURF_GPU features detector, descriptor extractor and BruteForceMatcher_GPU" << endl;
cout << "\nUsage:\n\tmatcher_simple_gpu <image1> <image2>" << endl;
}
int main(int argc, char* argv[])
{
GpuMat img1(imread("C:\\OpenCV2.3\\opencv2.3\\bin\\Debug\\tsucuba_left.png", CV_LOAD_IMAGE_GRAYSCALE));
SURF_GPU surf;
// detecting keypoints & computing descriptors
GpuMat keypoints1GPU, keypoints2GPU;
GpuMat descriptors1GPU, descriptors2GPU;
surf(img1, GpuMat(), keypoints1GPU, descriptors1GPU);
cout << "FOUND " << keypoints1GPU.cols << " keypoints on first image" << endl;
//cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;
CvCapture* capture = cvCreateCameraCapture(0);
int frame_width = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
int frame_height = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
cout<<"frames done\n";
cv::gpu::GpuMat frame_gpu = cv::gpu::GpuMat(frame_width, frame_height, CV_8UC3);
cv::gpu::GpuMat frame_gpu_cvt = cv::gpu::GpuMat(frame_width, frame_height, CV_8UC1);
cout<<"gpu frmes loaded\n";
//Sleep(200);
while(cvGrabFrame(capture))
{
IplImage* frame;
frame =cvQueryFrame(capture);
CvMat* image=0;
image = cvCreateMat(frame->height, frame->width, CV_8UC1);
frame_gpu.upload(image);
cout<<"frame uploaded\n";
cvtColor(frame_gpu,frame_gpu_cvt,CV_RGB2GRAY);
cout<<"color done\n";
surf(frame_gpu_cvt, GpuMat(), keypoints2GPU, descriptors2GPU);
// matching descriptors
BruteForceMatcher_GPU< L2<float> > matcher;
GpuMat trainIdx, distance;
matcher.matchSingle(descriptors1GPU, descriptors2GPU, trainIdx, distance);
// downloading results
vector<KeyPoint> keypoints1, keypoints2;
vector<float> descriptors1, descriptors2;
vector<DMatch> matches;
surf.downloadKeypoints(keypoints1GPU, keypoints1);
surf.downloadKeypoints(keypoints2GPU, keypoints2);
surf.downloadDescriptors(descriptors1GPU, descriptors1);
surf.downloadDescriptors(descriptors2GPU, descriptors2);
BruteForceMatcher_GPU< L2<float> >::matchDownload(trainIdx, distance, matches);
// drawing the results
Mat img_matches;
drawMatches(img1, keypoints1, frame_gpu, keypoints2, matches, img_matches);
namedWindow("matches", 0);
imshow("matches", img_matches);
//waitKey(0);
}
return 0;
}
The error that comes on executing it is:
OpenCV Error: Assertion failed (scn == 3 || scn == 4) in unknown function, file
..\..\..\opencv_2.3\opencv\modules\gpu\src\color.cpp, line 186
It is due to the line:
cvtColor(frame_gpu,frame_gpu_cvt,CV_RGB2GRAY);
There may be other errors as well; can someone please help us out with this one?

scn is the number of channels in the first argument to cvtColor. Converting from RGB to GRAY requires that the first argument have three or four channels. The line frame_gpu.upload(image); makes frame_gpu single-channel, since image has one channel. It looks like you can skip the call to cvtColor and call SURF directly on frame_gpu.
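A minimal sketch of that fix, assuming the question's OpenCV 2.3 GPU API and the variables from its loop (converting the frame itself also fills the matrix with real pixel data, which the cvCreateMat call in the question never did):
IplImage* frame = cvQueryFrame(capture);          // BGR frame, 3 channels, owned by the capture
cv::Mat gray;
cv::cvtColor(cv::Mat(frame), gray, CV_BGR2GRAY);  // convert to 1 channel on the CPU
frame_gpu.upload(gray);                           // frame_gpu is now CV_8UC1
surf(frame_gpu, GpuMat(), keypoints2GPU, descriptors2GPU); // no gpu::cvtColor needed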

Related

Frames Lost while processing video with opencv

I am capturing a video at 30 fps, but when I process the video with OpenCV for ArUco marker detection I am losing almost half of the frames. For a 5 min video I expect 5x60x30 = 9000 frames, but I am getting only about 4500. I tried different resolutions and fps settings while recording, but the problem persists. My code is as follows. I later want to sync the video with the audio recorded from the camera, so even knowing which frames are being lost would solve my problem. Any pointers or suggestions are welcome.
#include "opencv2\core.hpp"
#include "opencv2\imgcodecs.hpp"
#include "opencv2\imgproc.hpp"
#include "opencv2\highgui.hpp"
#include "opencv2\aruco.hpp"
#include "opencv2\calib3d.hpp"
#include <time.h>
#include <sstream>
#include <iostream>
#include <fstream>
#include "stdafx.h"
using namespace std;
using namespace cv;
#define _CRT_SECURE_NO_WARNINGS 1
int startWebcamMonitoring() //(const Mat& cameraMatrix, const Mat& distanceCoefficients, float arucoSquareDimensions)
{
Mat frame4;
Scalar_<double> borderColor, borderColor2, borderColor3;
vector<int> markerIds;
vector < vector<Point2f>> markerCorners, rejectedCandidates ;
aruco::DetectorParameters parameters;
Ptr<aruco::Dictionary> makerDiktionary = aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME::DICT_4X4_50);
VideoCapture cap("sample.mp4");
double fps = cap.get(CV_CAP_PROP_FPS);
cout << "Frames per second : " << fps << endl;
while (true)
{
cap >> sample;
if (!cap.read(frame4))
break;
aruco::detectMarkers(frame4, makerDiktionary, markerCorners, markerIds);
aruco::drawDetectedMarkers(frame4, markerCorners, markerIds, borderColor);
aruco::estimatePoseSingleMarkers(markerCorners, arucoSquareDimension,
cameraMatrix, distanceCoefficients, rotationVectors, translationVectors);
Mat rotationMatrix;
for (int i = 0; i < markerIds.size(); i++)
{
aruco::drawAxis(frame4, cameraMatrix, distanceCoefficients, rotationVectors[i], translationVectors[i], 0.01f);
Rodrigues(rotationVectors[i], rotationMatrix);
time_t current = time(0);
cout << " Translation " << translationVectors[i] << " ID" << markerIds[i] << " Euler angles " << 180 / 3.1415*rotationMatrixToEulerAngles(rotationMatrix) << "current time " << ctime(&current) << endl;
}
freopen("output_sample", "a", stdout);
imshow("recording", frame4);
if (waitKey(30) >= 0) break;
}
The problem is:
cap >> sample;
if (!cap.read(frame4))
break;
the program reads a frame from the source twice in every iteration: once with cap >> sample; and once with cap.read(frame4).
Remove the cap >> sample; line and every frame will be processed.
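A sketch of the corrected loop, using the same variables as the question (each iteration now consumes exactly one frame):
while (true)
{
    if (!cap.read(frame4))   // single read per iteration; the cap >> sample line is gone
        break;
    aruco::detectMarkers(frame4, makerDiktionary, markerCorners, markerIds);
    // ... the rest of the processing stays unchanged ...
    imshow("recording", frame4);
    if (waitKey(30) >= 0) break;
}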

how to track objects within the ROI using dlib?

I'm trying to get the stream, define an ROI, and track an object within the ROI using the dlib correlation tracker, but there is a compilation error, and I haven't found any code or explanation that describes a solution to it. Thanks in advance.
cvcap.cpp
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include "dlib/image_processing.h"
#include "dlib/gui_widgets.h"
#include "dlib/image_io.h"
#include "dlib/dir_nav.h"
using namespace dlib;
using namespace std;
int main(int argc, char * argv[]){
CvCapture* camera = cvCaptureFromFile(argv[1]);
if (camera==NULL)
printf("camera is null\n");
else
printf("camera is not null\n");
cvNamedWindow("CvCapture");
while (cvWaitKey(1000)!=atoi("q")){
double t1 = (double)cvGetTickCount();
IplImage *img = cvQueryFrame(camera);
cvSetImageROI(img, cvRect(140, 150, 340, 150));
IplImage *img1 = cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
cvCopy(img, img1, NULL);
cvResetImageROI(img);
cvShowImage("ROI",img1);
correlation_tracker tracker;
tracker.start_track(img1, centered_rect(point(120,100), 80, 150));
image_window win;
tracker.update(img1);
win.set_image(img1);
win.clear_overlay();
win.add_overlay(tracker.get_position());
cout << "hit enter to process next frame" << endl;
cin.get();
double t2=(double)cvGetTickCount();
printf("time: %gms fps: %.2g\n",(t2-t1)/(cvGetTickFrequency()*1000.), 1000./((t2-t1)/(cvGetTickFrequency()*1000.)));
cvShowImage("CvCapture",img1);
}
cvReleaseCapture(&camera);
}
the error is :
In file included from dlib/image_processing.h:24:0,
from cvcap.cpp:3:
dlib/image_processing/correlation_tracker.h: In instantiation of ‘dlib::point_transform_affine dlib::correlation_tracker::make_chip(const image_type&,
dlib::drectangle, std::vector<dlib::matrix<std::complex<double> > >&) const [with image_type = _IplImage*]’:
dlib/image_processing/correlation_tracker.h:65:47: required from ‘void dlib::correlation_tracker::start_track(const image_type&, const dlib::drectangle&) [with image_type = _IplImage*]’
cvcap.cpp:36:70: required from here
dlib/image_processing/correlation_tracker.h:301:67: error: invalid use of incomplete type ‘struct dlib::image_traits<_IplImage*>’
typedef typename image_traits<image_type>::pixel_type pixel_type;
Update
I edited my code and now it compiles, but when I run it the tracking box appears and yet it does not track the objects.
for(;;) {
if(!vcap.read(image)) {
std::cout << "No frame" << std::endl;
break;
} else {
std::cout << "Starting" << std::endl;
array2d<rgb_pixel> img;
assign_image(img, cv_image<rgb_pixel>(image));
tracker.start_track(img, centered_rect(point(413,260), 98, 126));
for (unsigned long i = 0; i < img.size(); i++) {
tracker.update(img);
win.set_image(img);
win.clear_overlay();
win.add_overlay(tracker.get_position());
}
}
}
You're passing an IplImage to the dlib routine, but if you look at the docs it says you have to convert your OpenCV images to cv_image before passing them to dlib functions. You can see this in the error, which points out that IplImage doesn't have a pixel_type trait. A fix would be:
IplImage *img1 = cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
...
correlation_tracker tracker;
// Not sure if the tracker wants a single channel image
// change rgb_alpha_pixel to uchar in that case.
tracker.start_track(dlib::cv_image<dlib::rgb_alpha_pixel>(img1), centered_rect(point(120,100), 80, 150));
Pixel types and the Correlation tracker
EDIT: I ported your code to OpenCV 3 and this compiles for me. Do you really need to use OpenCV 2?
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include "dlib/image_processing.h"
#include "dlib/gui_widgets.h"
#include "dlib/image_io.h"
#include "dlib/dir_nav.h"
#include <dlib/opencv/cv_image.h>
using namespace cv;
using namespace dlib;
using namespace std;
int main(int argc, char * argv[]){
VideoCapture cap(0);
Mat frame;
while (cap.read(frame)) {
cvtColor(frame, frame, CV_RGB2GRAY);
cv_image<unsigned char> img(frame);
correlation_tracker tracker;
tracker.start_track(img, centered_rect(point(120,100), 80, 150));
image_window win;
tracker.update(img);
win.set_image(img);
win.clear_overlay();
win.add_overlay(tracker.get_position());
cout << "hit enter to process next frame" << endl;
cv::waitKey(0);
}
}
and with an uglier OpenCV 2 version:
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include "dlib/image_processing.h"
#include "dlib/gui_widgets.h"
#include "dlib/image_io.h"
#include "dlib/dir_nav.h"
#include <dlib/opencv/cv_image.h>
using namespace cv;
using namespace dlib;
using namespace std;
int main(int argc, char * argv[]){
CvCapture* camera = cvCaptureFromFile(argv[1]);
if (camera==NULL)
printf("camera is null\n");
else
printf("camera is not null\n");
cvNamedWindow("CvCapture");
while (cvWaitKey(1000)!=atoi("q")) {
double t1 = (double)cvGetTickCount();
IplImage *img = cvQueryFrame(camera);
cvSetImageROI(img, cvRect(140, 150, 340, 150));
IplImage *img1 = cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
cvCopy(img, img1, NULL);
cvResetImageROI(img);
cvShowImage("ROI",img1);
cvCvtColor(img1, img1, CV_RGB2GRAY);
cv_image<unsigned char> img2(img1);
correlation_tracker tracker;
tracker.start_track(img2, centered_rect(point(120,100), 80, 150));
image_window win;
tracker.update(img2);
win.set_image(img2);
win.clear_overlay();
win.add_overlay(tracker.get_position());
cout << "hit enter to process next frame" << endl;
cin.get();
double t2=(double)cvGetTickCount();
printf("time: %gms fps: %.2g\n",(t2-t1)/(cvGetTickFrequency()*1000.), 1000./((t2-t1)/(cvGetTickFrequency()*1000.)));
cvShowImage("CvCapture",img1);
}
cvReleaseCapture(&camera);
}
As you can see, the error came from exactly what I described above.

Improve Prediction Sensitivity using SVM with OpenCV

I am trying to classify images by whether characters are printed on a surface or not.
To do this, I first extract SURF features from real images and from real images I manually defected, build a bag of words, save it to an XML file, and then try to predict.
However, unless I use a completely different image or a heavily cropped one, my SVM classifier predicts it as correct.
These are the images I used for training:
https://www.dropbox.com/sh/xked9ywnibzv3tt/AADC0lP4WYAo3ddEDgvHpFhha/negative?dl=0
Here is my code.
#include <stdio.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "opencv2/core/core.hpp"
#include<dirent.h>
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv2/ml/ml.hpp>
using namespace cv;
using namespace std;
Ptr<FeatureDetector> detector = FeatureDetector::create("SURF");
Ptr<DescriptorExtractor> descriptors = DescriptorExtractor::create("SURF");
string to_string(const int val) {
int i = val;
std::string s;
std::stringstream out;
out << i;
s = out.str();
return s;
}
Mat compute_features(Mat image) {
vector<KeyPoint> keypoints;
Mat features;
detector->detect(image, keypoints);
KeyPointsFilter::retainBest(keypoints, 1500);
descriptors->compute(image, keypoints, features);
return features;
}
BOWKMeansTrainer addFeaturesToBOWKMeansTrainer(String dir, BOWKMeansTrainer& bowTrainer) {
DIR *dp;
struct dirent *dirp;
struct stat filestat;
dp = opendir(dir.c_str());
Mat features;
Mat img;
string filepath;
#pragma loop(hint_parallel(4))
for (; (dirp = readdir(dp));) {
filepath = dir + dirp->d_name;
cout << "Reading... " << filepath << endl;
if (stat( filepath.c_str(), &filestat )) continue;
if (S_ISDIR( filestat.st_mode )) continue;
img = imread(filepath, 0);
features = compute_features(img);
bowTrainer.add(features);
}
return bowTrainer;
}
void computeFeaturesWithBow(string dir, Mat& trainingData, Mat& labels, BOWImgDescriptorExtractor& bowDE, int label) {
DIR *dp;
struct dirent *dirp;
struct stat filestat;
dp = opendir(dir.c_str());
vector<KeyPoint> keypoints;
Mat features;
Mat img;
string filepath;
#pragma loop(hint_parallel(4))
for (;(dirp = readdir(dp));) {
filepath = dir + dirp->d_name;
cout << "Reading: " << filepath << endl;
if (stat( filepath.c_str(), &filestat )) continue;
if (S_ISDIR( filestat.st_mode )) continue;
img = imread(filepath, 0);
detector->detect(img, keypoints);
bowDE.compute(img, keypoints, features);
trainingData.push_back(features);
labels.push_back((float) label);
}
cout << string( 100, '\n' );
}
int main() {
initModule_nonfree();
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("FlannBased");
TermCriteria tc(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 10, 0.001);
int dictionarySize = 1000;
int retries = 1;
int flags = KMEANS_PP_CENTERS;
BOWKMeansTrainer bowTrainer(dictionarySize, tc, retries, flags);
BOWImgDescriptorExtractor bowDE(descriptors, matcher);
string dir = "/positive/", filepath;
DIR *dp;
struct dirent *dirp;
struct stat filestat;
cout << "Add Features to KMeans" << endl;
addFeaturesToBOWKMeansTrainer("/positive/", bowTrainer);
addFeaturesToBOWKMeansTrainer("/negative/", bowTrainer);
cout << endl << "Clustering..." << endl;
Mat dictionary = bowTrainer.cluster();
bowDE.setVocabulary(dictionary);
Mat labels(0, 1, CV_32FC1);
Mat trainingData(0, dictionarySize, CV_32FC1);
cout << endl << "Extract bow features" << endl;
computeFeaturesWithBow("/positive/", trainingData, labels, bowDE, 1);
computeFeaturesWithBow("/negative/", trainingData, labels, bowDE, 0);
CvSVMParams params;
params.kernel_type=CvSVM::LINEAR;
params.svm_type=CvSVM::C_SVC;
params.gamma=5;
params.C=100;
params.term_crit=cvTermCriteria(CV_TERMCRIT_NUMBER,100,0.000001);
CvSVM svm;
cout << endl << "Begin training" << endl;
bool res =svm.train(trainingData,labels,cv::Mat(),cv::Mat(),params);
svm.save("classifier.xml");
//CvSVM svm;
svm.load("classifier.xml");
vector<KeyPoint> cameraKeyPoints;
Mat rotated = imread("test.jpg",0);
Mat featuresFromimage;
detector->detect(rotated, cameraKeyPoints);
bowDE.compute(rotated, cameraKeyPoints, featuresFromimage);
cout <<"anar:"<< svm.predict(featuresFromimage) << endl;
imshow("edges", rotated);
cvWaitKey(0);
return 0;
}
Question 1: Since these images are very similar, how can I make the prediction graded, something like:
if similarity > 80%
"correct"
else
"defected"
Question 2: Since this kind of character defect is very rare in a factory, it is going to be very tough to get a lot of defected images for training. Is manually creating defects on these images a valid solution? If not, what can I actually do?
Question 3: What kind of preprocessing methods can I apply to this kind of image to increase the accuracy of the SVM?
thank you

opencv cvblob -- render blobs other than white

I am trying to detect objects with cvblob. Somehow, my code only marks the white objects. How can I mark objects of other colors, like a can of beer or a bottle of water?
Here is my code:
#include "highgui.h"
#include "cv.h"
#include "cvaux.h"
#include "iostream"
#include <stdio.h>
#include <ctype.h>
#include <cvblob.h>
using namespace cv;
using namespace std;
using namespace cvb;
int main(int argc, char** argv) {
CvTracks tracks;
cvNamedWindow("frame", CV_WINDOW_AUTOSIZE);
cvMoveWindow("frame", 50, 100);
CvCapture* capture;
IplImage* frame = 0;
capture = cvCreateCameraCapture( 0 ); //capture frames from cam on index 0: /dev/video0/
if (!capture) {
return -1;
}
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 320);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 240);
frame = cvQueryFrame(capture);
while(frame) {
IplImage *gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
cvCvtColor(frame, gray, CV_BGR2GRAY);
cvThreshold(gray, gray, 150, 255, CV_THRESH_BINARY);
IplImage *labelImg=cvCreateImage(cvGetSize(gray), IPL_DEPTH_LABEL, 1);
CvBlobs blobs;
unsigned int result=cvLabel(gray, labelImg, blobs);
cvFilterByArea(blobs, 500, 1000000);
// cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX);
cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_CENTROID);
cvUpdateTracks(blobs, tracks, 200., 5);
cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID|CV_TRACK_RENDER_BOUNDING_BOX);
for (CvBlobs::const_iterator it=blobs.begin(); it!=blobs.end(); ++it) {
cout << "Blob #" << it->second->label << ": Area=" << it->second->area << ", Centroid=(" << it->second->centroid.x << ", " << it->second->centroid.y << ")" << endl;
}
cvShowImage("frame", frame);
cout << "----------------------------" << endl;
frame = cvQueryFrame(capture);
char c = cvWaitKey(10);
if(c==27)
break;
}
}
Any tip is appreciated.
Milo
That's the default behavior, and you cannot change it without changing the source code of the cvblob library.
If you really want to change it, it is easy: create a copy of the same method and add a new input parameter, such as a CvScalar, to select the output color.
The method cvRenderBlob is in cvcontour.cpp.
I have made many improvements to the cvblob library, and in the coming months I will push them to its creator.
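For illustration, a small helper in that spirit might look like this (a hypothetical sketch: renderBlobBoxes is not part of cvblob, and the library's own cvRenderBlob does more than draw boxes):
// Hypothetical helper: draw each blob's bounding box in a caller-chosen color,
// instead of patching cvRenderBlobs inside the library.
void renderBlobBoxes(const cvb::CvBlobs& blobs, IplImage* img, CvScalar color)
{
    for (cvb::CvBlobs::const_iterator it = blobs.begin(); it != blobs.end(); ++it)
    {
        const cvb::CvBlob* b = it->second;
        cvRectangle(img, cvPoint(b->minx, b->miny), cvPoint(b->maxx, b->maxy), color, 2);
    }
}
// usage: renderBlobBoxes(blobs, frame, CV_RGB(255, 0, 0)); // red boxes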
Try adding:
cvInRangeS(hsvframe, cvScalar(23,41,133), cvScalar(40,150,255), threshy); // for yellow
before filtering the blobs. It takes a range of HSV (instead of RGB) values that defines the threshold for the desired color.
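A sketch of where that fits in the question's loop (the hsvframe and threshy images are assumptions of this sketch, and the HSV bounds depend on the target color and lighting):
// Threshold in HSV instead of grayscale before labeling.
// Allocate once outside the loop in real code; shown inline for brevity.
IplImage *hsvframe = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
IplImage *threshy  = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
cvCvtColor(frame, hsvframe, CV_BGR2HSV);                                      // camera frames are BGR
cvInRangeS(hsvframe, cvScalar(23, 41, 133), cvScalar(40, 150, 255), threshy); // keep yellow-ish pixels
cvLabel(threshy, labelImg, blobs);                                            // label the color mask instead of the gray threshold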
Hope it helps.

Convert IplImage to CvMat

Here is the gpu surf code:
#include <iostream>
#include <iomanip>
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv2/imgproc/imgproc_c.h>
#include "opencv2/gpu/gpu.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
using namespace std;
using namespace cv;
using namespace cv::gpu;
void help()
{
cout << "\nThis program demonstrates using SURF_GPU features detector, descriptor extractor and BruteForceMatcher_GPU" << endl;
cout << "\nUsage:\n\tmatcher_simple_gpu <image1> <image2>" << endl;
}
int main(int argc, char* argv[])
{
GpuMat img1(imread("C:\\OpenCV2.3\\opencv2.3\\bin\\Debug\\tsucuba_left.png", CV_LOAD_IMAGE_GRAYSCALE));
SURF_GPU surf;
// detecting keypoints & computing descriptors
GpuMat keypoints1GPU, keypoints2GPU;
GpuMat descriptors1GPU, descriptors2GPU;
surf(img1, GpuMat(), keypoints1GPU, descriptors1GPU);
cout << "FOUND " << keypoints1GPU.cols << " keypoints on first image" << endl;
//cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;
CvCapture* capture = cvCaptureFromCAM(0);
int frame_width = (int) cvGetCaptureProperty(capture, 320);
int frame_height = (int) cvGetCaptureProperty(capture, 240);
cout<<"frames done\n";
GpuMat frame_gpu = GpuMat(frame_width, frame_height, CV_8UC3);
GpuMat frame_gpu_cvt = GpuMat(frame_width, frame_height, CV_8UC1);
cout<<"gpu frmes loaded\n";
IplImage* frame;
while(1)
{
frame =cvQueryFrame(capture);
CvMat* image = cvCreateMat(frame->height, frame->width, CV_8UC1);
/*CvMat* image = cvCreateMatHeader(frame->height, frame->width, CV_8UC1);
image->step = 4 * (image->cols * CV_ELEM_SIZE1(image->type) * CV_MAT_CN(image->type) / 4 + 1);//critical
cvCreateData(image);*/
cvInitMatHeader( image, frame->width, frame->height, CV_8UC1,frame->imageData);
// cvConvert( frame, image );
//cvCvtColor( frame, image, CV_RGB2GRAY );
cvConvertImage( frame, image, CV_RGB2GRAY);
namedWindow("aa", 1);
cvShowImage("aa", frame);
frame_gpu.upload(image);
cout<<"frame uploaded\n";
surf(frame_gpu, GpuMat(), keypoints2GPU, descriptors2GPU);
cout<<"surf done\n";
// matching descriptors
BruteForceMatcher_GPU< L2<float> > matcher;
GpuMat trainIdx, distance;
matcher.matchSingle(descriptors1GPU, descriptors2GPU, trainIdx, distance);
cout<<"match done\n";
// downloading results
vector<KeyPoint> keypoints1, keypoints2;
vector<float> descriptors1, descriptors2;
vector<DMatch> matches;
surf.downloadKeypoints(keypoints1GPU, keypoints1);
surf.downloadKeypoints(keypoints2GPU, keypoints2);
surf.downloadDescriptors(descriptors1GPU, descriptors1);
surf.downloadDescriptors(descriptors2GPU, descriptors2);
BruteForceMatcher_GPU< L2<float> >::matchDownload(trainIdx, distance, matches);
// drawing the results
Mat img_matches;
drawMatches(img1, keypoints1, frame_gpu, keypoints2, matches, img_matches);
cout<<"match done\n";
namedWindow("matches", 1);
imshow("matches", img_matches);
cvReleaseMat(&image);
frame_gpu.release();
cvReleaseImage(&frame);
img_matches.release();
cout<<"deallocation done\n";
waitKey(0);
}
cvReleaseCapture(&capture);
cout<<"work done";
return 0;
}
We don't get a correct image in frame_gpu, so there is a problem in getting the image from frame. We displayed frame using cvShowImage("aa", frame); and it shows up, but if we try image instead of frame there is just a blank screen.
Use your IplImage for capturing, and then convert it to a Mat (in C++) with this constructor:
Mat(const IplImage* img, bool copyData=false);
So you would just do:
Mat myMat(img);
That would make your matrix. You would use it in the tracking section of your program!
NOTE: Data is not copied. You can set the copyData parameter to true, though, if you want data to be copied.
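For example, assuming an IplImage* img that has already been filled from the capture:
Mat shallow(img);    // header only: shares pixel data with the IplImage
Mat deep(img, true); // copyData = true: the Mat owns its own copy of the pixels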
Try something like this:
IplImage* frame;
// here load your frame with some image
// convert to cv::Mat and show the converted image
cv::Mat image(frame);
cv::imshow("Image Window", image);
That's how IplImage can be converted to cv::Mat and displayed.
For conversion to and from cvMat, you will find all the details here:
http://wangpengnorman.blogspot.com/2009/06/transform-between-cvmat-and-iplimage.html
