I am trying to do feature matching between 2 perspectives of the same image using DAISY and the FlannBasedMatcher.
I don't think there is even a single match that is correct.
Note: I also get different results each time I run the program, but I think this is expected behaviour, as explained here: FlannBasedMatcher returning different results
So what am I doing wrong? Why are these matches so bad?
Input Images
Wrong & non-deterministic results
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>
#include <vector>
#include <stdio.h>
using namespace cv;
using std::vector;
const float nn_match_ratio = 0.7f; // Nearest neighbor matching ratio
const float keypoint_diameter = 15.0f;
int main(int argc, char ** argv){
// Load images
Mat img1 = imread(argv[1]);
Mat img2 = imread(argv[2]);
vector<KeyPoint> keypoints1, keypoints2;
// Add every pixel to the list of keypoints for each image
for (float xx = keypoint_diameter; xx < img1.size().width - keypoint_diameter; xx++) {
for (float yy = keypoint_diameter; yy < img1.size().height - keypoint_diameter; yy++) {
keypoints1.push_back(KeyPoint(xx, yy, keypoint_diameter));
keypoints2.push_back(KeyPoint(xx, yy, keypoint_diameter));
}
}
Mat desc1, desc2;
Ptr<cv::xfeatures2d::DAISY> descriptor_extractor = cv::xfeatures2d::DAISY::create();
// Compute DAISY descriptors for both images
descriptor_extractor->compute(img1, keypoints1, desc1);
descriptor_extractor->compute(img2, keypoints2, desc2);
vector <vector<DMatch>> matches;
// For each descriptor in image2, find the 2 closest matches in image1 (note: couldn't get the BF matcher to work here at all)
FlannBasedMatcher flannmatcher;
flannmatcher.add(desc1);
flannmatcher.train();
flannmatcher.knnMatch(desc2, matches, 2);
// ignore matches with high ambiguity -- i.e. second closest match not much worse than first
// push all remaining matches into the DMatch vector "good_matches" so we can draw them using drawMatches
int num_good = 0;
vector<KeyPoint> matched1, matched2;
vector<DMatch> good_matches;
for (int i = 0; i < matches.size(); i++) {
DMatch first = matches[i][0];
DMatch second = matches[i][1];
if (first.distance < nn_match_ratio * second.distance) {
matched1.push_back(keypoints1[first.queryIdx]);
matched2.push_back(keypoints2[first.trainIdx]);
good_matches.push_back(DMatch(num_good, num_good, 0));
num_good++;
}
}
Mat res;
drawMatches(img1, matched1, img2, matched2, good_matches, res);
imwrite("_res.png", res);
return 0;
}
Sorry, I found my bug: I have the indexes reversed in the lines that read:
matched1.push_back(keypoints1[first.queryIdx]);
matched2.push_back(keypoints2[first.trainIdx]);
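With desc1 added to the matcher (the train set) and desc2 used as the query, the corrected lines should read as follows (a minimal sketch of the fix, keeping the variable names from the code above):
matched1.push_back(keypoints1[first.trainIdx]);   // trainIdx indexes desc1 / image 1
matched2.push_back(keypoints2[first.queryIdx]);   // queryIdx indexes desc2 / image 2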
How can I get the coordinates of the matches found in the two images, that is, the coordinates of the matches in the first image and the coordinates of the matches in the second?
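Each KeyPoint stores its position in KeyPoint::pt, so the coordinates can be read straight off the matched keypoints. A minimal sketch, reusing the names from the code above (and the corrected query/train indexing):
for (size_t i = 0; i < matches.size(); i++) {
const DMatch& first = matches[i][0];
const DMatch& second = matches[i][1];
if (first.distance < nn_match_ratio * second.distance) {
Point2f pt1 = keypoints1[first.trainIdx].pt;  // coordinates in the first image
Point2f pt2 = keypoints2[first.queryIdx].pt;  // coordinates in the second image
printf("(%.1f, %.1f) -> (%.1f, %.1f)\n", pt1.x, pt1.y, pt2.x, pt2.y);
}
}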
Hi. I have the above image and use the "findContours" function.
And then I use the "convexity defects" function to find the corner points.
The result is as follows.
The problem with this code is that it cannot find the rounded corners. It cannot find a point like the following.
This is my code
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>
#include <iostream>
#include <sstream>
#include <fstream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
cv::Mat image = cv::imread("find_Contours.png");
//Prepare the image for findContours
cv::cvtColor(image, image, cv::COLOR_BGR2GRAY);
cv::threshold(image, image, 128, 255, cv::THRESH_BINARY);
//Find the contours. Use the contourOutput Mat so the original image doesn't get overwritten
std::vector<std::vector<cv::Point> > contours;
cv::Mat contourOutput = image.clone();
cv::findContours(contourOutput, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
////convexityDefects
vector<vector<Point> >hull(contours.size());
vector<vector<int> > hullsI(contours.size()); // Indices to contour points
vector<vector<Vec4i>> defects(contours.size());
for (int i = 0; i < contours.size(); i++)
{
convexHull(contours[i], hull[i], false);
convexHull(contours[i], hullsI[i], false);
if (hullsI[i].size() > 3) // You need more than 3 indices
{
convexityDefects(contours[i], hullsI[i], defects[i]);
}
}
///// Draw convexityDefects
for (int i = 0; i < contours.size(); ++i)
{
for (const Vec4i& v : defects[i])
{
float depth = v[3] / 256.0f;   // convexityDefects stores the depth in fixed point (value * 256)
if (depth >= 0) // filter defects by depth here if needed, e.g. depth > 10
{
int startidx = v[0]; Point ptStart(contours[i][startidx]);
int endidx = v[1]; Point ptEnd(contours[i][endidx]);
int faridx = v[2]; Point ptFar(contours[i][faridx]);
circle(image, ptFar, 4, Scalar(255, 255, 255), 2);
cout << ptFar << endl;
}
}
}
//
cv::imshow("Input Image", image);
cvMoveWindow("Input Image", 0, 0);
//
waitKey(0);
}
Can someone adjust the code so it finds the red dot? Please help.
Now I want to find the convexity defects from the inside, not the outside, like in this image:
Can someone help me?
It is very important to use
convexHull(contours[i], hullsI[i], true);
That is, with the third argument set to true (clockwise orientation); the indices themselves come from the vector<int> output. I'm almost certain this is the reason it cannot find all the defects. Until this is fixed, there is not much sense in looking for other bugs (if any).
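A minimal sketch of the corrected loop, keeping the variables from the question (only the clockwise flag changes):
for (size_t i = 0; i < contours.size(); i++)
{
convexHull(contours[i], hull[i], true);    // hull as points, clockwise
convexHull(contours[i], hullsI[i], true);  // hull as contour indices, clockwise -- what convexityDefects expects
if (hullsI[i].size() > 3)                  // convexityDefects needs more than 3 indices
convexityDefects(contours[i], hullsI[i], defects[i]);
}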
Hello and thanks for your help.
I would like to test the use of shapes for matching in OpenCV and managed to do the matching part.
To locate the rotated shape, I thought the AffineTransformer class would be the right choice. As I don't know how the matching works internally, it would be nice if someone had a link where the procedure is described.
As shawshank mentioned, the following code throws an "Assertion failed" error because the variable matches is empty when it is passed to the estimateTransformation function. Does anybody know how to use this function the right way, and what it really does?
#include<opencv2/opencv.hpp>
#include<algorithm>
#include<iostream>
#include<string>
#include<opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;
bool rotateImage(Mat src, Mat &dst, double angle)
{
// get rotation matrix for rotating the image around its center
cv::Point2f center(src.cols/2.0, src.rows/2.0);
cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
// determine bounding rectangle
cv::Rect bbox = cv::RotatedRect(center,src.size(), angle).boundingRect();
// adjust transformation matrix
rot.at<double>(0,2) += bbox.width/2.0 - center.x;
rot.at<double>(1,2) += bbox.height/2.0 - center.y;
cv::warpAffine(src, dst, rot, bbox.size());
return 1;
}
static vector<Point> sampleContour( const Mat& image, int n=300 )
{
vector<vector<Point>> contours;
vector<Point> all_points;
findContours(image, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
for (size_t i=0; i <contours.size(); i++)
{
for (size_t j=0; j<contours[i].size(); j++)
{
all_points.push_back(contours[i][j]);
}
}
int dummy=0;
for (int add=(int)all_points.size(); add<n; add++)
{
all_points.push_back(all_points[dummy++]);
}
// shuffle
random_shuffle(all_points.begin(), all_points.end());
vector<Point> sampled;
for (int i=0; i<n; i++)
{
sampled.push_back(all_points[i]);
}
return sampled;
}
int main(void)
{
Mat img1, img2;
vector<Point> img1Points, img2Points;
float distSC, distHD;
// read images
string img1Path = "testimage.jpg";
img1 = imread(img1Path, IMREAD_GRAYSCALE);
rotateImage(img1, img2, 45);
imshow("original", img1);
imshow("transformed", img2);
waitKey();
// Contours
img1Points = sampleContour(img1);
img2Points = sampleContour(img2);
//Calculate Distances
Ptr<ShapeContextDistanceExtractor> mysc = createShapeContextDistanceExtractor();
Ptr<HausdorffDistanceExtractor> myhd = createHausdorffDistanceExtractor();
distSC = mysc->computeDistance( img1Points, img2Points );
distHD = myhd -> computeDistance( img1Points, img2Points );
cout << distSC << endl << distHD << endl;
vector<DMatch> matches;
Ptr<AffineTransformer> transformerHD = createAffineTransformer(0);
transformerHD -> estimateTransformation(img1Points, img2Points, matches);
return 0;
}
I have used the AffineTransformer class on a 2D image. Below is basic code that will give you an idea of what it does.
// My OpenCv AffineTransformer demo code
// I have tested this on a 500 x 500 resolution image
#include <iostream>
#include "opencv2/opencv.hpp"
#include <vector>
using namespace cv;
using namespace std;
int arrSize = 10;
int sourcePx[]={154,155,159,167,182,209,238,265,295,316};
int sourcePy[]={190,222,252,285,314,338,344,340,321,290};
int tgtPx[]={120,127,137,150,188,230,258,285,305,313};
int tgtPy[]={207,245,275,305,336,345,342,332,305,274};
int main()
{
// Prepare 'vector of points' from above hardcoded points
int sInd=0, eInd=arrSize;
vector<Point2f> sourceP; for(int i=sInd; i<eInd; i++) sourceP.push_back(Point2f(sourcePx[i], sourcePy[i]));
vector<Point2f> tgtP; for(int i=sInd; i<eInd; i++) tgtP.push_back(Point2f(tgtPx[i], tgtPy[i]));
// Create object of AffineTransformer
bool fullAffine = true; // change its value and see difference in result
auto aft = cv::createAffineTransformer(fullAffine);
// Prepare vector<cv::DMatch> - this is just mapping of corresponding points indices
std::vector<cv::DMatch> matches;
for(int i=0; i<sourceP.size(); ++i) matches.push_back(cv::DMatch(i, i, 0));
// Read image
Mat srcImg = imread("image1.jpg");
Mat tgtImg;
// estimate points transformation
aft->estimateTransformation(sourceP, tgtP, matches);
// apply transformation
aft->applyTransformation(sourceP, tgtP);
// warp image
aft->warpImage(srcImg, tgtImg);
// show generated output
imshow("warped output", tgtImg);
waitKey(0);
return 0;
}
I have an nxd matrix V=[v_1; v_2;...; v_n] (; means new row) where v_i are 1xd vectors.
I want to compute the following sum: v_1^T*v_1 + v_2^T*v_2 + ... + v_n^T*v_n, which is a dxd matrix (v_i^T is the transpose of v_i).
For the moment I use a for loop, as in the code below, which I suspect is not efficient when n is very large.
#include <iostream>
#include <opencv2/core.hpp>
using namespace cv;
using namespace std;
int main (int argc, char * argv[])
{
int n=5, d=3;
Mat V = Mat(n, d, CV_32F);
randu(V, Scalar::all(0), Scalar::all(10));
cout<<V<<endl<<endl;
Mat M = Mat::zeros(d, d, CV_32F);
for(int i=0; i<n; i++)
{
M = M + V.row(i).t()*V.row(i);
}
cout<<M<<endl<<endl;
return 0;
}
Hope that somebody can suggest a faster way. Thanks in advance.
You can just take V.t()*V
(It took me a minute to realize it too, but if you write out the matrix multiplication you'll see it's the same: entry (j,k) of V.t()*V is the sum over i of V(i,j)*V(i,k), which is exactly the (j,k) entry of your sum of outer products.)
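For completeness, a small self-contained check (same setup as the question, with randu-initialized data) that the loop and the closed form agree up to floating-point rounding:
#include <iostream>
#include <opencv2/core.hpp>
using namespace cv;
using namespace std;
int main()
{
int n = 5, d = 3;
Mat V(n, d, CV_32F);
randu(V, Scalar::all(0), Scalar::all(10));
// Loop version from the question: sum of outer products of the rows
Mat M = Mat::zeros(d, d, CV_32F);
for (int i = 0; i < n; i++)
M = M + V.row(i).t() * V.row(i);
// Closed form: a single matrix product
Mat M2 = V.t() * V;
cout << norm(M - M2, NORM_INF) << endl; // ~0 up to float rounding
return 0;
}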
I'm implementing SIFT using OpenCV 2.3.
Sift implementation with OpenCV 2.2
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main(int argc, char *argv[])
{
Mat image = imread("TestImage.jpg");
// Create smart pointer for SIFT feature detector.
Ptr<FeatureDetector> featureDetector = FeatureDetector::create("SIFT");
vector<KeyPoint> keypoints;
// Detect the keypoints
featureDetector->detect(image, keypoints); // NOTE: featureDetector is a pointer hence the '->'.
//Similarly, we create a smart pointer to the SIFT extractor.
Ptr<DescriptorExtractor> featureExtractor = DescriptorExtractor::create("SIFT");
// Compute the 128 dimension SIFT descriptor at each keypoint.
// Each row in "descriptors" corresponds to the SIFT descriptor for one keypoint
Mat descriptors;
featureExtractor->compute(image, keypoints, descriptors);
// If you would like to draw the detected keypoint just to check
Mat outputImage;
Scalar keypointColor = Scalar(255, 0, 0); // Blue keypoints.
drawKeypoints(image, keypoints, outputImage, keypointColor, DrawMatchesFlags::DEFAULT);
namedWindow("Output");
imshow("Output", outputImage);
char c = ' ';
while ((c = waitKey(0)) != 'q'); // Keep window there until user presses 'q' to quit.
return 0;
}
How can I change the default CommonParams, DetectorParams, and DescriptorParams parameters?
The answer is at OpenCV change keypoint or descriptor parameters after creation. Relevant snippet:
featureDetector->set("someParam", someValue);
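As a hedged illustration of that interface, the snippet below uses parameter names as they are registered for SIFT in OpenCV 2.4.x; verify them against your build's documentation (or Algorithm::getParams) before relying on them.
featureDetector->set("contrastThreshold", 0.03); // DoG contrast threshold for rejecting weak keypoints
featureDetector->set("edgeThreshold", 10.0);     // principal-curvature-ratio threshold for rejecting edge responses
featureDetector->set("nOctaveLayers", 3);        // number of layers per octave in the scale space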
The objective is to detect the 5 white circles in the image. The test image in which the circles have to be detected is the one shown here (640x480).
Please download the original image here (1280x1024).
I am comparing various circle/ellipse detection methods, but somehow I am not able to fix my simple Hough transform code: it does not detect any circles. I am not sure whether the problem is in the pre-processing step or in the parameters of HoughCircles. I have gone through all the similar questions on the forum, but I am still not able to fix the issue. This is my code; please help me in this regard.
Header file
#ifndef IMGPROCESSOR_H
#define IMGPROCESSOR_H
// OpenCV Library
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
class ImgProcessor{
public:
Mat OpImg ;
ImgProcessor();
~ImgProcessor();
//aquire filter methods to image
int Do_Hough(Mat IpImg);
};
#endif /* ImgProcessor_H */
Source file
#include "ImgProcessor.h"
#include <opencv2/opencv.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include <vector>
using namespace cv;
ImgProcessor::ImgProcessor(){
return;
}
ImgProcessor::~ImgProcessor(){
return;
}
//Apply filtering for the input image
int ImgProcessor::Do_Hough(Mat IpImg)
{
//Parameter Initialization________________________________________________________
double sigma_x, sigma_y, thresh=250, max_thresh = 255;
int ksize_w = 5 ;
int ksize_h = 5;
sigma_x = 0.3*((ksize_w-1)*0.5 - 1) + 0.8 ;
sigma_y = 0.3*((ksize_h-1)*0.5 - 1) + 0.8 ;
vector<Vec3f> circles;
//Read the image as a matrix
Mat TempImg;
//resize(IpImg, IpImg ,Size(), 0.5,0.5, INTER_AREA);
//Preprocessing__________________________________________________________
//Perform initial smoothing
GaussianBlur( IpImg, TempImg, Size(ksize_w, ksize_h),2,2);
//perform thresholding
threshold(TempImg,TempImg, thresh,thresh, 0);
//Remove noise by gaussian smoothing
GaussianBlur( TempImg, TempImg, Size(ksize_w, ksize_h),2,2);
/*imshow("Noisefree Image", TempImg);
waitKey(10000);*/
//Obtain edges
Canny(TempImg, TempImg, 255,240 , 3);
imshow("See Edges", TempImg);
waitKey(10000);
//Increase the line thickness
//dilate(TempImg,TempImg,0,Point(-1,-1),3);
//Hough Circle Method______________________________________________________________
// Apply the Hough Transform to find the circles
HoughCircles( TempImg, circles, 3, 1, TempImg.rows/32, 255, 240, 5, 0 );
// Draw the circles detected
for( size_t i = 0; i < circles.size(); i++ )
{
Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
int radius = cvRound(circles[i][2]);
// circle center
circle( IpImg, center, 3, Scalar(0,255,0), -1, 8, 0 );
// circle outline
circle( IpImg, center, radius, Scalar(0,0,255), 3, 8, 0 );
}
// Show your results
namedWindow( "Hough Circle Transform", WINDOW_AUTOSIZE );
imshow( "Hough Circle Transform", IpImg );
// waitKey(0);
return 0;
}
int main(int argc, char** argv)
{
ImgProcessor Iclass;
//char* imageName = argv[1];
string imageName = "D:/Projects/test_2707/test_2707/1.bmp";
Mat IpImg = imread( imageName );
cvtColor(IpImg, IpImg, COLOR_BGR2GRAY);
Iclass.Do_Hough(IpImg);
/*Iclass.Do_Contours(IpImg);*/
return 0;
}
The code seems fine, other than for:
HoughCircles( TempImg, circles, 3, 1, TempImg.rows/32, 255, 240, 5, 0 );
Does the number 3 in the parameter list correspond to CV_HOUGH_GRADIENT? It is always better to use the named constants instead of magic numbers.
Maybe you should first test it with an image containing bigger circles. Once you are sure that the rest of the code is correct, you can tune the parameters of HoughCircles.
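For reference, a hedged sketch of what the call could look like with the named constant and purely illustrative values (they still need tuning against the real circle sizes):
HoughCircles(TempImg, circles, CV_HOUGH_GRADIENT,
1,                // dp: accumulator at the same resolution as the image
TempImg.rows / 8, // minDist: minimum distance between detected centers
100,              // param1: upper threshold of the internal Canny edge detector
30,               // param2: accumulator threshold (lower value -> more circles reported)
5, 100);          // minRadius, maxRadius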