I'm working on image processing. I want to match 2D features, and I have run many tests with SURF, SIFT, and ORB. How can I apply RANSAC on top of SURF/SIFT/ORB matches in OpenCV?
OpenCV has the function cv::findHomography which can optionally use RANSAC to find the homography matrix relating two images. You can see an example of this function in action here.
Specifically the section of code you are interested in is:
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );

//-- In the full tutorial, `matches` is first filtered (e.g. by descriptor distance)
//-- into `good_matches`; here we simply copy them so the snippet is self-contained.
std::vector< DMatch > good_matches = matches;

std::vector< Point2f > obj;
std::vector< Point2f > scene;
for( size_t i = 0; i < good_matches.size(); i++ )
{
    //-- Get the keypoints from the good matches
    obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
    scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}

Mat H = findHomography( obj, scene, CV_RANSAC );
You can then use cv::warpPerspective to warp one image onto the other according to the homography matrix (cv::perspectiveTransform is the point-wise counterpart, for mapping individual coordinates such as object corners).
Other options for cv::findHomography besides CV_RANSAC are 0, which uses every point, and CV_LMEDS, which uses the Least-Median of Squares method. More info can be found in the OpenCV camera calibration documentation here.
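For completeness, here is a small Python counterpart (the point arrays are synthetic stand-ins just so the sketch runs): when RANSAC or LMEDS is used, findHomography also returns a per-point inlier mask, which is handy for counting how many correspondences survived.
import numpy as np
import cv2

# Synthetic example: points related by a known translation, plus one deliberate outlier.
src_pts = np.float32([[10, 10], [200, 15], [190, 180], [20, 170], [100, 90], [50, 50]]).reshape(-1, 1, 2)
dst_pts = src_pts + 5.0          # pure translation for illustration
dst_pts[-1] += 80.0              # make the last correspondence an outlier

H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)    # RANSAC, 5 px reprojection threshold
# H, mask = cv2.findHomography(src_pts, dst_pts, 0)                # 0: use every point (least squares)
# H, mask = cv2.findHomography(src_pts, dst_pts, cv2.LMEDS)        # Least-Median of Squares
print(int(mask.sum()), "inliers out of", len(mask))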
Here is a Python implementation that applies RANSAC from skimage, with either a ProjectiveTransform (i.e. homography) or an AffineTransform model, to SIFT/SURF keypoints. It first applies Lowe's ratio test to the raw matches, then runs RANSAC on the keypoints that survive the ratio test.
import cv2
from skimage.measure import ransac
from skimage.transform import ProjectiveTransform, AffineTransform
import numpy as np
def siftMatching(img1, img2):
    # Input : image1 and image2 in opencv format
    # Output : corresponding keypoints for source and target images
    # Output Format : Numpy matrix of shape: [No. of Correspondences X 2]
    surf = cv2.xfeatures2d.SURF_create(100)
    # surf = cv2.xfeatures2d.SIFT_create()
    kp1, des1 = surf.detectAndCompute(img1, None)
    kp2, des2 = surf.detectAndCompute(img2, None)

    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Lowe's ratio test
    good = []
    for m, n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)

    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 2)

    # RANSAC
    model, inliers = ransac(
        (src_pts, dst_pts),
        AffineTransform, min_samples=4,
        residual_threshold=8, max_trials=10000
    )
    n_inliers = np.sum(inliers)

    inlier_keypoints_left = [cv2.KeyPoint(point[0], point[1], 1) for point in src_pts[inliers]]
    inlier_keypoints_right = [cv2.KeyPoint(point[0], point[1], 1) for point in dst_pts[inliers]]
    placeholder_matches = [cv2.DMatch(idx, idx, 1) for idx in range(n_inliers)]
    image3 = cv2.drawMatches(img1, inlier_keypoints_left, img2, inlier_keypoints_right, placeholder_matches, None)

    cv2.imshow('Matches', image3)
    cv2.waitKey(0)

    src_pts = np.float32([inlier_keypoints_left[m.queryIdx].pt for m in placeholder_matches]).reshape(-1, 2)
    dst_pts = np.float32([inlier_keypoints_right[m.trainIdx].pt for m in placeholder_matches]).reshape(-1, 2)

    return src_pts, dst_pts
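A possible way to call it (the image file names here are just placeholders); note that the function also pops up a window showing the inlier matches:
img1 = cv2.imread('img1.jpg')
img2 = cv2.imread('img2.jpg')
src_pts, dst_pts = siftMatching(img1, img2)
print(src_pts.shape, dst_pts.shape)   # both are (n_inliers, 2)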
Related
I'm trying to register 2 similar images; however, I end up with the exact reference picture after executing my code.
My main aim in registering those 2 pictures is to find the differences between them. Any ideas how I can obtain the difference?
orb = cv.ORB_create(1000)
kp1, des1 = orb.detectAndCompute(grey, None)
kp2, des2 = orb.detectAndCompute(greyy, None)

matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE_HAMMING)
matches = matcher.match(des1, des2, None)
matches = sorted(matches, key=lambda x: x.distance)

points1 = np.zeros((len(matches), 2), dtype=np.float32)
points2 = np.zeros((len(matches), 2), dtype=np.float32)
for i, match in enumerate(matches):
    points1[i, :] = kp1[match.queryIdx].pt
    points2[i, :] = kp2[match.trainIdx].pt

h, mask = cv.findHomography(points1, points2, cv.RANSAC)
regimg = cv.warpPerspective(img1, h, (width, height))
cv.imshow('registered', regimg)
As requested in the comments: Matched images using orb
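If the registration itself comes out right, one way to obtain the difference (a rough sketch, not from the original post) is to difference the warped image against the grayscale image it was registered towards and threshold the result; greyy is assumed here to have the same (width, height) used in warpPerspective:
reg_gray = cv.cvtColor(regimg, cv.COLOR_BGR2GRAY)
diff = cv.absdiff(reg_gray, greyy)                            # compare warped image with the target
_, diff_mask = cv.threshold(diff, 30, 255, cv.THRESH_BINARY)  # 30 is just an example threshold
cv.imshow('difference', diff_mask)
cv.waitKey(0)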
I'm trying to find the difference between two images.
db_image:
query_image:
abs_diff:
query_image is a little bigger than db_image. I'm using OpenCV SIFT to extract features and FlannBasedMatcher to find the homography, and finally I'm using absdiff to extract the difference area. It works! But for some images warpPerspective is not perfect and leaves some anti-aliased noise pixels, I think. How can I solve this?
Sorry for my bad english!
Here is my source code :
grayscale_db = cv2.cvtColor(db_img, cv2.COLOR_BGR2GRAY)
gray_scale_query = cv2.cvtColor(query_img, cv2.COLOR_BGR2GRAY)

# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(grayscale_db, None)
kp2, des2 = sift.detectAndCompute(gray_scale_query, None)

FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
good = [m for m, n in matches if m.distance < 0.9 * n.distance]

result_area = None
h, w = grayscale_db.shape
if len(good) > threshold:   # `threshold` and `return_area` come from elsewhere; this is a fragment of a larger function
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    if return_area:
        return cv2.minAreaRect(dst_pts)
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
    result_area = cv2.warpPerspective(query_img, M, (w, h), borderMode=cv2.BORDER_CONSTANT,
                                      borderValue=(255, 255, 255), flags=cv2.INTER_LINEAR)
    diff = cv2.absdiff(db_img, result_area)
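Not part of the original code, but one common way to suppress the thin anti-aliased edge residue is to threshold the difference image and clean it with a small morphological opening; a rough sketch continuing from diff above:
gray_diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
_, noise_mask = cv2.threshold(gray_diff, 25, 255, cv2.THRESH_BINARY)   # 25 is an arbitrary example value
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
cleaned = cv2.morphologyEx(noise_mask, cv2.MORPH_OPEN, kernel)         # removes 1-2 px speckles along edges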
I am trying to perform face tracking with the Lucas-Kanade algorithm combined with Haar cascade classification. The Lucas-Kanade part is successful and can track the user, but unfortunately some of the goodFeaturesToTrack points are wasted on corners in the background. I wish to use the Haar cascade's ability to detect the face to get the coordinates of the detected face and apply Lucas-Kanade only within that restricted area.
Basically, I want to use the Haar cascade to detect the face, get the x, y, w, and h values, and use those coordinates to apply Lucas-Kanade within that restricted area (so that no points are wasted on background features and only facial features are tracked).
The line of code that is doing the Lucas Kanade algorithm is this code:
p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
How do I do that?
Code:
from matplotlib import pyplot as plt
import numpy as np
import cv2
rectangle_x = 0
face_classifier = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 200,
qualityLevel = 0.01,
minDistance = 10,
blockSize = 7 )
# Parameters for lucas kanade optical flow
lk_params = dict( winSize = (15,15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Create some random colors
color = np.random.randint(0,255,(100,3))
# Take first frame and find corners in it
ret, old_frame = cap.read()
cv2.imshow('Old_Frame', old_frame)
cv2.waitKey(0)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
restart = True
face = face_classifier.detectMultiScale(old_gray, 1.2, 4)
if len(face) == 0:
    print("This is empty")

for (x, y, w, h) in face:
    focused_face = old_frame[y: y+h, x: x+w]

    cv2.imshow('Old_Frame', old_frame)
    face_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.cvtColor(focused_face, cv2.COLOR_BGR2GRAY)

    corners_t = cv2.goodFeaturesToTrack(gray, mask=None, **feature_params)
    corners = np.int0(corners_t)
    for i in corners:
        ix, iy = i.ravel()
        cv2.circle(focused_face, (ix, iy), 3, 255, -1)
        cv2.circle(old_frame, (x+ix, y+iy), 3, 255, -1)
        print(ix, " ", iy)

    plt.imshow(old_frame), plt.show()

    ##########
    #############################
    p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
    #############################

    # Create a mask image for drawing purposes
    mask = np.zeros_like(old_frame)
    print("X: ", x)
    print("Y: ", y)

while(1):
    ret, frame = cap.read()
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)

    # Select good points
    good_new = p1[st == 1]
    good_old = p0[st == 1]

    # draw the circles
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()
        c, d = old.ravel()
        cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
        if i == 99:
            break

    cv2.imshow('frame', frame)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)

cv2.destroyAllWindows()
cap.release()
Here is the code snippet:
p0 = np.array([[[x,y]], [[x0,y0]]], np.float32)
just replace p0 in the original code and assign x, x0, ... with your desired points
- make sure it is a numpy array with the (N, 1, 2) shape shown above
- and the type is float32 for single precision
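For example, a hypothetical illustration of that suggestion, seeding the tracker with a few hand-picked points inside the face rectangle returned by detectMultiScale (all coordinates below are made-up example values):
import numpy as np

x, y, w, h = 120, 80, 90, 90                 # example face rectangle from detectMultiScale
pts = [(x + w//4, y + h//3),                 # roughly the left eye
       (x + 3*w//4, y + h//3),               # roughly the right eye
       (x + w//2, y + 2*h//3)]               # roughly the mouth
p0 = np.array(pts, dtype=np.float32).reshape(-1, 1, 2)   # shape (N, 1, 2), float32, as calcOpticalFlowPyrLK expects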
I am trying to use drawmatches function in OpenCV.
It puts the images side by side (left and right). I want the images stacked top and bottom and then the matches drawn, for more clarity.
Is there a way in which it can be done in OpenCV? Or, I will have to write a new function?
I am afraid you have to write your own function. I think it should not be too complicated.
As a starting point just have a look at https://github.com/Itseez/opencv/blob/2.4/modules/features2d/src/draw.cpp where we have the function _prepareImgAndDrawKeypoints
static void _prepareImgAndDrawKeypoints( const Mat& img1, const vector<KeyPoint>& keypoints1,
const Mat& img2, const vector<KeyPoint>& keypoints2,
Mat& outImg, Mat& outImg1, Mat& outImg2,
const Scalar& singlePointColor, int flags )
{
Size size( img1.cols + img2.cols, MAX(img1.rows, img2.rows) );
for example, size should be changed to
Size size( MAX(img1.cols, img2.cols), img1.rows + img2.rows );
and then you can continue to study that function (and the other ones) and complete your task. Maybe you can also contribute your new feature back to OpenCV.
As a quick workaround, you can pre-rotate both images by 90 degrees, detect features and draw the matches on the rotated images, and then undo the rotation.
As an example, take these images (taken from here and here):
Result:
Python code (the resize part is just for fitting the images to the screen size):
import cv2
im1 = cv2.imread('test1.jpg')
im2 = cv2.imread('test2.jpg')
# resize
scale=0.5
n1,m1 = int(im1.shape[0]*scale), int(im1.shape[1]*scale)
n2,m2 = int(im2.shape[0]*scale), int(im2.shape[1]*scale)
im1 = cv2.resize(im1, (m1,n1))
im2 = cv2.resize(im2, (m2,n2))
rotate = True
if rotate:
    im1 = cv2.rotate(im1, cv2.ROTATE_90_COUNTERCLOCKWISE)
    im2 = cv2.rotate(im2, cv2.ROTATE_90_COUNTERCLOCKWISE)

# gray versions:
im1g = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
im2g = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)

# sift detections:
sift = cv2.SIFT_create()
kp1, ds1 = sift.detectAndCompute(im1g, None)
kp2, ds2 = sift.detectAndCompute(im2g, None)

# matching
matcher = cv2.DescriptorMatcher.create('BruteForce')
matches = matcher.knnMatch(ds1, ds2, 2)

# Filter matches using Lowe's ratio test
ratio_thresh = 0.7
good_matches = []
for i, (m, n) in enumerate(matches):
    if m.distance < ratio_thresh * n.distance:
        good_matches.append(m)

# draw matches:
im_matches = cv2.drawMatches(im1, kp1, im2, kp2, good_matches, None,
                             flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)

# undo pre-rotation
if rotate:
    im_matches = cv2.rotate(im_matches, cv2.ROTATE_90_CLOCKWISE)
cv2.imshow('matches', im_matches)
I know there are already several questions on the same subject asked here, but I couldn't find any help.
So I want to compare 2 images to see how similar they are, and I'm using the well-known find_obj.cpp demo to extract SURF descriptors; for the matching I use flannFindPairs.
But as you know this method doesn't discard the outliers, and I'd like to know the number of true positive matches so I can figure out how similar those two images are.
I have already seen this question: Detecting outliers in SURF or SIFT algorithm with OpenCV. The answer there suggests using findFundamentalMat, but once you get the fundamental matrix, how can I get the number of outliers/true positives from that matrix? Thank you.
Here is a snippet from the descriptor_extractor_matcher.cpp sample available from OpenCV:
if( !isWarpPerspective && ransacReprojThreshold >= 0 )
{
    cout << "< Computing homography (RANSAC)..." << endl;
    vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
    vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
    H12 = findHomography( Mat(points1), Mat(points2), CV_RANSAC, ransacReprojThreshold );
    cout << ">" << endl;
}

Mat drawImg;
if( !H12.empty() ) // filter outliers
{
    vector<char> matchesMask( filteredMatches.size(), 0 );
    vector<Point2f> points1; KeyPoint::convert(keypoints1, points1, queryIdxs);
    vector<Point2f> points2; KeyPoint::convert(keypoints2, points2, trainIdxs);
    Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);

    double maxInlierDist = ransacReprojThreshold < 0 ? 3 : ransacReprojThreshold;
    for( size_t i1 = 0; i1 < points1.size(); i1++ )
    {
        if( norm(points2[i1] - points1t.at<Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
            matchesMask[i1] = 1;
    }

    // draw inliers
    drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, CV_RGB(0, 255, 0), CV_RGB(0, 0, 255), matchesMask
#if DRAW_RICH_KEYPOINTS_MODE
                 , DrawMatchesFlags::DRAW_RICH_KEYPOINTS
#endif
               );

#if DRAW_OUTLIERS_MODE
    // draw outliers
    for( size_t i1 = 0; i1 < matchesMask.size(); i1++ )
        matchesMask[i1] = !matchesMask[i1];
    drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg, CV_RGB(0, 0, 255), CV_RGB(255, 0, 0), matchesMask,
                 DrawMatchesFlags::DRAW_OVER_OUTIMG | DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
#endif
}
else
    drawMatches( img1, keypoints1, img2, keypoints2, filteredMatches, drawImg );
The key filtering step is performed by these lines:
if( norm(points2[i1] - points1t.at<Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
matchesMask[i1] = 1;
This measures the L2-norm (Euclidean) distance between each matched point and its partner reprojected through the homography; a match counts as an inlier if that distance is at most the reprojection threshold (3 pixels if nothing was specified, otherwise the user-defined value).
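The same reprojection-distance count can be written compactly in Python; a small sketch with synthetic stand-in points (replace them with your own correspondences):
import numpy as np
import cv2

points1 = np.float32([[10, 10], [200, 15], [190, 180], [20, 170], [100, 90], [60, 40]]).reshape(-1, 1, 2)
points2 = points1 + 5.0
points2[-1] += 50.0                                   # one deliberate outlier

H12, _ = cv2.findHomography(points1, points2, cv2.RANSAC, 3.0)
points1t = cv2.perspectiveTransform(points1, H12)     # reproject points1 with the homography
dists = np.linalg.norm(points2 - points1t, axis=2).ravel()
inliers = dists <= 3.0                                # same 3 px default as the C++ snippet
print(int(inliers.sum()), "inliers /", len(inliers), "matches")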
Hope that helps!
You can use the size of the vector named "ptpairs" to decide how similar the pictures are.
This vector contains the matching keypoints, so its size/2 is the number of matches.
I think you can use the size of ptpairs divided by the total number of keypoints in order to set an appropriate threshold.
This will probably give you an estimate of the similarity between them.
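As a concrete (made-up) illustration of that metric, the score is just the number of matched pairs divided by the total number of keypoints in both images:
def match_similarity(n_matches, n_keypoints_1, n_keypoints_2):
    # Fraction of keypoints that found a partner, following the suggestion above
    # (ptpairs holds both endpoints, so its size/2 is the number of matches).
    return 2.0 * n_matches / max(n_keypoints_1 + n_keypoints_2, 1)

print(match_similarity(120, 400, 450))   # example numbers, prints ~0.28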