Use OpenCV to crop boxes in car documents

I'm new to OpenCV and would like to use it to crop portions of an image and then read them with Tesseract. I'm not sure of the best way to crop all the boxes I need.
Here is an easy example of the document I need to transform:
Any advice on what would work best?
I tried ORB with the following image as a template:
But without success.
On the template, some of the lines are selected as keypoints, but on the image I want to process it's mainly the text that gets picked up, not the lines. Is it a bad template? Do I need to preprocess the image first?
And my code:
// Detect keypoints and compute descriptors on both images
Feature2D f2d = ORB.create(5000); // SIFT.create(1000);
MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
Mat descriptors1 = new Mat();
Mat mask1 = new Mat();
f2d.detectAndCompute(img1, mask1, keypoints1, descriptors1);
MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
Mat descriptors2 = new Mat();
Mat mask2 = new Mat();
f2d.detectAndCompute(img2, mask2, keypoints2, descriptors2);
// Brute-force Hamming matching suits ORB's binary descriptors
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
MatOfDMatch matches = new MatOfDMatch();
matcher.match(descriptors1, descriptors2, matches);
// Draw the matches for visual inspection
Mat outputImg = new Mat();
MatOfByte drawnMatches = new MatOfByte();
Features2d.drawMatches(img1, keypoints1, img2, keypoints2, matches, outputImg, new Scalar(0, 255, 0), new Scalar(255, 0, 0), drawnMatches, Features2d.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS);

I could get good results by using a template that contains all the text that never changes in the form. Creating two templates (one per page) and using SIFT instead of ORB also helped a lot.
Here is my solution:
public static Mat matchTemplateSIFT(Mat img1, Mat template, boolean showKeypoints, boolean drawMatches) {
Feature2D f2d = SIFT.create(15000);
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_SL2); // or FLANNBASED for better performance
return matchTemplate(img1, template, f2d, matcher);
}
public static Mat matchTemplate(Mat baseImage, Mat template, Feature2D f2d, DescriptorMatcher matcher) {
int dilateSize = 5;
// dilate both images so the form's line structure outweighs individual glyphs (see dilateBitwise below)
Mat scene = dilateBitwise(dilateSize, baseImage.clone());
template = dilateBitwise(dilateSize, template.clone());
MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
Mat descriptors1 = new Mat();
f2d.detectAndCompute(scene, new Mat(), keypoints1, descriptors1);
MatOfKeyPoint keypoints2 = new MatOfKeyPoint();
Mat descriptors2 = new Mat();
f2d.detectAndCompute(template, new Mat(), keypoints2, descriptors2);
List<MatOfDMatch> matches = new ArrayList<>();
matcher.knnMatch(descriptors1, descriptors2, matches, 2);
MatOfDMatch goodMatches = getBestMatches(matches);
Mat result = transformAndWarp(baseImage, template, keypoints1, keypoints2, goodMatches);
return result;
}
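// NOTE: dilateBitwise is not shown in the original answer. A plausible
// implementation (an assumption, not the author's code) that thickens the
// strokes so line structure dominates keypoint detection:
private static Mat dilateBitwise(int size, Mat img) {
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(size, size));
    Imgproc.dilate(img, img, kernel);
    return img;
}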
private static Mat transformAndWarp(Mat baseImage, Mat template, MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch goodMatches) {
Mat H = findHomographyMatrix(keypoints1, keypoints2, goodMatches);
perspectiveTransform(template, H); // computes transformed corners; the result is not used here
Mat result = new Mat();
// warp the scanned page into the template's coordinate frame, so box positions become fixed
Imgproc.warpPerspective(baseImage, result, H, new Size(template.cols(), template.rows()));
return result;
}
private static void perspectiveTransform(Mat template, Mat H) {
Mat obj_corners = new Mat(4, 1, CvType.CV_32FC2);
// each corner goes into its own row (the original code overwrote row 0 four times)
obj_corners.put(0, 0, new double[]{0, 0});
obj_corners.put(1, 0, new double[]{template.cols(), 0});
obj_corners.put(2, 0, new double[]{template.cols(), template.rows()});
obj_corners.put(3, 0, new double[]{0, template.rows()});
Mat scene_corners = new Mat(4, 1, CvType.CV_32FC2);
Core.perspectiveTransform(obj_corners, scene_corners, H);
}
private static Mat findHomographyMatrix(MatOfKeyPoint keypoints1, MatOfKeyPoint keypoints2, MatOfDMatch goodMatches) {
// keypoints1 come from the scene, keypoints2 from the template (see matchTemplate)
LinkedList<Point> scenePoints = new LinkedList<>();
LinkedList<Point> templatePoints = new LinkedList<>();
List<KeyPoint> sceneKeyPoints = keypoints1.toList();
List<KeyPoint> templateKeyPoints = keypoints2.toList();
List<DMatch> matchList = goodMatches.toList();
for (DMatch match : matchList) {
scenePoints.addLast(sceneKeyPoints.get(match.queryIdx).pt);
templatePoints.addLast(templateKeyPoints.get(match.trainIdx).pt);
}
MatOfPoint2f sceneMat = new MatOfPoint2f();
sceneMat.fromList(scenePoints);
MatOfPoint2f templateMat = new MatOfPoint2f();
templateMat.fromList(templatePoints);
// homography that maps scene coordinates onto template coordinates
return Calib3d.findHomography(sceneMat, templateMat, Calib3d.RANSAC);
}
// https://docs.opencv.org/3.4/d5/d6f/tutorial_feature_flann_matcher.html
private static MatOfDMatch getBestMatches(List<MatOfDMatch> knnMatches) {
//-- Filter matches using the Lowe's ratio test
float ratioThresh = 0.5f;
List<DMatch> listOfGoodMatches = new ArrayList<>();
for (int i = 0; i < knnMatches.size(); i++) {
if (knnMatches.get(i).rows() > 1) {
DMatch[] matches = knnMatches.get(i).toArray();
if (matches[0].distance < ratioThresh * matches[1].distance) {
listOfGoodMatches.add(matches[0]);
}
}
}
MatOfDMatch matOfDMatch = new MatOfDMatch();
matOfDMatch.fromList(listOfGoodMatches);
return matOfDMatch;
}
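With the page warped into the template's frame, every field sits at a known position, so cropping for Tesseract becomes a fixed-rectangle submat. A minimal sketch, assuming scan holds the photographed page and with hypothetical coordinates that would be measured once on the template:

Mat aligned = matchTemplateSIFT(scan, template, false, false);
Rect ownerBox = new Rect(120, 340, 400, 60); // hypothetical box position, measured on the template
Mat crop = new Mat(aligned, ownerBox); // submat view; no pixel copy
Imgcodecs.imwrite("ownerBox.png", crop); // hand this file to Tesseract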

Related

Having problems rendering 3D models on ArUco markers using tvec/rvec

My graduation project team and I are working on a marker-based AR application in which one of the tasks is to draw 3D models on markers. We are using OpenCV to detect the markers and Rajawali to draw the 3D models.
The problem is that the tvec/rvec we get from Aruco.estimatePoseSingleMarkers(...) doesn't map correctly to the location and rotation of the markers, although we can draw the axes accurately.
image of the 3d model on the marker
So we wanted to ask:
Is any processing needed on the tvec/rvec before using them to get the position and rotation?
Are there alternatives for marker detection that are more convenient to use than OpenCV with Rajawali?
What could be the cause for them to be inaccurate?
Code:
marker-detection
public void markerDetection(Mat frame)
{
ids = new Mat();
corners = new ArrayList<>();
Aruco.detectMarkers(frame, markerDictionary, corners, ids);
List<MarkerData> newDetectedMarkers = new ArrayList<>();
if(ids.size().height > 0)
{
rvecs = new Mat();
tvecs = new Mat();
Aruco.estimatePoseSingleMarkers(corners, markerLength, cameraMatrix, distortionCoef, rvecs, tvecs);
for(int i=0; i<ids.size().height; i++)
{
double[] rvecArray = rvecs.get(i, 0), tvecArray = tvecs.get(i, 0);
Mat rvec = new Mat(3, 1, CvType.CV_64FC1), tvec = new Mat(3, 1, CvType.CV_64FC1);
for (int j = 0; j < 3; ++j) {
rvec.put(j, 0, rvecArray[j]);
tvec.put(j, 0, tvecArray[j]);
}
multiply(rvec, new Scalar(180.0 / Math.PI), rvec); // convert radians to degrees
MarkerData newMarker = new MarkerData(rvec, tvec, corners.get(i), (int)ids.get(i, 0)[0]);
newDetectedMarkers.add(newMarker);
}
}
updateDetectedMarkers(newDetectedMarkers); // update the detected markers
}
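Note that the rvec returned by estimatePoseSingleMarkers is a Rodrigues (axis-angle) vector, not a triple of Euler angles, so scaling it to degrees and passing it to setRotation is one likely source of the mismatch. A hedged sketch of the usual first processing step, converting it into a rotation matrix:

// rvec is axis-angle; Rodrigues turns it into a proper 3x3 rotation matrix
Mat rotMat = new Mat(3, 3, CvType.CV_64FC1);
Calib3d.Rodrigues(rvec, rotMat);
// Euler angles, if the renderer needs them, must then be extracted from rotMat
// using the convention the rendering engine expects, not by scaling rvec.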
Rendering
@Override
protected void onRender(long elapsedRealtime, double deltaTime) {
super.onRender(elapsedRealtime, deltaTime);
getCurrentScene().clearChildren();
List<MarkerData> markerData = markerDetector.getDetectedMarkers();
for (MarkerData marker : markerData) {
try {
int id = R.raw.monkey;
LoaderOBJ parser = new LoaderOBJ(mContext.getResources(), mTextureManager, id);
parser.parse();
Object3D object = parser.getParsedObject();
object.setMaterial(defaultMaterial);
object.setScale(0.3);
Mat rvec = marker.getRvec(); // 3x1 Mat
Mat tvec = marker.getTvec(); // 3x1 Mat
object.setRotation(new Vector3(rvec.get(0, 0)[0], rvec.get(1, 0)[0], rvec.get(2, 0)[0]));
object.setPosition(new Vector3(tvec.get(0, 0)[0], tvec.get(1, 0)[0], tvec.get(2, 0)[0]));
getCurrentScene().addChild(object);
} catch (ParsingException e) {
e.printStackTrace();
}
}
}
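A second thing worth checking (an assumption about this setup, not a confirmed fix): OpenCV's camera frame points +y down and +z forward, while OpenGL-style renderers such as Rajawali use +y up and +z toward the viewer, so the translation usually needs a sign flip before being used as a position:

// map an OpenCV camera-frame translation into an OpenGL-style frame
double tx = tvec.get(0, 0)[0];
double ty = -tvec.get(1, 0)[0]; // y axis points the opposite way
double tz = -tvec.get(2, 0)[0]; // z axis points the opposite way
object.setPosition(new Vector3(tx, ty, tz));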

Extract centroids of connected components in EMGU

I have the following piece of code in EMGU to extract connected components:
Mat connected_array = new Mat();
Mat stats = new Mat();
Mat centroids = new Mat();
Mat ImgMat = new Mat();
CvInvoke.ConnectedComponentsWithStats(ImgThresh, connected_array, stats, centroids, LineType.EightConnected, DepthType.Cv32S);
I could not find any way to access the centroids.
EMGU wraps most arrays in Mat objects, which you then need to convert to arrays to access their contents (using mat.CopyTo(array)). This is not straightforward from the documentation - I had to use trial and error to find out how it works:
Mat labels = new Mat();
Mat stats = new Mat();
Mat centroids = new Mat();
MCvPoint2D64f[] centroidPoints;
double x, y;
int n;
n = CvInvoke.ConnectedComponentsWithStats(image, labels, stats, centroids, LineType.EightConnected, DepthType.Cv32S);
centroidPoints = new MCvPoint2D64f[n];
centroids.CopyTo(centroidPoints);
foreach (MCvPoint2D64f point in centroidPoints)
{
x = point.X;
y = point.Y;
}
Another common method is to use contours, similar functionality that EMGU also provides. I have used this for better performance. Here is an example as well:
VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
MCvMoments moments;
double area;
MCvPoint2D64f center;
int n;
CvInvoke.FindContours(image, contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);
n = contours.Size;
for (int i = 0; i < n; i++)
{
area = CvInvoke.ContourArea(contours[i], false);
moments = CvInvoke.Moments(contours[i]);
center = moments.GravityCenter;
}

I was trying to understand how SURF and SIFT work, but I get a fatal error when I execute the code

This is my code, and the line where I get the error is:
featureDetector.detect(objectImage, objectKeyPoints);
I don't know what the error is about, since the report doesn't describe it in any detail.
I am running the code on Ubuntu.
The error I am getting is as follows :
A fatal error has been detected by the Java Runtime Environment:
SIGSEGV (0xb) at pc=0x00007f9b24f3d747, pid=29767,
tid=0x00007f9b438b2700
JRE version: OpenJDK Runtime Environment (8.0_121-b13) (build
1.8.0_121-8u121-b13-0ubuntu1.16.04.2-b13) Java VM: OpenJDK 64-Bit Server VM (25.121-b13 mixed mode linux-amd64 compressed oops)
Problematic frame: C [libopencv_features2d.so.2.4+0x59747]
cv::FeatureDetector::detect(cv::Mat const&, std::vector<cv::KeyPoint>&, cv::Mat const&) const+0x4d7
Failed to write core dump. Core dumps have been disabled. To enable
core dumping, try "ulimit -c unlimited" before starting Java again
An error report file with more information is saved as:
/home/chandansr/MajorProjectParts/MajorProjectPart1/MotionDetection/hs_err_pid29767.log
If you would like to submit a bug report, please visit:
http://bugreport.java.com/bugreport/crash.jsp The crash happened
outside the Java Virtual Machine in native code. See problematic
frame for where to report the bug.
public class SURFDetector {
static{
System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
}
public static void main(String[] args) {
String bookObject = "/home/chandansr/source2.png";
String bookScene = "/home/chandansr/temp3.png";
System.out.println("Started....");
System.out.println("Loading images...");
Mat objectImage = Highgui.imread(bookObject);
Mat sceneImage = Highgui.imread(bookScene);
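// Added guard (not in the original post): imread() returns an empty Mat when a
// path is wrong, and passing empty Mats into native code is a common segfault cause.
if (objectImage.empty() || sceneImage.empty()) {
    System.err.println("Could not load input images; check the paths.");
    return;
}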
MatOfKeyPoint objectKeyPoints = new MatOfKeyPoint();
FeatureDetector featureDetector = FeatureDetector.create(FeatureDetector.SURF);
System.out.println("Detecting key points...");
featureDetector.detect(objectImage, objectKeyPoints);
System.out.println("Detected key points...");
KeyPoint[] keypoints = objectKeyPoints.toArray();
System.out.println(keypoints);
MatOfKeyPoint objectDescriptors = new MatOfKeyPoint();
DescriptorExtractor descriptorExtractor = DescriptorExtractor.create(DescriptorExtractor.SURF);
System.out.println("Computing descriptors...");
descriptorExtractor.compute(objectImage, objectKeyPoints, objectDescriptors);
// Create the matrix for output image.
Mat outputImage = new Mat(objectImage.rows(), objectImage.cols(), CvType.CV_8UC3); // CV_LOAD_IMAGE_COLOR is an imread flag, not a Mat type
Scalar newKeypointColor = new Scalar(255, 0, 0);
System.out.println("Drawing key points on object image...");
Features2d.drawKeypoints(objectImage, objectKeyPoints, outputImage, newKeypointColor, 0);
// Match object image with the scene image
MatOfKeyPoint sceneKeyPoints = new MatOfKeyPoint();
MatOfKeyPoint sceneDescriptors = new MatOfKeyPoint();
System.out.println("Detecting key points in background image...");
featureDetector.detect(sceneImage, sceneKeyPoints);
System.out.println("Computing descriptors in background image...");
descriptorExtractor.compute(sceneImage, sceneKeyPoints, sceneDescriptors);
Mat matchoutput = new Mat(sceneImage.rows() * 2, sceneImage.cols() * 2, CvType.CV_8UC3); // same fix as above
Scalar matchestColor = new Scalar(0, 255, 0);
List<MatOfDMatch> matches = new LinkedList<MatOfDMatch>();
DescriptorMatcher descriptorMatcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
System.out.println("Matching object and scene images...");
descriptorMatcher.knnMatch(objectDescriptors, sceneDescriptors, matches, 2);
System.out.println("Calculating good match list...");
LinkedList<DMatch> goodMatchesList = new LinkedList<DMatch>();
float nndrRatio = 0.7f;
for (int i = 0; i < matches.size(); i++) {
MatOfDMatch matofDMatch = matches.get(i);
DMatch[] dmatcharray = matofDMatch.toArray();
DMatch m1 = dmatcharray[0];
DMatch m2 = dmatcharray[1];
if (m1.distance <= m2.distance * nndrRatio) {
goodMatchesList.addLast(m1);
}
}
if (goodMatchesList.size() >= 7) {
System.out.println("Object Found!!!");
List<KeyPoint> objKeypointlist = objectKeyPoints.toList();
List<KeyPoint> scnKeypointlist = sceneKeyPoints.toList();
LinkedList<Point> objectPoints = new LinkedList<>();
LinkedList<Point> scenePoints = new LinkedList<>();
for (int i = 0; i < goodMatchesList.size(); i++) {
objectPoints.addLast(objKeypointlist.get(goodMatchesList.get(i).queryIdx).pt);
scenePoints.addLast(scnKeypointlist.get(goodMatchesList.get(i).trainIdx).pt);
}
MatOfPoint2f objMatOfPoint2f = new MatOfPoint2f();
objMatOfPoint2f.fromList(objectPoints);
MatOfPoint2f scnMatOfPoint2f = new MatOfPoint2f();
scnMatOfPoint2f.fromList(scenePoints);
Mat homography = Calib3d.findHomography(objMatOfPoint2f, scnMatOfPoint2f, Calib3d.RANSAC, 3);
Mat obj_corners = new Mat(4, 1, CvType.CV_32FC2);
Mat scene_corners = new Mat(4, 1, CvType.CV_32FC2);
obj_corners.put(0, 0, new double[]{0, 0});
obj_corners.put(1, 0, new double[]{objectImage.cols(), 0});
obj_corners.put(2, 0, new double[]{objectImage.cols(), objectImage.rows()});
obj_corners.put(3, 0, new double[]{0, objectImage.rows()});
System.out.println("Transforming object corners to scene corners...");
Core.perspectiveTransform(obj_corners, scene_corners, homography);
Mat img = Highgui.imread(bookScene, Highgui.CV_LOAD_IMAGE_COLOR);
Core.line(img, new Point(scene_corners.get(0, 0)), new Point(scene_corners.get(1, 0)), new Scalar(0, 255, 0), 4);
Core.line(img, new Point(scene_corners.get(1, 0)), new Point(scene_corners.get(2, 0)), new Scalar(0, 255, 0), 4);
Core.line(img, new Point(scene_corners.get(2, 0)), new Point(scene_corners.get(3, 0)), new Scalar(0, 255, 0), 4);
Core.line(img, new Point(scene_corners.get(3, 0)), new Point(scene_corners.get(0, 0)), new Scalar(0, 255, 0), 4);
System.out.println("Drawing matches image...");
MatOfDMatch goodMatches = new MatOfDMatch();
goodMatches.fromList(goodMatchesList);
Features2d.drawMatches(objectImage, objectKeyPoints, sceneImage, sceneKeyPoints, goodMatches, matchoutput, matchestColor, newKeypointColor, new MatOfByte(), 2);
Highgui.imwrite("output//outputImage.jpg", outputImage);
Highgui.imwrite("output//matchoutput.jpg", matchoutput);
Highgui.imwrite("output//img.jpg", img);
} else {
System.out.println("Object Not Found");
}
System.out.println("Ended....");
}
}

Advanced image denoising using OpenCV

I am trying to denoise this image to get better edges.
I've tried bilateralFilter, GaussianBlur, morphological close, and several thresholds, but every time I get an image like:
and when I run HoughLinesP on the dilated edges, the result is really bad.
Can someone help me improve this? Is there some way to remove that noise?
First try: using GaussianBlur. In this case I must use equalizeHist or I can't get edges, even with a really low threshold.
public class TesteNormal {
static {
System.loadLibrary("opencv_java310");
}
public static void main(String args[]) {
Mat imgGrayscale = new Mat();
Mat imgBlurred = new Mat();
Mat imgCanny = new Mat();
Mat image = Imgcodecs.imread("c:\\cordova\\imagens\\teste.jpg", 1);
int imageWidth = image.width();
int imageHeight = image.height();
Imgproc.cvtColor(image, imgGrayscale, Imgproc.COLOR_BGR2GRAY);
Imgproc.equalizeHist(imgGrayscale, imgGrayscale);
Imgproc.GaussianBlur(imgGrayscale, imgBlurred, new Size(5, 5), 1.8);
Photo.fastNlMeansDenoising(imgBlurred, imgBlurred);
Imshow.show(imgBlurred);
Mat imgKernel = Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3));
Imgproc.Canny(imgBlurred, imgCanny, 0, 80);
Imshow.show(imgCanny);
Imgproc.dilate(imgCanny, imgCanny, imgKernel, new Point(-1, -1), 2);
Imgproc.erode(imgCanny, imgCanny, imgKernel, new Point(-1, -1), 1);
Imshow.show(imgCanny);
Mat lines = new Mat();
int threshold = 100;
int minLineSize = imageWidth < imageHeight ? imageWidth / 3 : imageHeight / 3;
int lineGap = 5;
Imgproc.HoughLinesP(imgCanny, lines, 1, Math.PI / 360, threshold, minLineSize, lineGap);
System.out.println(lines.rows());
for(int x = 0; x < lines.rows(); x++) {
double[] vec = lines.get(x, 0);
double x1 = vec[0], y1 = vec[1], x2 = vec[2], y2 = vec[3];
Point start = new Point(x1, y1);
Point end = new Point(x2, y2);
Imgproc.line(image, start, end, new Scalar(255, 0, 0), 1);
}
Imshow.show(image);
}
}
Second try: using bilateral filter:
public class TesteNormal {
static {
System.loadLibrary("opencv_java310");
}
public static void main(String args[]) {
Mat imgBlurred = new Mat();
Mat imgCanny = new Mat();
Mat image = Imgcodecs.imread("c:\\cordova\\imagens\\teste.jpg", 1);
int imageWidth = image.width();
int imageHeight = image.height();
Imgproc.bilateralFilter(image, imgBlurred, 10, 35, 35);
Imshow.show(imgBlurred);
Mat imgKernel = Imgproc.getStructuringElement(Imgproc.MORPH_CROSS, new Size(3, 3));
Imgproc.Canny(imgBlurred, imgCanny, 0, 120);
Imshow.show(imgCanny);
Imgproc.dilate(imgCanny, imgCanny, imgKernel, new Point(-1, -1), 2);
Imgproc.erode(imgCanny, imgCanny, imgKernel, new Point(-1, -1), 1);
Imshow.show(imgCanny);
Mat lines = new Mat();
int threshold = 100;
int minLineSize = imageWidth < imageHeight ? imageWidth / 3 : imageHeight / 3;
int lineGap = 5;
Imgproc.HoughLinesP(imgCanny, lines, 1, Math.PI / 360, threshold, minLineSize, lineGap);
System.out.println(lines.rows());
for(int x = 0; x < lines.rows(); x++) {
double[] vec = lines.get(x, 0);
double x1 = vec[0], y1 = vec[1], x2 = vec[2], y2 = vec[3];
Point start = new Point(x1, y1);
Point end = new Point(x2, y2);
Imgproc.line(image, start, end, new Scalar(255, 0, 0), 1);
}
Imshow.show(image);
}
}
As suggested, I am trying to use OpenCV contrib's StructuredEdgeDetection. I am testing with a fixed image.
First, I compiled OpenCV with contrib.
Second, I wrote the C++ code:
JNIEXPORT jobject JNICALL Java_vi_pdfscanner_main_ScannerEngine_getRandomFlorest(JNIEnv *env, jobject thiz, jobject bitmap) { // bitmap parameter added; it is used below and declared in the Java native method
Mat mbgra = imread("/storage/emulated/0/Resp/coco.jpg", 1);
Mat3f fsrc;
mbgra.convertTo(fsrc, CV_32F, 1.0 / 255.0); // when I run this convertTo, I get an all-black image, and then no edges
const String model = "/storage/emulated/0/Resp/model.yml.gz";
Ptr<cv::ximgproc::StructuredEdgeDetection> pDollar = cv::ximgproc::createStructuredEdgeDetection(model);
Mat edges;
__android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "chamando edges");
pDollar->detectEdges(fsrc, edges);
imwrite( "/storage/emulated/0/Resp/edges.jpg", edges);
jclass java_bitmap_class = (jclass)env->FindClass("android/graphics/Bitmap");
jmethodID mid = env->GetMethodID(java_bitmap_class, "getConfig", "()Landroid/graphics/Bitmap$Config;");
jobject bitmap_config = env->CallObjectMethod(bitmap, mid);
jobject _bitmap = mat_to_bitmap(env,edges,false,bitmap_config);
return _bitmap;
}
and I wrote this Java wrapper:
public class ScannerEngine {
private static ScannerEngine ourInstance = new ScannerEngine();
public static ScannerEngine getInstance() {
return ourInstance;
}
private ScannerEngine() {
}
public native Bitmap getRandomFlorest(Bitmap bitmap);
static {
System.loadLibrary("opencv_java3");
System.loadLibrary("Scanner");
}
}
The point is, when I run these lines:
Mat mbgra = imread("/storage/emulated/0/Resp/coco.jpg", 1); // image is OK
Mat3f fsrc;
mbgra.convertTo(fsrc, CV_32F, 1.0 / 255.0); // now the image is all black; does someone have an idea why?
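// A likely explanation (an assumption, not verified on this device): after
// convertTo, fsrc is CV_32F with values in [0, 1], while viewers and imwrite()
// expect 8-bit data in 0-255, so the float image merely looks black. Scaling
// back to 8-bit before saving or displaying shows the real content:
Mat view8u;
fsrc.convertTo(view8u, CV_8U, 255.0);
imwrite("/storage/emulated/0/Resp/fsrc_check.jpg", view8u);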
Thanks very much!
The results so far look like this:
Original image:
http://prntscr.com/cyd8qi
Edges image:
http://prntscr.com/cyd9ax
It runs on Android 4.4 (API level 19) on a really old device.
That's all, thank you very much.

How to detect multiple occurrences of the same object in an image using feature detection

Is there any way to detect multiple occurrences of the same object in an image using feature detection?
Earlier I tried Template Matching and was able to detect the multiple occurrences, but I later dropped it because of its dependency on image orientation and size.
Single occurrence of object
Multiple occurrences
Question about template matching
using UnityEngine;
using System.Collections;
using System.Collections.Generic;
using System.Collections.Specialized;
using OpenCVForUnity;
namespace OpenCVForUnitySample
{
public class Feature2DSample : MonoBehaviour
{
void Start ()
{
Texture2D imgTemplate = Resources.Load ("lena") as Texture2D;
Texture2D imgTexture = Resources.Load ("yVLsd_") as Texture2D;
Mat matSrc = new Mat (imgTemplate.height, imgTemplate.width, CvType.CV_8UC3);
Utils.texture2DToMat (imgTemplate, matSrc);
Debug.Log ("img1Mat dst ToString " + matSrc.ToString ());
Mat matScene = new Mat (imgTexture.height, imgTexture.width, CvType.CV_8UC3);
Utils.texture2DToMat (imgTexture, matScene);
Debug.Log ("img2Mat dst ToString " + matScene.ToString ());
FeatureDetector detector = FeatureDetector.create (FeatureDetector.ORB);
DescriptorExtractor extractor = DescriptorExtractor.create (DescriptorExtractor.ORB);
MatOfKeyPoint keypointsSrc = new MatOfKeyPoint ();
Mat descriptorsSrc = new Mat ();
detector.detect (matSrc, keypointsSrc);
extractor.compute (matSrc, keypointsSrc, descriptorsSrc);
MatOfKeyPoint keypointsScene = new MatOfKeyPoint ();
Mat descriptorsScene = new Mat ();
detector.detect (matScene, keypointsScene);
extractor.compute (matScene, keypointsScene, descriptorsScene);
DescriptorMatcher matcher = DescriptorMatcher.create (DescriptorMatcher.BRUTEFORCE_HAMMINGLUT);
MatOfDMatch matches = new MatOfDMatch ();
matcher.match (descriptorsSrc, descriptorsScene, matches);
//NEW CODE
List<DMatch> matchesList = matches.toList ();
//-- Quick calculation of max and min distances between keypoints
double max_dist = 0;
double min_dist = 100;
for (int i = 0; i < descriptorsSrc.rows (); i++) {
double dist = (double)matchesList [i].distance;
if (dist < min_dist)
min_dist = dist;
if (dist > max_dist)
max_dist = dist;
}
List<DMatch> good_matches = new List<DMatch> ();
for (int i = 0; i < descriptorsSrc.rows (); i++) {
if (matchesList [i].distance < 3 * min_dist) {
good_matches.Add (matchesList [i]);
}
}
MatOfDMatch gm = new MatOfDMatch ();
gm.fromList (good_matches);
List<Point> objList = new List<Point> ();
List<Point> sceneList = new List<Point> ();
List<KeyPoint> keypoints_objectList = keypointsSrc.toList ();
List<KeyPoint> keypoints_sceneList = keypointsScene.toList ();
for (int i = 0; i < good_matches.Count; i++) {
objList.Add (keypoints_objectList [good_matches [i].queryIdx].pt);
sceneList.Add (keypoints_sceneList [good_matches [i].trainIdx].pt);
}
MatOfPoint2f obj = new MatOfPoint2f ();
MatOfPoint2f scene = new MatOfPoint2f ();
obj.fromList (objList);
scene.fromList (sceneList);
Mat H = Calib3d.findHomography (obj, scene);
Mat warpimg = matSrc.clone ();
Mat srcRectMat = new Mat (4, 1, CvType.CV_32FC2);
Mat dstRectMat = new Mat (4, 1, CvType.CV_32FC2);
Point[] obj_corners = new Point[4];
obj_corners [0] = new Point (0, 0);
obj_corners [1] = new Point ((double)matSrc.cols (), 0);
obj_corners [2] = new Point (matSrc.cols (), matSrc.rows ());
obj_corners [3] = new Point (0, matSrc.rows ());
Point[] scene_corners = new Point [4];
MatOfPoint2f srcPointMat = new MatOfPoint2f (obj_corners);
MatOfPoint2f dstPointMat = new MatOfPoint2f ();
Core.perspectiveTransform (srcPointMat, dstPointMat, H);
scene_corners = dstPointMat.toArray ();
Mat resultImg = new Mat ();
Features2d.drawMatches (matSrc, keypointsSrc, matScene, keypointsScene, matches, resultImg);
Core.line (resultImg, AddPoints (scene_corners [0], new Point ((double)matSrc.cols (), 0)), AddPoints (scene_corners [1], new Point ((double)matSrc.cols (), 0)), new Scalar (0, 255, 0), 4);
Core.line (resultImg, AddPoints (scene_corners [1], new Point ((double)matSrc.cols (), 0)), AddPoints (scene_corners [2], new Point ((double)matSrc.cols (), 0)), new Scalar (0, 255, 0), 4);
Core.line (resultImg, AddPoints (scene_corners [2], new Point ((double)matSrc.cols (), 0)), AddPoints (scene_corners [3], new Point ((double)matSrc.cols (), 0)), new Scalar (0, 255, 0), 4);
Core.line (resultImg, AddPoints (scene_corners [3], new Point ((double)matSrc.cols (), 0)), AddPoints (scene_corners [0], new Point ((double)matSrc.cols (), 0)), new Scalar (0, 255, 0), 4);
Texture2D texture = new Texture2D (resultImg.cols (), resultImg.rows (), TextureFormat.RGBA32, false);
Utils.matToTexture2D (resultImg, texture);
gameObject.GetComponent<Renderer> ().material.mainTexture = texture;
}
Point AddPoints (Point p1, Point p2)
{
return new Point (p1.x + p2.x, p1.y + p2.y);
}
}
}
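One common way to extend this single-homography pipeline to multiple instances is to find one RANSAC-consistent homography, discard its inlier matches, and repeat until too few matches remain. A minimal sketch in plain OpenCV Java (the code above is OpenCVForUnity C#, but the calls map almost one-to-one); the minimum-match threshold is an assumption:

// Peel off one RANSAC-consistent instance of the object at a time.
static List<Mat> findAllHomographies(List<Point> objPts, List<Point> scenePts) {
    List<Mat> found = new ArrayList<>();
    List<Point> obj = new ArrayList<>(objPts), scene = new ArrayList<>(scenePts);
    while (obj.size() >= 10) { // assumed minimum number of correspondences
        MatOfPoint2f o = new MatOfPoint2f(); o.fromList(obj);
        MatOfPoint2f s = new MatOfPoint2f(); s.fromList(scene);
        Mat inliers = new Mat();
        Mat H = Calib3d.findHomography(o, s, Calib3d.RANSAC, 3, inliers);
        if (H.empty() || Core.countNonZero(inliers) < 10) break;
        found.add(H);
        // drop this instance's inliers so the next iteration can find another copy
        List<Point> nextObj = new ArrayList<>(), nextScene = new ArrayList<>();
        for (int i = 0; i < inliers.rows(); i++) {
            if (inliers.get(i, 0)[0] == 0) { nextObj.add(obj.get(i)); nextScene.add(scene.get(i)); }
        }
        obj = nextObj; scene = nextScene;
    }
    return found;
}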
