Corner Detection - opencv

I am new to OpenCV. I am trying to detect 90-degree corners in a fairly simple image: I need to detect the corners of the rectangle that surrounds the object. I am using the Shi-Tomasi feature detector. The following is my code:
import cv2
import numpy as np

for x in range(0, 50):
    ret, frame = cap.read()
    # make the image grayscale
    im = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # find corners
    corners = cv2.goodFeaturesToTrack(im, 1, 0.01, 10)
    corners = np.int0(corners)
    for i in corners:
        x, y = i.ravel()
        cv2.circle(frame, (x, y), 3, 255, -1)
    cv2.imwrite("DetectedCorners.png", frame)
Problem: corners inside the object are always detected. I need a method that completely ignores that object and then detects only the corners of the surrounding rectangle, but I don't know how to remove the object.
Any suggestions? The photo shows my result: sometimes the corners of the surrounding rectangle are detected, sometimes some random points inside that complex object.
I also applied Canny before detecting corners, but the result was ten times worse.

Well, here is a quick and dirty C++ solution, just as a proof of concept: use the Hough transform to detect lines, then compute their intersections.
You can port the code to Python if needed.
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
    Mat3b img = imread("path_to_image");

    // Convert to grayscale
    Mat1b gray;
    cvtColor(img, gray, COLOR_BGR2GRAY);

    // Compute edges
    Mat1b edges;
    Canny(gray, edges, 400, 100);

    // Create the output result image
    Mat3b res;
    cvtColor(edges, res, COLOR_GRAY2BGR);

    // Call Hough
    vector<Vec2f> lines;
    HoughLines(edges, lines, 1, CV_PI / 180, 200, 0, 0);

    vector<pair<Point, Point>> pts;
    vector<Point> intersections;

    for (size_t i = 0; i < lines.size(); i++)
    {
        float rho = lines[i][0], theta = lines[i][1];

        // Get 2 points on each line
        Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a * rho, y0 = b * rho;
        pt1.x = cvRound(x0 + 1000 * (-b));
        pt1.y = cvRound(y0 + 1000 * (a));
        pt2.x = cvRound(x0 - 1000 * (-b));
        pt2.y = cvRound(y0 - 1000 * (a));

        // Save the pair of points
        pts.push_back(make_pair(pt1, pt2));

        // Draw lines
        line(res, pt1, pt2, Scalar(0, 0, 255), 3, CV_AA);
    }

    // For each couple of lines
    for (int i = 0; i < (int)pts.size() - 1; ++i)
    {
        // Get the two points of the first line
        const Point& p1 = pts[i].first;
        const Point& p2 = pts[i].second;

        for (int j = i + 1; j < (int)pts.size(); ++j)
        {
            // Get the two points of the second line
            const Point& p3 = pts[j].first;
            const Point& p4 = pts[j].second;

            // Compute intersection
            Point p;
            float den = (p1.x - p2.x) * (p3.y - p4.y) - (p1.y - p2.y) * (p3.x - p4.x);
            if (den != 0) // if not parallel lines
            {
                p.x = ((p1.x * p2.y - p1.y * p2.x) * (p3.x - p4.x) - (p1.x - p2.x) * (p3.x * p4.y - p3.y * p4.x)) / den;
                p.y = ((p1.x * p2.y - p1.y * p2.x) * (p3.y - p4.y) - (p1.y - p2.y) * (p3.x * p4.y - p3.y * p4.x)) / den;

                // Draw intersection
                circle(res, p, 7, Scalar(0, 255, 0), 2);

                // Save intersection
                intersections.push_back(p);
            }
        }
    }

    return 0;
}
Result:

Related

Applying a perspective transform to correct the orientation of a sheet of paper in an image

I am working on a project which will help us correct the orientation of an image.
In this code I am detecting a sheet of paper. These are the steps I used:
1. Apply the Hough line transform.
2. Detect corners.
3. Apply a perspective transform.
With all this I am able to detect the sheet of paper, but it only works for one or two images; it does not work on all images and I do not understand why.
I think the problem in this code is that it cannot detect the corners correctly, which is why I am not able to correct the perspective of the image.
It works on this image, but when I use some other image instead, it fails.
#include <cv.h>
#include <highgui.h>

using namespace std;
using namespace cv;

Point2f center(0,0);

Point2f computeIntersect(Vec4i a, Vec4i b)
{
    int x1 = a[0], y1 = a[1], x2 = a[2], y2 = a[3], x3 = b[0], y3 = b[1], x4 = b[2], y4 = b[3];
    float denom;
    if (float d = ((float)(x1 - x2) * (y3 - y4)) - ((y1 - y2) * (x3 - x4)))
    {
        Point2f pt;
        pt.x = ((x1 * y2 - y1 * x2) * (x3 - x4) - (x1 - x2) * (x3 * y4 - y3 * x4)) / d;
        pt.y = ((x1 * y2 - y1 * x2) * (y3 - y4) - (y1 - y2) * (x3 * y4 - y3 * x4)) / d;
        return pt;
    }
    else
        return Point2f(-1, -1);
}

void sortCorners(vector<Point2f>& corners, Point2f center)
{
    vector<Point2f> top, bot;
    for (int i = 0; i < corners.size(); i++)
    {
        if (corners[i].y < center.y)
            top.push_back(corners[i]);
        else
            bot.push_back(corners[i]);
    }
    corners.clear();
    if (top.size() == 2 && bot.size() == 2)
    {
        Point2f tl = top[0].x > top[1].x ? top[1] : top[0];
        Point2f tr = top[0].x > top[1].x ? top[0] : top[1];
        Point2f bl = bot[0].x > bot[1].x ? bot[1] : bot[0];
        Point2f br = bot[0].x > bot[1].x ? bot[0] : bot[1];
        corners.push_back(tl);
        corners.push_back(tr);
        corners.push_back(br);
        corners.push_back(bl);
    }
}

int main()
{
    Mat src, cann, hsv;
    src = imread("C:\\im.jpg", WINDOW_AUTOSIZE);
    if (src.empty())
        return -1;
    imshow("original", src);
    blur(src, src, Size(3, 3));
    Canny(src, cann, 50, 200, 3);
    cvtColor(cann, hsv, CV_GRAY2BGR);

    vector<Vec4i> lines;
    HoughLinesP(cann, lines, 1, CV_PI/180, 70, 30, 10);
    for (size_t i = 0; i < lines.size(); i++)
    {
        Vec4i l = lines[i];
        line(hsv, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 2, CV_AA);
    }

    // Expand the lines
    for (int i = 0; i < lines.size(); i++)
    {
        Vec4i v = lines[i];
        lines[i][0] = 0;
        lines[i][1] = ((float)v[1] - v[3]) / (v[0] - v[2]) * -v[0] + v[1];
        lines[i][2] = src.cols;
        lines[i][3] = ((float)v[1] - v[3]) / (v[0] - v[2]) * (src.cols - v[2]) + v[3];
    }

    vector<Point2f> corners;
    for (int i = 0; i < lines.size(); i++)
    {
        for (int j = i+1; j < lines.size(); j++)
        {
            Point2f pt = computeIntersect(lines[i], lines[j]);
            if (pt.x >= 0 && pt.y >= 0)
                corners.push_back(pt);
        }
    }

    vector<Point2f> approx;
    approxPolyDP(Mat(corners), approx, arcLength(Mat(corners), true) * 0.02, true);
    //if (approx.size() != 4)
    //{
    //    cout << "The object is not quadrilateral!" << endl;
    //    return -1;
    //}

    // Get mass center
    for (int i = 0; i < corners.size(); i++)
        center += corners[i];
    center *= (1. / corners.size());

    sortCorners(corners, center);
    if (corners.size() == 0)
    {
        cout << "The corners were not sorted correctly!" << endl;
        return -1;
    }

    Mat dst = src.clone();

    // Draw lines
    for (int i = 0; i < lines.size(); i++)
    {
        Vec4i v = lines[i];
        line(dst, Point(v[0], v[1]), Point(v[2], v[3]), CV_RGB(0,255,0));
    }

    // Draw corner points
    circle(dst, corners[0], 3, CV_RGB(255,0,0), 2);
    circle(dst, corners[1], 3, CV_RGB(0,255,0), 2);
    circle(dst, corners[2], 3, CV_RGB(0,0,255), 2);
    circle(dst, corners[3], 3, CV_RGB(255,255,255), 2);

    // Draw mass center
    circle(dst, center, 3, CV_RGB(255,255,0), 2);

    Mat quad = Mat::zeros(300, 220, CV_8UC3);
    vector<Point2f> quad_pts;
    quad_pts.push_back(Point2f(0, 0));
    quad_pts.push_back(Point2f(quad.cols, 0));
    quad_pts.push_back(Point2f(quad.cols, quad.rows));
    quad_pts.push_back(Point2f(0, quad.rows));

    Mat transmtx = getPerspectiveTransform(corners, quad_pts);
    warpPerspective(src, quad, transmtx, quad.size());

    imshow("blurr", src);
    imshow("canney", cann);
    imshow("hough", hsv);
    imshow("image", dst);
    imshow("quadrilateral", quad);

    waitKey(0);
    return 0;
}
Please help me with this; I am really stuck.
Your algorithm assumes that the HoughLinesP function will always detect only 4 lines and that each one will lie on a different edge of the paper. However, this assumption is wrong. In your particular case, it returns 5 lines when you work on the second image. Click to see the detected lines (marked by non-gray colours).
Quick fix
I changed the value of the HoughLinesP threshold parameter to 70. After that, only four lines were detected in the image, but another bug surfaced: 5 corners were detected instead of 4. The reason? Two of the opposite edges were not parallel, so they intersected far outside the image area. This condition was causing the problem:
if (pt.x >= 0 && pt.y >= 0)
    corners.push_back(pt);
It is not enough to check whether the corner coordinates are non-negative. Instead, you have to make sure that the corners are within boundaries that make sense; in your case, these could be the boundaries of the image.
if (pt.x >= 0 && pt.y >= 0 && pt.x < src.cols && pt.y < src.rows)
    corners.push_back(pt);
After changing the threshold and fixing the condition, I obtained this result (click to see the image):
Warning
As you can see, yet another problem surfaced: the corners are not detected as accurately as they could be. You can use the information provided by the Canny edges to your advantage here, but I do not want to venture out of the scope of your question, so I'll stop.
I called my solution a "quick fix" because it only solves this particular case. If you want a more general solution and want to keep using your algorithm, you will have to compute a reasonable threshold estimate every time before you call HoughLinesP. A rough sketch of one way to do that follows.
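Purely as an illustration of that idea (this helper, its name, and its step size are my own assumptions, not part of the answer): start with a high threshold and lower it until HoughLinesP returns at least the four line segments a sheet of paper needs.
// Hypothetical helper: lower the HoughLinesP threshold until at least
// `wanted` line segments are found (or give up at `minThreshold`).
#include <opencv2/opencv.hpp>
#include <vector>

std::vector<cv::Vec4i> findLinesAdaptive(const cv::Mat& edges, int wanted = 4,
                                         int startThreshold = 150, int minThreshold = 30)
{
    std::vector<cv::Vec4i> lines;
    for (int thresh = startThreshold; thresh >= minThreshold; thresh -= 10)
    {
        lines.clear();
        cv::HoughLinesP(edges, lines, 1, CV_PI / 180, thresh, 30, 10);
        if ((int)lines.size() >= wanted)
            break; // accept the first threshold that yields enough lines
    }
    return lines;
}
The returned segments would then feed the same expansion and intersection steps as in the question.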

How to find a more accurate ellipse based on the currently detected ellipse

I fitted an ellipse based on the edges of the extracted red ball, but it is not accurate.
I extracted this red ball based on the HSV color space, but it always misses the contour of the ball (perhaps because the color of the contour is much darker).
Any good ideas on how to fit a better ellipse for this ball? I want to find an ellipse that embraces the red ball as accurately as possible.
It would be better if I can use existing OpenCV functions.
I have fixed this problem. It is still unstable, but most of the time it works.
Source image: all of these images can be detected: https://www.dropbox.com/sh/daerty94kv5k2n7/AABu9Axewe6mL0NdEX2nG5MIa?dl=0
Fit an ellipse based on color.
Re-fit the ellipse based on color and edges.
Video link: https://www.youtube.com/watch?v=q0TQYREm9uA
Here is the source code:
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
    cv::Mat capturedImage = imread(argv[1]);
    if (capturedImage.empty())
    {
        cout << "Couldn't open image " << argv[1] << "\nUsage: fitellipse <image_name>\n";
        return 0;
    }

    /*========== Phase 1: Translate Color Space from RGB to HSV ==========*/
    cv::Mat imgHSV;
    cv::cvtColor(capturedImage, imgHSV, cv::COLOR_BGR2HSV); // Convert the captured frame from BGR to HSV
    cv::Mat imgGray;
    cv::cvtColor(capturedImage, imgGray, CV_RGB2GRAY);
    cv::Mat imgThresholded;
    cv::inRange(imgHSV, cv::Scalar(160, 80, 70), cv::Scalar(179, 255, 255), imgThresholded); // Threshold the image

    // morphological opening
    cv::erode(imgThresholded, imgThresholded, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7)));
    cv::dilate(imgThresholded, imgThresholded, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7)));
    // morphological closing (removes small holes from the foreground)
    cv::dilate(imgThresholded, imgThresholded, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7)));
    cv::erode(imgThresholded, imgThresholded, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(7, 7)));

    namedWindow("imgThresholded", WINDOW_NORMAL);
    imshow("imgThresholded", imgThresholded);

    /*========== Phase 2: Fit a coarse ellipse based on red color ==========*/
    vector<vector<cv::Point> > contours;
    cv::findContours(imgThresholded, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cv::Point(0,0));

    size_t index = 0;
    size_t largestSize = 0;
    for (size_t i = 0; i < contours.size(); i++)
    {
        if (contours[i].size() > largestSize)
        {
            largestSize = contours[i].size();
            index = i;
        }
    }
    if (contours[index].size() < 6)
    {
        cout << "Do not have enough points" << endl;
        return -1;
    }

    cv::Mat imgContour;
    cv::Mat(contours[index]).convertTo(imgContour, CV_32F);
    cv::RotatedRect coarseEllipse = cv::fitEllipse(imgContour);

    cv::Mat capturedImageClone = capturedImage.clone();
    ellipse(capturedImageClone, coarseEllipse.center, coarseEllipse.size*0.5f, coarseEllipse.angle, 0.0, 360.0, cv::Scalar(0,255,255), 3, CV_AA);
    namedWindow("capturedImageClone", CV_WINDOW_NORMAL);
    imshow("capturedImageClone", capturedImageClone);

    /*========== Phase 3: Re-fit a final ellipse based on a combination of color and edges ==========*/
    double cxc = coarseEllipse.center.x;
    double cyc = coarseEllipse.center.y;
    double ca = coarseEllipse.size.height/2;
    double cb = coarseEllipse.size.width/2;

    cv::Mat mask(capturedImage.rows, capturedImage.cols, CV_8UC3, cv::Scalar(0,0,0));
    cv::circle(mask, cv::Point(coarseEllipse.center.x, coarseEllipse.center.y), coarseEllipse.size.height/2 + 100, cv::Scalar(255,255,255), -1);

    cv::Mat imgMask;
    cv::Mat edges;
    cv::bitwise_and(capturedImage, mask, imgMask);
    namedWindow("imgMask", CV_WINDOW_NORMAL);
    imshow("imgMask", imgMask);

    cv::GaussianBlur(imgMask, edges, cv::Size(5,5), 0);
    cv::Canny(edges, edges, 50, 100);
    namedWindow("edges", CV_WINDOW_NORMAL);
    imshow("edges", edges);

    cv::findContours(edges, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cv::Point(0,0));

    index = -1;
    double centerDistance = (numeric_limits<double>::max)();
    double abRatio = (numeric_limits<double>::max)();
    cv::RotatedRect finalEllipse;
    for (size_t i = 0; i < contours.size(); i++)
    {
        if (contours[i].size() < 500 || i == contours.size() - 1 || i == contours.size() - 2)
            continue;
        cv::Mat(contours[i]).convertTo(imgContour, CV_32F);
        cv::RotatedRect tmpEllipse = cv::fitEllipse(imgContour);
        double txc = tmpEllipse.center.x;
        double tyc = tmpEllipse.center.y;
        double ta = tmpEllipse.size.height/2;
        double tb = tmpEllipse.size.width/2;
        double tmpDis = (cxc - txc) * (cxc - txc) + (cyc - tyc) * (cyc - tyc);
        if (tmpDis < centerDistance && fabs(tb/ta - 1) < abRatio && ta > ca && tb > cb)
        {
            centerDistance = tmpDis;
            abRatio = fabs(tb/ta - 1);
            index = i;
            finalEllipse = tmpEllipse;
        }
    }
    if (index == -1)
        finalEllipse = coarseEllipse;

    ellipse(capturedImage, finalEllipse.center, finalEllipse.size*0.5f, finalEllipse.angle, 0.0, 360.0, cv::Scalar(0,255,255), 3, CV_AA);

    double xc = finalEllipse.center.x;                  // center x
    double yc = finalEllipse.center.y;                  // center y
    double theta = finalEllipse.angle * CV_PI / 180.0;  // rotation angle theta (fitEllipse returns degrees; sin/cos need radians)
    double a = finalEllipse.size.height / 2;            // semi-major axis: a
    double b = finalEllipse.size.width / 2;             // semi-minor axis: b

    double A = a * a * sin(theta) * sin(theta) + b * b * cos(theta) * cos(theta);
    double B = 2 * (b * b - a * a) * sin(theta) * cos(theta);
    double C = a * a * cos(theta) * cos(theta) + b * b * sin(theta) * sin(theta);
    double D = -2 * A * xc - B * yc;
    double E = -B * xc - 2 * C * yc;
    double F = A * xc * xc + B * xc * yc + C * yc * yc - a * a * b * b;
    A = A/F;
    B = B/F;
    C = C/F;
    D = D/F;
    E = E/F;
    F = F/F;

    double ellipseArray[3][3] = {{A,   B/2, D/2},
                                 {B/2, C,   E/2},
                                 {D/2, E/2, F}};
    cv::Mat ellipseMatrix(3, 3, CV_64FC1, ellipseArray);
    cout << ellipseMatrix << endl;

    namedWindow("capturedImage", CV_WINDOW_NORMAL);
    imshow("capturedImage", capturedImage);
    imwrite(argv[2], capturedImage);
    imwrite(argv[3], edges);
    imwrite(argv[4], capturedImageClone);
    imwrite(argv[5], imgMask);

    waitKey(0);
    return 0;
}

openCV triangulatePoints

First of all, thanks for reading.
I have an issue generating a point cloud with PCL from the information provided by OpenCV functions.
I am using two images in which the detector found several keypoints.
Then I compute the matches and calculate the fundamental matrix with the RANSAC algorithm.
Then I drew the points in each image to check the related points, and I have several points that match well.
Now I am trying to generate the point cloud to reproject those points, because the next step is building a bigger point cloud from more than two images, i.e. a 3D reconstruction from 2D information.
My problem is that I cannot fill the cloud properly: the points end up in weird positions and all of them seem very close together. Is there something wrong with the code I am using?
Below are the functions and matrices I am using.
Calling the triangulate function:
TriangulatePoints(keypoints1, keypoints2, K.t(), P, P1, pointCloud)
Populating the cloud:
PopulatePCLPointCloud(pointCloud);
The populate function:
void PopulatePCLPointCloud(const vector<Point3d>& pointcloud) // Populate point cloud
{
    cout << "Creating point cloud...";
    cloud.reset(new pcl::PointCloud<pcl::PointXYZRGB>);
    for (unsigned int i = 0; i < pointcloud.size(); i++)
    {
        // get the RGB color value for the point
        Vec3b rgbv(255, 255, 0);
        // check for erroneous coordinates (NaN, Inf, etc.)
        if (pointcloud[i].x != pointcloud[i].x || _isnan(pointcloud[i].x) ||
            pointcloud[i].y != pointcloud[i].y || _isnan(pointcloud[i].y) ||
            pointcloud[i].z != pointcloud[i].z || _isnan(pointcloud[i].z) ||
            fabsf(pointcloud[i].x) > 10.0 || fabsf(pointcloud[i].y) > 10.0 || fabsf(pointcloud[i].z) > 10.0)
        {
            continue;
        }
        pcl::PointXYZRGB pclp;
        // 3D coordinates
        pclp.x = pointcloud[i].x;
        pclp.y = pointcloud[i].y;
        pclp.z = pointcloud[i].z;
        // RGB color, needs to be represented as an integer packed into a float
        uint32_t rgb = ((uint32_t)rgbv[2] << 16 | (uint32_t)rgbv[1] << 8 | (uint32_t)rgbv[0]);
        pclp.rgb = *reinterpret_cast<float*>(&rgb);
        cloud->push_back(pclp);
    }
    cloud->width = (uint32_t)cloud->points.size(); // number of points
    cloud->height = 1;                             // a list of points, one row of data
}
The function that fills the cloud with the 3D points (I commented out the reproj_error part because I copied this code from Mastering OpenCV but it did not work):
double TriangulatePoints(const vector<KeyPoint>& pt_set1, const vector<KeyPoint>& pt_set2, const Mat& Kinv,
                         const Matx34d& P, const Matx34d& P1, vector<Point3d>& pointcloud)
{
    vector<double> reproj_error;
    for (unsigned int i = 0; i < min(pt_set1.size(), pt_set2.size()); i++)
    {
        // convert to normalized homogeneous coordinates
        Point2f kp = pt_set1[i].pt;
        Point3d u(kp.x, kp.y, 1.0);
        Mat_<double> um = Kinv * Mat_<double>(u);
        u = (Point3d)um(0, 0);
        Point2f kp1 = pt_set2[i].pt;
        Point3d u1(kp1.x, kp1.y, 1.0);
        Mat_<double> um1 = Kinv * Mat_<double>(u1);
        u1 = (Point3d)um1(0, 0);

        // triangulate
        Mat_<double> X = LinearLSTriangulation(u, P, u1, P1);

        /*Mat_<double> xPt_img = Kinv.t() * Mat(P1) * X;
        Point2f xPt_img_(xPt_img(0)/xPt_img(2), xPt_img(1)/xPt_img(2));
        // calculate reprojection error
        reproj_error.push_back(norm(xPt_img_ - kp1)); // store 3D point */

        // fill the point cloud
        pointcloud.push_back(Point3d(X(0), X(1), X(2)));
    }
    // return mean reprojection error
    /*Scalar me = mean(reproj_error);
    return me[0]; */
    return 0;
}
Linear Triangulation:
Mat_<double> LinearLSTriangulation(Point3d u,   // homogeneous image point (u,v,1)
                                   Matx34d P,   // camera 1 matrix
                                   Point3d u1,  // homogeneous image point in 2nd camera
                                   Matx34d P1   // camera 2 matrix
                                   )
{
    // build A matrix
    Matx43d A(u.x*P(2, 0) - P(0, 0),   u.x*P(2, 1) - P(0, 1),   u.x*P(2, 2) - P(0, 2),
              u.y*P(2, 0) - P(1, 0),   u.y*P(2, 1) - P(1, 1),   u.y*P(2, 2) - P(1, 2),
              u1.x*P1(2, 0) - P1(0, 0), u1.x*P1(2, 1) - P1(0, 1), u1.x*P1(2, 2) - P1(0, 2),
              u1.y*P1(2, 0) - P1(1, 0), u1.y*P1(2, 1) - P1(1, 1), u1.y*P1(2, 2) - P1(1, 2));
    // build B vector
    Matx41d B(-(u.x*P(2, 3) - P(0, 3)),
              -(u.y*P(2, 3) - P(1, 3)),
              -(u1.x*P1(2, 3) - P1(0, 3)),
              -(u1.y*P1(2, 3) - P1(1, 3)));
    // solve for X
    Mat_<double> X;
    solve(A, B, X, DECOMP_SVD);
    return X;
}
Matrix:
Fundamental =
[-5.365548729323536e-007, 0.0003108718787914248, -0.0457266834161677;
-0.0003258809500026533, 4.695400741230473e-006, 1.295466303565132;
0.05008017646011816, -1.300323239531621, 1]
Calibration Matrix =
[744.2366711500123, 0, 304.166818982576;
0, 751.1308610972965, 225.3750058508892;
0, 0, 1]
Essential =
[-0.2971914249411831, 173.7833277398352, 17.99033324690517;
-182.1736856953757, 2.649133690692166, 899.405863948026;
-17.51073288084396, -904.8934348365967, 0.3895173270497594]
Rotation matrix =
[-0.9243506387712034, 0.03758098759490174, -0.3796887751496749;
0.03815782996164848, 0.9992536546828119, 0.006009460513344713;
-0.379631237671357, 0.008933251056327281, 0.9250947629349537]
Translation matrix =
[-0.9818733349058273;
0.01972152607878091;
-0.1885094576142884]
P0 matrix =
[1, 0, 0, 0;
0, 1, 0, 0;
0, 0, 1, 0]
P1 matrix =
[-0.9243506387712034, 0.03758098759490174, -0.3796887751496749, -0.9818733349058273;
0.03815782996164848, 0.9992536546828119, 0.006009460513344713, 0.01972152607878091;
-0.379631237671357, 0.008933251056327281, 0.9250947629349537, -0.1885094576142884]
I solved the problem; I had two big problems.
First of all, I was passing the unfiltered keypoints to the triangulate function, so I was seeing both the matched points and the useless ones, and there will probably be more useless points than useful ones.
So, as you will see in the triangulate function, I now pass the matches I obtained after the RANSAC test and symmetry test filtering, and then use only the keypoints at the indices given by those matches. So everything is good =) only the good matches are used.
Second, the triangulate function was wrong.
Here it is corrected:
double TriangulatePoints(const vector<KeyPoint>& pt_set1, const vector<KeyPoint>& pt_set2, const Mat& Kinv,
                         const Matx34d& P, const Matx34d& P1, vector<Point3d>& pointcloud, vector<DMatch>& matches)
{
    //Mat_<double> KP1 = Kinv.inv() * Mat(P1);
    vector<double> reproj_error;
    for (unsigned int i = 0; i < matches.size(); i++)
    {
        // convert to normalized homogeneous coordinates
        Point2f kp = pt_set1[matches[i].queryIdx].pt;
        Point3d u(kp.x, kp.y, 1.0);
        Mat_<double> um = Kinv * Mat_<double>(u);
        u.x = um(0);
        u.y = um(1);
        u.z = um(2);

        Point2f kp1 = pt_set2[matches[i].trainIdx].pt;
        Point3d u1(kp1.x, kp1.y, 1.0);
        Mat_<double> um1 = Kinv * Mat_<double>(u1);
        u1.x = um1(0);
        u1.y = um1(1);
        u1.z = um1(2);

        // triangulate
        Mat_<double> X = LinearLSTriangulation(u, P, u1, P1);
        pointcloud.push_back(Point3d(X(0), X(1), X(2)));
    }
    cout << "number of points: " << pointcloud.size() << endl;
    return 1;
}
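As a side note that is not part of the original post: OpenCV also ships a built-in cv::triangulatePoints that solves the same linear system for all correspondences at once. A minimal sketch, assuming pts1 and pts2 are the matched points already multiplied by Kinv, and P, P1 are the same camera matrices as above (the helper name is my own):
// Hedged sketch: the same triangulation using OpenCV's built-in function.
#include <opencv2/opencv.hpp>
#include <vector>

std::vector<cv::Point3d> triangulateWithOpenCV(const std::vector<cv::Point2f>& pts1,
                                               const std::vector<cv::Point2f>& pts2,
                                               const cv::Matx34d& P,
                                               const cv::Matx34d& P1)
{
    cv::Mat points4D; // 4xN homogeneous coordinates
    cv::triangulatePoints(cv::Mat(P), cv::Mat(P1), pts1, pts2, points4D);
    points4D.convertTo(points4D, CV_64F); // make element access below type-safe

    std::vector<cv::Point3d> cloud;
    for (int i = 0; i < points4D.cols; ++i)
    {
        double w = points4D.at<double>(3, i); // divide by the homogeneous coordinate
        cloud.push_back(cv::Point3d(points4D.at<double>(0, i) / w,
                                    points4D.at<double>(1, i) / w,
                                    points4D.at<double>(2, i) / w));
    }
    return cloud;
}
Comparing its output with the hand-rolled LinearLSTriangulation is a quick way to sanity-check the point cloud before handing it to PCL.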

Approximate photo of a simple drawing using lines

As an input I have a photo of a simple symbol, e.g.: https://www.dropbox.com/s/nrmsvfd0le0bkke/symbol.jpg
I would like to detect the straight lines in it, i.e. the start and end points of each line. In this case, assuming the top left of the symbol is (0,0), the lines would be defined like this:
start end (coordinates of beginning and end of a line)
1. (0,0); (0,10) (vertical line)
2. (0,10); (15, 15)
3. (15,15); (0, 20)
4. (0,20); (0,30)
How can I do it (preferably using OpenCV)? I thought about Hough lines, but they seem to be good for perfect thin straight lines, which is not the case in a drawing. I'll probably work on a binarized image, too.
Give this a try:
1. Apply a thinning algorithm to the thresholded image.
2. Find contours.
3. Run approxPolyDP on the found contours.
See these references:
approxpolydp-for-edge-maps
Creating Bounding boxes and circles for contours
A minimal sketch of steps 2 and 3 is shown below.
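As an illustration only (the file names and the 1% tolerance are my own assumptions, not part of the answer), steps 2 and 3 on an already thinned and binarized drawing could look roughly like this:
// Hedged sketch: find contours in a binary drawing and approximate each one
// with a polyline; consecutive vertices give start/end points of line segments.
#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat bin = cv::imread("symbol_binary.png", 0); // assumed pre-thresholded/thinned
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(bin, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

    cv::Mat vis(bin.size(), CV_8UC3, cv::Scalar::all(0));
    for (size_t i = 0; i < contours.size(); i++)
    {
        std::vector<cv::Point> poly;
        double eps = 0.01 * cv::arcLength(contours[i], true); // tolerance: 1% of the perimeter
        cv::approxPolyDP(contours[i], poly, eps, true);

        // Each pair of consecutive vertices is one candidate line segment.
        for (size_t j = 0; j + 1 < poly.size(); j++)
            cv::line(vis, poly[j], poly[j + 1], cv::Scalar(0, 255, 0), 1);
    }
    cv::imwrite("approximated_lines.png", vis);
    return 0;
}
The consecutive vertices of each approximated polyline are the candidate start/end points of the straight strokes in the drawing.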
Maybe you can work with this one. Assuming a perfect binarization:
1. Run HoughLinesP.
2. (not implemented here) Try to group the detected lines.
I used this code:
#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::Mat image = cv::imread("HoughLinesP_perfect.png");
    cv::Mat gray;
    cv::cvtColor(image, gray, CV_BGR2GRAY);
    cv::Mat output; image.copyTo(output);
    cv::Mat g_thres = gray == 0;

    std::vector<cv::Vec4i> lines;
    //cv::HoughLinesP( binary, lines, 1, 2*CV_PI/180, 100, 100, 50 );
    //cv::HoughLinesP( h_thres, lines, 1, CV_PI/180, 100, image.cols/2, 10 );
    cv::HoughLinesP(g_thres, lines, 1, CV_PI/(4*180.0), 50, image.cols/20, 10);

    for (size_t i = 0; i < lines.size(); i++)
    {
        cv::line(output, cv::Point(lines[i][0], lines[i][1]),
                 cv::Point(lines[i][2], lines[i][3]), cv::Scalar(155,255,155), 1, 8);
    }

    cv::imshow("g thres", g_thres);
    cv::imwrite("HoughLinesP_out.png", output);
    cv::resize(output, output, cv::Size(), 0.5, 0.5);
    cv::namedWindow("output"); cv::imshow("output", output);
    cv::waitKey(-1);
    std::cout << "finished" << std::endl;
    return 0;
}
EDIT:
Updated code with simple line clustering (the minimum_distance function is taken from SO), giving this result:
#include <opencv2/opencv.hpp>
#include <iostream>

float minimum_distance(cv::Point2f v, cv::Point2f w, cv::Point2f p)
{
    // Return minimum distance between line segment vw and point p
    const float l2 = cv::norm(w-v) * cv::norm(w-v); // i.e. |w-v|^2 - avoid a sqrt
    if (l2 == 0.0) return cv::norm(p-v); // v == w case
    // Consider the line extending the segment, parameterized as v + t (w - v).
    // We find projection of point p onto the line.
    // It falls where t = [(p-v) . (w-v)] / |w-v|^2
    //const float t = dot(p - v, w - v) / l2;
    float t = ((p-v).x * (w-v).x + (p-v).y * (w-v).y) / l2;
    if (t < 0.0) return cv::norm(p-v);      // Beyond the 'v' end of the segment
    else if (t > 1.0) return cv::norm(p-w); // Beyond the 'w' end of the segment
    const cv::Point2f projection = v + t * (w - v); // Projection falls on the segment
    return cv::norm(p - projection);
}

int main()
{
    cv::Mat image = cv::imread("HoughLinesP_perfect.png");
    cv::Mat gray;
    cv::cvtColor(image, gray, CV_BGR2GRAY);
    cv::Mat output; image.copyTo(output);
    cv::Mat g_thres = gray == 0;

    std::vector<cv::Vec4i> lines;
    cv::HoughLinesP(g_thres, lines, 1, CV_PI/(4*180.0), 50, image.cols/20, 10);

    float minDist = 100;
    std::vector<cv::Vec4i> lines_filtered;
    for (size_t i = 0; i < lines.size(); i++)
    {
        bool keep = true;
        int overwrite = -1;
        cv::Point2f a(lines[i][0], lines[i][1]);
        cv::Point2f b(lines[i][2], lines[i][3]);
        float lengthAB = cv::norm(a-b);
        for (size_t j = 0; j < lines_filtered.size(); j++)
        {
            cv::Point2f c(lines_filtered[j][0], lines_filtered[j][1]);
            cv::Point2f d(lines_filtered[j][2], lines_filtered[j][3]);
            float distCDA = minimum_distance(c, d, a);
            float distCDB = minimum_distance(c, d, b);
            float lengthCD = cv::norm(c-d);
            if ((distCDA < minDist) && (distCDB < minDist))
            {
                if (lengthCD >= lengthAB)
                {
                    keep = false;
                }
                else
                {
                    overwrite = j;
                }
            }
        }
        if (keep)
        {
            if (overwrite >= 0)
            {
                lines_filtered[overwrite] = lines[i];
            }
            else
            {
                lines_filtered.push_back(lines[i]);
            }
        }
    }

    for (size_t i = 0; i < lines_filtered.size(); i++)
    {
        cv::line(output, cv::Point(lines_filtered[i][0], lines_filtered[i][1]),
                 cv::Point(lines_filtered[i][2], lines_filtered[i][3]), cv::Scalar(155,255,155), 2, 8);
    }

    cv::imshow("g thres", g_thres);
    cv::imwrite("HoughLinesP_out.png", output);
    cv::resize(output, output, cv::Size(), 0.5, 0.5);
    cv::namedWindow("output"); cv::imshow("output", output);
    cv::waitKey(-1);
    std::cout << "finished" << std::endl;
    return 0;
}
You should try the Hough Line Transform. And here is an example from this website
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
Mat src = imread("building.jpg", 0);
Mat dst, cdst;
Canny(src, dst, 50, 200, 3);
cvtColor(dst, cdst, CV_GRAY2BGR);
vector<Vec2f> lines;
// detect lines
HoughLines(dst, lines, 1, CV_PI/180, 150, 0, 0 );
// draw lines
for( size_t i = 0; i < lines.size(); i++ )
{
float rho = lines[i][0], theta = lines[i][1];
Point pt1, pt2;
double a = cos(theta), b = sin(theta);
double x0 = a*rho, y0 = b*rho;
pt1.x = cvRound(x0 + 1000*(-b));
pt1.y = cvRound(y0 + 1000*(a));
pt2.x = cvRound(x0 - 1000*(-b));
pt2.y = cvRound(y0 - 1000*(a));
line( cdst, pt1, pt2, Scalar(0,0,255), 3, CV_AA);
}
imshow("source", src);
imshow("detected lines", cdst);
waitKey();
return 0;
}
With this you should be able to tweak things and get the properties you are looking for (the vertices); a sketch of how the vertices could be computed from the detected lines follows.
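As a hedged illustration only (not part of the original answer), the vertices could be recovered by intersecting the detected rho-theta lines pairwise: each line satisfies x*cos(theta) + y*sin(theta) = rho, so two lines give a small 2x2 linear system. The helper name below is my own.
// Hedged sketch: intersect two Hough lines given in (rho, theta) form.
// Returns false when the lines are (nearly) parallel.
#include <opencv2/opencv.hpp>
#include <cmath>

bool intersectRhoTheta(const cv::Vec2f& l1, const cv::Vec2f& l2, cv::Point2f& out)
{
    float r1 = l1[0], t1 = l1[1];
    float r2 = l2[0], t2 = l2[1];
    // Solve:  x*cos(t1) + y*sin(t1) = r1
    //         x*cos(t2) + y*sin(t2) = r2
    float det = std::cos(t1) * std::sin(t2) - std::sin(t1) * std::cos(t2);
    if (std::fabs(det) < 1e-6f)
        return false; // parallel lines, no unique intersection
    out.x = (r1 * std::sin(t2) - r2 * std::sin(t1)) / det;
    out.y = (r2 * std::cos(t1) - r1 * std::cos(t2)) / det;
    return true;
}
Looping this helper over all pairs of entries in lines and keeping only the intersections that fall inside the image would give the candidate vertices.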

How to capture hair wisp structure from an image?

I want to draw a curve from a specified point along its gradient direction to capture the structure of a hair wisp, like Figure 2 and Figure 3 from an ACM paper, linked here: Single-View Hair Modeling for Portrait Manipulation. Right now I draw an orientation map from the gradients, but the results look very chaotic.
This is my code:
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argv, char* argc[])
{
    Mat image = imread("wavy.jpg", 0);
    if (!image.data)
        return -1;

    Mat sobelX1;
    Sobel(image, sobelX1, CV_8U, 1, 0, 3);
    //imshow("X direction", sobelX);
    Mat sobelY1;
    Sobel(image, sobelY1, CV_8U, 1, 0, 3);
    //imshow("Y direction", sobelY);

    Mat sobelX, sobelY;
    sobelX1.convertTo(sobelX, CV_32F, 1./255);
    sobelY1.convertTo(sobelY, CV_32F, 1./255);

    double l_max = -10;
    for (int y = 0; y < image.rows; y += 3) // First iteration, to compute the maximum l (longest flow)
    {
        for (int x = 0; x < image.cols; x += 3)
        {
            double dx = sobelX.at<float>(y, x); // Gets X component of the flow
            double dy = sobelY.at<float>(y, x); // Gets Y component of the flow
            CvPoint p = cvPoint(y, x);
            double l = sqrt(dx*dx + dy*dy);     // This function sets a basic threshold for drawing on the image
            if (l > l_max) l_max = l;
        }
    }

    for (int y = 0; y < image.rows; y += 3)
    {
        for (int x = 0; x < image.cols; x += 3)
        {
            double dx = sobelX.at<float>(y, x); // Gets X component of the flow
            double dy = sobelY.at<float>(y, x); // Gets Y component of the flow
            CvPoint p = cvPoint(x, y);
            double l = sqrt(dx*dx + dy*dy);     // This function sets a basic threshold for drawing on the image
            if (l > 0)
            {
                double spinSize = 5.0 * l/l_max; // Factor to normalise the size of the spin depending on the length of the arrow
                CvPoint p2 = cvPoint(p.x + (int)(dx), p.y + (int)(dy));
                line(image, p, p2, CV_RGB(0,255,0), 1, CV_AA);

                double angle; // Draws the spin of the arrow
                angle = atan2((double) p.y - p2.y, (double) p.x - p2.x);
                p.x = (int) (p2.x + spinSize * cos(angle + 3.1416 / 4));
                p.y = (int) (p2.y + spinSize * sin(angle + 3.1416 / 4));
                line(image, p, p2, CV_RGB(0,255,0), 1, CV_AA, 0);
            }
        }
    }

    imshow("Orientation Map", image);
    waitKey(0);
    return 0;
}
Can anyone give me some hints?
Your Sobels are the same, while they are supposed to use different derivative orders for x and y: (1, 0) and (0, 1). On top of that, you lose resolution and sign by specifying CV_8U as the depth in Sobel and only then converting to float. Also, please provide the input resolution and your output image. A minimal sketch of what the corrected calls might look like is below.
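As an illustration only of that fix (not the answerer's code; the use of cartToPolar and the file names are my own choices), the two Sobel calls could be written directly at float depth:
// Hedged sketch: X and Y gradients with different derivative orders,
// computed at CV_32F depth so sign and dynamic range are preserved.
#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat image = cv::imread("wavy.jpg", 0);
    if (image.empty())
        return -1;

    cv::Mat sobelX, sobelY;
    cv::Sobel(image, sobelX, CV_32F, 1, 0, 3); // d/dx
    cv::Sobel(image, sobelY, CV_32F, 0, 1, 3); // d/dy

    // Gradient magnitude and orientation (in degrees) per pixel.
    cv::Mat magnitude, angle;
    cv::cartToPolar(sobelX, sobelY, magnitude, angle, true);

    cv::Mat mag8u;
    magnitude.convertTo(mag8u, CV_8U); // convert for saving/visualization
    cv::imwrite("gradient_magnitude.png", mag8u);
    return 0;
}
The orientation map for the hair wisps could then be built from angle instead of the truncated CV_8U gradients.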
