OpenCV triangulatePoints

First of all, thanks for reading.
I have an issue generating a point cloud with PCL from the information provided by OpenCV functions.
I'm using two images in which the feature detector found several keypoints.
I then compute the matches and estimate the fundamental matrix with the RANSAC algorithm.
I printed the corresponding points in each image, and several points are matched correctly.
Now I'm trying to generate the point cloud by reprojecting those points, because the next step is building a bigger point cloud from more than two images, i.e. a 3D reconstruction from 2D information.
My problem is that I can't fill the cloud properly: the points end up in weird positions and all of them seem very close together. Is there something wrong with the code I'm using?
Below are the functions and the matrices I'm using.
Calling the triangulate function:
TriangulatePoints(keypoints1, keypoints2, K.t(), P, P1, pointCloud)
Populating the cloud:
PopulatePCLPointCloud(pointCloud);
Populate Function:
void PopulatePCLPointCloud(const vector<Point3d>& pointcloud) // Populate point cloud
{
    cout << "Creating point cloud...";
    cloud.reset(new pcl::PointCloud<pcl::PointXYZRGB>);
    for (unsigned int i = 0; i < pointcloud.size(); i++)
    {
        // get the RGB color value for the point
        Vec3b rgbv(255, 255, 0);
        // check for erroneous coordinates (NaN, Inf, etc.)
        if (pointcloud[i].x != pointcloud[i].x || _isnan(pointcloud[i].x) ||
            pointcloud[i].y != pointcloud[i].y || _isnan(pointcloud[i].y) ||
            pointcloud[i].z != pointcloud[i].z || _isnan(pointcloud[i].z) ||
            fabsf(pointcloud[i].x) > 10.0 || fabsf(pointcloud[i].y) > 10.0 || fabsf(pointcloud[i].z) > 10.0)
        {
            continue;
        }
        pcl::PointXYZRGB pclp;
        // 3D coordinates
        pclp.x = pointcloud[i].x;
        pclp.y = pointcloud[i].y;
        pclp.z = pointcloud[i].z;
        // RGB color, needs to be packed into an integer (uint32_t)
        uint32_t rgb = ((uint32_t)rgbv[2] << 16 | (uint32_t)rgbv[1] << 8 | (uint32_t)rgbv[0]);
        pclp.rgb = *reinterpret_cast<float*>(&rgb);
        cloud->push_back(pclp);
    }
    cloud->width = (uint32_t)cloud->points.size(); // number of points
    cloud->height = 1;                             // a list of points, one row of data
}
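As a side note (my own sketch, not part of the original code), a quick way to inspect whether the triangulated points look sensible is PCL's CloudViewer, assuming the same global cloud as above:

#include <pcl/visualization/cloud_viewer.h>

void ShowCloud() // sketch only: assumes the global 'cloud' filled by PopulatePCLPointCloud
{
    pcl::visualization::CloudViewer viewer("Triangulated points");
    viewer.showCloud(cloud);
    while (!viewer.wasStopped()) {} // keep the window open until closed by the user
}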
The function that fills the cloud with the 3D points (I commented out the reprojection error part because I copied this code from Mastering OpenCV and it did not work):
double TriangulatePoints(const vector<KeyPoint>& pt_set1, const vector<KeyPoint>& pt_set2, const Mat& Kinv, const Matx34d& P, const Matx34d& P1, vector<Point3d>& pointcloud) {
    vector<double> reproj_error;
    for (unsigned int i = 0; i < min(pt_set1.size(), pt_set2.size()); i++) {
        // convert to normalized homogeneous coordinates
        Point2f kp = pt_set1[i].pt;
        Point3d u(kp.x, kp.y, 1.0);
        Mat_<double> um = Kinv * Mat_<double>(u);
        u = (Point3d)um(0, 0);
        Point2f kp1 = pt_set2[i].pt;
        Point3d u1(kp1.x, kp1.y, 1.0);
        Mat_<double> um1 = Kinv * Mat_<double>(u1);
        u1 = (Point3d)um1(0, 0);
        // triangulate
        Mat_<double> X = LinearLSTriangulation(u, P, u1, P1);
        /*Mat_<double> xPt_img = Kinv.t() * Mat(P1) * X;
        Point2f xPt_img_(xPt_img(0)/xPt_img(2), xPt_img(1)/xPt_img(2));
        // calculate reprojection error
        reproj_error.push_back(norm(xPt_img_ - kp1)); // store 3D point */
        // fill the point cloud
        pointcloud.push_back(Point3d(X(0), X(1), X(2)));
    }
    // return mean reprojection error
    /*Scalar me = mean(reproj_error);
    return me[0]; */
    return 0;
}
Linear Triangulation:
Mat_<double> LinearLSTriangulation(Point3d u,   // homogeneous image point (u,v,1)
                                   Matx34d P,   // camera 1 matrix
                                   Point3d u1,  // homogeneous image point in 2nd camera
                                   Matx34d P1   // camera 2 matrix
                                   ) {
    // build A matrix
    Matx43d A(u.x*P(2, 0) - P(0, 0),    u.x*P(2, 1) - P(0, 1),    u.x*P(2, 2) - P(0, 2),
              u.y*P(2, 0) - P(1, 0),    u.y*P(2, 1) - P(1, 1),    u.y*P(2, 2) - P(1, 2),
              u1.x*P1(2, 0) - P1(0, 0), u1.x*P1(2, 1) - P1(0, 1), u1.x*P1(2, 2) - P1(0, 2),
              u1.y*P1(2, 0) - P1(1, 0), u1.y*P1(2, 1) - P1(1, 1), u1.y*P1(2, 2) - P1(1, 2));
    // build B vector
    Matx41d B(-(u.x*P(2, 3) - P(0, 3)),
              -(u.y*P(2, 3) - P(1, 3)),
              -(u1.x*P1(2, 3) - P1(0, 3)),
              -(u1.y*P1(2, 3) - P1(1, 3)));
    // solve for X
    Mat_<double> X;
    solve(A, B, X, DECOMP_SVD);
    return X;
}
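As a cross-check (a sketch of my own, not from the original post), the same triangulation could be done with OpenCV's built-in triangulatePoints. Assuming keypoints1, keypoints2, K, P and P1 as in the call above, with K stored as CV_64F, and using K*P / K*P1 because P and P1 are K-normalized camera matrices:

// Sketch: cross-check LinearLSTriangulation against cv::triangulatePoints.
vector<Point2f> pts1, pts2;
for (size_t i = 0; i < min(keypoints1.size(), keypoints2.size()); i++) {
    pts1.push_back(keypoints1[i].pt);
    pts2.push_back(keypoints2[i].pt);
}
Mat points4D; // 4xN homogeneous output
triangulatePoints(K * Mat(P), K * Mat(P1), pts1, pts2, points4D);
points4D.convertTo(points4D, CV_64F);
for (int i = 0; i < points4D.cols; i++) {
    double w = points4D.at<double>(3, i);
    Point3d X(points4D.at<double>(0, i) / w,
              points4D.at<double>(1, i) / w,
              points4D.at<double>(2, i) / w);   // dehomogenize
    // X should roughly match the corresponding entry produced by TriangulatePoints
}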
Matrices:
Fundamental =
[-5.365548729323536e-007, 0.0003108718787914248, -0.0457266834161677;
-0.0003258809500026533, 4.695400741230473e-006, 1.295466303565132;
0.05008017646011816, -1.300323239531621, 1]
Calibration Matrix =
[744.2366711500123, 0, 304.166818982576;
0, 751.1308610972965, 225.3750058508892;
0, 0, 1]
Essential =
[-0.2971914249411831, 173.7833277398352, 17.99033324690517;
-182.1736856953757, 2.649133690692166, 899.405863948026;
-17.51073288084396, -904.8934348365967, 0.3895173270497594]
Rotation matrix =
[-0.9243506387712034, 0.03758098759490174, -0.3796887751496749;
0.03815782996164848, 0.9992536546828119, 0.006009460513344713;
-0.379631237671357, 0.008933251056327281, 0.9250947629349537]
Translation matrix =
[-0.9818733349058273;
0.01972152607878091;
-0.1885094576142884]
P0 matrix =
[1, 0, 0, 0;
0, 1, 0, 0;
0, 0, 1, 0]
P1 matrix =
[-0.9243506387712034, 0.03758098759490174, -0.3796887751496749, -0.9818733349058273;
0.03815782996164848, 0.9992536546828119, 0.006009460513344713, 0.01972152607878091;
-0.379631237671357, 0.008933251056327281, 0.9250947629349537, -0.1885094576142884]

I solved the problem; I had two big issues.
First of all, I was passing the unfiltered keypoints to the triangulation function, so it was triangulating the unmatched points along with the matched ones, and there will usually be more useless points than useful ones.
So, as you will see in the triangulation function below, I now pass the matches I obtained after the RANSAC test and the symmetry test, and index into the keypoints through those matches. Now everything is fine =) only the good matches are used.
Second, the triangulation function was wrong.
Here it is, corrected:
double TriangulatePoints(const vector<KeyPoint>& pt_set1, const vector<KeyPoint>& pt_set2, const Mat& Kinv, const Matx34d& P, const Matx34d& P1, vector<Point3d>& pointcloud, vector<DMatch>& matches)
{
    //Mat_<double> KP1 = Kinv.inv() * Mat(P1);
    vector<double> reproj_error;
    for (unsigned int i = 0; i < matches.size(); i++)
    {
        // convert to normalized homogeneous coordinates
        Point2f kp = pt_set1[matches[i].queryIdx].pt;
        Point3d u(kp.x, kp.y, 1.0);
        Mat_<double> um = Kinv * Mat_<double>(u);
        u.x = um(0);
        u.y = um(1);
        u.z = um(2);
        Point2f kp1 = pt_set2[matches[i].trainIdx].pt;
        Point3d u1(kp1.x, kp1.y, 1.0);
        Mat_<double> um1 = Kinv * Mat_<double>(u1);
        u1.x = um1(0);
        u1.y = um1(1);
        u1.z = um1(2);
        // triangulate
        Mat_<double> X = LinearLSTriangulation(u, P, u1, P1);
        pointcloud.push_back(Point3d(X(0), X(1), X(2)));
    }
    cout << "Number of points: " << pointcloud.size() << endl;
    return 1;
}
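The RANSAC match filtering mentioned above is not shown in the post; a minimal sketch of one way to obtain such filtered matches, using the inlier mask of findFundamentalMat (my assumption, not necessarily the exact filter used here):

// Sketch: keep only matches that are inliers of the RANSAC fundamental-matrix fit.
vector<Point2f> pts1, pts2;
for (size_t i = 0; i < matches.size(); i++) {
    pts1.push_back(keypoints1[matches[i].queryIdx].pt);
    pts2.push_back(keypoints2[matches[i].trainIdx].pt);
}
vector<uchar> inlierMask;
Mat F = findFundamentalMat(pts1, pts2, FM_RANSAC, 3.0, 0.99, inlierMask);
vector<DMatch> goodMatches;
for (size_t i = 0; i < matches.size(); i++) {
    if (inlierMask[i])
        goodMatches.push_back(matches[i]);
}
// goodMatches is then passed to TriangulatePoints(...)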

Related

CvSVM predict not returning correct value

I was trying the example detailed in this article.
Training, and the subsequent loop that identifies the hyper-plane, work well, i.e.:
// Data for visual representation
int width = 512, height = 512;
Mat image = Mat::zeros(height, width, CV_8UC3);
// Set up training data
float labels[4] = {1.0, -1.0, -1.0, -1.0};
Mat labelsMat(4, 1, CV_32FC1, labels);
float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
// Set up SVM's parameters
CvSVMParams svmparam;
svmparam.svm_type = CvSVM::C_SVC;
svmparam.kernel_type = CvSVM::LINEAR;
svmparam.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);
// Train the SVM
CvSVM svm;
svm.train(trainingDataMat, labelsMat, Mat(), Mat(), svmparam);
svm.save("Training.xml");
Vec3b blue(255, 0, 0);
Vec3b green(0, 255, 0);
for (int x = 0; x < image.rows; ++x)
{
    for (int y = 0; y < image.cols; ++y)
    {
        Mat sampleMat = (Mat_<float>(1, 2) << y, x);
        float response = svm.predict(sampleMat);
        if (response == 1)
            image.at<Vec3b>(x, y) = green;
        else if (response == -1)
            image.at<Vec3b>(x, y) = blue;
    }
}
But when I try to get a support vector using the API (svm.get_support_vector(i);), it returns very small numbers (such as 0.000876529e-28). Hence, after type-casting to int, the X and Y coordinates both become 0. So, even after getting the hyper-plane, I am unable to get the corresponding support vectors.
i.e.
for (int i = 0; i < c; ++i)
{
    const float* v = svm.get_support_vector(i);
    cv::Point resCenter((int) v[0], (int) v[1]);
    std::cout << v[0] << ":" << v[1] << "= " << resCenter << std::endl;
    circle( image, resCenter, 6, Scalar(128, 128, 128), thickness, lineType);
}
I tried normalizing the coordinate positions as
X' = (x - MinR) / (MaxR - MinR) // Here MinR and MaxR span the cols (0, 512)
Y' = (y - MinR) / (MaxR - MinR) // Here MinR and MaxR span the rows (0, 512)
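For reference, a minimal sketch (my own illustration, not from the original post) of that normalization applied to the training data, assuming MinR = 0 and MaxR = 512 as above:

// Sketch: scale the training samples into [0,1] before training.
const float MinR = 0.0f, MaxR = 512.0f;
Mat normTrainingData;
trainingDataMat.convertTo(normTrainingData, CV_32FC1, 1.0 / (MaxR - MinR), -MinR / (MaxR - MinR));
// The same scaling then has to be applied to every sample passed to predict(),
// and the support vectors have to be scaled back by (MaxR - MinR) before drawing them.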
As I am new to machine learning, I would be thankful if you could suggest something for me to read on the following questions:
What does train internally do with the feature vectors we pass in? (I understand it creates a category with respect to the labels provided, but how does that happen?)
The internal functioning of predict.
Any pointers to understanding these would help me. Thanks in advance for your precious time.

How can I get ellipse coefficient from fitEllipse function of OpenCV?

I want to extract the red ball from a picture and get the matrix of the detected ellipse in the picture.
Here is my example:
I threshold the picture, find the contour of the red ball using the findContours() function, and use fitEllipse() to fit an ellipse.
But what I want is the coefficients of this ellipse. Because fitEllipse() returns a rotated rectangle (RotatedRect), I need to re-write this function.
An ellipse can be expressed as Ax^2 + By^2 + Cxy + Dx + Ey + F = 0, so I want to get u = (A,B,C,D,E,F), or u = (A,B,C,D,E) if F is 1 (to construct an ellipse matrix).
I read the source code of fitEllipse(); there are three SVD processes in total, and I think I can get the above coefficients from the results of those three SVD processes. But I am quite confused about what each result (the variable cv::Mat x) of each SVD process represents, and why there are three SVDs here.
Here is this function:
cv::RotatedRect cv::fitEllipse( InputArray _points )
{
    Mat points = _points.getMat();
    int i, n = points.checkVector(2);
    int depth = points.depth();
    CV_Assert( n >= 0 && (depth == CV_32F || depth == CV_32S));

    RotatedRect box;

    if( n < 5 )
        CV_Error( CV_StsBadSize, "There should be at least 5 points to fit the ellipse" );

    // New fitellipse algorithm, contributed by Dr. Daniel Weiss
    Point2f c(0,0);
    double gfp[5], rp[5], t;
    const double min_eps = 1e-8;
    bool is_float = depth == CV_32F;
    const Point* ptsi = points.ptr<Point>();
    const Point2f* ptsf = points.ptr<Point2f>();

    AutoBuffer<double> _Ad(n*5), _bd(n);
    double *Ad = _Ad, *bd = _bd;

    // first fit for parameters A - E
    Mat A( n, 5, CV_64F, Ad );
    Mat b( n, 1, CV_64F, bd );
    Mat x( 5, 1, CV_64F, gfp );

    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        c += p;
    }
    c.x /= n;
    c.y /= n;

    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        p -= c;

        bd[i] = 10000.0; // 1.0?
        Ad[i*5] = -(double)p.x * p.x; // A - C signs inverted as proposed by APP
        Ad[i*5 + 1] = -(double)p.y * p.y;
        Ad[i*5 + 2] = -(double)p.x * p.y;
        Ad[i*5 + 3] = p.x;
        Ad[i*5 + 4] = p.y;
    }

    solve(A, b, x, DECOMP_SVD);

    // now use general-form parameters A - E to find the ellipse center:
    // differentiate general form wrt x/y to get two equations for cx and cy
    A = Mat( 2, 2, CV_64F, Ad );
    b = Mat( 2, 1, CV_64F, bd );
    x = Mat( 2, 1, CV_64F, rp );
    Ad[0] = 2 * gfp[0];
    Ad[1] = Ad[2] = gfp[2];
    Ad[3] = 2 * gfp[1];
    bd[0] = gfp[3];
    bd[1] = gfp[4];
    solve( A, b, x, DECOMP_SVD );

    // re-fit for parameters A - C with those center coordinates
    A = Mat( n, 3, CV_64F, Ad );
    b = Mat( n, 1, CV_64F, bd );
    x = Mat( 3, 1, CV_64F, gfp );
    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        p -= c;
        bd[i] = 1.0;
        Ad[i * 3] = (p.x - rp[0]) * (p.x - rp[0]);
        Ad[i * 3 + 1] = (p.y - rp[1]) * (p.y - rp[1]);
        Ad[i * 3 + 2] = (p.x - rp[0]) * (p.y - rp[1]);
    }
    solve(A, b, x, DECOMP_SVD);

    // store angle and radii
    rp[4] = -0.5 * atan2(gfp[2], gfp[1] - gfp[0]); // convert from APP angle usage
    if( fabs(gfp[2]) > min_eps )
        t = gfp[2]/sin(-2.0 * rp[4]);
    else // ellipse is rotated by an integer multiple of pi/2
        t = gfp[1] - gfp[0];
    rp[2] = fabs(gfp[0] + gfp[1] - t);
    if( rp[2] > min_eps )
        rp[2] = std::sqrt(2.0 / rp[2]);
    rp[3] = fabs(gfp[0] + gfp[1] + t);
    if( rp[3] > min_eps )
        rp[3] = std::sqrt(2.0 / rp[3]);

    box.center.x = (float)rp[0] + c.x;
    box.center.y = (float)rp[1] + c.y;
    box.size.width = (float)(rp[2]*2);
    box.size.height = (float)(rp[3]*2);
    if( box.size.width > box.size.height )
    {
        float tmp;
        CV_SWAP( box.size.width, box.size.height, tmp );
        box.angle = (float)(90 + rp[4]*180/CV_PI);
    }
    if( box.angle < -180 )
        box.angle += 360;
    if( box.angle > 360 )
        box.angle -= 360;

    return box;
}
The source code link: https://github.com/Itseez/opencv/blob/master/modules/imgproc/src/shapedescr.cpp
The function fitEllipse returns a RotatedRect that contains all the parameters of the ellipse.
An ellipse is defined by 5 parameters:
xc : x coordinate of the center
yc : y coordinate of the center
a : major semi-axis
b : minor semi-axis
theta : rotation angle
You can obtain these parameters like:
RotatedRect e = fitEllipse(points);
float xc = e.center.x;
float yc = e.center.y;
float a = e.size.width / 2; // width >= height
float b = e.size.height / 2;
float theta = e.angle; // in degrees
You can draw an ellipse with the function ellipse using the RotatedRect:
ellipse(image, e, Scalar(0,255,0));
or, equivalently using the ellipse parameters:
ellipse(res, Point(xc, yc), Size(a, b), theta, 0.0, 360.0, Scalar(0,255,0));
If you need the values of the coefficients of the implicit equation, you can compute them like this (from Wikipedia):
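Following the Wikipedia parametrisation (and consistent with the Python snippet further down), the conversion should be:
A = a^2 * sin^2(theta) + b^2 * cos^2(theta)
B = 2 * (b^2 - a^2) * sin(theta) * cos(theta)
C = a^2 * cos^2(theta) + b^2 * sin^2(theta)
D = -2*A*xc - B*yc
E = -B*xc - 2*C*yc
F = A*xc^2 + B*xc*yc + C*yc^2 - a^2*b^2
where (xc, yc) is the center, a and b are the semi-axes, theta is the rotation angle, and the conic is A*x^2 + B*x*y + C*y^2 + D*x + E*y + F = 0.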
So, you can get the parameters you need from the RotatedRect, and you don't need to change the function fitEllipse.
The solve function is used to solve linear systems or least-squares problems. Using the SVD decomposition method the system can be over-defined and/or the matrix src1 can be singular.
For more details on the algorithm, you can see the paper of Fitzgibbon that proposed this fit ellipse method.
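As a tiny illustration (mine, not from the answer above) of what each solve(A, b, x, DECOMP_SVD) call inside fitEllipse does, here is an over-determined least-squares fit of a straight line y = m*x + q:

// Sketch: least-squares line fit with cv::solve and DECOMP_SVD,
// analogous to the three solve() calls inside fitEllipse.
#include <opencv2/core/core.hpp>
using namespace cv;

int main()
{
    // four noisy samples of y = 2x + 1
    double xs[] = {0, 1, 2, 3}, ys[] = {1.1, 2.9, 5.2, 6.9};
    Mat A(4, 2, CV_64F), b(4, 1, CV_64F), x;
    for (int i = 0; i < 4; i++)
    {
        A.at<double>(i, 0) = xs[i];
        A.at<double>(i, 1) = 1.0;
        b.at<double>(i, 0) = ys[i];
    }
    solve(A, b, x, DECOMP_SVD); // x(0) ~ slope m, x(1) ~ intercept q
    return 0;
}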
Here is some code that worked for me which I based on the other responses on this thread.
def getConicCoeffFromEllipse(e):
    # ellipse(Point(xc, yc), Size(a, b), theta)
    xc = e[0][0]
    yc = e[0][1]
    a = e[1][0] / 2
    b = e[1][1] / 2
    theta = math.radians(e[2])
    # See https://en.wikipedia.org/wiki/Ellipse
    # Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0 is the equation
    A = a*a*math.pow(math.sin(theta), 2) + b*b*math.pow(math.cos(theta), 2)
    B = 2*(b*b - a*a)*math.sin(theta)*math.cos(theta)
    C = a*a*math.pow(math.cos(theta), 2) + b*b*math.pow(math.sin(theta), 2)
    D = -2*A*xc - B*yc
    E = -B*xc - 2*C*yc
    F = A*xc*xc + B*xc*yc + C*yc*yc - a*a*b*b
    coef = np.array([A, B, C, D, E, F]) / F
    return coef

def getConicMatrixFromCoeff(c):
    C = np.array([[c[0],   c[1]/2, c[3]/2],    # [ a,  b/2, d/2 ]
                  [c[1]/2, c[2],   c[4]/2],    # [b/2,  c,  e/2 ]
                  [c[3]/2, c[4]/2, c[5]]])     # [d/2, e/2,  f  ]
    return C

Corner Detection

I am new to OpenCV. I am trying to detect 90 degree corners in a fairly simple image: I need to detect the corners of the rectangle which surrounds the object. I am using the Shi-Tomasi feature detector. The following is my code:
for x in range(0, 50):
    ret, frame = cap.read()

    # make image gray scale
    im = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # finding corners
    corners = cv2.goodFeaturesToTrack(im, 1, 0.01, 10)
    corners = np.int0(corners)

    for i in corners:
        x, y = i.ravel()
        cv2.circle(frame, (x, y), 3, 255, -1)

    cv2.imwrite("DetectedCorners.png", frame)
Problem: some corners of that object are always detected. I need a method to completely remove that object and then detect the corners.
I don't know how to remove that object.
Any suggestions? The photo shows my result: sometimes corners of the surrounding rectangle are detected, sometimes some random points in that complex object.
I also used Canny before detecting the corners, but the result was 10 times worse.
Well, here is a quick and dirty C++ solution, just as a proof of concept for using the Hough transform to detect lines and then computing their intersections.
You can port the code to Python if needed.
#include <opencv2\opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    Mat3b img = imread("path_to_image");

    // Convert to grayscale
    Mat1b gray;
    cvtColor(img, gray, COLOR_BGR2GRAY);

    // Compute edges
    Mat1b edges;
    Canny(gray, edges, 400, 100);

    // Create the output result image
    Mat3b res;
    cvtColor(edges, res, COLOR_GRAY2BGR);

    // Call Hough
    vector<Vec2f> lines;
    HoughLines(edges, lines, 1, CV_PI / 180, 200, 0, 0);

    vector<pair<Point,Point>> pts;
    vector<Point> intersections;

    for (size_t i = 0; i < lines.size(); i++)
    {
        float rho = lines[i][0], theta = lines[i][1];

        // Get 2 points on each line
        Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        pt1.x = cvRound(x0 + 1000 * (-b));
        pt1.y = cvRound(y0 + 1000 * (a));
        pt2.x = cvRound(x0 - 1000 * (-b));
        pt2.y = cvRound(y0 - 1000 * (a));

        // Save the pair of points
        pts.push_back(make_pair(pt1, pt2));

        // Draw lines
        line(res, pt1, pt2, Scalar(0, 0, 255), 3, CV_AA);
    }

    // for each couple of lines
    for (int i = 0; i < pts.size() - 1; ++i)
    {
        // get the two points of the first line
        const Point& p1 = pts[i].first;
        const Point& p2 = pts[i].second;

        for (int j = i + 1; j < pts.size(); ++j)
        {
            // Get the two points of the second line
            const Point& p3 = pts[j].first;
            const Point& p4 = pts[j].second;

            // Compute intersection
            Point p;
            float den = (p1.x - p2.x) * (p3.y - p4.y) - (p1.y - p2.y) * (p3.x - p4.x);

            if (den != 0) // if not parallel lines
            {
                p.x = ((p1.x*p2.y - p1.y*p2.x)*(p3.x - p4.x) - (p1.x - p2.x)*(p3.x*p4.y - p3.y*p4.x)) / den;
                p.y = ((p1.x*p2.y - p1.y*p2.x)*(p3.y - p4.y) - (p1.y - p2.y)*(p3.x*p4.y - p3.y*p4.x)) / den;

                // Draw intersection
                circle(res, p, 7, Scalar(0, 255, 0), 2);
            }

            // Save intersections
            intersections.push_back(p);
        }
    }

    return 0;
}
Result:

How to capture hair wisp structure from an image?

I want to trace a curve from a specified point along its gradient direction to capture the structure of a hair wisp, like Figure 2 and Figure 3 from an ACM paper, which I linked here: Single-View Hair Modeling for Portrait Manipulation. Right now I draw an orientation map from the gradients, but the results look very chaotic.
This is my code:
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main(int argv, char* argc[])
{
    Mat image = imread("wavy.jpg", 0);
    if(!image.data)
        return -1;

    Mat sobelX1;
    Sobel(image, sobelX1, CV_8U, 1, 0, 3);
    //imshow("X direction", sobelX);

    Mat sobelY1;
    Sobel(image, sobelY1, CV_8U, 1, 0, 3);
    //imshow("Y direction", sobelY);

    Mat sobelX, sobelY;
    sobelX1.convertTo(sobelX, CV_32F, 1./255);
    sobelY1.convertTo(sobelY, CV_32F, 1./255);

    double l_max = -10;
    for (int y = 0; y < image.rows; y += 3) // First iteration, to compute the maximum l (longest flow)
    {
        for (int x = 0; x < image.cols; x += 3)
        {
            double dx = sobelX.at<float>(y, x); // Gets X component of the flow
            double dy = sobelY.at<float>(y, x); // Gets Y component of the flow
            CvPoint p = cvPoint(y, x);
            double l = sqrt(dx*dx + dy*dy);     // This function sets a basic threshold for drawing on the image
            if (l > l_max) l_max = l;
        }
    }

    for (int y = 0; y < image.rows; y += 3)
    {
        for (int x = 0; x < image.cols; x += 3)
        {
            double dx = sobelX.at<float>(y, x); // Gets X component of the flow
            double dy = sobelY.at<float>(y, x); // Gets Y component of the flow
            CvPoint p = cvPoint(x, y);
            double l = sqrt(dx*dx + dy*dy);     // This function sets a basic threshold for drawing on the image
            if (l > 0)
            {
                double spinSize = 5.0 * l/l_max; // Factor to normalise the size of the spin depending on the length of the arrow
                CvPoint p2 = cvPoint(p.x + (int)(dx), p.y + (int)(dy));
                line(image, p, p2, CV_RGB(0,255,0), 1, CV_AA);

                // Draws the spin of the arrow
                double angle;
                angle = atan2( (double) p.y - p2.y, (double) p.x - p2.x);
                p.x = (int) (p2.x + spinSize * cos(angle + 3.1416 / 4));
                p.y = (int) (p2.y + spinSize * sin(angle + 3.1416 / 4));
                line(image, p, p2, CV_RGB(0,255,0), 1, CV_AA, 0 );
            }
        }
    }

    imshow("Orientation Map", image);
    waitKey(0);
    return 0;
}
Can anyone give me some hints?
Your Sobel calls are the same, while they are supposed to have different parameters for x and y: (1, 0) and (0, 1). On top of that, you lose resolution and sign by specifying CV_8U as the depth in Sobel and only then converting to float. Also, please provide the input resolution and your output image.
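A minimal sketch of what that correction might look like (my reading of the comment, not the commenter's code):

// Sketch: separate x/y derivatives, computed directly at float depth
// so that sign and resolution are preserved.
Mat sobelX, sobelY;
Sobel(image, sobelX, CV_32F, 1, 0, 3); // d/dx
Sobel(image, sobelY, CV_32F, 0, 1, 3); // d/dy
// optional scaling into a convenient range
sobelX /= 255.0;
sobelY /= 255.0;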

How to draw Optical flow images from ocl::PyrLKOpticalFlow::dense()

How do I draw optical flow images from ocl::PyrLKOpticalFlow::dense(), which actually calculates both the horizontal and vertical components of the optical flow? I don't know how to draw them. I'm new to OpenCV. Can anyone help me?
Syntax :
ocl::PyrLKOpticalFlow::dense(oclMat &prevImg, oclMat& nextImg, oclMat& u, oclMat &v,oclMat &err)
A well-established method in the optical flow community is to display the motion vector field as a color-coded image, as you can see in the various data sets, e.g. the MPI dataset or the Middlebury dataset.
To do this, you estimate the length and the angle of each motion vector and use an HSV to RGB colorspace transformation (see the OpenCV cvtColor function) to create your color-coded image. Use the angle of the motion vector as the H (Hue) channel, the normalized length as the S (Saturation) channel, and set V (Value) to 1. The color of your image will then show you the direction of the motion and the saturation its length (speed).
The code should look like this (note: if use_value == true, the Saturation will be set to 1 and the Value channel is related to the motion vector length):
void FlowToRGB(const cv::Mat & inpFlow,
               cv::Mat & rgbFlow,
               const float & max_size,
               bool use_value)
{
    if (inpFlow.empty()) return;
    if (inpFlow.depth() != CV_32F)
        throw(std::runtime_error("FlowToRGB: error, inpFlow has the wrong data type (has to be CV_32FC2)"));

    const float grad2deg = (float)(90 / 3.141);
    cv::Mat pol(inpFlow.size(), CV_32FC2);

    float mean_val = 0, min_val = 1000, max_val = 0;
    float _dx, _dy;

    for (int r = 0; r < inpFlow.rows; r++)
    {
        for (int c = 0; c < inpFlow.cols; c++)
        {
            cv::Point2f polar = cvmath::toPolar(inpFlow.at<cv::Point2f>(r, c));
            polar.y *= grad2deg;
            mean_val += polar.x;
            max_val = MAX(max_val, polar.x);
            min_val = MIN(min_val, polar.x);
            pol.at<cv::Point2f>(r, c) = cv::Point2f(polar.y, polar.x);
        }
    }

    mean_val /= inpFlow.size().area();
    float scale = max_val - min_val;
    float shift = -min_val; //-mean_val + scale;
    scale = 255.f / scale;
    if (max_size > 0)
    {
        scale = 255.f / max_size;
        shift = 0;
    }

    // calculate the angle, motion value
    cv::Mat hsv(inpFlow.size(), CV_8UC3);
    uchar * ptrHSV = hsv.ptr<uchar>();
    int idx_val = (use_value) ? 2 : 1;
    int idx_sat = (use_value) ? 1 : 2;

    for (int r = 0; r < inpFlow.rows; r++, ptrHSV += hsv.step1())
    {
        uchar * _ptrHSV = ptrHSV;
        for (int c = 0; c < inpFlow.cols; c++, _ptrHSV += 3)
        {
            cv::Point2f vpol = pol.at<cv::Point2f>(r, c);
            _ptrHSV[0] = cv::saturate_cast<uchar>(vpol.x);
            _ptrHSV[idx_val] = cv::saturate_cast<uchar>((vpol.y + shift) * scale);
            _ptrHSV[idx_sat] = 255;
        }
    }

    cv::Mat rgbFlow32F;
    cv::cvtColor(hsv, rgbFlow32F, CV_HSV2BGR);
    rgbFlow32F.convertTo(rgbFlow, CV_8UC3);
}
Python
Please refer to opt_flow.py#draw_flow
def draw_flow(img, flow, step=16):
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2,-1).astype(int)
    fx, fy = flow[y,x].T
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
C++
Please refer to tvl1_optical_flow.cpp#drawOpticalFlow
static void drawOpticalFlow(const Mat_<Point2f>& flow, Mat& dst, float maxmotion = -1)
{
    dst.create(flow.size(), CV_8UC3);
    dst.setTo(Scalar::all(0));

    // determine motion range:
    float maxrad = maxmotion;
    if (maxmotion <= 0)
    {
        maxrad = 1;
        for (int y = 0; y < flow.rows; ++y)
        {
            for (int x = 0; x < flow.cols; ++x)
            {
                Point2f u = flow(y, x);
                if (!isFlowCorrect(u))
                    continue;
                maxrad = max(maxrad, sqrt(u.x * u.x + u.y * u.y));
            }
        }
    }

    for (int y = 0; y < flow.rows; ++y)
    {
        for (int x = 0; x < flow.cols; ++x)
        {
            Point2f u = flow(y, x);
            if (isFlowCorrect(u))
                dst.at<Vec3b>(y, x) = computeColor(u.x / maxrad, u.y / maxrad);
        }
    }
}
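The snippet relies on two helpers from that sample which are not shown here; roughly (my paraphrase of the sample, so treat it as an assumption), isFlowCorrect just filters out NaN and absurdly large vectors, and computeColor maps a normalized flow vector to a color on the Middlebury color wheel:

// Sketch of the missing helper from the sample: reject NaN / huge flow vectors.
inline bool isFlowCorrect(Point2f u)
{
    return !cvIsNaN(u.x) && !cvIsNaN(u.y) && fabs(u.x) < 1e9 && fabs(u.y) < 1e9;
}
// computeColor(fx, fy) is longer; see the full tvl1_optical_flow.cpp sample linked above.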
I did something like this in my code, a while ago:
calcOpticalFlowPyrLK(frame_prec, frame_cur, v_corners_prec[i], corners_cur, status, err);
for (int j = 0; j < corners_cur.size(); j++) {
    if (status[j]) {
        line(frame_cur, v_corners_prec[i][j], corners_cur[j], colors[i]);
    }
}
Basically, I draw a line between the points tracked by the optical flow in this iteration and in the previous one; this draws the optical flow lines, which represent the flow on the image.
Hope this helps.
