How can I get the ellipse coefficients from the fitEllipse function of OpenCV?

I want to extract the red ball from a picture and get the detected ellipse matrix for it.
Here is my example:
I threshold the picture, find the contour of the red ball using the findContours() function, and use fitEllipse() to fit an ellipse.
But what I want is the coefficients of this ellipse. Because fitEllipse() returns a rotated rectangle (RotatedRect), I need to rewrite this function.
An ellipse can be expressed as Ax^2 + By^2 + Cxy + Dx + Ey + F = 0, so I want to get u = (A, B, C, D, E, F), or u = (A, B, C, D, E) if F is 1 (to construct an ellipse matrix).
I read the source code of fitEllipse(); there are three SVD solves in total, and I think I can get the above coefficients from their results. But I am quite confused about what each result (the variable cv::Mat x) of each solve represents, and why there are three of them.
Here is this function:
cv::RotatedRect cv::fitEllipse( InputArray _points )
{
    Mat points = _points.getMat();
    int i, n = points.checkVector(2);
    int depth = points.depth();
    CV_Assert( n >= 0 && (depth == CV_32F || depth == CV_32S));

    RotatedRect box;

    if( n < 5 )
        CV_Error( CV_StsBadSize, "There should be at least 5 points to fit the ellipse" );

    // New fitellipse algorithm, contributed by Dr. Daniel Weiss
    Point2f c(0,0);
    double gfp[5], rp[5], t;
    const double min_eps = 1e-8;
    bool is_float = depth == CV_32F;
    const Point* ptsi = points.ptr<Point>();
    const Point2f* ptsf = points.ptr<Point2f>();

    AutoBuffer<double> _Ad(n*5), _bd(n);
    double *Ad = _Ad, *bd = _bd;

    // first fit for parameters A - E
    Mat A( n, 5, CV_64F, Ad );
    Mat b( n, 1, CV_64F, bd );
    Mat x( 5, 1, CV_64F, gfp );

    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        c += p;
    }
    c.x /= n;
    c.y /= n;

    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        p -= c;

        bd[i] = 10000.0; // 1.0?
        Ad[i*5] = -(double)p.x * p.x; // A - C signs inverted as proposed by APP
        Ad[i*5 + 1] = -(double)p.y * p.y;
        Ad[i*5 + 2] = -(double)p.x * p.y;
        Ad[i*5 + 3] = p.x;
        Ad[i*5 + 4] = p.y;
    }

    solve(A, b, x, DECOMP_SVD);

    // now use general-form parameters A - E to find the ellipse center:
    // differentiate general form wrt x/y to get two equations for cx and cy
    A = Mat( 2, 2, CV_64F, Ad );
    b = Mat( 2, 1, CV_64F, bd );
    x = Mat( 2, 1, CV_64F, rp );
    Ad[0] = 2 * gfp[0];
    Ad[1] = Ad[2] = gfp[2];
    Ad[3] = 2 * gfp[1];
    bd[0] = gfp[3];
    bd[1] = gfp[4];
    solve( A, b, x, DECOMP_SVD );

    // re-fit for parameters A - C with those center coordinates
    A = Mat( n, 3, CV_64F, Ad );
    b = Mat( n, 1, CV_64F, bd );
    x = Mat( 3, 1, CV_64F, gfp );
    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        p -= c;
        bd[i] = 1.0;
        Ad[i * 3] = (p.x - rp[0]) * (p.x - rp[0]);
        Ad[i * 3 + 1] = (p.y - rp[1]) * (p.y - rp[1]);
        Ad[i * 3 + 2] = (p.x - rp[0]) * (p.y - rp[1]);
    }
    solve(A, b, x, DECOMP_SVD);

    // store angle and radii
    rp[4] = -0.5 * atan2(gfp[2], gfp[1] - gfp[0]); // convert from APP angle usage
    if( fabs(gfp[2]) > min_eps )
        t = gfp[2]/sin(-2.0 * rp[4]);
    else // ellipse is rotated by an integer multiple of pi/2
        t = gfp[1] - gfp[0];
    rp[2] = fabs(gfp[0] + gfp[1] - t);
    if( rp[2] > min_eps )
        rp[2] = std::sqrt(2.0 / rp[2]);
    rp[3] = fabs(gfp[0] + gfp[1] + t);
    if( rp[3] > min_eps )
        rp[3] = std::sqrt(2.0 / rp[3]);

    box.center.x = (float)rp[0] + c.x;
    box.center.y = (float)rp[1] + c.y;
    box.size.width = (float)(rp[2]*2);
    box.size.height = (float)(rp[3]*2);
    if( box.size.width > box.size.height )
    {
        float tmp;
        CV_SWAP( box.size.width, box.size.height, tmp );
        box.angle = (float)(90 + rp[4]*180/CV_PI);
    }
    if( box.angle < -180 )
        box.angle += 360;
    if( box.angle > 360 )
        box.angle -= 360;

    return box;
}
The source code link: https://github.com/Itseez/opencv/blob/master/modules/imgproc/src/shapedescr.cpp

The function fitEllipse returns a RotatedRect that contains all the parameters of the ellipse.
An ellipse is defined by 5 parameters:
xc : x coordinate of the center
yc : y coordinate of the center
a : major semi-axis
b : minor semi-axis
theta : rotation angle
You can obtain these parameters like:
RotatedRect e = fitEllipse(points);
float xc = e.center.x;
float yc = e.center.y;
float a = e.size.width / 2; // width >= height
float b = e.size.height / 2;
float theta = e.angle; // in degrees
You can draw an ellipse with the function ellipse using the RotatedRect:
ellipse(image, e, Scalar(0,255,0));
or, equivalently using the ellipse parameters:
ellipse(image, Point(xc, yc), Size(a, b), theta, 0.0, 360.0, Scalar(0,255,0));
If you need the values of the coefficients of the implicit equation, you can compute them from the center, semi-axes, and rotation angle (see the Wikipedia article on the ellipse); a sketch of that conversion follows below.
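The Wikipedia formulas referenced above did not survive here, so this is a rough sketch of the conversion, using the same relations as the Python snippet further down this page (treat it as illustrative, not as part of the original answer):
// Sketch: implicit conic coefficients A..F from a fitEllipse() result.
// Equation convention: A x^2 + B xy + C y^2 + D x + E y + F = 0.
#include <opencv2/core.hpp>
#include <array>
#include <cmath>

std::array<double, 6> conicFromRotatedRect(const cv::RotatedRect& e)
{
    double xc = e.center.x, yc = e.center.y;
    double a  = e.size.width  / 2.0;        // semi-axis along the rect width
    double b  = e.size.height / 2.0;        // semi-axis along the rect height
    double th = e.angle * CV_PI / 180.0;    // RotatedRect stores the angle in degrees

    double A = a*a*std::sin(th)*std::sin(th) + b*b*std::cos(th)*std::cos(th);
    double B = 2.0*(b*b - a*a)*std::sin(th)*std::cos(th);
    double C = a*a*std::cos(th)*std::cos(th) + b*b*std::sin(th)*std::sin(th);
    double D = -2.0*A*xc - B*yc;
    double E = -B*xc - 2.0*C*yc;
    double F = A*xc*xc + B*xc*yc + C*yc*yc - a*a*b*b;
    return {A, B, C, D, E, F};
}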
So, you can get the parameters you need from the RotatedRect, and you don't need to change the function fitEllipse.
The solve function is used to solve linear systems or least-squares problems. With the DECOMP_SVD method the system can be over-determined and/or the matrix src1 can be singular.
For more details on the algorithm, see the paper by Fitzgibbon that proposed this ellipse-fitting method.
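For illustration only (this is not the fitEllipse internals), a minimal over-determined least-squares solve with cv::solve and DECOMP_SVD could look like this:
#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    // Over-determined system: 4 equations, 2 unknowns (fit y = m*x + c).
    cv::Mat A = (cv::Mat_<double>(4, 2) << 0, 1,
                                           1, 1,
                                           2, 1,
                                           3, 1);
    cv::Mat b = (cv::Mat_<double>(4, 1) << 1.1, 2.9, 5.2, 6.8);
    cv::Mat x;
    // DECOMP_SVD accepts non-square and (near-)singular matrices and
    // returns the least-squares solution.
    cv::solve(A, b, x, cv::DECOMP_SVD);
    std::cout << "m = " << x.at<double>(0) << ", c = " << x.at<double>(1) << std::endl;
    return 0;
}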

Here is some code that worked for me, based on the other responses in this thread.
import math
import numpy as np

def getConicCoeffFromEllipse(e):
    # e = (Point(xc, yc), Size(a, b), theta) as returned by fitEllipse
    xc = e[0][0]
    yc = e[0][1]
    a = e[1][0]/2
    b = e[1][1]/2
    theta = math.radians(e[2])
    # See https://en.wikipedia.org/wiki/Ellipse
    # Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0 is the equation
    A = a*a*math.pow(math.sin(theta),2) + b*b*math.pow(math.cos(theta),2)
    B = 2*(b*b - a*a)*math.sin(theta)*math.cos(theta)
    C = a*a*math.pow(math.cos(theta),2) + b*b*math.pow(math.sin(theta),2)
    D = -2*A*xc - B*yc
    E = -B*xc - 2*C*yc
    F = A*xc*xc + B*xc*yc + C*yc*yc - a*a*b*b
    coef = np.array([A, B, C, D, E, F]) / F
    return coef

def getConicMatrixFromCoeff(c):
    C = np.array([[c[0],   c[1]/2, c[3]/2],    # [ a , b/2, d/2 ]
                  [c[1]/2, c[2],   c[4]/2],    # [b/2,  c , e/2 ]
                  [c[3]/2, c[4]/2, c[5]]])     # [d/2, e/2,  f  ]
    return C

Related

Convert cv::Vec4f line to cv::Vec2f

I have a pair of Cartesian coordinates that represent a line in an image. I would like to convert this line to polar form and draw it over the image.
For example:
cv::Vec4f line {10,20,60,70};
float x1 = line[0];
float y1 = line[1];
float x2 = line[2];
float y2 = line[3];
I want this line represented in cv::Vec2f form (rho, theta), handling rho and theta correctly for all possible slopes.
The image dimensions are given as w and h:
w = image.cols
h = image.rows
How can I achieve this?
N.B.: We can also assume that the line may be an extended one running across the image.
for (size_t i = 0; i < lines.size(); i++)
{
    int x1 = lines[i][0];
    int y1 = lines[i][1];
    int x2 = lines[i][2];
    int y2 = lines[i][3];
    float d = sqrt(((y1-y2)*(y1-y2)) + ((x2-x1)*(x2-x1)));
    float rho = (y1*x2 - y2*x1)/d;
    float theta = atan2(x2 - x1, y1 - y2);
    if(rho < 0){
        theta *= -1;
        rho *= -1;
    }
    linv2f.push_back(cv::Vec2f(rho,theta));
}
The above approach doesn't give me correct results: when I plot the converted lines, they don't overlap their original Vec4f form.
I use this to convert Vec2f back to Vec4f for testing:
cv::Vec4f cvtVec2fLine(const cv::Vec2f& data, const cv::Mat& img)
{
    float const rho = data[0];
    float const theta = data[1];
    cv::Point pt1, pt2;
    if((theta < CV_PI/4. || theta > 3. * CV_PI/4.)){
        pt1 = cv::Point(rho / std::cos(theta), 0);
        pt2 = cv::Point((rho - img.rows * std::sin(theta))/std::cos(theta), img.rows);
    } else {
        pt1 = cv::Point(0, rho / std::sin(theta));
        pt2 = cv::Point(img.cols, (rho - img.cols * std::cos(theta))/std::sin(theta));
    }
    cv::Vec4f l;
    l[0] = pt1.x;
    l[1] = pt1.y;
    l[2] = pt2.x;
    l[3] = pt2.y;
    return l;
}
The rho-theta equation has the form
x * Cos(Theta) + y * Sin(Theta) - Rho = 0
We want to convert the "two-point" form of the line equation into rho-theta form (page 92 in the PDF here). If we have
x * A + y * B - C = 0
and need the coefficients in trigonometric form, we can divide the whole equation by the magnitude of the (A, B) coefficient vector:
D = Length(A,B) = Math.Hypot(A,B)
x * A/D + y * B/D - C/D = 0
Note that (A/D)^2 + (B/D)^2 = 1 (the basic trigonometric identity), so we can treat A/D and B/D as the cosine and sine of some angle Theta.
Your line equation is
(y-y1) * (x2-x1) - (x-x1) * (y2-y1) = 0
or
x * (y1-y2) + y * (x2-x1) - (y1 * x2 - y2 * x1) = 0
let
D = Sqrt((y1-y2)^2 + (x2-x1)^2)
so
Theta = ArcTan2(x2-x1, y1-y2)
Rho = (y1 * x2 - y2 * x1) / D
Edit:
If Rho is negative, change the sign of Rho and shift Theta by Pi.
Example:
x1=1,y1=0, x2=0,y2=1
Theta = atan2(-1,-1)=-3*Pi/4
D=Sqrt(2)
Rho=-Sqrt(2)/2 negative =>
Rho = Sqrt(2)/2
Theta = Pi/4
Back substitution - find the points of intersection with the axes:
x = 0: 0 * Sqrt(2)/2 + y * Sqrt(2)/2 - Sqrt(2)/2 = 0  =>  y = 1
y = 0: x * Sqrt(2)/2 + 0 * Sqrt(2)/2 - Sqrt(2)/2 = 0  =>  x = 1
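A minimal sketch of the whole conversion in code (my own rewording of the formulas above, including the negative-rho fix; not taken verbatim from the answer):
#include <opencv2/core.hpp>
#include <cmath>

// Two endpoints (x1,y1,x2,y2) -> (rho, theta), normalizing the two-point
// equation and flipping the sign of rho (shifting theta by pi) if needed.
cv::Vec2f toRhoTheta(const cv::Vec4f& seg)
{
    float x1 = seg[0], y1 = seg[1], x2 = seg[2], y2 = seg[3];
    float d = std::sqrt((y1 - y2)*(y1 - y2) + (x2 - x1)*(x2 - x1));
    float theta = std::atan2(x2 - x1, y1 - y2);
    float rho = (y1*x2 - y2*x1) / d;
    if (rho < 0)
    {
        rho = -rho;
        theta += (float)CV_PI;   // shift by pi rather than just negating
    }
    return cv::Vec2f(rho, theta);
}
For the example above (x1=1, y1=0, x2=0, y2=1) this returns rho = Sqrt(2)/2 and theta = Pi/4, matching the worked numbers.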

Un-Distort raw images received from the Leap motion cameras

I've been working with the Leap for a long time now. The 2.1+ SDK version allows us to access the cameras and get raw images. I want to use those images with OpenCV for square/circle detection and such... the problem is I can't get those images undistorted. I read the docs, but don't quite get what they mean. Here's one thing I need to understand properly before going forward:
distortion_data_ = image.distortion();
for (int d = 0; d < image.distortionWidth() * image.distortionHeight(); d += 2)
{
    float dX = distortion_data_[d];
    float dY = distortion_data_[d + 1];
    if(!((dX < 0) || (dX > 1)) && !((dY < 0) || (dY > 1)))
    {
        //what do i do now to undistort the image?
    }
}
data = image.data();
mat.put(0, 0, data);
//Imgproc.Canny(mat, mat, 100, 200);
//mat = findSquare(mat);
ok.showImage(mat);
In the docs it says something like this: "The calibration map can be used to correct image distortion due to lens curvature and other imperfections. The map is a 64x64 grid of points. Each point consists of two 32-bit values..." (the rest is on the dev website).
Can someone explain this in detail please, or just post the Java code to undistort the images and give me an output Mat image so I may continue processing it (I'd still prefer a good explanation if possible)?
Ok, I have no leap camera to test all this, but this is how I understand the documentation:
The calibration map does not hold offsets but full point positions: an entry says where the pixel has to be placed instead. Those values are mapped between 0 and 1, which means that you have to multiply them by your real image width and height.
What isn't explained explicitly is how your pixel positions are mapped to the 64 x 64 positions of the calibration map. I assume it is done proportionally: 640 pixels of width map to 64 map columns and 240 pixels of height map to 64 map rows.
So in general, to move from one of your 640 x 240 pixel positions (pX, pY) to the undistorted position you will:
compute the corresponding position in the calibration map: float cX = pX/640.0f * 64.0f; float cY = pY/240.0f * 64.0f;
(cX, cY) is now the location of that pixel in the calibration map. You will have to interpolate between neighbouring pixel locations, but for now I will only explain how to go on for a discrete location in the calibration map, (cX', cY') = rounded location of (cX, cY).
read the x and y values out of the calibration map at (cX', cY'): compute the location in the array by d = cY'*calibrationMapWidth*2 + cX'*2, then dX = calibMap[d] and dY = calibMap[d+1], as in the documentation.
dX and dY are values between 0 and 1 (if not, don't undistort this point because there is no undistortion available). To find the pixel location in your real image, multiply by the image size: uX = dX*640; uY = dY*240;
set your pixel to the undistorted value: undistortedImage(pX,pY) = distortedImage(uX,uY);
But you don't have discrete point positions in your calibration map, so you have to interpolate. I'll give you an example:
let (cX, cY) = (13.7, 10.4)
so you read from your calibration map four values:
calibMap(13,10) = (dX1, dY1)
calibMap(14,10) = (dX2, dY2)
calibMap(13,11) = (dX3, dY3)
calibMap(14,11) = (dX4, dY4)
now your undistorted pixel position for (13.7, 10.4) is (multiply each with 640 or 240 to get uX1, uY1, uX2, etc):
// interpolate in x direction first:
float tmpUX1 = uX1*0.3 + uX2*0.7
float tmpUY1 = uY1*0.3 + uY2*0.7
float tmpUX2 = uX3*0.3 + uX4*0.7
float tmpUY2 = uY3*0.3 + uY4*0.7
// now interpolate in y direction
float combinedX = tmpUX1*0.6 + tmpUX2*0.4
float combinedY = tmpUY1*0.6 + tmpUY2*0.4
and your undistorted point is:
undistortedImage(pX,pY) = distortedImage(floor(combinedX+0.5),floor(combinedY+0.5)); or interpolate pixel values there too.
Hope this helps for a basic understanding. I'll try to add OpenCV remap code soon! The only point that's unclear to me is whether the mapping between pX/pY and cX/cY is correct, because that's not explicitly explained in the documentation.
Here is some code. You can skip the first part, where I am faking a distortion and creating the map, which is your initial state.
With OpenCV it is simple: just resize the calibration map to your image size and multiply all the values by your resolution. The nice thing is that OpenCV performs the interpolation "automatically" while resizing.
int main()
{
cv::Mat input = cv::imread("../Data/Lenna.png");
cv::Mat distortedImage = input.clone();
// now i fake some distortion:
cv::Mat transformation = cv::Mat::eye(3,3,CV_64FC1);
transformation.at<double>(0,0) = 2.0;
cv::warpPerspective(input,distortedImage,transformation,input.size());
cv::imshow("distortedImage", distortedImage);
//cv::imwrite("../Data/LenaFakeDistorted.png", distortedImage);
// now fake a calibration map corresponding to my faked distortion:
const unsigned int cmWidth = 64;
const unsigned int cmHeight = 64;
// compute the calibration map by transforming image locations to values between 0 and 1 for legal positions.
float calibMap[cmWidth*cmHeight*2];
for(unsigned int y = 0; y < cmHeight; ++y)
for(unsigned int x = 0; x < cmWidth; ++x)
{
float xx = (float)x/(float)cmWidth;
xx = xx*2.0f; // this if from my fake distortion... this gives some values bigger than 1
float yy = (float)y/(float)cmHeight;
calibMap[y*cmWidth*2+ 2*x] = xx;
calibMap[y*cmWidth*2+ 2*x+1] = yy;
}
// NOW you have the initial situation of your scenario: calibration map and distorted image...
// compute the image locations of calibration map values:
cv::Mat cMapMatX = cv::Mat(cmHeight, cmWidth, CV_32FC1);
cv::Mat cMapMatY = cv::Mat(cmHeight, cmWidth, CV_32FC1);
for(int j=0; j<cmHeight; ++j)
for(int i=0; i<cmWidth; ++i)
{
cMapMatX.at<float>(j,i) = calibMap[j*cmWidth*2 +2*i];
cMapMatY.at<float>(j,i) = calibMap[j*cmWidth*2 +2*i+1];
}
//cv::imshow("mapX",cMapMatX);
//cv::imshow("mapY",cMapMatY);
// interpolate those values for each of your original images pixel:
// here I use linear interpolation, you could use cubic or other interpolation too.
cv::resize(cMapMatX, cMapMatX, distortedImage.size(), 0,0, CV_INTER_LINEAR);
cv::resize(cMapMatY, cMapMatY, distortedImage.size(), 0,0, CV_INTER_LINEAR);
// now the calibration map has the size of your original image, but its values are still between 0 and 1 (for legal positions)
// so scale to image size:
cMapMatX = distortedImage.cols * cMapMatX;
cMapMatY = distortedImage.rows * cMapMatY;
// now create undistorted image:
cv::Mat undistortedImage = cv::Mat(distortedImage.rows, distortedImage.cols, CV_8UC3);
undistortedImage.setTo(cv::Vec3b(0,0,0)); // initialize black
//cv::imshow("undistorted", undistortedImage);
for(int j=0; j<undistortedImage.rows; ++j)
for(int i=0; i<undistortedImage.cols; ++i)
{
cv::Point undistPosition;
undistPosition.x =(cMapMatX.at<float>(j,i)); // this will round the position, maybe you want interpolation instead
undistPosition.y =(cMapMatY.at<float>(j,i));
if(undistPosition.x >= 0 && undistPosition.x < distortedImage.cols
&& undistPosition.y >= 0 && undistPosition.y < distortedImage.rows)
{
undistortedImage.at<cv::Vec3b>(j,i) = distortedImage.at<cv::Vec3b>(undistPosition);
}
}
cv::imshow("undistorted", undistortedImage);
cv::waitKey(0);
//cv::imwrite("../Data/LenaFakeUndistorted.png", undistortedImage);
}
cv::Mat SelfDescriptorDistances(cv::Mat descr)
{
cv::Mat selfDistances = cv::Mat::zeros(descr.rows,descr.rows, CV_64FC1);
for(int keyptNr = 0; keyptNr < descr.rows; ++keyptNr)
{
for(int keyptNr2 = 0; keyptNr2 < descr.rows; ++keyptNr2)
{
double euclideanDistance = 0;
for(int descrDim = 0; descrDim < descr.cols; ++descrDim)
{
double tmp = descr.at<float>(keyptNr,descrDim) - descr.at<float>(keyptNr2, descrDim);
euclideanDistance += tmp*tmp;
}
euclideanDistance = sqrt(euclideanDistance);
selfDistances.at<double>(keyptNr, keyptNr2) = euclideanDistance;
}
}
return selfDistances;
}
I used the Lenna image as input, faked a remap/distortion from which I computed my calibration map, and then used the map to undistort the image (the input, faked-distortion, and undistorted result images are not reproduced here).
TODO: after those computations, use an OpenCV map with those values to perform faster remapping.
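A rough sketch of that TODO, assuming cMapMatX and cMapMatY have already been resized and scaled to absolute pixel coordinates as in the code above (this snippet is my addition, not the answer author's):
cv::Mat remapped;
// cv::remap does the per-pixel lookup and the interpolation in one call;
// cMapMatX / cMapMatY must be CV_32FC1 maps in absolute pixel coordinates.
cv::remap(distortedImage, remapped, cMapMatX, cMapMatY,
          cv::INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
cv::imshow("undistorted via remap", remapped);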
Here's an example of how to do it without using OpenCV. The following seems to be faster than using the Leap::Image::warp() method (probably due to the additional function call overhead when using warp()):
float destinationWidth = 320;
float destinationHeight = 120;
unsigned char destination[(int)destinationWidth][(int)destinationHeight];
//define needed variables outside the inner loop
float calX, calY, weightX, weightY, dX1, dX2, dX3, dX4, dY1, dY2, dY3, dY4, dX, dY;
int x1, x2, y1, y2, denormalizedX, denormalizedY;
int x, y;
const unsigned char* raw = image.data();
const float* distortion_buffer = image.distortion();
//Local variables for values needed in loop
const int distortionWidth = image.distortionWidth();
const int width = image.width();
const int height = image.height();
for (x = 0; x < destinationWidth; x++) {
for (y = 0; y < destinationHeight; y++) {
//Calculate the position in the calibration map (still with a fractional part)
calX = 63 * x/destinationWidth;
calY = 63 * y/destinationHeight;
//Save the fractional part to use as the weight for interpolation
weightX = calX - truncf(calX);
weightY = calY - truncf(calY);
//Get the x,y coordinates of the closest calibration map points to the target pixel
x1 = calX; //Note truncation to int
y1 = calY;
x2 = x1 + 1;
y2 = y1 + 1;
//Look up the x and y values for the 4 calibration map points around the target
// (x1, y1) .. .. .. (x2, y1)
// .. ..
// .. (x, y) ..
// .. ..
// (x1, y2) .. .. .. (x2, y2)
dX1 = distortion_buffer[x1 * 2 + y1 * distortionWidth];
dX2 = distortion_buffer[x2 * 2 + y1 * distortionWidth];
dX3 = distortion_buffer[x1 * 2 + y2 * distortionWidth];
dX4 = distortion_buffer[x2 * 2 + y2 * distortionWidth];
dY1 = distortion_buffer[x1 * 2 + y1 * distortionWidth + 1];
dY2 = distortion_buffer[x2 * 2 + y1 * distortionWidth + 1];
dY3 = distortion_buffer[x1 * 2 + y2 * distortionWidth + 1];
dY4 = distortion_buffer[x2 * 2 + y2 * distortionWidth + 1];
//Bilinear interpolation of the looked-up values:
// X value
dX = dX1 * (1 - weightX) * (1- weightY) + dX2 * weightX * (1 - weightY) + dX3 * (1 - weightX) * weightY + dX4 * weightX * weightY;
// Y value
dY = dY1 * (1 - weightX) * (1- weightY) + dY2 * weightX * (1 - weightY) + dY3 * (1 - weightX) * weightY + dY4 * weightX * weightY;
// Reject points outside the range [0..1]
if((dX >= 0) && (dX <= 1) && (dY >= 0) && (dY <= 1)) {
//Denormalize from [0..1] to [0..width] or [0..height]
denormalizedX = dX * width;
denormalizedY = dY * height;
//look up the brightness value for the target pixel
destination[x][y] = raw[denormalizedX + denormalizedY * width];
} else {
destination[x][y] = -1;
}
}
}

What's the best way to fit one or more good lines to a set of points in an image using RANSAC with OpenCV?

What's the best way to fit one or more good lines to a set of points in an image using RANSAC with OpenCV?
Is RANSAC the most efficient way to fit a line?
RANSAC is not the most efficient, but it handles a large number of outliers better. Here is how to do it using OpenCV.
A useful structure:
struct SLine
{
SLine():
numOfValidPoints(0),
params(-1.f, -1.f, -1.f, -1.f)
{}
cv::Vec4f params;//(cos(t), sin(t), X0, Y0)
int numOfValidPoints;
};
Total least squares, used to compute the fit for a successful pair:
cv::Vec4f TotalLeastSquares(
std::vector<cv::Point>& nzPoints,
std::vector<int> ptOnLine)
{
//if there are enough inliers calculate model
float x = 0, y = 0, x2 = 0, y2 = 0, xy = 0, w = 0;
float dx2, dy2, dxy;
float t;
for( size_t i = 0; i < nzPoints.size(); ++i )
{
x += ptOnLine[i] * nzPoints[i].x;
y += ptOnLine[i] * nzPoints[i].y;
x2 += ptOnLine[i] * nzPoints[i].x * nzPoints[i].x;
y2 += ptOnLine[i] * nzPoints[i].y * nzPoints[i].y;
xy += ptOnLine[i] * nzPoints[i].x * nzPoints[i].y;
w += ptOnLine[i];
}
x /= w;
y /= w;
x2 /= w;
y2 /= w;
xy /= w;
//Covariance matrix
dx2 = x2 - x * x;
dy2 = y2 - y * y;
dxy = xy - x * y;
t = (float) atan2( 2 * dxy, dx2 - dy2 ) / 2;
cv::Vec4f line;
line[0] = (float) cos( t );
line[1] = (float) sin( t );
line[2] = (float) x;
line[3] = (float) y;
return line;
}
The actual RANSAC
SLine LineFitRANSAC(
float t,//distance from main line
float p,//chance of hitting a valid pair
float e,//percentage of outliers
int T,//number of expected minimum inliers
std::vector<cv::Point>& nzPoints)
{
int s = 2;//number of points required by the model
int N = (int)ceilf(log(1-p)/log(1 - pow(1-e, s)));//number of independent trials
std::vector<SLine> lineCandidates;
std::vector<int> ptOnLine(nzPoints.size());//is inlier
RNG rng((uint64)-1);
SLine line;
for (int i = 0; i < N; i++)
{
//pick two points
int idx1 = (int)rng.uniform(0, (int)nzPoints.size());
int idx2 = (int)rng.uniform(0, (int)nzPoints.size());
cv::Point p1 = nzPoints[idx1];
cv::Point p2 = nzPoints[idx2];
//points too close - discard
if (cv::norm(p1- p2) < t)
{
continue;
}
//line equation -> (y1 - y2)X + (x2 - x1)Y + x1y2 - x2y1 = 0
float a = static_cast<float>(p1.y - p2.y);
float b = static_cast<float>(p2.x - p1.x);
float c = static_cast<float>(p1.x*p2.y - p2.x*p1.y);
//normalize them
float scale = 1.f/sqrt(a*a + b*b);
a *= scale;
b *= scale;
c *= scale;
//count inliers
int numOfInliers = 0;
for (size_t i = 0; i < nzPoints.size(); ++i)
{
cv::Point& p0 = nzPoints[i];
float rho = abs(a*p0.x + b*p0.y + c);
bool isInlier = rho < t;
if ( isInlier ) numOfInliers++;
ptOnLine[i] = isInlier;
}
if ( numOfInliers < T)
{
continue;
}
line.params = TotalLeastSquares( nzPoints, ptOnLine);
line.numOfValidPoints = numOfInliers;
lineCandidates.push_back(line);
}
int bestLineIdx = 0;
int bestLineScore = 0;
for (size_t i = 0; i < lineCandidates.size(); i++)
{
if (lineCandidates[i].numOfValidPoints > bestLineScore)
{
bestLineIdx = i;
bestLineScore = lineCandidates[i].numOfValidPoints;
}
}
if ( lineCandidates.empty() )
{
return SLine();
}
else
{
return lineCandidates[bestLineIdx];
}
}
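A hypothetical usage sketch (the point extraction and the parameter values below are my own illustration, not part of the answer):
// Collect non-zero (edge) pixels from a binary image and fit one line.
std::vector<cv::Point> nzPoints;
cv::findNonZero(binaryEdgeImage, nzPoints);   // binaryEdgeImage: CV_8UC1, e.g. Canny output

// t = 2 px inlier distance, p = 0.99 confidence, e = 0.5 outlier ratio,
// T = 100 minimum inliers -- all illustrative values.
SLine best = LineFitRANSAC(2.f, 0.99f, 0.5f, 100, nzPoints);
// best.params is (cos(t), sin(t), X0, Y0), the same convention as cv::fitLine.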
Take a look at the least mean squares method. It's faster and simpler than RANSAC.
Also take a look at OpenCV's fitLine method.
RANSAC performs better when you have a lot of outliers in your data, or a complex hypothesis.
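A minimal sketch of the fitLine route (the distance type and accuracy parameters here are just common illustrative choices, not prescribed by the answer):
std::vector<cv::Point> pts;        // fill with your candidate points
cv::Vec4f lineParams;              // output: (vx, vy, x0, y0)
// DIST_L2 is plain least squares; DIST_HUBER or DIST_WELSCH are more robust
// to outliers. 0 lets OpenCV pick the distance parameter; the two 0.01
// values are the radius and angle accuracy.
cv::fitLine(pts, lineParams, cv::DIST_HUBER, 0, 0.01, 0.01);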

Search for lines with a small range of angles in OpenCV

I'm using the Hough transform in OpenCV to detect lines. However, I know in advance that I only need lines within a very limited range of angles (about 10 degrees or so). I'm doing this in a very performance-sensitive setting, so I'd like to avoid the extra work spent detecting lines at other angles, lines I know in advance I don't care about.
I could extract the Hough source from OpenCV and just hack it to take min_theta and max_theta parameters, but I'd like a less fragile approach (having to manually update my code with each OpenCV update, etc.).
What's the best approach here?
Well, I've modified the icvHoughLines function to run over a certain range of angles. I'm sure there are cleaner ways that play with memory allocation as well, but I got a speed gain from 100 ms down to 33 ms when going from a 180-degree range to a 60-degree range, so I'm happy with that.
Note that this code also outputs the accumulator value. Also, I only output one line because that fit my purposes, but there was no real gain there.
static void
icvHoughLinesStandard2( const CvMat* img, float rho, float theta,
int threshold, CvSeq *lines, int linesMax )
{
cv::AutoBuffer<int> _accum, _sort_buf;
cv::AutoBuffer<float> _tabSin, _tabCos;
const uchar* image;
int step, width, height;
int numangle, numrho;
int total = 0;
float ang;
int r, n;
int i, j;
float irho = 1 / rho;
double scale;
CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );
image = img->data.ptr;
step = img->step;
width = img->cols;
height = img->rows;
numangle = cvRound(CV_PI / theta);
numrho = cvRound(((width + height) * 2 + 1) / rho);
_accum.allocate((numangle+2) * (numrho+2));
_sort_buf.allocate(numangle * numrho);
_tabSin.allocate(numangle);
_tabCos.allocate(numangle);
int *accum = _accum, *sort_buf = _sort_buf;
float *tabSin = _tabSin, *tabCos = _tabCos;
memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );
// find n and ang limits (in our case we want 60 to 120
float limit_min = 60.0/180.0*CV_PI;
float limit_max = 120.0/180.0*CV_PI;
//num_steps = (limit_max - limit_min)/theta;
int start_n = floor(limit_min/theta);
int stop_n = floor(limit_max/theta);
for( ang = limit_min, n = start_n; n < stop_n; ang += theta, n++ )
{
tabSin[n] = (float)(sin(ang) * irho);
tabCos[n] = (float)(cos(ang) * irho);
}
// stage 1. fill accumulator
for( i = 0; i < height; i++ )
for( j = 0; j < width; j++ )
{
if( image[i * step + j] != 0 )
//
for( n = start_n; n < stop_n; n++ )
{
r = cvRound( j * tabCos[n] + i * tabSin[n] );
r += (numrho - 1) / 2;
accum[(n+1) * (numrho+2) + r+1]++;
}
}
int max_accum = 0;
int max_ind = 0;
for( r = 0; r < numrho; r++ )
{
for( n = start_n; n < stop_n; n++ )
{
int base = (n+1) * (numrho+2) + r+1;
if (accum[base] > max_accum)
{
max_accum = accum[base];
max_ind = base;
}
}
}
CvLinePolar2 line;
scale = 1./(numrho+2);
int idx = max_ind;
n = cvFloor(idx*scale) - 1;
r = idx - (n+1)*(numrho+2) - 1;
line.rho = (r - (numrho - 1)*0.5f) * rho;
line.angle = n * theta;
line.votes = accum[idx];
cvSeqPush( lines, &line );
}
If you use the probabilistic Hough transform, the output is a cvPoint for each of lines[0] and lines[1], i.e. the two endpoints of each detected segment. We can get the x and y coordinates of the two points as pt1.x, pt1.y and pt2.x, pt2.y.
Then use the simple formula for the slope of a line, (y2-y1)/(x2-x1). Taking the arctan (inverse tangent) of that yields the angle in radians. Then simply filter out the desired angles from the values obtained for each Hough line.
I think it's more natural to use the standard HoughLines(...) function, which gives the collection of lines directly in rho and theta terms, and to select the necessary angle range from it, rather than recalculating the angle from segment end points.
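For example, a small sketch of that filtering (assuming edges is your binary edge image, and with an 80-100 degree window standing in for whatever range you need):
std::vector<cv::Vec2f> lines;
cv::HoughLines(edges, lines, 1, CV_PI / 180, 100);

std::vector<cv::Vec2f> selected;
for (const cv::Vec2f& l : lines)
{
    float theta = l[1];   // radians, in [0, pi)
    if (theta > 80.0f * (float)CV_PI / 180.0f && theta < 100.0f * (float)CV_PI / 180.0f)
        selected.push_back(l);   // keep only lines whose normal is near-vertical
}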

OpenCV: get Hough accumulator value?

Is it possible to get the accumulator value along with rho and theta from a Hough transform?
I ask because I'd like to differentiate between lines which are "well defined" (ie, which have a high accumulator value) and lines which aren't as well defined.
Thanks!
OK, so looking at the cvhough.cpp file, the structure CvLinePolar is defined only by rho and angle.
This is all that is passed back as a result of our call to HoughLines. I am in the process of modifying the C++ file to see if I can get the votes out.
Update Oct 26: I just realized these are not really answers but more like questions, which is apparently frowned upon. I found some instructions on recompiling OpenCV; I guess we'll have to go into the code, modify it, and recompile.
How to install OpenCV 2.0 on win32
Update Oct 27: well, I failed at compiling the DLLs for OpenCV with my new code, so I ended up copy-pasting the specific parts I want to modify into my own files.
I prefer to add new functions, so as to avoid overloading the already defined ones.
There are 4 main things you need to copy over:
1- some random defines
#define hough_cmp_gt(l1,l2) (aux[l1] > aux[l2])
static CV_IMPLEMENT_QSORT_EX( icvHoughSortDescent32s, int, hough_cmp_gt, const int* )
2- redefining the struct for line parameters
typedef struct CvLinePolar2
{
float rho;
float angle;
float votes;
}
CvLinePolar2;
3- the main function that was modified
static void
icvHoughLinesStandard2( const CvMat* img, float rho, float theta,
int threshold, CvSeq *lines, int linesMax )
{
cv::AutoBuffer<int> _accum, _sort_buf;
cv::AutoBuffer<float> _tabSin, _tabCos;
const uchar* image;
int step, width, height;
int numangle, numrho;
int total = 0;
float ang;
int r, n;
int i, j;
float irho = 1 / rho;
double scale;
CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );
image = img->data.ptr;
step = img->step;
width = img->cols;
height = img->rows;
numangle = cvRound(CV_PI / theta);
numrho = cvRound(((width + height) * 2 + 1) / rho);
_accum.allocate((numangle+2) * (numrho+2));
_sort_buf.allocate(numangle * numrho);
_tabSin.allocate(numangle);
_tabCos.allocate(numangle);
int *accum = _accum, *sort_buf = _sort_buf;
float *tabSin = _tabSin, *tabCos = _tabCos;
memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );
for( ang = 0, n = 0; n < numangle; ang += theta, n++ )
{
tabSin[n] = (float)(sin(ang) * irho);
tabCos[n] = (float)(cos(ang) * irho);
}
// stage 1. fill accumulator
for( i = 0; i < height; i++ )
for( j = 0; j < width; j++ )
{
if( image[i * step + j] != 0 )
for( n = 0; n < numangle; n++ )
{
r = cvRound( j * tabCos[n] + i * tabSin[n] );
r += (numrho - 1) / 2;
accum[(n+1) * (numrho+2) + r+1]++;
}
}
// stage 2. find local maximums
for( r = 0; r < numrho; r++ )
for( n = 0; n < numangle; n++ )
{
int base = (n+1) * (numrho+2) + r+1;
if( accum[base] > threshold &&
accum[base] > accum[base - 1] && accum[base] >= accum[base + 1] &&
accum[base] > accum[base - numrho - 2] && accum[base] >= accum[base + numrho + 2] )
sort_buf[total++] = base;
}
// stage 3. sort the detected lines by accumulator value
icvHoughSortDescent32s( sort_buf, total, accum );
// stage 4. store the first min(total,linesMax) lines to the output buffer
linesMax = MIN(linesMax, total);
scale = 1./(numrho+2);
for( i = 0; i < linesMax; i++ )
{
CvLinePolar2 line;
int idx = sort_buf[i];
int n = cvFloor(idx*scale) - 1;
int r = idx - (n+1)*(numrho+2) - 1;
line.rho = (r - (numrho - 1)*0.5f) * rho;
line.angle = n * theta;
line.votes = accum[idx];
cvSeqPush( lines, &line );
}
// no manual cvFree() calls are needed here: accum, sort_buf, tabSin and
// tabCos are owned by the cv::AutoBuffer instances declared above
}
4- the function that calls that function
CV_IMPL CvSeq*
cvHoughLines3( CvArr* src_image, void* lineStorage, int method,
double rho, double theta, int threshold,
double param1, double param2 )
{
CvSeq* result = 0;
CvMat stub, *img = (CvMat*)src_image;
CvMat* mat = 0;
CvSeq* lines = 0;
CvSeq lines_header;
CvSeqBlock lines_block;
int lineType, elemSize;
int linesMax = INT_MAX;
int iparam1, iparam2;
img = cvGetMat( img, &stub );
if( !CV_IS_MASK_ARR(img))
CV_Error( CV_StsBadArg, "The source image must be 8-bit, single-channel" );
if( !lineStorage )
CV_Error( CV_StsNullPtr, "NULL destination" );
if( rho <= 0 || theta <= 0 || threshold <= 0 )
CV_Error( CV_StsOutOfRange, "rho, theta and threshold must be positive" );
if( method != CV_HOUGH_PROBABILISTIC )
{
lineType = CV_32FC3;
elemSize = sizeof(float)*3;
}
else
{
lineType = CV_32SC4;
elemSize = sizeof(int)*4;
}
if( CV_IS_STORAGE( lineStorage ))
{
lines = cvCreateSeq( lineType, sizeof(CvSeq), elemSize, (CvMemStorage*)lineStorage );
}
else if( CV_IS_MAT( lineStorage ))
{
mat = (CvMat*)lineStorage;
if( !CV_IS_MAT_CONT( mat->type ) || (mat->rows != 1 && mat->cols != 1) )
CV_Error( CV_StsBadArg,
"The destination matrix should be continuous and have a single row or a single column" );
if( CV_MAT_TYPE( mat->type ) != lineType )
CV_Error( CV_StsBadArg,
"The destination matrix data type is inappropriate, see the manual" );
lines = cvMakeSeqHeaderForArray( lineType, sizeof(CvSeq), elemSize, mat->data.ptr,
mat->rows + mat->cols - 1, &lines_header, &lines_block );
linesMax = lines->total;
cvClearSeq( lines );
}
else
CV_Error( CV_StsBadArg, "Destination is not CvMemStorage* nor CvMat*" );
iparam1 = cvRound(param1);
iparam2 = cvRound(param2);
switch( method )
{
case CV_HOUGH_STANDARD:
icvHoughLinesStandard2( img, (float)rho,
(float)theta, threshold, lines, linesMax );
break;
default:
CV_Error( CV_StsBadArg, "Unrecognized method id" );
}
if( mat )
{
if( mat->cols > mat->rows )
mat->cols = lines->total;
else
mat->rows = lines->total;
}
else
result = lines;
return result;
}
And I guess you could uninstall OpenCV so that it removes all of the automatic path settings, then recompile it yourself using the CMake method; at that point OpenCV is really whatever you make of it.
Although this is an old question, I had the same problem, so I might as well put up my solution. HoughLines() only tells you that a line cleared the vote threshold; it does not return the vote count itself. The solution is to run HoughLines() for every threshold value (until there are no more votes) and add up the votes in another array. In Python (and maybe other languages too), when there are no more votes it throws an error, so use try/except.
Here is an example in Python. The array I used was for rho values of -199 to 200 with a max vote of less than 100. You can play around with those constants to suit your needs. You may need to add a line to convert the source image to grayscale.
import matplotlib.pyplot as plt
import cv2
import math

############ make houghspace array ############
houghspace = []
c = 0
height = 400
while c <= height:
    houghspace.append([])
    cc = 0
    while cc <= 180:
        houghspace[c].append(0)
        cc += 1
    c += 1

############ do transform ############
degree_tick = 1 #by how many degrees to check
total_votes = 1 #votes counter
highest_vote = 0 #highest vote in the array
while total_votes < 100:
    img = cv2.imread('source.pgm')
    edges = cv2.Canny(img,50,150,apertureSize = 3)
    lines = cv2.HoughLines(edges,1,math.pi*degree_tick/180,total_votes)
    try:
        for rho,theta in lines[0]:
            a = math.cos(theta)
            b = math.sin(theta)
            x1 = int((a*rho) + 1000*(-b))
            y1 = int((b*rho) + 1000*(a))
            x2 = int((a*rho) - 1000*(-b))
            y2 = int((b*rho) - 1000*(a))
            cv2.line(img,(x1,y1),(x2,y2),(50,200,255),2)
        ################# add votes into the array ################
        deradian = 180/math.pi #used to convert to degrees
        for rho,theta in lines[0]:
            degree = int(round(theta*deradian))
            rho_pos = int(rho - 200)
            houghspace[rho_pos][degree] += 1
    #when lines[0] has no votes, it throws an error which is caught here
    except:
        total_votes = 999 #exit loop
    highest_vote = total_votes
    total_votes += 1
    del lines
########### loop finished ###############################
print highest_vote
#############################################################
################### plot the houghspace ###################
maxy = 200 #used to offset the y-axis
miny = -200 #used to offset the y-axis
#the main graph
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
ax.set_title('Houghspace')
plt.imshow(houghspace, cmap='gist_stern')
ax.set_aspect('equal')
plt.yticks([0,-miny,maxy-miny], [miny,0,maxy])
#the legend
cax = fig.add_axes([0, 0.1, 0.78, 0.8])
cax.get_xaxis().set_visible(False)
cax.get_yaxis().set_visible(False)
cax.patch.set_alpha(0)
cax.set_frame_on(False)
plt.colorbar(orientation='vertical')
#plot
plt.show()
