Is there an easy and fast OpenCV solution that will convert a set of CvPoint vertices defining a polygon, to a contour with many more vertices?
I have simple polygons in one space, and I want to transform the polygons into another space where straight lines will become curvy. Thus it's important that I add more vertices along my straight lines. This would be the opposite of cvApproxPoly().
Internally cvPolyLine() is doing exactly what I want. Is there any way to access that?
I am using OpenCV 1.1.
So I wrote my own function GetLineFromEndPts() that should solve this problem in a kludgy way. Given two points a and b, this function outputs a list of points on a line connecting a and b. Therefore, to find a contour with many points from a polygon defined by a few vertices, I can run this function on consecutive pairs of vertices.
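For example, the driver loop might look something like this (just a sketch; the sequence setup with cvCreateMemStorage/cvCreateSeq and the poly array are assumptions, and GetLineFromEndPts() itself is defined below):
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* contour = cvCreateSeq(CV_SEQ_ELTYPE_POINT, sizeof(CvSeq),
                             sizeof(CvPoint), storage);
/* hypothetical polygon with a few vertices */
CvPoint poly[] = { cvPoint(0,0), cvPoint(10,15), cvPoint(20,5) };
int numVerts = sizeof(poly)/sizeof(poly[0]);
int i;
for (i = 0; i < numVerts; i++) {
    /* each call appends the points from poly[i] up to (but not including)
     * poly[(i+1)%numVerts], so the wrap-around covers the closing edge */
    GetLineFromEndPts(poly[i], poly[(i+1)%numVerts], contour);
}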
/*
* Given two points a and b, and a sequence of CvPoints,
* this function will find the points that walk the line
* between a and b and append them to
* the end of the sequence
*
* Note that the output includes point a, but not point b.
*/
int GetLineFromEndPts(CvPoint a, CvPoint b, CvSeq* contour){
if (contour==NULL) {
printf("ERROR! contour in GetLineFromEndPts() is NULL!\n");
return -1;
}
float d=dist(a,b);
/** Normalized vector with components i and j pointing along the line**/
float ihat= ( (float) (b.x -a.x) ) /d;
float jhat= ( (float) (b.y -a.y) ) /d;
CvPoint currPt; /* Current Point On integer grid*/
CvPoint prevPt=a; /* Prev Point on integer grid */
currPt=a;
/** Prepare Writer for Appending Points to Seq **/
CvSeqWriter writer;
cvStartAppendToSeq( contour, &writer );
int t;
float tempPtx;
float tempPty;
int signx=1;
int signy=1;
for (t = 0; t < (int) (d+0.5) ; ++t) {
/** Our target point is now defined by a parametric equation **/
tempPtx=(float) t * ihat + (float) a.x;
tempPty=(float) t * jhat + (float) a.y;
/** We will want to round and we need to know the number's sign to round correctly **/
if (tempPtx<0) {
signx=-1;
} else {
signx=1;
}
if (tempPty<0) {
signy=-1;
} else{
signy=1;
}
/** Round to an integer value. Note that we need the sign before we add/subtract .5 **/
currPt=cvPoint((int) ( tempPtx + (float) signx * 0.5 ) ,
(int) ( tempPty + (float) signy * 0.5 ));
/** If first point, OR the current approx point is not the same as prev **/
if ( t==0 || !( currPt.x == prevPt.x && currPt.y == prevPt.y ) ){
/** Write out the point **/
CV_WRITE_SEQ_ELEM( currPt, writer );
printf(" t=%d\n",t);
printf(" currPt.x=%d\n",currPt.x);
printf(" currPt.y=%d\n",currPt.y);
}
prevPt=currPt;
}
cvEndWriteSeq( &writer );
return 1;
}
And associated functions:
/*
* Returns the squared distance between two points
*/
int sqDist(CvPoint pta, CvPoint ptb){
return ( ((pta.x - ptb.x)*(pta.x - ptb.x) ) + ((pta.y - ptb.y)*(pta.y - ptb.y) ) );
}
And finally:
/*
* Finds the distance between two points
* and returns the value as a float
*/
float dist(CvPoint a, CvPoint b){
return ( sqrt( (float) sqDist(a,b)) );
}
So, for example, if called with:
GetLineFromEndPts(cvPoint(0,0),cvPoint(10,15),test);
the function outputs:
t=0
currPt.x=0
currPt.y=0
t=1
currPt.x=1
currPt.y=1
t=2
currPt.x=1
currPt.y=2
t=3
currPt.x=2
currPt.y=2
t=4
currPt.x=2
currPt.y=3
t=5
currPt.x=3
currPt.y=4
t=6
currPt.x=3
currPt.y=5
t=7
currPt.x=4
currPt.y=6
t=8
currPt.x=4
currPt.y=7
t=9
currPt.x=5
currPt.y=7
t=10
currPt.x=6
currPt.y=8
t=11
currPt.x=6
currPt.y=9
t=12
currPt.x=7
currPt.y=10
t=13
currPt.x=7
currPt.y=11
t=14
currPt.x=8
currPt.y=12
t=16
currPt.x=9
currPt.y=13
t=17
currPt.x=9
currPt.y=14
I am wondering which DDX and DDY values the SampleGrad() function expects for a TextureCube object.
I know that it's the change in UV coordinates for 2D textures, so I thought it would be the change in direction in this case. However, this does not seem to be the case.
I get different results if I try to use the Sample function vs. SampleGrad:
Sample:
// calculate reflected ray
float3 reflRay = reflect(-viewDir, normal);
// reflection map lookup
return reflectionMap.Sample(linearSampler, reflRay);
SampleGrad:
// calculate reflected ray
float3 reflRay = reflect(-viewDir, normal);
// reflection map lookup
float3 dxr = ddx(reflRay);
float3 dyr = ddy(reflRay);
return reflectionMap.SampleGrad(linearSampler, reflRay, dxr, dyr);
I still don't know which values for DDX and DDY are required, but I found an acceptable workaround that computes the level of detail for my gradients. Unfortunately, the quality of this solution is not as good as a real Sample call with anisotropic filtering.
In case anyone needs it:
The computation is described in: https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#LODCalculation
My HLSL implementation:
// calculate reflected ray
float3 reflRay = reflect(-viewDir, normal);
// reflection map lookup
float3 dxr = ddx(reflRay);
float3 dyr = ddy(reflRay);
// cubemap size for lod computation
float reflWidth, reflHeight;
reflectionMap.GetDimensions(reflWidth, reflHeight);
// calculate lod based on raydiffs
float lod = calcLod(getCubeDiff(reflRay, dxr).xy * reflWidth, getCubeDiff(reflRay, dyr).xy * reflHeight);
return reflectionMap.SampleLevel(linearSampler, reflRay, lod).rgb;
Helper functions:
float pow2(float x) {
return x * x;
}
// calculates texture coordinates [-1, 1] for the view direction (xy values must be divided by axisMajorValue for a proper [-1, 1] range).
// z coordinate is the faceId
float3 getCubeCoord(float3 viewDir, out float axisMajorValue)
{
// according to dx spec: https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#PointSampling
// Choose the largest magnitude component of the input vector. Call this magnitude of this value AxisMajor. In the case of a tie, the following precedence should occur: Z, Y, X.
int axisMajor = 0;
int axisFlip = 0;
axisMajorValue = 0.0;
[unroll] for (int i = 0; i < 3; ++i)
{
if (abs(viewDir[i]) >= axisMajorValue)
{
axisMajor = i;
axisFlip = viewDir[i] < 0.0f ? 1 : 0;
axisMajorValue = abs(viewDir[i]);
}
}
int faceId = axisMajor * 2 + axisFlip;
// Select and mirror the minor axes as defined by the TextureCube coordinate space. Call this new 2d coordinate Position.
int axisMinor1 = axisMajor == 0 ? 2 : 0; // first coord is x or z
int axisMinor2 = 3 - axisMajor - axisMinor1;
// Project the coordinate onto the cube by dividing the components Position by AxisMajor.
//float u = viewDir[axisMinor1] / axisMajorValue;
//float v = -viewDir[axisMinor2] / axisMajorValue;
// don't project for getCubeDiff function!
float u = viewDir[axisMinor1];
float v = -viewDir[axisMinor2];
switch (faceId)
{
case 0:
case 5:
u *= -1.0f;
break;
case 2:
v *= -1.0f;
break;
}
return float3(u, v, float(faceId));
}
float3 getCubeDiff(float3 ray, float3 diff)
{
// from: https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#LODCalculation
// Using TC, determine which component is of the largest magnitude, as when calculating the texel location. If any of the components are equivalent, precedence is as follows: Z, Y, X. The absolute value of this will be referred to as AxisMajor.
// select and mirror the minor axes of TC as defined by the TextureCube coordinate space to generate TC'.uv
float axisMajor;
float3 tuv = getCubeCoord(ray, axisMajor);
// select and mirror the minor axes of the partial derivative vectors as defined by the TextureCube coordinate space, generating 2 new partial derivative vectors dX'.uv & dY'.uv.
float derivateMajor;
float3 duv = getCubeCoord(diff, derivateMajor);
// Calculate 2 new dX and dY vectors for future calculations as follows:
// dX.uv = (AxisMajor*dX'.uv - TC'.uv*DerivativeMajorX)/(AxisMajor*AxisMajor)
float3 res;
res.z = 0.0;
res.xy = (axisMajor * duv.xy - tuv.xy * derivateMajor) / (axisMajor * axisMajor);
return res * 0.5;
}
// dx, dy in pixel coordinates
float calcLod(float2 dX, float2 dY)
{
// from: https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#LODCalculation
float A = pow2(dX.y) + pow2(dY.y);
float B = -2.0 * (dX.x * dX.y + dY.x * dY.y);
float C = pow2(dX.x) + pow2(dY.x);
float F = pow2(dX.x * dY.y - dY.x * dX.y);
float p = A - C;
float q = A + C;
float t = sqrt(pow2(p) + pow2(B));
float lengthX = sqrt(abs(F * (t+p) / ( t * (q+t))) + abs(F * (t-p) / ( t * (q+t))));
float lengthY = sqrt(abs(F * (t-p) / ( t * (q-t))) + abs(F * (t+p) / ( t * (q-t))));
return log2(max(lengthX,lengthY));
}
I have implemented the Sobel operator in the vertical direction, but the result I am getting is very poor. I have attached my code below.
int mask_size= 3;
char mask [3][3]= {{-1,0,1},{-2,0,2},{-1,0,1}};
void sobel(Mat input_image)
{
/**Padding m-1 and n-1 zeroes to the result where m and n are mask_size**/
Mat result=Mat::zeros(input_image.rows+(mask_size - 1) * 2,input_image.cols+(mask_size - 1) * 2,CV_8UC1);
Mat result1=Mat::zeros(result.rows,result.cols,CV_8UC1);
int sum= 0;
/*For loop for copying original values to new padded image **/
for(int i=0;i<input_image.rows;i++)
for(int j=0;j<input_image.cols;j++)
result.at<uchar>(i+(mask_size-1),j+(mask_size-1))=input_image.at<uchar>(i,j);
GaussianBlur( result, result, Size(5,5), 0, 0, BORDER_DEFAULT );
/**For loop to implement the convolution **/
for(int i=0;i<result.rows-(mask_size - 1);i++)
for(int j=0;j<result.cols-(mask_size - 1);j++)
{
int counter=0;
int counterX=0,counterY=0;
sum= 0;
for(int k= i ; k < i + mask_size ; k++)
{
for(int l= j ; l< j + mask_size ; l++)
{
sum+=result.at<uchar>(k,l) * mask[counterX][counterY];
counterY++;
}
counterY=0;
counterX++;
}
result1.at<uchar>(i+mask_size/2,j+mask_size/2)=sum/(mask_size * mask_size);
}
/** Truncating all the extras rows and columns **/
result=Mat::zeros( result1.rows - (mask_size - 1) * 2, result1.cols - (mask_size - 1) * 2,CV_8UC1);
for(int i=0;i<result.rows;i++)
for(int j=0;j<result.cols;j++)
result.at<uchar>(i,j)=result1.at<uchar>(i+(mask_size - 1),j+(mask_size - 1));
imshow("Input",result);
imwrite("output2.tif",result);
}
My input to the algorithm is:
My output is:
I have also tried using a Gaussian blur before actually convolving the image, and the output I got is:
The output I am expecting is:
The guide I am using is: https://www.tutorialspoint.com/dip/sobel_operator.htm
Your convolution looks OK, although I only had a quick look.
Check your output type: it's unsigned char.
Now think about the values your output pixels may have when the kernel has negative coefficients, and whether it is a good idea to store them in a uchar directly.
If you store -1 in an unsigned char it wraps around and your output becomes 255. In case you're wondering where all that excess white stuff is coming from: those are actually small negative gradients.
The desired result looks like the absolute value of the Sobel output.
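For example (a sketch only, reusing the 3x3 mask and an 8-bit grayscale input from the question; the function name and other variables here are made up): accumulate into a signed buffer, then take the absolute value when converting back to 8 bits.
void sobelSigned(const cv::Mat& input_image, cv::Mat& display)
{
    // CV_16S keeps negative responses instead of wrapping them to 255
    cv::Mat gradX = cv::Mat::zeros(input_image.size(), CV_16S);
    for (int i = 1; i < input_image.rows - 1; i++)
        for (int j = 1; j < input_image.cols - 1; j++)
        {
            int sum = 0;
            for (int k = -1; k <= 1; k++)
                for (int l = -1; l <= 1; l++)
                    sum += input_image.at<uchar>(i + k, j + l) * mask[k + 1][l + 1];
            gradX.at<short>(i, j) = (short)sum;   // keep the sign here
        }
    cv::convertScaleAbs(gradX, display);          // |value|, saturated back to uchar
}
The built-in cv::Sobel(input, output, CV_16S, 1, 0) followed by cv::convertScaleAbs should give an equivalent result for this kernel.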
I am currently doing a project which requires me to use some structural analysis, like finding the perimeter and area. I have successfully obtained the contour of the object.
When I use the contour.size() function it returns 1108 (in this case).
When I use the cv::arcLength(contour) function it returns 1200.
Shouldn't the perimeter be the number of points in the contour (the contour is the external boundary of the object)? Which should I trust?
Not necessarily: with cv::arcLength you sum the Euclidean distances between the consecutive points in the curve.
Here is a code snippet from cv::arcLength:
...
const Point2f* ptf = curve.ptr<Point2f>();
...
for( i = 0; i < count; i++ )
{
Point2f p = ptf[i];
float dx = p.x - prev.x, dy = p.y - prev.y;
buf[j] = dx*dx + dy*dy;
if( ++j == N || i == count-1 )
{
Mat bufmat(1, j, CV_32F, buf);
sqrt(bufmat, bufmat);
for( ; j > 0; j-- )
perimeter += buf[j-1];
}
prev = p;
}
return perimeter;
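In other words, contour.size() counts sampled points while cv::arcLength measures geometric length. A small sketch of the difference, assuming contour is a std::vector<cv::Point> obtained from findContours:
// Manually sum the segment lengths of a closed contour and compare.
double manual = 0.0;
for (size_t i = 0; i < contour.size(); i++)
{
    cv::Point a = contour[i];
    cv::Point b = contour[(i + 1) % contour.size()];   // wrap around: closed contour
    manual += std::sqrt(double((a.x - b.x) * (a.x - b.x) + (a.y - b.y) * (a.y - b.y)));
}
double perimeter = cv::arcLength(contour, true);        // true = treat the curve as closed
// 'manual' and 'perimeter' agree. Each 4-connected step contributes 1 and each diagonal
// step contributes sqrt(2), so arcLength is usually somewhat larger than contour.size().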
I've been working with the Leap for a long time now. The 2.1+ SDK version allows us to access the cameras and get raw images. I want to use those images with OpenCV for square/circle detection and such... The problem is I can't get those images undistorted. I read the docs, but don't quite get what they mean. Here's one thing I need to understand properly before going forward:
distortion_data_ = image.distortion();
for (int d = 0; d < image.distortionWidth() * image.distortionHeight(); d += 2)
{
float dX = distortion_data_[d];
float dY = distortion_data_[d + 1];
if(!((dX < 0) || (dX > 1)) && !((dY < 0) || (dY > 1)))
{
//what do i do now to undistort the image?
}
}
data = image.data();
mat.put(0, 0, data);
//Imgproc.Canny(mat, mat, 100, 200);
//mat = findSquare(mat);
ok.showImage(mat);
In the docs it says something like this:
"The calibration map can be used to correct image distortion due to lens curvature and other imperfections. The map is a 64x64 grid of points. Each point consists of two 32-bit values..." (the rest is on the dev website).
Can someone explain this in detail please, or just post the Java code to undistort the images and give me an output Mat image so I may continue processing it? (I'd still prefer a good explanation if possible.)
Ok, I have no leap camera to test all this, but this is how I understand the documentation:
The calibration map does not hold offsets but full point positions. An entry says where the pixel has to be placed instead. Those values are mapped between 0 and 1, which means that you have to multiply them by your real image width and height.
What isn't explained explicitly is how your pixel positions are mapped to the 64 x 64 positions of your calibration map. I assume it's done the same way: 640 pixels of width are mapped to 64 pixels of width, and 240 pixels of height are mapped to 64 pixels of height.
So in general, to move from one of your 640 x 240 pixel positions (pX, pY) to the undistorted position you will:
compute corresponding pixel position in the calibration map: float cX = pX/640.0f * 64.0f; float cY = pY/240.0f * 64.0f;
(cX, cY) is now the location of that pixel in the calibration map. You will have to interpolate between neighboring pixel locations, but for now I will only explain how to proceed for a discrete location in the calibration map: (cX', cY') = the rounded location of (cX, cY).
Read the x and y values out of the calibration map: dX, dY as in the documentation. You compute the location in the array from the grid position: d = cY'*calibrationMapWidth*2 + cX'*2, so dX = map[d] and dY = map[d+1].
dX and dY are values between 0 and 1 (if not, don't undistort this point because there is no undistortion available). To find the pixel location in your real image, multiply by the image size: uX = dX*640; uY = dY*240;
set your pixel to the undistorted value: undistortedImage(pX,pY) = distortedImage(uX,uY);
But you don't have discrete point positions in your calibration map, so you have to interpolate. I'll give you an example:
Let (cX, cY) = (13.7, 10.4).
So you read four values from your calibration map:
calibMap(13,10) = (dX1, dY1)
calibMap(14,10) = (dX2, dY2)
calibMap(13,11) = (dX3, dY3)
calibMap(14,11) = (dX4, dY4)
Now your undistorted pixel position for (13.7, 10.4) is computed as follows (multiply each dX/dY by 640 or 240 to get uX1, uY1, uX2, etc.):
// interpolate in x direction first:
float tmpUX1 = uX1*0.3 + uX2*0.7
float tmpUY1 = uY1*0.3 + uY2*0.7
float tmpUX2 = uX3*0.3 + uX4*0.7
float tmpUY2 = uY3*0.3 + uY4*0.7
// now interpolate in y direction
float combinedX = tmpUX1*0.6 + tmpUX2*0.4
float combinedY = tmpUY1*0.6 + tmpUY2*0.4
and your undistorted point is:
undistortedImage(pX,pY) = distortedImage(floor(combinedX+0.5),floor(combinedY+0.5)); or interpolate pixel values there too.
Hope this helps for a basic understanding. I'll try to add OpenCV remap code soon! The only point that's unclear to me is whether the mapping between pX/pY and cX/cY is correct, because that's not explicitly explained in the documentation.
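A compact sketch of those steps (nearest neighbour only, no interpolation), assuming a 640x240 8-bit image, the 64x64 map, and that calibMap is the raw float array from image.distortion(); distorted and undistorted are hypothetical CV_8UC1 cv::Mats:
for (int pY = 0; pY < 240; pY++)
    for (int pX = 0; pX < 640; pX++)
    {
        // nearest entry in the 64x64 calibration map
        int cX = (int)(pX / 640.0f * 64.0f + 0.5f); if (cX > 63) cX = 63;
        int cY = (int)(pY / 240.0f * 64.0f + 0.5f); if (cY > 63) cY = 63;
        int d = cY * 64 * 2 + cX * 2;          // two floats per grid point
        float dX = calibMap[d];
        float dY = calibMap[d + 1];
        if (dX < 0 || dX > 1 || dY < 0 || dY > 1)
            continue;                          // no undistortion available here
        int uX = (int)(dX * 640); if (uX > 639) uX = 639;
        int uY = (int)(dY * 240); if (uY > 239) uY = 239;
        undistorted.at<uchar>(pY, pX) = distorted.at<uchar>(uY, uX);
    }
The complete example below instead uses cv::resize so OpenCV does the interpolation.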
Here is some code. You can skip the first part, where I am faking a distortion and creating the map, which is your initial state.
With OpenCV it is simple: just resize the calibration map to your image size and multiply all the values by your resolution. The nice thing is that OpenCV performs the interpolation "automatically" while resizing.
int main()
{
cv::Mat input = cv::imread("../Data/Lenna.png");
cv::Mat distortedImage = input.clone();
// now i fake some distortion:
cv::Mat transformation = cv::Mat::eye(3,3,CV_64FC1);
transformation.at<double>(0,0) = 2.0;
cv::warpPerspective(input,distortedImage,transformation,input.size());
cv::imshow("distortedImage", distortedImage);
//cv::imwrite("../Data/LenaFakeDistorted.png", distortedImage);
// now fake a calibration map corresponding to my faked distortion:
const unsigned int cmWidth = 64;
const unsigned int cmHeight = 64;
// compute the calibration map by transforming image locations to values between 0 and 1 for legal positions.
float calibMap[cmWidth*cmHeight*2];
for(unsigned int y = 0; y < cmHeight; ++y)
for(unsigned int x = 0; x < cmWidth; ++x)
{
float xx = (float)x/(float)cmWidth;
xx = xx*2.0f; // this is from my fake distortion... this gives some values bigger than 1
float yy = (float)y/(float)cmHeight;
calibMap[y*cmWidth*2+ 2*x] = xx;
calibMap[y*cmWidth*2+ 2*x+1] = yy;
}
// NOW you have the initial situation of your scenario: calibration map and distorted image...
// compute the image locations of calibration map values:
cv::Mat cMapMatX = cv::Mat(cmHeight, cmWidth, CV_32FC1);
cv::Mat cMapMatY = cv::Mat(cmHeight, cmWidth, CV_32FC1);
for(int j=0; j<cmHeight; ++j)
for(int i=0; i<cmWidth; ++i)
{
cMapMatX.at<float>(j,i) = calibMap[j*cmWidth*2 +2*i];
cMapMatY.at<float>(j,i) = calibMap[j*cmWidth*2 +2*i+1];
}
//cv::imshow("mapX",cMapMatX);
//cv::imshow("mapY",cMapMatY);
// interpolate those values for each of your original images pixel:
// here I use linear interpolation, you could use cubic or other interpolation too.
cv::resize(cMapMatX, cMapMatX, distortedImage.size(), 0,0, CV_INTER_LINEAR);
cv::resize(cMapMatY, cMapMatY, distortedImage.size(), 0,0, CV_INTER_LINEAR);
// now the calibration map has the size of your original image, but its values are still between 0 and 1 (for legal positions)
// so scale to image size:
cMapMatX = distortedImage.cols * cMapMatX;
cMapMatY = distortedImage.rows * cMapMatY;
// now create undistorted image:
cv::Mat undistortedImage = cv::Mat(distortedImage.rows, distortedImage.cols, CV_8UC3);
undistortedImage.setTo(cv::Vec3b(0,0,0)); // initialize black
//cv::imshow("undistorted", undistortedImage);
for(int j=0; j<undistortedImage.rows; ++j)
for(int i=0; i<undistortedImage.cols; ++i)
{
cv::Point undistPosition;
undistPosition.x =(cMapMatX.at<float>(j,i)); // this will round the position, maybe you want interpolation instead
undistPosition.y =(cMapMatY.at<float>(j,i));
if(undistPosition.x >= 0 && undistPosition.x < distortedImage.cols
&& undistPosition.y >= 0 && undistPosition.y < distortedImage.rows)
{
undistortedImage.at<cv::Vec3b>(j,i) = distortedImage.at<cv::Vec3b>(undistPosition);
}
}
cv::imshow("undistorted", undistortedImage);
cv::waitKey(0);
//cv::imwrite("../Data/LenaFakeUndistorted.png", undistortedImage);
}
cv::Mat SelfDescriptorDistances(cv::Mat descr)
{
cv::Mat selfDistances = cv::Mat::zeros(descr.rows,descr.rows, CV_64FC1);
for(int keyptNr = 0; keyptNr < descr.rows; ++keyptNr)
{
for(int keyptNr2 = 0; keyptNr2 < descr.rows; ++keyptNr2)
{
double euclideanDistance = 0;
for(int descrDim = 0; descrDim < descr.cols; ++descrDim)
{
double tmp = descr.at<float>(keyptNr,descrDim) - descr.at<float>(keyptNr2, descrDim);
euclideanDistance += tmp*tmp;
}
euclideanDistance = sqrt(euclideanDistance);
selfDistances.at<double>(keyptNr, keyptNr2) = euclideanDistance;
}
}
return selfDistances;
}
I use this as input and fake a remap/distortion from which I compute my calib mat:
input:
faked distortion:
used the map to undistort the image:
TODO: after those computations, use an OpenCV map with those values to perform faster remapping.
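Regarding that TODO: since cMapMatX / cMapMatY are already resized to the image size and scaled to pixel coordinates, they can be fed directly to cv::remap, which replaces the manual per-pixel loop (untested sketch, same variable names as above):
cv::Mat undistortedFast;
cv::remap(distortedImage, undistortedFast, cMapMatX, cMapMatY,
          CV_INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));
cv::imshow("undistorted via remap", undistortedFast);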
Here's an example of how to do it without using OpenCV. The following seems to be faster than using the Leap::Image::warp() method (probably due to the additional function call overhead when using warp()):
float destinationWidth = 320;
float destinationHeight = 120;
unsigned char destination[(int)destinationWidth][(int)destinationHeight];
//define needed variables outside the inner loop
float calX, calY, weightX, weightY, dX1, dX2, dX3, dX4, dY1, dY2, dY3, dY4, dX, dY;
int x1, x2, y1, y2, denormalizedX, denormalizedY;
int x, y;
const unsigned char* raw = image.data();
const float* distortion_buffer = image.distortion();
//Local variables for values needed in loop
const int distortionWidth = image.distortionWidth();
const int width = image.width();
const int height = image.height();
for (x = 0; x < destinationWidth; x++) {
for (y = 0; y < destinationHeight; y++) {
//Calculate the position in the calibration map (still with a fractional part)
calX = 63 * x/destinationWidth;
calY = 63 * y/destinationHeight;
//Save the fractional part to use as the weight for interpolation
weightX = calX - truncf(calX);
weightY = calY - truncf(calY);
//Get the x,y coordinates of the closest calibration map points to the target pixel
x1 = calX; //Note truncation to int
y1 = calY;
x2 = x1 + 1;
y2 = y1 + 1;
//Look up the x and y values for the 4 calibration map points around the target
// (x1, y1) .. .. .. (x2, y1)
// .. ..
// .. (x, y) ..
// .. ..
// (x1, y2) .. .. .. (x2, y2)
dX1 = distortion_buffer[x1 * 2 + y1 * distortionWidth];
dX2 = distortion_buffer[x2 * 2 + y1 * distortionWidth];
dX3 = distortion_buffer[x1 * 2 + y2 * distortionWidth];
dX4 = distortion_buffer[x2 * 2 + y2 * distortionWidth];
dY1 = distortion_buffer[x1 * 2 + y1 * distortionWidth + 1];
dY2 = distortion_buffer[x2 * 2 + y1 * distortionWidth + 1];
dY3 = distortion_buffer[x1 * 2 + y2 * distortionWidth + 1];
dY4 = distortion_buffer[x2 * 2 + y2 * distortionWidth + 1];
//Bilinear interpolation of the looked-up values:
// X value
dX = dX1 * (1 - weightX) * (1- weightY) + dX2 * weightX * (1 - weightY) + dX3 * (1 - weightX) * weightY + dX4 * weightX * weightY;
// Y value
dY = dY1 * (1 - weightX) * (1- weightY) + dY2 * weightX * (1 - weightY) + dY3 * (1 - weightX) * weightY + dY4 * weightX * weightY;
// Reject points outside the range [0..1]
if((dX >= 0) && (dX <= 1) && (dY >= 0) && (dY <= 1)) {
//Denormalize from [0..1] to [0..width] or [0..height]
denormalizedX = dX * width;
denormalizedY = dY * height;
//look up the brightness value for the target pixel
destination[x][y] = raw[denormalizedX + denormalizedY * width];
} else {
destination[x][y] = -1;
}
}
}
I'm using the Hough transform in OpenCV to detect lines. However, I know in advance that I only need lines within a very limited range of angles (about 10 degrees or so). I'm doing this in a very performance sensitive setting, so I'd like to avoid the extra work spent detecting lines at other angles, lines I know in advance I don't care about.
I could extract the Hough source from OpenCV and just hack it to take min_theta and max_theta parameters, but I'd like a less fragile approach (having to manually update my code with each OpenCV update, etc.).
What's the best approach here?
Well, I've modified the icvHoughLinesStandard function to scan only a certain range of angles. I'm sure there are cleaner ways that play with memory allocation as well, but I got a speed gain from 100 ms to 33 ms by reducing the angle range from 180 deg to 60 deg, so I'm happy with that.
Note that this code also outputs the accumulator value. Also, I only output one line because that fit my purposes, but there was no real gain there.
static void
icvHoughLinesStandard2( const CvMat* img, float rho, float theta,
int threshold, CvSeq *lines, int linesMax )
{
cv::AutoBuffer<int> _accum, _sort_buf;
cv::AutoBuffer<float> _tabSin, _tabCos;
const uchar* image;
int step, width, height;
int numangle, numrho;
int total = 0;
float ang;
int r, n;
int i, j;
float irho = 1 / rho;
double scale;
CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );
image = img->data.ptr;
step = img->step;
width = img->cols;
height = img->rows;
numangle = cvRound(CV_PI / theta);
numrho = cvRound(((width + height) * 2 + 1) / rho);
_accum.allocate((numangle+2) * (numrho+2));
_sort_buf.allocate(numangle * numrho);
_tabSin.allocate(numangle);
_tabCos.allocate(numangle);
int *accum = _accum, *sort_buf = _sort_buf;
float *tabSin = _tabSin, *tabCos = _tabCos;
memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );
// find n and ang limits (in our case we want 60 to 120 deg)
float limit_min = 60.0/180.0*CV_PI;
float limit_max = 120.0/180.0*CV_PI;
//num_steps = (limit_max - limit_min)/theta;
int start_n = floor(limit_min/theta);
int stop_n = floor(limit_max/theta);
for( ang = limit_min, n = start_n; n < stop_n; ang += theta, n++ )
{
tabSin[n] = (float)(sin(ang) * irho);
tabCos[n] = (float)(cos(ang) * irho);
}
// stage 1. fill accumulator
for( i = 0; i < height; i++ )
for( j = 0; j < width; j++ )
{
if( image[i * step + j] != 0 )
//
for( n = start_n; n < stop_n; n++ )
{
r = cvRound( j * tabCos[n] + i * tabSin[n] );
r += (numrho - 1) / 2;
accum[(n+1) * (numrho+2) + r+1]++;
}
}
int max_accum = 0;
int max_ind = 0;
for( r = 0; r < numrho; r++ )
{
for( n = start_n; n < stop_n; n++ )
{
int base = (n+1) * (numrho+2) + r+1;
if (accum[base] > max_accum)
{
max_accum = accum[base];
max_ind = base;
}
}
}
CvLinePolar2 line;
scale = 1./(numrho+2);
int idx = max_ind;
n = cvFloor(idx*scale) - 1;
r = idx - (n+1)*(numrho+2) - 1;
line.rho = (r - (numrho - 1)*0.5f) * rho;
line.angle = n * theta;
line.votes = accum[idx];
cvSeqPush( lines, &line );
}
If you use the probabilistic Hough transform, then the output is a pair of CvPoints for each line, lines[0] and lines[1]. We can get the x and y coordinates for each of the two points via pt1.x, pt1.y and pt2.x, pt2.y.
Then use the simple formula for the slope of a line, (y2-y1)/(x2-x1). Taking the arctan (inverse tangent) of that will yield the angle in radians. Then simply filter out the desired angles from the values obtained for each Hough line, as sketched below.
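A minimal sketch of that filtering with the C++ API (the edge image, thresholds, and the +/-5 degree window are placeholders; atan2 is used instead of a raw division to avoid the vertical-line case):
std::vector<cv::Vec4i> lines;
cv::HoughLinesP(edges, lines, 1, CV_PI / 180, 80, 30, 10);
std::vector<cv::Vec4i> filtered;
for (size_t i = 0; i < lines.size(); i++)
{
    const cv::Vec4i& l = lines[i];
    double angle = std::atan2(double(l[3] - l[1]), double(l[2] - l[0]));  // radians
    if (angle < 0) angle += CV_PI;                       // fold into [0, pi)
    double tol = 5.0 * CV_PI / 180.0;
    if (angle < tol || angle > CV_PI - tol)              // keep near-horizontal lines only
        filtered.push_back(l);
}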
I think it's more natural to use the standard HoughLines(...) function, which gives a collection of lines directly in rho and theta terms, and select the necessary angle range from it, rather than recalculating the angle from segment end points.
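For completeness, a sketch of that approach (the 60-70 degree window and the thresholds are just examples):
std::vector<cv::Vec2f> lines;                    // each entry is (rho, theta)
cv::HoughLines(edges, lines, 1, CV_PI / 180, 100);
const float thetaMin = 60.0f * (float)CV_PI / 180.0f;
const float thetaMax = 70.0f * (float)CV_PI / 180.0f;
std::vector<cv::Vec2f> kept;
for (size_t i = 0; i < lines.size(); i++)
    if (lines[i][1] >= thetaMin && lines[i][1] <= thetaMax)
        kept.push_back(lines[i]);
Note that this filters after the full accumulator has been built, so it doesn't give the speed gain of the modified icvHoughLinesStandard2 above; newer OpenCV releases also added min_theta/max_theta parameters to HoughLines itself, which avoid the manual filtering.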