Is it possible to get the accumulator value along with rho and theta from a Hough transform?
I ask because I'd like to differentiate between lines that are "well defined" (i.e., those with a high accumulator value) and lines that aren't as well defined.
Thanks!
OK, so looking at the cvhough.cpp file, the structure CvLinePolar is only defined by rho and angle.
That is all that is passed back as a result of our call to HoughLines. I am in the process of modifying the C++ file to see if I can get the votes out.
Update Oct 26: I just realized these are not really answers but more like questions, which is apparently frowned upon. I found some instructions on recompiling OpenCV. I guess we'll have to go into the code, modify it, and recompile:
How to install OpenCV 2.0 on win32
Update Oct 27: Well, I failed at compiling the DLLs for OpenCV with my new code, so I ended up copy-pasting the specific parts I want to modify into my own files.
I prefer to add new functions, so as to avoid overloading the already defined ones.
There are 4 main things you need to copy over:
1- some random defines
#define hough_cmp_gt(l1,l2) (aux[l1] > aux[l2])
static CV_IMPLEMENT_QSORT_EX( icvHoughSortDescent32s, int, hough_cmp_gt, const int* )
2- redefining the struct for line parameters
typedef struct CvLinePolar2
{
    float rho;
    float angle;
    float votes;
}
CvLinePolar2;
3- the main function that was modified
static void
icvHoughLinesStandard2( const CvMat* img, float rho, float theta,
                        int threshold, CvSeq *lines, int linesMax )
{
    cv::AutoBuffer<int> _accum, _sort_buf;
    cv::AutoBuffer<float> _tabSin, _tabCos;
    const uchar* image;
    int step, width, height;
    int numangle, numrho;
    int total = 0;
    float ang;
    int r, n;
    int i, j;
    float irho = 1 / rho;
    double scale;

    CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );

    image = img->data.ptr;
    step = img->step;
    width = img->cols;
    height = img->rows;

    numangle = cvRound(CV_PI / theta);
    numrho = cvRound(((width + height) * 2 + 1) / rho);

    _accum.allocate((numangle+2) * (numrho+2));
    _sort_buf.allocate(numangle * numrho);
    _tabSin.allocate(numangle);
    _tabCos.allocate(numangle);
    int *accum = _accum, *sort_buf = _sort_buf;
    float *tabSin = _tabSin, *tabCos = _tabCos;

    memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );

    for( ang = 0, n = 0; n < numangle; ang += theta, n++ )
    {
        tabSin[n] = (float)(sin(ang) * irho);
        tabCos[n] = (float)(cos(ang) * irho);
    }

    // stage 1. fill accumulator
    for( i = 0; i < height; i++ )
        for( j = 0; j < width; j++ )
        {
            if( image[i * step + j] != 0 )
                for( n = 0; n < numangle; n++ )
                {
                    r = cvRound( j * tabCos[n] + i * tabSin[n] );
                    r += (numrho - 1) / 2;
                    accum[(n+1) * (numrho+2) + r+1]++;
                }
        }

    // stage 2. find local maximums
    for( r = 0; r < numrho; r++ )
        for( n = 0; n < numangle; n++ )
        {
            int base = (n+1) * (numrho+2) + r+1;
            if( accum[base] > threshold &&
                accum[base] > accum[base - 1] && accum[base] >= accum[base + 1] &&
                accum[base] > accum[base - numrho - 2] && accum[base] >= accum[base + numrho + 2] )
                sort_buf[total++] = base;
        }

    // stage 3. sort the detected lines by accumulator value
    icvHoughSortDescent32s( sort_buf, total, accum );

    // stage 4. store the first min(total,linesMax) lines to the output buffer
    linesMax = MIN(linesMax, total);
    scale = 1./(numrho+2);
    for( i = 0; i < linesMax; i++ )
    {
        CvLinePolar2 line;
        int idx = sort_buf[i];
        int n = cvFloor(idx*scale) - 1;
        int r = idx - (n+1)*(numrho+2) - 1;
        line.rho = (r - (numrho - 1)*0.5f) * rho;
        line.angle = n * theta;
        line.votes = accum[idx];
        cvSeqPush( lines, &line );
    }
    // Note: no explicit cvFree here. cv::AutoBuffer releases its memory
    // automatically, so the cvFree calls from the older cvAlloc-based
    // version of this function must not be carried over.
}
4- the wrapper function that calls it
CV_IMPL CvSeq*
cvHoughLines3( CvArr* src_image, void* lineStorage, int method,
               double rho, double theta, int threshold,
               double param1, double param2 )
{
    CvSeq* result = 0;

    CvMat stub, *img = (CvMat*)src_image;
    CvMat* mat = 0;
    CvSeq* lines = 0;
    CvSeq lines_header;
    CvSeqBlock lines_block;
    int lineType, elemSize;
    int linesMax = INT_MAX;
    int iparam1, iparam2;

    img = cvGetMat( img, &stub );

    if( !CV_IS_MASK_ARR(img))
        CV_Error( CV_StsBadArg, "The source image must be 8-bit, single-channel" );

    if( !lineStorage )
        CV_Error( CV_StsNullPtr, "NULL destination" );

    if( rho <= 0 || theta <= 0 || threshold <= 0 )
        CV_Error( CV_StsOutOfRange, "rho, theta and threshold must be positive" );

    if( method != CV_HOUGH_PROBABILISTIC )
    {
        lineType = CV_32FC3;
        elemSize = sizeof(float)*3;
    }
    else
    {
        lineType = CV_32SC4;
        elemSize = sizeof(int)*4;
    }

    if( CV_IS_STORAGE( lineStorage ))
    {
        lines = cvCreateSeq( lineType, sizeof(CvSeq), elemSize, (CvMemStorage*)lineStorage );
    }
    else if( CV_IS_MAT( lineStorage ))
    {
        mat = (CvMat*)lineStorage;

        if( !CV_IS_MAT_CONT( mat->type ) || (mat->rows != 1 && mat->cols != 1) )
            CV_Error( CV_StsBadArg,
                      "The destination matrix should be continuous and have a single row or a single column" );

        if( CV_MAT_TYPE( mat->type ) != lineType )
            CV_Error( CV_StsBadArg,
                      "The destination matrix data type is inappropriate, see the manual" );

        lines = cvMakeSeqHeaderForArray( lineType, sizeof(CvSeq), elemSize, mat->data.ptr,
                                         mat->rows + mat->cols - 1, &lines_header, &lines_block );
        linesMax = lines->total;
        cvClearSeq( lines );
    }
    else
        CV_Error( CV_StsBadArg, "Destination is not CvMemStorage* nor CvMat*" );

    iparam1 = cvRound(param1);
    iparam2 = cvRound(param2);

    switch( method )
    {
    case CV_HOUGH_STANDARD:
        icvHoughLinesStandard2( img, (float)rho,
                                (float)theta, threshold, lines, linesMax );
        break;
    default:
        CV_Error( CV_StsBadArg, "Unrecognized method id" );
    }

    if( mat )
    {
        if( mat->cols > mat->rows )
            mat->cols = lines->total;
        else
            mat->rows = lines->total;
    }
    else
        result = lines;

    return result;
}
And I guess you could uninstall OpenCV so that it removes all those automatic path settings, recompile it yourself using the CMake method, and then OpenCV is really whatever you make of it.
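A side note for readers on newer OpenCV builds: recent releases can return the votes without recompiling anything. In C++, the HoughLines output array may hold 3-element vectors (rho, theta, votes), and recent Python builds (roughly 4.5 and later) expose cv2.HoughLinesWithAccumulator; whether your particular build has that binding is an assumption to verify. A minimal Python sketch:

import cv2
import numpy as np

img = cv2.imread('source.pgm', cv2.IMREAD_GRAYSCALE)
edges = cv2.Canny(img, 50, 150, apertureSize=3)

# Each returned row is (rho, theta, votes); votes is the accumulator value
# this question asks about. Requires a build that exposes this binding.
lines = cv2.HoughLinesWithAccumulator(edges, 1, np.pi / 180, 150)
if lines is not None:
    for rho, theta, votes in lines[:, 0]:
        print(rho, theta, votes)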
Although this is an old question, I had the same problem, so I might as well put up my solution. HoughLines() does not return the vote counts; it only returns the lines that cleared the vote threshold. The workaround is to run HoughLines() for every threshold value (until no line clears it any more) and tally, in a separate array, how many thresholds each line survives. In Python (and maybe in other bindings too), when there are no more votes, indexing the result throws an error, so use try/except.
Here is an example in Python. The array I used was for rho values of -199 to 200 with a maximum vote count below 100. You can play around with those constants to suit your needs. You may need to add a line to convert the source image to grayscale.
import matplotlib.pyplot as plt
import cv2
import math

############ make houghspace array ############
houghspace = []
c = 0
height = 400
while c <= height:
    houghspace.append([])
    cc = 0
    while cc <= 180:
        houghspace[c].append(0)
        cc += 1
    c += 1

############ do transform ############
degree_tick = 1   # by how many degrees to check
total_votes = 1   # votes counter
highest_vote = 0  # highest vote in the array
while total_votes < 100:
    img = cv2.imread('source.pgm')
    edges = cv2.Canny(img, 50, 150, apertureSize=3)
    lines = cv2.HoughLines(edges, 1, math.pi * degree_tick / 180, total_votes)
    try:
        for rho, theta in lines[0]:
            a = math.cos(theta)
            b = math.sin(theta)
            x1 = int((a * rho) + 1000 * (-b))
            y1 = int((b * rho) + 1000 * (a))
            x2 = int((a * rho) - 1000 * (-b))
            y2 = int((b * rho) - 1000 * (a))
            cv2.line(img, (x1, y1), (x2, y2), (50, 200, 255), 2)
        ################# add votes into the array ################
        deradian = 180 / math.pi  # used to convert to degrees
        for rho, theta in lines[0]:
            degree = int(round(theta * deradian))
            rho_pos = int(rho - 200)
            houghspace[rho_pos][degree] += 1
        highest_vote = total_votes  # last threshold that still produced lines
    # when lines[0] has no votes, it throws an error which is caught here
    except:
        total_votes = 999  # exit loop
    total_votes += 1
    del lines

########### loop finished ###############################
print(highest_vote)
#############################################################

################### plot the houghspace ###################
maxy = 200   # used to offset the y-axis
miny = -200  # used to offset the y-axis
# the main graph
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
ax.set_title('Houghspace')
plt.imshow(houghspace, cmap='gist_stern')
ax.set_aspect('equal')
plt.yticks([0, -miny, maxy - miny], [miny, 0, maxy])
# the legend
cax = fig.add_axes([0, 0.1, 0.78, 0.8])
cax.get_xaxis().set_visible(False)
cax.get_yaxis().set_visible(False)
cax.patch.set_alpha(0)
cax.set_frame_on(False)
plt.colorbar(orientation='vertical')
# plot
plt.show()
Can anyone help me with this problem: how to flip an image without using the built-in flipping function, i.e. flip(src_image, destination_image, 1 or 0), in C++ using OpenCV? I am new to this software, so please help.
OpenCV's flip function uses internal flipHoriz or flipVert functions.
static void
flipHoriz( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size, size_t esz )
{
    int i, j, limit = (int)(((size.width + 1)/2)*esz);
    AutoBuffer<int> _tab(size.width*esz);
    int* tab = _tab;

    for( i = 0; i < size.width; i++ )
        for( size_t k = 0; k < esz; k++ )
            tab[i*esz + k] = (int)((size.width - i - 1)*esz + k);

    for( ; size.height--; src += sstep, dst += dstep )
    {
        for( i = 0; i < limit; i++ )
        {
            j = tab[i];
            uchar t0 = src[i], t1 = src[j];
            dst[i] = t1; dst[j] = t0;
        }
    }
}
static void
flipVert( const uchar* src0, size_t sstep, uchar* dst0, size_t dstep, Size size, size_t esz )
{
    const uchar* src1 = src0 + (size.height - 1)*sstep;
    uchar* dst1 = dst0 + (size.height - 1)*dstep;
    size.width *= (int)esz;

    for( int y = 0; y < (size.height + 1)/2; y++, src0 += sstep, src1 -= sstep,
                                                  dst0 += dstep, dst1 -= dstep )
    {
        int i = 0;
        if( ((size_t)src0|(size_t)dst0|(size_t)src1|(size_t)dst1) % sizeof(int) == 0 )
        {
            for( ; i <= size.width - 16; i += 16 )
            {
                int t0 = ((int*)(src0 + i))[0];
                int t1 = ((int*)(src1 + i))[0];
                ((int*)(dst0 + i))[0] = t1;
                ((int*)(dst1 + i))[0] = t0;

                t0 = ((int*)(src0 + i))[1];
                t1 = ((int*)(src1 + i))[1];
                ((int*)(dst0 + i))[1] = t1;
                ((int*)(dst1 + i))[1] = t0;

                t0 = ((int*)(src0 + i))[2];
                t1 = ((int*)(src1 + i))[2];
                ((int*)(dst0 + i))[2] = t1;
                ((int*)(dst1 + i))[2] = t0;

                t0 = ((int*)(src0 + i))[3];
                t1 = ((int*)(src1 + i))[3];
                ((int*)(dst0 + i))[3] = t1;
                ((int*)(dst1 + i))[3] = t0;
            }

            for( ; i <= size.width - 4; i += 4 )
            {
                int t0 = ((int*)(src0 + i))[0];
                int t1 = ((int*)(src1 + i))[0];
                ((int*)(dst0 + i))[0] = t1;
                ((int*)(dst1 + i))[0] = t0;
            }
        }

        for( ; i < size.width; i++ )
        {
            uchar t0 = src0[i];
            uchar t1 = src1[i];
            dst0[i] = t1;
            dst1[i] = t0;
        }
    }
}
// you can use it with a small modification as below
void myflip( InputArray _src, OutputArray _dst, int flip_mode )
{
    CV_Assert( _src.dims() <= 2 );
    Size size = _src.size();

    if (flip_mode < 0)
    {
        if (size.width == 1)
            flip_mode = 0;
        if (size.height == 1)
            flip_mode = 1;
    }

    if ((size.width == 1 && flip_mode > 0) ||
        (size.height == 1 && flip_mode == 0) ||
        (size.height == 1 && size.width == 1 && flip_mode < 0))
    {
        return _src.copyTo(_dst);
    }

    Mat src = _src.getMat();
    int type = src.type();
    _dst.create( size, type );
    Mat dst = _dst.getMat();
    size_t esz = CV_ELEM_SIZE(type);

    if( flip_mode <= 0 )
        flipVert( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
    else
        flipHoriz( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );

    if( flip_mode < 0 )
        flipHoriz( dst.ptr(), dst.step, dst.ptr(), dst.step, dst.size(), esz );
}
Assuming you have a good reason not to use the OpenCV flip function, you can write your custom one.
For this example, I'll use CV_8UC3 images. I'll point out at the end how to expand this to different formats.
Let's first see how to flip an image horizontally, which corresponds to cv::flip(src, dst, 1). Given a src image, each pixel of dst keeps the same y coordinate, while its x coordinate maps to src.cols - 1 - x. In practice:
void flip_lr(const Mat3b& src, Mat3b& dst)
{
    Mat3b _dst(src.rows, src.cols);
    for (int r = 0; r < _dst.rows; ++r) {
        for (int c = 0; c < _dst.cols; ++c) {
            _dst(r, c) = src(r, src.cols - 1 - c);
        }
    }
    dst = _dst;
}
Then, to flip vertically (corresponding to cv::flip(src, dst, 0)), dst keeps the same x coordinate, with y mapping to src.rows - 1 - y. However, you can reuse the above-mentioned function: simply transpose the dst matrix, apply the horizontal flip, and then transpose back. In practice:
dst = src.t();
flip_lr(dst, dst);
dst = dst.t();
Then, to flip both axes, corresponding to cv::flip(src, dst, -1), you simply combine the horizontal and vertical flips:
flip_lr(src, dst);
dst = dst.t();
flip_lr(dst, dst);
dst = dst.t();
You can wrap this functionality in a custom flip function that takes the same parameters as cv::flip:
void custom_flip(const Mat3b& src, Mat3b& dst, int code)
{
    if (code > 0)
    {
        // Flip horizontally
        flip_lr(src, dst);
    }
    else if (code == 0)
    {
        // Flip vertically
        dst = src.t();
        flip_lr(dst, dst);
        dst = dst.t();
    }
    else // code < 0
    {
        // Flip both axes
        flip_lr(src, dst);
        dst = dst.t();
        flip_lr(dst, dst);
        dst = dst.t();
    }
}
Note that you can adapt this to different formats simply by modifying the flip_lr function, taking care to call the appropriate version inside custom_flip, which will then accept Mat instead of Mat3b.
Full code for reference:
#include <opencv2/opencv.hpp>
using namespace cv;

void flip_lr(const Mat3b& src, Mat3b& dst)
{
    Mat3b _dst(src.rows, src.cols);
    for (int r = 0; r < _dst.rows; ++r) {
        for (int c = 0; c < _dst.cols; ++c) {
            _dst(r, c) = src(r, src.cols - 1 - c);
        }
    }
    dst = _dst;
}

void custom_flip(const Mat3b& src, Mat3b& dst, int code)
{
    if (code > 0)
    {
        // Flip horizontally
        flip_lr(src, dst);
    }
    else if (code == 0)
    {
        // Flip vertically
        dst = src.t();
        flip_lr(dst, dst);
        dst = dst.t();
    }
    else // code < 0
    {
        // Flip both axes
        flip_lr(src, dst);
        dst = dst.t();
        flip_lr(dst, dst);
        dst = dst.t();
    }
}

int main(void)
{
    Mat3b img = imread("path_to_image");

    Mat3b flipped;
    flip(img, flipped, -1);

    Mat3b custom;
    custom_flip(img, custom, -1);

    imshow("OpenCV flip", flipped);
    imshow("Custom flip", custom);
    waitKey();

    return 0;
}
I want to extract the red ball from one picture and get the detected ellipse matrix in the picture.
Here is my example:
I threshold the picture, find the contour of the red ball using the findContours() function, and use fitEllipse() to fit an ellipse (a rough sketch of that pipeline follows).
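(In the Python API, that pipeline looks roughly like this; the file name and red color range are placeholders:)

import cv2

img = cv2.imread('red_ball.png')  # placeholder input
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 120, 70), (10, 255, 255))  # rough low-hue red range

# OpenCV 4.x returns (contours, hierarchy); 3.x returns an extra image first.
contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
ball = max(contours, key=cv2.contourArea)
ellipse = cv2.fitEllipse(ball)  # ((xc, yc), (2a, 2b), angle in degrees)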
But what I want is the coefficients of this ellipse. Because fitEllipse() returns a rotated rectangle (RotatedRect), I need to rewrite this function.
An ellipse can be expressed as Ax^2 + By^2 + Cxy + Dx + Ey + F = 0, so I want to get u = (A, B, C, D, E, F), or u = (A, B, C, D, E) if F is 1 (to construct an ellipse matrix).
I read the source code of fitEllipse(); there are three SVD processes in total, and I think I can get the above coefficients from the results of those three SVD processes. But I am quite confused about what each result (the variable cv::Mat x) of each SVD process represents, and why there are three SVDs here.
Here is this function:
cv::RotatedRect cv::fitEllipse( InputArray _points )
{
    Mat points = _points.getMat();
    int i, n = points.checkVector(2);
    int depth = points.depth();
    CV_Assert( n >= 0 && (depth == CV_32F || depth == CV_32S));

    RotatedRect box;

    if( n < 5 )
        CV_Error( CV_StsBadSize, "There should be at least 5 points to fit the ellipse" );

    // New fitellipse algorithm, contributed by Dr. Daniel Weiss
    Point2f c(0,0);
    double gfp[5], rp[5], t;
    const double min_eps = 1e-8;
    bool is_float = depth == CV_32F;
    const Point* ptsi = points.ptr<Point>();
    const Point2f* ptsf = points.ptr<Point2f>();

    AutoBuffer<double> _Ad(n*5), _bd(n);
    double *Ad = _Ad, *bd = _bd;

    // first fit for parameters A - E
    Mat A( n, 5, CV_64F, Ad );
    Mat b( n, 1, CV_64F, bd );
    Mat x( 5, 1, CV_64F, gfp );

    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        c += p;
    }
    c.x /= n;
    c.y /= n;

    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        p -= c;

        bd[i] = 10000.0; // 1.0?
        Ad[i*5] = -(double)p.x * p.x; // A - C signs inverted as proposed by APP
        Ad[i*5 + 1] = -(double)p.y * p.y;
        Ad[i*5 + 2] = -(double)p.x * p.y;
        Ad[i*5 + 3] = p.x;
        Ad[i*5 + 4] = p.y;
    }

    solve(A, b, x, DECOMP_SVD);

    // now use general-form parameters A - E to find the ellipse center:
    // differentiate general form wrt x/y to get two equations for cx and cy
    A = Mat( 2, 2, CV_64F, Ad );
    b = Mat( 2, 1, CV_64F, bd );
    x = Mat( 2, 1, CV_64F, rp );
    Ad[0] = 2 * gfp[0];
    Ad[1] = Ad[2] = gfp[2];
    Ad[3] = 2 * gfp[1];
    bd[0] = gfp[3];
    bd[1] = gfp[4];
    solve( A, b, x, DECOMP_SVD );

    // re-fit for parameters A - C with those center coordinates
    A = Mat( n, 3, CV_64F, Ad );
    b = Mat( n, 1, CV_64F, bd );
    x = Mat( 3, 1, CV_64F, gfp );
    for( i = 0; i < n; i++ )
    {
        Point2f p = is_float ? ptsf[i] : Point2f((float)ptsi[i].x, (float)ptsi[i].y);
        p -= c;
        bd[i] = 1.0;
        Ad[i * 3] = (p.x - rp[0]) * (p.x - rp[0]);
        Ad[i * 3 + 1] = (p.y - rp[1]) * (p.y - rp[1]);
        Ad[i * 3 + 2] = (p.x - rp[0]) * (p.y - rp[1]);
    }
    solve(A, b, x, DECOMP_SVD);

    // store angle and radii
    rp[4] = -0.5 * atan2(gfp[2], gfp[1] - gfp[0]); // convert from APP angle usage
    if( fabs(gfp[2]) > min_eps )
        t = gfp[2]/sin(-2.0 * rp[4]);
    else // ellipse is rotated by an integer multiple of pi/2
        t = gfp[1] - gfp[0];
    rp[2] = fabs(gfp[0] + gfp[1] - t);
    if( rp[2] > min_eps )
        rp[2] = std::sqrt(2.0 / rp[2]);
    rp[3] = fabs(gfp[0] + gfp[1] + t);
    if( rp[3] > min_eps )
        rp[3] = std::sqrt(2.0 / rp[3]);

    box.center.x = (float)rp[0] + c.x;
    box.center.y = (float)rp[1] + c.y;
    box.size.width = (float)(rp[2]*2);
    box.size.height = (float)(rp[3]*2);
    if( box.size.width > box.size.height )
    {
        float tmp;
        CV_SWAP( box.size.width, box.size.height, tmp );
        box.angle = (float)(90 + rp[4]*180/CV_PI);
    }
    if( box.angle < -180 )
        box.angle += 360;
    if( box.angle > 360 )
        box.angle -= 360;

    return box;
}
The source code link: https://github.com/Itseez/opencv/blob/master/modules/imgproc/src/shapedescr.cpp
The function fitEllipse returns a RotatedRect that contains all the parameters of the ellipse.
An ellipse is defined by 5 parameters:
xc : x coordinate of the center
yc : y coordinate of the center
a : semi-axis along the direction given by the angle
b : semi-axis orthogonal to a
theta : rotation angle
You can obtain these parameters like:
RotatedRect e = fitEllipse(points);
float xc = e.center.x;
float yc = e.center.y;
float a = e.size.width / 2;  // semi-axis along the theta direction
float b = e.size.height / 2; // the orthogonal semi-axis
float theta = e.angle;       // in degrees
You can draw an ellipse with the function ellipse using the RotatedRect:
ellipse(image, e, Scalar(0,255,0));
or, equivalently using the ellipse parameters:
ellipse(res, Point(xc, yc), Size(a, b), theta, 0.0, 360.0, Scalar(0,255,0));
If you need the values of the coefficients of the implicit equation Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0, you can recover them from these five parameters with the standard conversion (see Wikipedia; it is the same one used in the Python snippet below):
A = a^2 * sin^2(theta) + b^2 * cos^2(theta)
B = 2 * (b^2 - a^2) * sin(theta) * cos(theta)
C = a^2 * cos^2(theta) + b^2 * sin^2(theta)
D = -2*A*xc - B*yc
E = -B*xc - 2*C*yc
F = A*xc^2 + B*xc*yc + C*yc^2 - a^2*b^2
So, you can get the parameters you need from the RotatedRect, and you don't need to change the function fitEllipse.
The solve function is used to solve linear systems or least-squares problems. With the SVD decomposition method (DECOMP_SVD), the system can be over-determined and/or the matrix src1 can be singular.
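For illustration, a minimal sketch of such an over-determined least-squares solve with DECOMP_SVD in Python (the 6x2 system here is made up):

import cv2
import numpy as np

# An over-determined system A x = b: 6 equations, 2 unknowns.
A = np.array([[1., 1.], [1., 2.], [1., 3.], [1., 4.], [1., 5.], [1., 6.]])
b = np.array([[1.1], [1.9], [3.2], [3.9], [5.1], [5.9]])

# DECOMP_SVD returns the least-squares solution even though A is not square.
ok, x = cv2.solve(A, b, flags=cv2.DECOMP_SVD)
print(ok, x.ravel())  # x ~ (intercept, slope) of the best-fit line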
For more details on the algorithm, you can see the paper by Fitzgibbon that proposed this ellipse-fitting method.
Here is some code that worked for me, which I based on the other responses in this thread.
import math
import numpy as np

def getConicCoeffFromEllipse(e):
    # e = (center (xc, yc), size (2a, 2b), angle theta in degrees)
    xc = e[0][0]
    yc = e[0][1]
    a = e[1][0] / 2
    b = e[1][1] / 2
    theta = math.radians(e[2])
    # See https://en.wikipedia.org/wiki/Ellipse
    # Ax^2 + Bxy + Cy^2 + Dx + Ey + F = 0 is the equation
    A = a*a*math.pow(math.sin(theta), 2) + b*b*math.pow(math.cos(theta), 2)
    B = 2*(b*b - a*a)*math.sin(theta)*math.cos(theta)
    C = a*a*math.pow(math.cos(theta), 2) + b*b*math.pow(math.sin(theta), 2)
    D = -2*A*xc - B*yc
    E = -B*xc - 2*C*yc
    F = A*xc*xc + B*xc*yc + C*yc*yc - a*a*b*b
    coef = np.array([A, B, C, D, E, F]) / F
    return coef

def getConicMatrixFromCoeff(c):
    C = np.array([[c[0],   c[1]/2, c[3]/2],   # [ a , b/2, d/2]
                  [c[1]/2, c[2],   c[4]/2],   # [b/2,  c , e/2]
                  [c[3]/2, c[4]/2, c[5]]])    # [d/2, e/2,  f ]
    return C
I would like to convert pixel coordinates to meter coordinates. My camera has radial distortion, and I already have the camera parameters. I use OpenCV's cvUndistort2 function to get the undistorted image, then use these two formulas (u0, v0 are the principal point coordinates in pixels; px, py are the focal lengths in pixels):
x = (u - u0) / px
y = (v - v0) / py
I finally want to calculate moments in metric coordinates.
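(For reference, the same conversion can be done with cv2.undistortPoints, which removes the distortion and returns exactly these normalized coordinates; a minimal Python sketch using the intrinsics from the code below:)

import cv2
import numpy as np

# Intrinsics taken from the code below.
K = np.array([[195.0507, 0.0, 162.4085],
              [0.0, 195.0992, 127.1973],
              [0.0, 0.0, 1.0]])
dist = np.array([-0.33060, 0.11170, 0.0, 0.0, 0.0])

# One pixel coordinate (u, v), shaped (N, 1, 2) as undistortPoints expects.
pts = np.array([[[200.0, 150.0]]])

# Output is the normalized x = (u - u0)/px, y = (v - v0)/py, undistorted.
print(cv2.undistortPoints(pts, K, dist))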
my code:
CvMat* camera_matrix;
CvMat* distortion_coeffs;
camera_matrix = cvCreateMat(3, 3, CV_32FC1);
distortion_coeffs = cvCreateMat(1, 5, CV_32FC1);

double k1 = -0.33060;
double k2 = 0.11170;
double k3 = 0;
double k4 = 0;
double k5 = 0;
double fx = 195.0507;
double fy = 195.0992;
double cx = 162.4085;
double cy = 127.1973;

CV_MAT_ELEM(*(camera_matrix), float, 0, 0) = (float)fx;
CV_MAT_ELEM(*(camera_matrix), float, 0, 1) = 0;
CV_MAT_ELEM(*(camera_matrix), float, 0, 2) = (float)cx;
CV_MAT_ELEM(*(camera_matrix), float, 1, 0) = 0;
CV_MAT_ELEM(*(camera_matrix), float, 1, 1) = (float)fy;
CV_MAT_ELEM(*(camera_matrix), float, 1, 2) = (float)cy;
CV_MAT_ELEM(*(camera_matrix), float, 2, 0) = 0;
CV_MAT_ELEM(*(camera_matrix), float, 2, 1) = 0;
CV_MAT_ELEM(*(camera_matrix), float, 2, 2) = 1;

CV_MAT_ELEM(*(distortion_coeffs), float, 0, 0) = (float)k1;
CV_MAT_ELEM(*(distortion_coeffs), float, 0, 1) = (float)k2;
CV_MAT_ELEM(*(distortion_coeffs), float, 0, 2) = (float)k3;
CV_MAT_ELEM(*(distortion_coeffs), float, 0, 3) = (float)k4;
CV_MAT_ELEM(*(distortion_coeffs), float, 0, 4) = (float)k5;

cvUndistort2( image, Undistort_img, camera_matrix, distortion_coeffs );

//**********************************************************************
// Threshold image
//**********************************************************************
cvSmooth( Undistort_img, smoothed_image, CV_BLUR, 3, 3, 0, 0 );
for (int i = 0; i < smoothed_image->height; i++)
{
    for (int j = 0; j < smoothed_image->width; j++)
    {
        s = cvGet2D(smoothed_image, i, j);
        if (((float)s.val[0]) >= 210)
            cvSet2D(tr_img, i, j, white_value);
        else
            cvSet2D(tr_img, i, j, black_value);
    }
}

//*************************************************************
// moment calculation in metric coordinates
//*************************************************************
for (int i = 0; i < tr_img->height; i++)     // i indexes rows, i.e. the v direction
{
    for (int j = 0; j < tr_img->width; j++)  // j indexes columns, i.e. the u direction
    {
        if (CV_MAT_ELEM(*(tr_img), float, i, j) != 0)
        {
            x = (i - u0) / px;
            y = (j - v0) / py;
            kern1[0][0] = 1 + kern1[0][0];
            kern1[1][0] = x * 1 + kern1[1][0];
            kern1[0][1] = y * 1 + kern1[0][1];
            kern1[2][0] = x * x * 1 + kern1[2][0];
            kern1[0][2] = y * y * 1 + kern1[0][2];
            kern1[1][1] = x * y * 1 + kern1[1][1];
        }
    }
}
moments->m00 = (float)kern1[0][0];
moments->m10 = (float)kern1[0][1];
moments->m01 = (float)kern1[1][0];
moments->m11 = (float)kern1[1][1];
moments->m20 = (float)kern1[0][2];
moments->m02 = (float)kern1[2][0];
As we know, the normalized centers of mass are calculated as follows (Z_star and m00_star are the values at the initial pose):
xn = xg * an = (m10/m00) * Z_star * sqrt(m00_star/m00)
yn = yg * an = (m01/m00) * Z_star * sqrt(m00_star/m00)
These two values should not change when the camera only translates along the optical axis (z axis), but my values change considerably. Could you please help me?
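(For comparison, a minimal sketch of computing these normalized values from OpenCV's own moments in Python; 'mask.png', Z_star and m00_star are placeholders for your thresholded image and initial-pose values:)

import cv2
import numpy as np

binary = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)  # placeholder thresholded image
m = cv2.moments(binary, binaryImage=True)

Z_star = 1.0         # placeholder: depth at the initial pose
m00_star = m['m00']  # placeholder: m00 at the initial pose

an = Z_star * np.sqrt(m00_star / m['m00'])
xn = (m['m10'] / m['m00']) * an
yn = (m['m01'] / m['m00']) * an
print(xn, yn)  # note: pixel-based moments; apply (u-u0)/px first for metric moments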
I am trying to carry out multi-thresholding with Otsu. The method I am currently using maximizes the between-class variance, and I have managed to get the same threshold value as the one given by the OpenCV library. However, that is just from running Otsu's method once.
Documentation on how to do multi-level thresholding, or rather recursive thresholding, is rather limited. Where do I go after obtaining the original Otsu value? I would appreciate some hints. I have been playing around with the code, adding one external for loop, but the next value calculated is always 254 for any given image :(
My code, if needed:
// compute histogram first
cv::Mat imageh; // image converted to grayscale for histogram purposes
//imageh = image; // to delete and uncomment below;
cv::cvtColor(image, imageh, CV_BGR2GRAY);

int histSize[1] = {256};          // number of bins
float hranges[2] = {0.0, 256.0};  // min and max pixel value
const float* ranges[1] = {hranges};
int channels[1] = {0};            // only 1 channel used

cv::MatND hist;
// Compute histogram
calcHist(&imageh, 1, channels, cv::Mat(), hist, 1, histSize, ranges);

IplImage* im = new IplImage(imageh); // assign the image to an IplImage pointer
IplImage* finalIm = cvCreateImage(cvSize(im->width, im->height), IPL_DEPTH_8U, 1);
double otsuThreshold = cvThreshold(im, finalIm, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);

cout << "opencv otsu gives " << otsuThreshold << endl;

int totalNumberOfPixels = imageh.total();
cout << "total number of Pixels is " << totalNumberOfPixels << endl;

float sum = 0;
for (int t = 0; t < 256; t++)
{
    sum += t * hist.at<float>(t);
}
cout << "sum is " << sum << endl;

float sumB = 0;   // sum of background
int wB = 0;       // weight of background
int wF = 0;       // weight of foreground
float varMax = 0;
int threshold = 0;

// iterate to find the threshold that maximizes the between-class variance
for (int t = 0; t < 256; t++)
{
    wB += hist.at<float>(t);        // Weight Background
    if (wB == 0) continue;

    wF = totalNumberOfPixels - wB;  // Weight Foreground
    if (wF == 0) break;

    sumB += (float)(t * hist.at<float>(t));
    float mB = sumB / wB;           // Mean Background
    float mF = (sum - sumB) / wF;   // Mean Foreground

    // Calculate Between Class Variance
    float varBetween = (float)wB * (float)wF * (mB - mF) * (mB - mF);

    // Check if new maximum found
    if (varBetween > varMax) {
        varMax = varBetween;
        threshold = t;
    }
}
cout << "threshold value is: " << threshold;
To extend Otsu's thresholding method to multi-level thresholding, the between-class variance equation becomes a sum over all classes (for two thresholds t1 < t2, i.e. three classes):
sigma_B^2 = W0*(M0 - MT)^2 + W1*(M1 - MT)^2 + W2*(M2 - MT)^2
where Wk and Mk are the weight (probability mass) and mean of class k, and MT is the global mean.
Please check out Deng-Yuan Huang, Ta-Wei Lin, Wu-Chih Hu, "Automatic Multilevel Thresholding Based on Two-Stage Otsu's Method with Cluster Determination by Valley Estimation", Int. Journal of Innovative Computing, 2011, 7:5631-5644, for more information.
http://www.ijicic.org/ijicic-10-05033.pdf
Here is my C# implementation of Otsu Multi for 2 thresholds:
/* Otsu (1979) - multi */
Tuple<int, int> otsuMulti(object sender, EventArgs e) {
    // image histogram
    int[] histogram = new int[256];
    // total number of pixels
    int N = 0;

    // accumulate image histogram and total number of pixels
    foreach (int intensity in image.Data) {
        if (intensity != 0) {
            histogram[intensity] += 1;
            N++;
        }
    }

    double W0K, W1K, W2K, M0, M1, M2, currVarB, optimalThresh1, optimalThresh2, maxBetweenVar, M0K, M1K, M2K, MT;

    optimalThresh1 = 0;
    optimalThresh2 = 0;

    W0K = 0;
    W1K = 0;

    M0K = 0;
    M1K = 0;

    MT = 0;
    maxBetweenVar = 0;
    for (int k = 0; k <= 255; k++) {
        MT += k * (histogram[k] / (double) N);
    }

    for (int t1 = 0; t1 <= 255; t1++) {
        W0K += histogram[t1] / (double) N;        // Pi
        M0K += t1 * (histogram[t1] / (double) N); // i * Pi
        M0 = M0K / W0K;                           // (i * Pi)/Pi

        W1K = 0;
        M1K = 0;

        for (int t2 = t1 + 1; t2 <= 255; t2++) {
            W1K += histogram[t2] / (double) N;        // Pi
            M1K += t2 * (histogram[t2] / (double) N); // i * Pi
            M1 = M1K / W1K;                           // (i * Pi)/Pi

            W2K = 1 - (W0K + W1K);
            M2K = MT - (M0K + M1K);

            if (W2K <= 0) break;

            M2 = M2K / W2K;

            currVarB = W0K * (M0 - MT) * (M0 - MT) + W1K * (M1 - MT) * (M1 - MT) + W2K * (M2 - MT) * (M2 - MT);

            if (maxBetweenVar < currVarB) {
                maxBetweenVar = currVarB;
                optimalThresh1 = t1;
                optimalThresh2 = t2;
            }
        }
    }

    return new Tuple<int, int>((int)optimalThresh1, (int)optimalThresh2);
}
And this is the result I got by thresholding an image scan of soil with the above code:
(T1 = 110, T2 = 147).
Otsu's original paper, "Nobuyuki Otsu, A Threshold Selection Method from Gray-Level Histograms, IEEE Transactions on Systems, Man, and Cybernetics, 1979, 9:62-66", also briefly mentions the extension to multithresholding.
https://engineering.purdue.edu/kak/computervision/ECE661.08/OTSU_paper.pdf
Hope this helps.
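As a usage note, once you have the two thresholds, applying them to an image is only a few lines; a sketch in Python (NumPy), assuming t1 < t2:

import numpy as np

def apply_two_thresholds(gray, t1, t2):
    # Three classes: 0 for values <= t1, 128 for (t1, t2], 255 above t2.
    out = np.zeros_like(gray)
    out[(gray > t1) & (gray <= t2)] = 128
    out[gray > t2] = 255
    return out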
Here is a simple general approach for 'n' thresholds in Python (3+):
# developed by- SUJOY KUMAR GOSWAMI
# source paper- https://people.ece.cornell.edu/acharya/papers/mlt_thr_img.pdf

import cv2
import numpy as np
import math

img = cv2.imread('path-to-image')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

a = 0
b = 255
n = 6    # number of thresholds (better to choose an even value)
k = 0.7  # free variable to take any positive value
T = []   # list which will contain the 'n' thresholds

def sujoy(img, a, b):
    if a > b:
        s = -1
        m = -1
        return m, s

    img = np.array(img)
    t1 = (img >= a)
    t2 = (img <= b)
    X = np.multiply(t1, t2)
    Y = np.multiply(img, X)
    s = np.sum(X)
    m = np.sum(Y) / s
    return m, s

for i in range(int(n / 2 - 1)):
    img = np.array(img)
    t1 = (img >= a)
    t2 = (img <= b)
    X = np.multiply(t1, t2)
    Y = np.multiply(img, X)
    mu = np.sum(Y) / np.sum(X)

    Z = Y - mu
    Z = np.multiply(Z, X)
    W = np.multiply(Z, Z)
    sigma = math.sqrt(np.sum(W) / np.sum(X))

    T1 = mu - k * sigma
    T2 = mu + k * sigma

    x, y = sujoy(img, a, T1)
    w, z = sujoy(img, T2, b)

    T.append(x)
    T.append(w)

    a = T1 + 1
    b = T2 - 1
    k = k * (i + 1)

T1 = mu
T2 = mu + 1
x, y = sujoy(img, a, T1)
w, z = sujoy(img, T2, b)
T.append(x)
T.append(w)

T.sort()
print(T)
For the full paper and more information, visit this link.
I've written an example of how Otsu thresholding works in Python before. You can see the source code here: https://github.com/subokita/Sandbox/blob/master/otsu.py
In the example there are 2 variants: otsu2(), which is the optimised version seen on the Wikipedia page, and otsu(), which is a more naive implementation based on the algorithm description itself.
If you are okay with reading Python code (in this case it is pretty simple, almost pseudocode-like), you might want to look at otsu() in the example and modify it. Porting it to C++ code is not hard either.
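For reference, here is a compact sketch of the naive single-threshold algorithm (assuming a grayscale uint8 image); the multi-level variants above generalize the same between-class-variance search:

import numpy as np

def otsu_threshold(gray):
    hist = np.bincount(gray.ravel(), minlength=256).astype(np.float64)
    p = hist / hist.sum()              # normalized histogram
    bins = np.arange(256)
    best_t, best_var = 0, 0.0
    for t in range(1, 256):
        w0 = p[:t].sum()               # class weights
        w1 = 1.0 - w0
        if w0 == 0 or w1 == 0:
            continue
        mu0 = (bins[:t] * p[:t]).sum() / w0   # class means
        mu1 = (bins[t:] * p[t:]).sum() / w1
        var_between = w0 * w1 * (mu0 - mu1) ** 2
        if var_between > best_var:
            best_var, best_t = var_between, t
    return best_t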
@Antoni4 gives the best answer in my opinion, and it's very straightforward to increase the number of levels.
This is for three-level thresholding:
#include "Shadow01-1.cuh"
void multiThresh(double &optimalThresh1, double &optimalThresh2, double &optimalThresh3, cv::Mat &imgHist, cv::Mat &src)
{
    double W0K, W1K, W2K, W3K, M0, M1, M2, M3, currVarB, maxBetweenVar, M0K, M1K, M2K, M3K, MT;
    unsigned char *histogram = (unsigned char*)(imgHist.data);
    int N = src.rows * src.cols;

    W0K = 0;
    W1K = 0;

    M0K = 0;
    M1K = 0;

    MT = 0;
    maxBetweenVar = 0;

    for (int k = 0; k <= 255; k++) {
        MT += k * (histogram[k] / (double) N);
    }

    for (int t1 = 0; t1 <= 255; t1++)
    {
        W0K += histogram[t1] / (double) N;        // Pi
        M0K += t1 * (histogram[t1] / (double) N); // i * Pi
        M0 = M0K / W0K;                           // (i * Pi)/Pi

        W1K = 0;
        M1K = 0;

        for (int t2 = t1 + 1; t2 <= 255; t2++)
        {
            W1K += histogram[t2] / (double) N;        // Pi
            M1K += t2 * (histogram[t2] / (double) N); // i * Pi
            M1 = M1K / W1K;                           // (i * Pi)/Pi

            W2K = 1 - (W0K + W1K);
            M2K = MT - (M0K + M1K);

            if (W2K <= 0) break;

            M2 = M2K / W2K;

            W3K = 0;
            M3K = 0;

            for (int t3 = t2 + 1; t3 <= 255; t3++)
            {
                W2K += histogram[t3] / (double) N;        // Pi
                M2K += t3 * (histogram[t3] / (double) N); // i * Pi
                M2 = M2K / W2K;                           // (i * Pi)/Pi

                W3K = 1 - (W1K + W2K);
                M3K = MT - (M1K + M2K);

                M3 = M3K / W3K;

                currVarB = W0K * (M0 - MT) * (M0 - MT) + W1K * (M1 - MT) * (M1 - MT) + W2K * (M2 - MT) * (M2 - MT) + W3K * (M3 - MT) * (M3 - MT);

                if (maxBetweenVar < currVarB)
                {
                    maxBetweenVar = currVarB;
                    optimalThresh1 = t1;
                    optimalThresh2 = t2;
                    optimalThresh3 = t3;
                }
            }
        }
    }
}
@Guilherme Silva: your code has a bug.
You must replace:

W3K = 0;
M3K = 0;

with

W2K = 0;
M2K = 0;

and

W3K = 1 - (W1K + W2K);
M3K = MT - (M1K + M2K);

with

W3K = 1 - (W0K + W1K + W2K);
M3K = MT - (M0K + M1K + M2K);

;-)
Regards

EDIT(1): [Toby Speight]
I discovered this bug by applying the effect to the same picture at different resolutions (sizes) and seeing that the output results were far too different from each other, even when changing the resolution only a little.
W3K and M3K must be the totals minus the previous WKs and MKs (I reasoned about this by code similarity with the version with one level less).
Due to my limited English, I cannot explain better how and why at the moment. To be honest, I'm still not 100% sure that this way is correct, even though from my outputs I can tell that it gives better results (even with 1 level more, i.e. 5 shades of gray). You can try it yourself ;-)
Sorry.
My Outputs:
3 Thresholds
4 Thresholds
I found a useful piece of code in this thread. I was looking for a multi-level Otsu implementation for double/float images, so I tried to generalize the example to N levels with a double/float matrix as input. In my code below I use the Armadillo library as a dependency, but the code can easily be adapted to standard C++ arrays: just replace vec and uvec objects with one-dimensional double and integer arrays, and mat and umat with two-dimensional ones. The two other Armadillo functions used here are vectorise and hist.
// Input parameters:
// map     - input image (double matrix)
// mask    - region of interest to be thresholded
// nBins   - number of bins
// nLevels - number of Otsu thresholds

#include <armadillo>
#include <algorithm>
#include <vector>

using namespace arma;
using namespace std;

int nCombinations(int n, int r); // forward declaration (defined below)

mat OtsuFilterMulti(mat map, int nBins, int nLevels) {

    mat mapr; // output thresholded image
    mapr = zeros<mat>(map.n_rows, map.n_cols);

    unsigned int numElem = 0;
    vec threshold = zeros<vec>(nLevels);
    vec q = zeros<vec>(nLevels + 1);
    vec mu = zeros<vec>(nLevels + 1);
    vec muk = zeros<vec>(nLevels + 1);
    uvec binv = zeros<uvec>(nLevels);

    if (nLevels <= 1) return mapr;

    numElem = map.n_rows * map.n_cols;

    uvec histogram = hist(vectorise(map), nBins);

    double maxval = map.max();
    double minval = map.min();
    double odelta = (maxval - abs(minval)) / nBins; // distance between histogram bins
    vec oval = zeros<vec>(nBins);
    double mt = 0, variance = 0.0, bestVariance = 0.0;

    for (int ii = 0; ii < nBins; ii++) {
        oval(ii) = (double)odelta * ii + (double)odelta * 0.5; // centers of histogram bins
        mt += (double)ii * ((double)histogram(ii)) / (double)numElem;
    }

    for (int ii = 0; ii < nLevels; ii++) {
        binv(ii) = ii;
    }

    double sq, smuk;
    int nComb;

    nComb = nCombinations(nBins, nLevels);
    std::vector<bool> v(nBins);
    std::fill(v.begin(), v.begin() + nLevels, true);

    umat ibin = zeros<umat>(nComb, nLevels); // indices from combinations will be stored here

    int cc = 0;
    int ci = 0;
    do {
        for (int i = 0; i < nBins; ++i) {
            if (ci == nLevels) ci = 0;
            if (v[i]) {
                ibin(cc, ci) = i;
                ci++;
            }
        }
        cc++;
    } while (std::prev_permutation(v.begin(), v.end()));

    uvec lastIndex = zeros<uvec>(nLevels);

    // Perform operations on pre-calculated indices
    for (int ii = 0; ii < nComb; ii++) {
        for (int jj = 0; jj < nLevels; jj++) {
            smuk = 0;
            sq = 0;
            if (lastIndex(jj) != ibin(ii, jj) || ii == 0) {
                q(jj) += double(histogram(ibin(ii, jj))) / (double)numElem;
                muk(jj) += ibin(ii, jj) * (double(histogram(ibin(ii, jj)))) / (double)numElem;
                mu(jj) = muk(jj) / q(jj);
                q(jj + 1) = 0.0;
                muk(jj + 1) = 0.0;

                if (jj > 0) {
                    for (int kk = 0; kk <= jj; kk++) {
                        sq += q(kk);
                        smuk += muk(kk);
                    }
                    q(jj + 1) = 1 - sq;
                    muk(jj + 1) = mt - smuk;
                    mu(jj + 1) = muk(jj + 1) / q(jj + 1);
                }

                if (jj > 0 && jj < (nLevels - 1)) {
                    q(jj + 1) = 0.0;
                    muk(jj + 1) = 0.0;
                }

                lastIndex(jj) = ibin(ii, jj);
            }
        }

        variance = 0.0;
        for (int jj = 0; jj <= nLevels; jj++) {
            variance += q(jj) * (mu(jj) - mt) * (mu(jj) - mt);
        }

        if (variance > bestVariance) {
            bestVariance = variance;
            for (int jj = 0; jj < nLevels; jj++) {
                threshold(jj) = oval(ibin(ii, jj));
            }
        }
    }

    cout << "Optimized thresholds: ";
    for (int jj = 0; jj < nLevels; jj++) {
        cout << threshold(jj) << " ";
    }
    cout << endl;

    for (unsigned int jj = 0; jj < map.n_rows; jj++) {
        for (unsigned int kk = 0; kk < map.n_cols; kk++) {
            for (int ll = 0; ll < nLevels; ll++) {
                if (map(jj, kk) >= threshold(ll)) {
                    mapr(jj, kk) = ll + 1;
                }
            }
        }
    }

    return mapr;
}

int nCombinations(int n, int r) {
    if (r > n) return 0;
    if (r * 2 > n) r = n - r;
    if (r == 0) return 1;

    int ret = n;
    for (int i = 2; i <= r; ++i) {
        ret *= (n - i + 1);
        ret /= i;
    }
    return ret;
}
I'm using the Hough transform in OpenCV to detect lines. However, I know in advance that I only need lines within a very limited range of angles (about 10 degrees or so). I'm doing this in a very performance sensitive setting, so I'd like to avoid the extra work spent detecting lines at other angles, lines I know in advance I don't care about.
I could extract the Hough source from OpenCV and just hack it to take min_theta and max_theta parameters, but I'd like a less fragile approach (having to manually update my code with each OpenCV update, etc.).
What's the best approach here?
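Note for readers on OpenCV 3.0 and later: HoughLines accepts min_theta and max_theta parameters, which does exactly this without patching the source. A minimal Python sketch:

import cv2
import numpy as np

img = cv2.imread('source.png', cv2.IMREAD_GRAYSCALE)
edges = cv2.Canny(img, 50, 150, apertureSize=3)

# Only accumulate angles between 60 and 120 degrees.
lines = cv2.HoughLines(edges, 1, np.pi / 180, 100,
                       min_theta=np.pi / 3, max_theta=2 * np.pi / 3)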
Well, I've modified the icvHoughLines function to scan only a certain range of angles. I'm sure there are cleaner ways that play with memory allocation as well, but I got a speed gain from 100 ms down to 33 ms when narrowing the angle range from 180 deg to 60 deg, so I'm happy with that.
Note that this code also outputs the accumulator value. Also, I only output 1 line, because that fit my purposes, but there was no real gain there.
static void
icvHoughLinesStandard2( const CvMat* img, float rho, float theta,
                        int threshold, CvSeq *lines, int linesMax )
{
    cv::AutoBuffer<int> _accum, _sort_buf;
    cv::AutoBuffer<float> _tabSin, _tabCos;
    const uchar* image;
    int step, width, height;
    int numangle, numrho;
    int total = 0;
    float ang;
    int r, n;
    int i, j;
    float irho = 1 / rho;
    double scale;

    CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );

    image = img->data.ptr;
    step = img->step;
    width = img->cols;
    height = img->rows;

    numangle = cvRound(CV_PI / theta);
    numrho = cvRound(((width + height) * 2 + 1) / rho);

    _accum.allocate((numangle+2) * (numrho+2));
    _sort_buf.allocate(numangle * numrho);
    _tabSin.allocate(numangle);
    _tabCos.allocate(numangle);
    int *accum = _accum, *sort_buf = _sort_buf;
    float *tabSin = _tabSin, *tabCos = _tabCos;

    memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );

    // find n and ang limits (in our case we want 60 to 120 deg)
    float limit_min = 60.0/180.0*CV_PI;
    float limit_max = 120.0/180.0*CV_PI;
    //num_steps = (limit_max - limit_min)/theta;
    int start_n = floor(limit_min/theta);
    int stop_n = floor(limit_max/theta);

    for( ang = limit_min, n = start_n; n < stop_n; ang += theta, n++ )
    {
        tabSin[n] = (float)(sin(ang) * irho);
        tabCos[n] = (float)(cos(ang) * irho);
    }

    // stage 1. fill accumulator (only over the restricted angle range)
    for( i = 0; i < height; i++ )
        for( j = 0; j < width; j++ )
        {
            if( image[i * step + j] != 0 )
                for( n = start_n; n < stop_n; n++ )
                {
                    r = cvRound( j * tabCos[n] + i * tabSin[n] );
                    r += (numrho - 1) / 2;
                    accum[(n+1) * (numrho+2) + r+1]++;
                }
        }

    // keep only the global maximum within the angle range
    int max_accum = 0;
    int max_ind = 0;
    for( r = 0; r < numrho; r++ )
    {
        for( n = start_n; n < stop_n; n++ )
        {
            int base = (n+1) * (numrho+2) + r+1;
            if (accum[base] > max_accum)
            {
                max_accum = accum[base];
                max_ind = base;
            }
        }
    }

    // CvLinePolar2 is the rho/angle/votes struct defined earlier in this thread
    CvLinePolar2 line;
    scale = 1./(numrho+2);
    int idx = max_ind;
    n = cvFloor(idx*scale) - 1;
    r = idx - (n+1)*(numrho+2) - 1;
    line.rho = (r - (numrho - 1)*0.5f) * rho;
    line.angle = n * theta;
    line.votes = accum[idx];
    cvSeqPush( lines, &line );
}
If you use the probabilistic Hough transform, then the output is a pair of cvPoint endpoints for each detected segment. We can get the x and y coordinates of the two points via pt1.x, pt1.y and pt2.x, pt2.y.
Then use the simple formula for the slope of a line, (y2 - y1)/(x2 - x1). Taking the arctan (inverse tangent) of that yields the angle in radians. Then simply filter out the desired angles from the values obtained for each Hough line.
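A sketch of that filtering in Python, assuming segments from HoughLinesP (the threshold and angle window here are placeholders):

import cv2
import numpy as np

img = cv2.imread('source.png', cv2.IMREAD_GRAYSCALE)
edges = cv2.Canny(img, 50, 150, apertureSize=3)
segments = cv2.HoughLinesP(edges, 1, np.pi / 180, 80,
                           minLineLength=30, maxLineGap=10)

kept = []
if segments is not None:
    for x1, y1, x2, y2 in segments[:, 0]:
        angle = np.degrees(np.arctan2(y2 - y1, x2 - x1))  # segment angle, degrees
        if 80 <= abs(angle) <= 100:  # e.g. keep near-vertical segments only
            kept.append((x1, y1, x2, y2))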
I think it's more natural to use the standard HoughLines(...) function, which gives a collection of lines directly in rho and theta terms, and to select the necessary angle range from it, rather than recalculating the angle from segment endpoints.
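For example, a minimal sketch of that post-filtering on the (rho, theta) pairs (the constants are placeholders):

import cv2
import numpy as np

img = cv2.imread('source.png', cv2.IMREAD_GRAYSCALE)
edges = cv2.Canny(img, 50, 150, apertureSize=3)
lines = cv2.HoughLines(edges, 1, np.pi / 180, 100)

theta_min, theta_max = np.pi / 3, 2 * np.pi / 3  # keep 60 to 120 degrees
selected = [] if lines is None else [
    (rho, theta) for rho, theta in lines[:, 0]
    if theta_min <= theta <= theta_max
]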