I'm writing code for image denoising and came across a strange problem with stripes in the processed images. When I calculate the X-gradient of the image, horizontal stripes appear (or vertical ones for the Y direction); see the "Lena X gradient" image.
The whole algorithm works fine and I seem to be getting the correct answer (I'm comparing against a C program), except for those annoying stripes (see the "Lena result" image).
The distance between the stripes changes with different block sizes, and the stripe positions are different each time I run the program! Here is the part of the program related to the gradient calculation. I have a feeling I'm doing something very stupid :) Thank you!
#define BLKXSIZE 16
#define BLKYSIZE 16
#define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) )

void Diff4th_GPU(float* A, float* B, int N, int M, int Z, float sigma, int iter, float tau, int type)
{
    float *Ad;
    dim3 dimBlock(BLKXSIZE,BLKYSIZE);
    dim3 dimGrid(idivup(N,BLKXSIZE), idivup(M,BLKYSIZE));
    cudaMalloc((void**)&Ad,N*M*sizeof(float));
    cudaMemcpy(Ad,A,N*M*sizeof(float),cudaMemcpyHostToDevice);
    cudaCheckErrors("cc1");
    int n = 1;
    while (n <= iter) {
        Diff4th2D<<<dimGrid,dimBlock>>>(Ad, N, M, sigma, iter, tau, type);
        n++;
        cudaDeviceSynchronize();
        cudaCheckErrors("kernel");
    }
    cudaMemcpy(B,Ad,N*M*sizeof(float),cudaMemcpyDeviceToHost);
    cudaCheckErrors("cc2");
    cudaFree(Ad);
}
__global__ void Diff4th2D(float* A, int N, int M, float sigma, int iter, float tau, int type)
{
    float gradX, gradX_sq, gradY, gradY_sq, gradXX, gradYY, gradXY, sq_sum, xy_2, Lam, V_norm, V_orth, c, c_sq, lam_t;
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int j = blockIdx.y*blockDim.y + threadIdx.y;
    int index = j + i*N;
    if ((i < N) && (j < M))
    {
        float gradX = 0, gradY = 0, gradXX = 0, gradYY = 0, gradXY = 0;
        if ((i>1) && (i<N)) {
            if ((j>1) && (j<M)) {
                int indexN = (j)+(i-1)*(N);
                if (indexN > ((N*M)-1)) indexN = (N*M)-1;
                if (indexN < 0) indexN = 0;
                int indexS = (j)+(i+1)*(N);
                if (indexS > ((N*M)-1)) indexS = (N*M)-1;
                if (indexS < 0) indexS = 0;
                int indexW = (j-1)+(i)*(N);
                if (indexW > ((N*M)-1)) indexW = (N*M)-1;
                if (indexW < 0) indexW = 0;
                int indexE = (j+1)+(i)*(N);
                if (indexE > ((N*M)-1)) indexE = (N*M)-1;
                if (indexE < 0) indexE = 0;
                gradX = 0.5*(A[indexN]-A[indexS]);
                A[index] = gradX;
            }
        }
    }
}
You have a race condition inside your kernel, as elements of A may or may not be overwritten before they are used.
Use different arrays for input and output.
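A minimal sketch of that fix on the host side, assuming the kernel signature is changed to take separate input and output pointers (the Ad_in/Ad_out names and the ping-pong swap are my additions, not the original code):

// Read from one buffer, write to the other, then swap between iterations.
float *Ad_in, *Ad_out;
cudaMalloc((void**)&Ad_in,  N*M*sizeof(float));
cudaMalloc((void**)&Ad_out, N*M*sizeof(float));
cudaMemcpy(Ad_in, A, N*M*sizeof(float), cudaMemcpyHostToDevice);
for (int n = 0; n < iter; n++) {
    // The kernel now only reads from A_in[...] and only writes A_out[index]
    Diff4th2D<<<dimGrid,dimBlock>>>(Ad_in, Ad_out, N, M, sigma, iter, tau, type);
    cudaDeviceSynchronize();
    float *tmp = Ad_in; Ad_in = Ad_out; Ad_out = tmp; // this iteration's output feeds the next
}
cudaMemcpy(B, Ad_in, N*M*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(Ad_in);
cudaFree(Ad_out);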
I'm trying to do grayscale contrast stretching with a C++ program; the source I'm following is here. This is the code I've got so far:
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;
using namespace std;

int computeStretched(int x, int l1, int l2, int r1, int r2);

int main() {
    Mat img = imread("pict6.jpg"); // Mat initialization of the original image
    Mat grayScaleImg(img.size(), CV_8UC1); // grayscale Mat initialization
    cvtColor(img, grayScaleImg, COLOR_BGR2GRAY); // convert the input image to grayscale
    for (int i = 0; i < img.rows; i++) {
        for (int j = 0; j < img.cols; j++) {
            for (int k = 0; k < 3; k++) {
                int output = computeStretched(grayScaleImg.at<Vec3b>(i, j)[k], 70, 0, 200, 255);
                stretch_result.at<Vec3b>(i, j) = saturate_cast<uchar>(output);
            }
        }
    }
    waitKey(0);
    return 0;
}

int computeStretched(int x, int l1, int l2, int r1, int r2) {
    float calcVal;
    if (0 <= x && x <= l1) {
        calcVal = ((float)l2 / l1) * x;
    } else if (l1 < x && x <= r1) {
        calcVal = ((float)(r2 - l2) / (r1 - l1)) * (x - l1) + l2;
    } else if (r1 < x && x <= 255) {
        calcVal = ((float)(255 - r2) / (255 - r1)) * (x - r1) + r2;
    }
    return (int)calcVal;
}
However, the image I'm feeding in isn't an RGB image but an already grayscale one. I want to perform the same operation as in the sample code on a plain grayscale picture. What must be changed in the listing above to enable that?
You have a grayscale image, which means a single channel, so modify your code accordingly:
for (int i = 0; i < img.rows; i++) {
    for (int j = 0; j < img.cols; j++) {
        int output = computeStretched(grayScaleImg.at<uchar>(i, j), 70, 0, 200, 255);
        stretch_result.at<uchar>(i, j) = saturate_cast<uchar>(output);
    }
}
Also, initialize "stretch_result" before using it.
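For example, a minimal initialization (the size and type here are an assumption matching the grayscale input):

Mat stretch_result(grayScaleImg.size(), CV_8UC1); // single-channel output, same size as the input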
I am trying to understand the LASSO algorithm for linear regression. I have implemented the algorithm using the naive coordinate descent method for optimization. However, the coefficients I obtain from my code don't match those obtained from the glmnet package for LASSO in R. I would like to understand how to make the algorithm more accurate, so that the coefficients match those obtained from R. I think glmnet uses coordinate descent as well.
Note: I have generated some toy data with 11 observations and 6 features (x, x^2, x^3, ..., x^6). The last column contains the y values generated from a dummy function (e^(-x^2)). I want to use LASSO to estimate this function. I have also randomly picked the initial weight vector multiple times to cross-check my results.
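For reference, the per-coordinate update the code below implements is the standard soft-thresholding step (my restatement, not part of the original post):

w_j = S(rho_j, lambda/2) / ||x_j||^2, where S(rho, g) = sign(rho) * max(|rho| - g, 0)

so w_j is set to 0 whenever |rho_j| <= lambda/2, which is exactly what the if/else branch on rho[r] in the main loop does.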
Here is my code:
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<math.h>
#include<time.h>
int num_dim = 6;
int num_obs = 11;
/*Computes the normalization factor*/
float norm_feature(int j,double arr[][7],int n){
float sum = 0.0;
int i;
for(i=0;i<n;i++){
sum = sum + pow(arr[i][j],2);
}
return sum;
}
/*Computes the partial sum*/
float approx(int dim,int d_ignore,float weights[],double arr[][7],int i){
int flag = 1;
if(d_ignore == -1)
flag = 0;
int j;
float sum = 0.0;
for(j=0;j<dim;j++){
if(j != d_ignore)
sum = sum + weights[j]*arr[i][j];
else
continue;
}
return sum;
}
/* Computes rho-j */
float rho_j(double arr[][7],int n,int j,float weights[7]){
float sum = 0.0;
int i;
float partial_sum ;
for(i=0;i<n;i++){
partial_sum = approx(num_dim,j,weights,arr,i);
sum = sum + arr[i][j]*(arr[i][num_dim]-partial_sum);
}
return sum;
}
float intercept(float arr1[7],double arr[][7],int dim) {
int i;
float sum =0.0;
for (i = 0; i < num_obs; i++) {
sum = sum + pow((arr[i][num_dim]) - approx(num_dim, -1, arr1, arr, i), 1);
}
return sum;
}
int main(){
double data[num_obs][7];
int i=0,j=0;
float a = 1.0;
float lambda = 0.1; //Setting lambda
float weights[7]; //weights[6] contains the intercept
srand((unsigned int) time(NULL));
/*Generating the data matrix */
for(i=0;i<11;i++)
data[i][0] = ((float)rand()/(float)(RAND_MAX)) * a;
for(i=0;i<11;i++)
for(j=1;j<6;j++)
data[i][j] = pow(data[i][0],j+1);
for(i=0;i<11;i++)
data[i][6] = exp(-pow(data[i][0],2)); // the last column in the data matrix contains the y values generated by the dummy function
/*Printing the data matrix */
printf("Data Matrix:\n");
for(i=0;i<11;i++){
for(j=0;j<7;j++){
printf("%lf ",data[i][j]);}
printf("\n");}
printf("\n");
int seed =0;
while(seed<20) {
//Initializing the weight vector
for (i = 0; i < 7; i++)
weights[i] = ((float) rand() / (float) (RAND_MAX)) * a;
int iter = 500;
int t = 0;
int r, l;
double rho[num_dim];
for (i = 0; i < 6; i++) {
rho[i] = rho_j(data, num_obs, i, weights);
}
// Intercept initialization
weights[num_dim] = intercept(weights,data,num_dim);
printf("Weights initialization: ");
for (i = 0; i < (num_dim+1); i++)
printf("%f ", weights[i]);
printf("\n");
while (t < iter) {
for (r = 0; r < num_dim; r++) {
rho[r] = rho_j(data, num_obs, r, weights);
//printf("rho %d:%f ",r,rho[r]);
if (rho[r] < -lambda / 2)
weights[r] = (rho[r] + lambda / 2) / norm_feature(r, data, num_obs);
else if (rho[r] > lambda / 2)
weights[r] = (rho[r] - lambda / 2) / norm_feature(r, data, num_obs);
else
weights[r] = 0;
weights[num_dim] = intercept(weights, data, num_dim);
}
/* printf("Iter(%d): ", t);
for (l = 0; l < 7; l++)
printf("%f ", weights[l]);
printf("\n");*/
t++;
}
//printf("\n");
printf("Final Weights: ");
for (i = 0; i < 7; i++)
printf("%f ", weights[i]);
printf("\n");
printf("\n");
seed++;
}
return 0;
}
How can I merge images like below into a single image using OpenCV (there can be any number of them both horizontally and vertically)? Is there any built-in solution to do it?
Additional pieces:
Well, it seems that I finished the puzzle:
Main steps:
Compare each pair of images (puzzle pieces) to know the relative position (findRelativePositions and getPosition).
Build a map knowing the relative positions of the pieces (buildPuzzle and builfForPiece).
Create the final collage putting each image at the correct position (final part of buildPuzzle).
Comparison between pieces A and B in step 1 is done by checking for similarity (sum of absolute differences) among:
B is NORTH to A: A first row and B last row;
B is SOUTH to A: A last row and B first row;
B is WEST to A : A last column and B first column;
B is EAST to A : A first column and B last column.
Since the images do not overlap, but we can assume that adjoining rows (columns) are quite similar, the key aspect is to use an (ad hoc) threshold to decide whether two pieces are adjacent or not. This is handled in the function getPosition, with its parameter threshold.
Here is the full code. Please let me know if something is not clear.
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <set>
using namespace std;
using namespace cv;
enum Direction
{
NORTH = 0,
SOUTH,
WEST,
EAST
};
int getPosition(const Mat3b& A, const Mat3b& B, double& cost)
{
Mat hsvA, hsvB;
cvtColor(A, hsvA, COLOR_BGR2HSV);
cvtColor(B, hsvB, COLOR_BGR2HSV);
int threshold = 1000;
// Check NORTH
Mat3b AN = hsvA(Range(0, 1), Range::all());
Mat3b BS = hsvB(Range(B.rows - 1, B.rows), Range::all());
Mat3b AN_BS;
absdiff(AN, BS, AN_BS);
Scalar scoreN = sum(AN_BS);
// Check SOUTH
Mat3b AS = hsvA(Range(A.rows - 1, A.rows), Range::all());
Mat3b BN = hsvB(Range(0, 1), Range::all());
Mat3b AS_BN;
absdiff(AS, BN, AS_BN);
Scalar scoreS = sum(AS_BN);
// Check WEST
Mat3b AW = hsvA(Range::all(), Range(A.cols - 1, A.cols));
Mat3b BE = hsvB(Range::all(), Range(0, 1));
Mat3b AW_BE;
absdiff(AW, BE, AW_BE);
Scalar scoreW = sum(AW_BE);
// Check EAST
Mat3b AE = hsvA(Range::all(), Range(0, 1));
Mat3b BW = hsvB(Range::all(), Range(B.cols - 1, B.cols));
Mat3b AE_BW;
absdiff(AE, BW, AE_BW);
Scalar scoreE = sum(AE_BW);
vector<double> scores{ scoreN[0], scoreS[0], scoreW[0], scoreE[0] };
int idx_min = distance(scores.begin(), min_element(scores.begin(), scores.end()));
int direction = (scores[idx_min] < threshold) ? idx_min : -1;
cost = scores[idx_min];
return direction;
}
void resolveConflicts(Mat1i& positions, Mat1d& costs)
{
for (int c = 0; c < 4; ++c)
{
// Search for duplicate pieces in each column
set<int> pieces;
set<int> dups;
for (int r = 0; r < positions.rows; ++r)
{
int label = positions(r, c);
if (label >= 0)
{
if (pieces.count(label) == 1)
{
dups.insert(label);
}
else
{
pieces.insert(label);
}
}
}
if (dups.size() > 0)
{
int min_idx = -1;
for (int duplicate : dups)
{
// Find minimum cost position
Mat1d column = costs.col(c);
min_idx = distance(column.begin(), min_element(column.begin(), column.end()));
// Keep only minimum cost position
for (int ir = 0; ir < positions.rows; ++ir)
{
int label = positions(ir, c);
if ((label == duplicate) && (ir != min_idx))
{
positions(ir, c) = -1;
}
}
}
}
}
}
void findRelativePositions(const vector<Mat3b>& pieces, Mat1i& positions)
{
positions = Mat1i(pieces.size(), 4, -1);
Mat1d costs(pieces.size(), 4, DBL_MAX);
for (int i = 0; i < pieces.size(); ++i)
{
for (int j = i + 1; j < pieces.size(); ++j)
{
double cost;
int pos = getPosition(pieces[i], pieces[j], cost);
if (pos >= 0)
{
if (costs(i, pos) > cost)
{
positions(i, pos) = j;
costs(i, pos) = cost;
switch (pos)
{
case NORTH:
positions(j, SOUTH) = i;
costs(j, SOUTH) = cost;
break;
case SOUTH:
positions(j, NORTH) = i;
costs(j, NORTH) = cost;
break;
case WEST:
positions(j, EAST) = i;
costs(j, EAST) = cost;
break;
case EAST:
positions(j, WEST) = i;
costs(j, WEST) = cost;
break;
}
}
}
}
}
resolveConflicts(positions, costs);
}
void builfForPiece(int idx_piece, set<int>& posed, Mat1i& labels, const Mat1i& positions)
{
Point pos(-1, -1);
// Find idx_piece on grid;
for (int r = 0; r < labels.rows; ++r)
{
for (int c = 0; c < labels.cols; ++c)
{
if (labels(r, c) == idx_piece)
{
pos = Point(c, r);
break;
}
}
if (pos.x >= 0) break;
}
if (pos.x < 0) return;
// Put connected pieces
for (int c = 0; c < 4; ++c)
{
int next = positions(idx_piece, c);
if (next > 0)
{
switch (c)
{
case NORTH:
labels(Point(pos.x, pos.y - 1)) = next;
posed.insert(next);
break;
case SOUTH:
labels(Point(pos.x, pos.y + 1)) = next;
posed.insert(next);
break;
case WEST:
labels(Point(pos.x + 1, pos.y)) = next;
posed.insert(next);
break;
case EAST:
labels(Point(pos.x - 1, pos.y)) = next;
posed.insert(next);
break;
}
}
}
}
Mat3b buildPuzzle(const vector<Mat3b>& pieces, const Mat1i& positions, Size sz)
{
int n_pieces = pieces.size();
set<int> posed;
set<int> todo;
for (int i = 0; i < n_pieces; ++i) todo.insert(i);
Mat1i labels(n_pieces * 2 + 1, n_pieces * 2 + 1, -1);
// Place first element in the center
todo.erase(0);
labels(Point(n_pieces, n_pieces)) = 0;
posed.insert(0);
builfForPiece(0, posed, labels, positions);
// Build puzzle starting from the already placed elements
while (todo.size() > 0)
{
auto it = todo.begin();
int next = -1;
do
{
next = *it;
++it;
} while (posed.count(next) == 0 && it != todo.end());
todo.erase(next);
builfForPiece(next, posed, labels, positions);
}
// Posed all pieces, now collage!
vector<Point> pieces_position;
Mat1b mask = labels >= 0;
findNonZero(mask, pieces_position);
Rect roi = boundingRect(pieces_position);
Mat1i lbls = labels(roi);
Mat3b collage(roi.height * sz.height, roi.width * sz.width, Vec3b(0, 0, 0));
for (int r = 0; r < lbls.rows; ++r)
{
for (int c = 0; c < lbls.cols; ++c)
{
if (lbls(r, c) >= 0)
{
Rect rect(c*sz.width, r*sz.height, sz.width, sz.height);
pieces[lbls(r, c)].copyTo(collage(rect));
}
}
}
return collage;
}
int main()
{
// Load images
vector<String> filenames;
glob("D:\\SO\\img\\puzzle*", filenames);
vector<Mat3b> pieces(filenames.size());
for (int i = 0; i < filenames.size(); ++i)
{
pieces[i] = imread(filenames[i], IMREAD_COLOR);
}
// Find Relative positions
Mat1i positions;
findRelativePositions(pieces, positions);
// Build the puzzle
Mat3b puzzle = buildPuzzle(pieces, positions, pieces[0].size());
imshow("Puzzle", puzzle);
waitKey();
return 0;
}
NOTE
No, there is no built-in solution to perform this. Image stitching won't work, since the images do not overlap.
I cannot guarantee that this works for every puzzle, but it should work for most of them.
I probably should have spent these couple of hours differently, but it was fun :D
EDIT
Adding more puzzle pieces generated wrong results with the previous code version. This was due to the (wrong) assumption that at most one piece is a good enough match to be connected to a given piece.
Now I have added a cost matrix, and only the minimum-cost piece is saved as the neighbor of a given piece.
I also added a resolveConflicts function that prevents a piece from being assigned (in a non-conflicting position) to more than one piece.
This is the result adding more pieces:
UPDATE
Considerations after increasing the number of puzzle pieces:
This solution depends on the input order of the pieces, since it uses a greedy approach to find neighbors.
While searching for neighbors, it's better to compare the H channel in the HSV space. I updated the code above with this improvement.
The final solution probably needs some kind of global minimization of a global cost matrix. That would make the method independent of the input order. I'll get back to this as soon as I can.
Once you have loaded these images as OpenCV Mat objects, you can concatenate them either vertically or horizontally using:
Mat A, B; // Images that will be concatenated
Mat H; // Here we will concatenate A and B horizontally
Mat V; // Here we will concatenate A and B vertically
hconcat(A, B, H);
vconcat(A, B, V);
If you need to concatenate more than two images, you can use these methods recursively.
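For more than two images, a minimal sketch for tiling a grid of equally sized pieces (the makeGrid helper, its rows/cols layout, and the row-major pieces vector are my assumptions, not part of this answer; it assumes the usual OpenCV includes and namespaces as above):

// Assumes 'pieces' holds rows*cols images of identical size, stored in row-major order.
Mat makeGrid(const vector<Mat>& pieces, int rows, int cols)
{
    vector<Mat> rowImgs(rows);
    for (int r = 0; r < rows; ++r)
    {
        // Concatenate the 'cols' images of this row side by side
        vector<Mat> rowPieces(pieces.begin() + r * cols, pieces.begin() + (r + 1) * cols);
        hconcat(rowPieces, rowImgs[r]);
    }
    // Stack the resulting row images on top of each other
    Mat grid;
    vconcat(rowImgs, grid);
    return grid;
}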
By the way, I think these methods are not included in the OpenCV documentation, but I have used them in the past.
I'm writing a Gaussian filter, and my goal is to match the Gaussian Blur filter in Photoshop as closely as possible. This is my first image-processing endeavor. Some problems/questions I have are:
Further blurring an image with my filter darkens it, while Photoshop's seems to lighten it.
The deviation value ("sigma" in my code) I'm using is r/3, which results in the Gaussian curve having fallen to about 0.0001 at the edge of the matrix... is there a better way to determine this value?
How does Photoshop (or most people) handle image borders for this type of blur?
int matrixDimension = (radius*2)+1;
float sigma = radius/3;
float twoSigmaSquared = 2*pow(sigma, 2);
float oneOverSquareRootOfTwoPiSigmaSquared = 1/(sqrt(M_PI*twoSigmaSquared));
float kernel[matrixDimension];
int index = 0;
for (int offset = -radius; offset <= radius; offset++) {
float xSquared = pow(offset, 2);
float exponent = -(xSquared/twoSigmaSquared);
float eToThePower = pow(M_E, exponent);
float multFactor = oneOverSquareRootOfTwoPiSigmaSquared*eToThePower;
kernel[index] = multFactor;
index++;
}
//Normalize the kernel such that all its values will add to 1
float sum = 0;
for (int i = 0; i < matrixDimension; i++) {
sum += kernel[i];
}
for (int i = 0; i < matrixDimension; i++) {
kernel[i] = kernel[i]/sum;
}
//Blur horizontally
for (int row = 0; row < imageHeight; row++) {
for (int column = 0; column < imageWidth; column++) {
int currentPixel = (row*imageWidth)+column;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int index = 0;
for (int offset = -radius; offset <= radius; offset++) {
if (!(column+offset < 0) && !(column+offset > imageWidth-1)) {
int firstByteOfPixelWereLookingAtInSrcData = (currentPixel+offset)*4;
int in1 = srcData[firstByteOfPixelWereLookingAtInSrcData];
int in2 = srcData[firstByteOfPixelWereLookingAtInSrcData+1];
int in3 = srcData[firstByteOfPixelWereLookingAtInSrcData+2];
int in4 = srcData[firstByteOfPixelWereLookingAtInSrcData+3];
sum1 += (int)(in1 * kernel[index]);
sum2 += (int)(in2 * kernel[index]);
sum3 += (int)(in3 * kernel[index]);
sum4 += (int)(in4 * kernel[index]);
}
index++;
}
int currentPixelInData = currentPixel*4;
destData[currentPixelInData] = sum1;
destData[currentPixelInData+1] = sum2;
destData[currentPixelInData+2] = sum3;
destData[currentPixelInData+3] = sum4;
}
}
//Blur vertically
for (int row = 0; row < imageHeight; row++) {
for (int column = 0; column < imageWidth; column++) {
int currentPixel = (row*imageWidth)+column;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int index = 0;
for (int offset = -radius; offset <= radius; offset++) {
if (!(row+offset < 0) && !(row+offset > imageHeight-1)) {
int firstByteOfPixelWereLookingAtInSrcData = (currentPixel+(offset*imageWidth))*4;
int in1 = destData[firstByteOfPixelWereLookingAtInSrcData];
int in2 = destData[firstByteOfPixelWereLookingAtInSrcData+1];
int in3 = destData[firstByteOfPixelWereLookingAtInSrcData+2];
int in4 = destData[firstByteOfPixelWereLookingAtInSrcData+3];
sum1 += (int)(in1 * kernel[index]);
sum2 += (int)(in2 * kernel[index]);
sum3 += (int)(in3 * kernel[index]);
sum4 += (int)(in4 * kernel[index]);
}
index++;
}
int currentPixelInData = currentPixel*4;
finalData[currentPixelInData] = sum1;
finalData[currentPixelInData+1] = sum2;
finalData[currentPixelInData+2] = sum3;
finalData[currentPixelInData+3] = sum4;
}
}
To reverse engineer a filter, you need to find its impulse response. On a background of a very dark value, say 32, place a nearly white pixel, say 223. You don't want to use 0 and 255 because some filters will try to create values beyond the starting values. Run the filter on this image, and take the output values and stretch them from 0.0 to 1.0: (value-32)/(223-32). Now you have the exact weights needed to emulate the filter.
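A minimal sketch of that measurement, assuming an 8-bit single-channel image and a hypothetical runFilter function standing in for the filter being reverse-engineered:

#include <vector>

void runFilter(const unsigned char* src, unsigned char* dst, int width, int height); // hypothetical: the filter under test

void measureImpulseResponse()
{
    // Dark background (32) with one near-white pixel (223) in the middle.
    const int W = 129, H = 129;
    std::vector<unsigned char> test(W * H, 32), out(W * H, 0);
    test[(H / 2) * W + (W / 2)] = 223;

    runFilter(test.data(), out.data(), W, H);

    // Stretch the response to 0.0..1.0 to recover the effective filter weights.
    std::vector<float> weights(W * H);
    for (int i = 0; i < W * H; ++i)
        weights[i] = (out[i] - 32) / float(223 - 32);
}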
There are lots of ways to treat the image edges. I would suggest taking the filter weights and summing them, then dividing the result by that sum; if you're trying to go beyond the edge, use 0.0 for both the pixel value and the filter weight on that pixel.
Boundary conditions sometimes depend on exactly what you're doing and what kind of data you're working with, but I think for general-purpose image manipulation the best thing to do is to extend the values at the borders beyond the edges of the image. Not literally, of course, but if the filter tries to read a pixel that's outside the image borders, you substitute the value of the nearest pixel on the edge of the image. Which is really the same as just clamping the row to be between 0 and height-1, and the column to be between 0 and width-1.
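A minimal sketch of that clamp-to-edge lookup (the helper and its names are mine, not from the answer; it assumes a single-channel 8-bit image stored row by row):

// Returns the pixel at (row, col), clamping out-of-range coordinates to the nearest edge pixel.
static inline unsigned char sampleClamped(const unsigned char* img, int width, int height, int row, int col)
{
    if (row < 0) row = 0;
    if (row > height - 1) row = height - 1;
    if (col < 0) col = 0;
    if (col > width - 1) col = width - 1;
    return img[row * width + col];
}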
I'm using the Hough transform in OpenCV to detect lines. However, I know in advance that I only need lines within a very limited range of angles (about 10 degrees or so). I'm doing this in a very performance sensitive setting, so I'd like to avoid the extra work spent detecting lines at other angles, lines I know in advance I don't care about.
I could extract the Hough source from OpenCV and just hack it to take min_theta and max_theta parameters, but I'd like a less fragile approach (otherwise I'd have to manually update my code with each OpenCV update, etc.).
What's the best approach here?
Well, I've modified the icvHoughLinesStandard function (renamed icvHoughLinesStandard2 below) to scan only a certain range of angles. I'm sure there are cleaner ways that also play with the memory allocation, but I got a speed gain from 100 ms to 33 ms when going from a 180-degree range to a 60-degree range, so I'm happy with that.
Note that this code also outputs the accumulator value. Also, I only output one line because that fit my purposes, but there was no real gain there.
static void
icvHoughLinesStandard2( const CvMat* img, float rho, float theta,
int threshold, CvSeq *lines, int linesMax )
{
cv::AutoBuffer<int> _accum, _sort_buf;
cv::AutoBuffer<float> _tabSin, _tabCos;
const uchar* image;
int step, width, height;
int numangle, numrho;
int total = 0;
float ang;
int r, n;
int i, j;
float irho = 1 / rho;
double scale;
CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );
image = img->data.ptr;
step = img->step;
width = img->cols;
height = img->rows;
numangle = cvRound(CV_PI / theta);
numrho = cvRound(((width + height) * 2 + 1) / rho);
_accum.allocate((numangle+2) * (numrho+2));
_sort_buf.allocate(numangle * numrho);
_tabSin.allocate(numangle);
_tabCos.allocate(numangle);
int *accum = _accum, *sort_buf = _sort_buf;
float *tabSin = _tabSin, *tabCos = _tabCos;
memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );
// find n and ang limits (in our case we want 60 to 120 degrees)
float limit_min = 60.0/180.0*CV_PI;
float limit_max = 120.0/180.0*CV_PI;
//num_steps = (limit_max - limit_min)/theta;
int start_n = floor(limit_min/theta);
int stop_n = floor(limit_max/theta);
for( ang = limit_min, n = start_n; n < stop_n; ang += theta, n++ )
{
tabSin[n] = (float)(sin(ang) * irho);
tabCos[n] = (float)(cos(ang) * irho);
}
// stage 1. fill accumulator
for( i = 0; i < height; i++ )
for( j = 0; j < width; j++ )
{
if( image[i * step + j] != 0 )
//
for( n = start_n; n < stop_n; n++ )
{
r = cvRound( j * tabCos[n] + i * tabSin[n] );
r += (numrho - 1) / 2;
accum[(n+1) * (numrho+2) + r+1]++;
}
}
int max_accum = 0;
int max_ind = 0;
for( r = 0; r < numrho; r++ )
{
for( n = start_n; n < stop_n; n++ )
{
int base = (n+1) * (numrho+2) + r+1;
if (accum[base] > max_accum)
{
max_accum = accum[base];
max_ind = base;
}
}
}
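// CvLinePolar2 is assumed to be the author's extension of CvLinePolar with an extra 'votes' field (it is not defined in this snippet)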
CvLinePolar2 line;
scale = 1./(numrho+2);
int idx = max_ind;
n = cvFloor(idx*scale) - 1;
r = idx - (n+1)*(numrho+2) - 1;
line.rho = (r - (numrho - 1)*0.5f) * rho;
line.angle = n * theta;
line.votes = accum[idx];
cvSeqPush( lines, &line );
}
If you use the probabilistic Hough transform, the output is a pair of cvPoints for each detected line, lines[0] and lines[1]. We can get the x and y coordinates of the two points from pt1.x, pt1.y and pt2.x, pt2.y.
Then use the simple formula for the slope of a line, (y2-y1)/(x2-x1). Taking the arctangent (inverse tangent) of that yields the angle in radians. Then simply filter out the desired angles from the values obtained for each Hough line.
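A minimal sketch of that filtering with the C++ API (the edge image, the Hough parameters, and the +/- 5 degree window around vertical are my assumptions):

// Keep only segments whose orientation falls inside a narrow angular window.
std::vector<cv::Vec4i> segments, kept;
cv::HoughLinesP(edges, segments, 1, CV_PI / 180, 50, 30, 10);
const double target = CV_PI / 2;               // e.g. roughly vertical lines
const double tolerance = 5.0 * CV_PI / 180.0;  // +/- 5 degrees
for (const cv::Vec4i& s : segments)
{
    double angle = std::atan2(s[3] - s[1], s[2] - s[0]); // segment angle in radians
    if (angle < 0) angle += CV_PI;                       // fold into [0, pi)
    if (std::abs(angle - target) <= tolerance)
        kept.push_back(s);
}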
I think it's more natural to use the standard HoughLines(...) function, which gives the collection of lines directly in rho and theta terms, and to select the necessary angle range from that, rather than recalculating the angle from the segment end points.
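A minimal sketch of that approach (the edge image, the Hough parameters, and the 80 to 100 degree window are my assumptions):

// HoughLines returns (rho, theta) pairs, so the angle filter is a direct comparison on theta.
std::vector<cv::Vec2f> lines, selected;
cv::HoughLines(edges, lines, 1, CV_PI / 180, 100);
const double thetaMin = 80.0 * CV_PI / 180.0;
const double thetaMax = 100.0 * CV_PI / 180.0;
for (const cv::Vec2f& l : lines)
{
    float theta = l[1];            // theta is already in [0, pi)
    if (theta >= thetaMin && theta <= thetaMax)
        selected.push_back(l);
}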