I'm trying to find a simple algorithm to crop a panorama image (i.e. remove the black areas) created with the OpenCV Stitcher module.
My idea is to calculate the innermost black points in the image, which will define the cropping area, as shown in the next image:
Expected cropped result:
I've tried the following two approaches, but they don't crop the image as expected: both effectively find only the outer bounding box of the non-black pixels, so the curved black borders remain inside the resulting rectangle.
First Approach:
void testCropA(cv::Mat& image)
{
    cv::Mat gray;
    cvtColor(image, gray, CV_BGR2GRAY);

    Size size = gray.size();
    int type = gray.type();
    int left = 0, top = 0, right = size.width, bottom = size.height;

    cv::Mat row_zeros = Mat::zeros(1, right, type);
    cv::Mat col_zeros = Mat::zeros(bottom, 1, type);

    // Move each border inwards while the outermost row/column is completely black
    while (countNonZero(gray.row(top) != row_zeros) == 0) { top++; }
    while (countNonZero(gray.col(left) != col_zeros) == 0) { left++; }
    while (countNonZero(gray.row(bottom - 1) != row_zeros) == 0) { bottom--; }
    while (countNonZero(gray.col(right - 1) != col_zeros) == 0) { right--; }

    cv::Rect cropRect(left, top, right - left, bottom - top);
    image = image(cropRect);
}
Second Approach:
void testCropB(cv::Mat& image)
{
    cv::Mat gray;
    cvtColor(image, gray, CV_BGR2GRAY);

    int minCol = gray.cols;
    int minRow = gray.rows;
    int maxCol = 0;
    int maxRow = 0;

    // Bounding box of all non-black pixels
    for (int i = 0; i < gray.rows - 3; i++)
    {
        for (int j = 0; j < gray.cols; j++)
        {
            if (gray.at<char>(i, j) != 0)
            {
                if (i < minRow) { minRow = i; }
                if (j < minCol) { minCol = j; }
                if (i > maxRow) { maxRow = i; }
                if (j > maxCol) { maxCol = j; }
            }
        }
    }

    cv::Rect cropRect = Rect(minCol, minRow, maxCol - minCol, maxRow - minRow);
    image = image(cropRect);
}
This is my current solution. Hope it helps others:
bool checkInteriorExterior(const cv::Mat &mask, const cv::Rect &croppingMask,
                           int &top, int &bottom, int &left, int &right)
{
    // Return true if the rectangle is fine as it is
    bool result = true;

    cv::Mat sub = mask(croppingMask);
    int x = 0;
    int y = 0;

    // Count how many exterior pixels there are, and choose for reduction the side
    // where most exterior pixels occurred (that's the heuristic)
    int top_row = 0;
    int bottom_row = 0;
    int left_column = 0;
    int right_column = 0;

    for (y = 0, x = 0; x < sub.cols; ++x)
    {
        // If there is an exterior part in the interior we have
        // to move the top side of the rect a bit to the bottom
        if (sub.at<char>(y, x) == 0)
        {
            result = false;
            ++top_row;
        }
    }

    for (y = (sub.rows - 1), x = 0; x < sub.cols; ++x)
    {
        // If there is an exterior part in the interior we have
        // to move the bottom side of the rect a bit to the top
        if (sub.at<char>(y, x) == 0)
        {
            result = false;
            ++bottom_row;
        }
    }

    for (y = 0, x = 0; y < sub.rows; ++y)
    {
        // If there is an exterior part in the interior
        if (sub.at<char>(y, x) == 0)
        {
            result = false;
            ++left_column;
        }
    }

    for (x = (sub.cols - 1), y = 0; y < sub.rows; ++y)
    {
        // If there is an exterior part in the interior
        if (sub.at<char>(y, x) == 0)
        {
            result = false;
            ++right_column;
        }
    }

    // The idea is to set `top = 1` if it's better to reduce
    // the rect at the top than anywhere else.
    if (top_row > bottom_row)
    {
        if (top_row > left_column)
        {
            if (top_row > right_column)
            {
                top = 1;
            }
        }
    }
    else if (bottom_row > left_column)
    {
        if (bottom_row > right_column)
        {
            bottom = 1;
        }
    }

    if (left_column >= right_column)
    {
        if (left_column >= bottom_row)
        {
            if (left_column >= top_row)
            {
                left = 1;
            }
        }
    }
    else if (right_column >= top_row)
    {
        if (right_column >= bottom_row)
        {
            right = 1;
        }
    }

    return result;
}
bool compareX(cv::Point a, cv::Point b)
{
    return a.x < b.x;
}

bool compareY(cv::Point a, cv::Point b)
{
    return a.y < b.y;
}
void crop(cv::Mat &source)
{
    cv::Mat gray;
    source.convertTo(source, CV_8U);
    cvtColor(source, gray, cv::COLOR_RGB2GRAY);

    // Extract all the black background (and some interior parts maybe)
    cv::Mat mask = gray > 0;

    // now extract the outer contour
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(mask, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE, cv::Point(0, 0));

    cv::Mat contourImage = cv::Mat::zeros(source.size(), CV_8UC3);

    // Find contour with max elements
    int maxSize = 0;
    int id = 0;
    for (int i = 0; i < contours.size(); ++i)
    {
        if (contours.at((unsigned long)i).size() > maxSize)
        {
            maxSize = (int)contours.at((unsigned long)i).size();
            id = i;
        }
    }

    // Draw filled contour to obtain a mask with interior parts
    cv::Mat contourMask = cv::Mat::zeros(source.size(), CV_8UC1);
    drawContours(contourMask, contours, id, cv::Scalar(255), -1, 8, hierarchy, 0, cv::Point());

    // Sort contour in x/y directions to easily find min/max and next
    std::vector<cv::Point> cSortedX = contours.at((unsigned long)id);
    std::sort(cSortedX.begin(), cSortedX.end(), compareX);
    std::vector<cv::Point> cSortedY = contours.at((unsigned long)id);
    std::sort(cSortedY.begin(), cSortedY.end(), compareY);

    int minXId = 0;
    int maxXId = (int)(cSortedX.size() - 1);
    int minYId = 0;
    int maxYId = (int)(cSortedY.size() - 1);

    cv::Rect croppingMask;
    while ((minXId < maxXId) && (minYId < maxYId))
    {
        cv::Point min(cSortedX[minXId].x, cSortedY[minYId].y);
        cv::Point max(cSortedX[maxXId].x, cSortedY[maxYId].y);
        croppingMask = cv::Rect(min.x, min.y, max.x - min.x, max.y - min.y);

        // Out-codes: if one of them is set, the rectangle size has to be reduced at that border
        int ocTop = 0;
        int ocBottom = 0;
        int ocLeft = 0;
        int ocRight = 0;
        bool finished = checkInteriorExterior(contourMask, croppingMask, ocTop, ocBottom, ocLeft, ocRight);
        if (finished == true)
        {
            break;
        }

        // Reduce rectangle at border if necessary
        if (ocLeft)   { ++minXId; }
        if (ocRight)  { --maxXId; }
        if (ocTop)    { ++minYId; }
        if (ocBottom) { --maxYId; }
    }

    // Crop image with created mask
    source = source(croppingMask);
}
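A minimal usage sketch (assuming the stitched panorama has been written to a hypothetical file `stitched_pano.jpg`; `crop()` above modifies its argument in place):

cv::Mat pano = cv::imread("stitched_pano.jpg"); // hypothetical path to the Stitcher output
crop(pano);                                     // shrink the rectangle until no black border remains inside
cv::imwrite("pano_cropped.jpg", pano);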
I never used the Stitcher class, but I think you can get the estimated homography matrix for each pair of images. If you can obtain it easily, you can multiply it with the corners of the first original image, and likewise with the corners of the last original one, to get their stitched coordinates. Then take the min/max of the x-coordinates and of the y-coordinates over each image's warped corners. That gives you the coordinates of each stitched image, which is what you need in some cases of cropping.
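A minimal sketch of that corner-warping idea (assuming you already have a 3x3 homography H for one source image, e.g. from cv::findHomography; `warpedBounds` is a hypothetical helper name):

#include <opencv2/opencv.hpp>
#include <vector>

// Maps the corners of an image of the given size through H and returns
// their bounding box in panorama coordinates.
cv::Rect warpedBounds(const cv::Mat& H, const cv::Size& imageSize)
{
    std::vector<cv::Point2f> corners = {
        {0.f, 0.f},
        {(float)imageSize.width, 0.f},
        {(float)imageSize.width, (float)imageSize.height},
        {0.f, (float)imageSize.height}
    };
    std::vector<cv::Point2f> warped;
    cv::perspectiveTransform(corners, warped, H); // H must be 3x3, CV_32F or CV_64F
    return cv::boundingRect(warped);              // min/max x and y of the warped corners
}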
I want to calculate the perimeter of a white blob in a 512×512 binary image. The image will have only one blob. I used the following code earlier in OpenCV 3, but somehow it doesn't work in OpenCV 4.2. IplImage
is deprecated in the latest version, and I cannot pass a Mat object directly to the cvFindContours function. I am new to OpenCV and I don't know how it works. Other related questions regarding the perimeter are still unanswered.
To summarize: the following works in OpenCV 3 but does not work in the current OpenCV version (4.2).
int getPerimeter(unsigned char* inImagePtr, int inW, int inH)
{
    int sumEven = 0; int sumOdd = 0;
    int sumCorner = 0; int prevCode = 0;

    // create a mat input Image
    cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);

    // create four connected structuring element
    cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
    element.data[1] = 1; element.data[3] = 1;
    element.data[4] = 1; element.data[5] = 1;
    element.data[7] = 1;

    // erode input image
    cv::Mat erodeImage;
    erode(inImage, erodeImage, element);

    // Invert eroded Image
    cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);

    // multiply with original binary Image to get the edge Image
    cv::Mat edge = erodeImage.mul(inImage);

    // Get chain code of the blob
    CvChain* chain = 0;
    CvMemStorage* storage = 0;
    storage = cvCreateMemStorage(0);
    auto temp = new IplImage(edge);
    cvFindContours(temp, storage, (CvSeq**)(&chain), sizeof(*chain), CV_RETR_EXTERNAL, CV_CHAIN_CODE);
    delete temp;

    for (; chain != NULL; chain = (CvChain*)chain->h_next)
    {
        CvSeqReader reader;
        int i, total = chain->total;
        cvStartReadSeq((CvSeq*)chain, &reader, 0);
        for (i = 0; i < total; i++)
        {
            char code;
            CV_READ_SEQ_ELEM(code, reader);
            if (code % 2 == 0)
                sumEven++;
            else
                sumOdd++;
            if (i > 0) {
                if (code != prevCode)
                    sumCorner++;
            }
            prevCode = code;
        }
    }

    float perimeter = (float)sumEven*0.980 + (float)sumOdd*1.406 - (float)sumCorner*0.091;
    return (roundf(perimeter));
}
This worked just fine for me!
int getPerimeter(unsigned char* inImagePtr, int inW, int inH) {
    // create a mat input Image
    cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);

    // create four connected structuring element
    cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
    element.data[1] = 1;
    element.data[3] = 1;
    element.data[4] = 1;
    element.data[5] = 1;
    element.data[7] = 1;

    // erode input image
    cv::Mat erodeImage;
    erode(inImage, erodeImage, element);

    // Invert eroded Image
    cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);

    // multiply with original binary Image to get the edge Image
    cv::Mat edge = erodeImage.mul(inImage);

    vector<vector<Point>> contours;
    findContours(edge, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE); // Retrieve only external contour

    int preValue[2];
    int nextValue[2];
    int sumEven = 0;
    int sumOdd = 0;
    //vector<Point>::iterator itr;
    for (int ii = 0; ii < contours[0].size(); ii++) {
        Point pt = contours[0].at(ii);
        preValue[0] = pt.x;
        preValue[1] = pt.y;
        if (ii != contours[0].size() - 1) {
            Point pt_next = contours[0].at(ii + 1);
            nextValue[0] = pt_next.x;
            nextValue[1] = pt_next.y;
        } else {
            Point pt_next = contours[0].at(0);
            nextValue[0] = pt_next.x;
            nextValue[1] = pt_next.y;
        }
        if ((preValue[0] == nextValue[0]) or (preValue[1] == nextValue[1])) {
            sumEven = sumEven + abs(nextValue[0] - preValue[0]) + abs(nextValue[1] - preValue[1]);
        } else {
            sumOdd = sumOdd + abs(nextValue[0] - preValue[0]);
        }
    }
    int sumCorner = contours[0].size() - 1;

    float perimeter = round(sumEven * 0.980 + sumOdd * 1.406 - sumCorner * 0.091);
    return (roundf(perimeter));
}
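As a side note (not part of the answer above): for a single external contour, OpenCV can also estimate the perimeter directly with cv::arcLength. A minimal sketch, assuming `inImage` is the binary blob image from the question; this gives a slightly different estimate than the weighted chain-code formula used above:

std::vector<std::vector<cv::Point>> contours;
cv::findContours(inImage, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
if (!contours.empty()) {
    double perimeter = cv::arcLength(contours[0], true); // closed polygonal arc length in pixels
}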
I need something like this: OpenCV C++/Obj-C: Detecting a sheet of paper / Square Detection.
My code works like a charm when the background and the foreground are not the same, but when the background is almost the same color as the document it no longer works.
Here is a picture with a beige background and an almost-beige document that does not work. Can somebody help me fix this code?
https://i.imgur.com/81DrIIK.jpg
and the code is here:
vector<Point> getPoints(Mat image)
{
    int width = image.size().width;
    int height = image.size().height;
    Mat image_proc = image.clone();
    vector<vector<Point> > squares;

    // blur will enhance edge detection
    Mat blurred(image_proc);
    medianBlur(image_proc, blurred, 9);

    Mat gray0(blurred.size(), CV_8U), gray;
    vector<vector<Point> > contours;

    // find squares in every color plane of the image
    for (int c = 0; c < 3; c++)
    {
        int ch[] = {c, 0};
        mixChannels(&blurred, 1, &gray0, 1, ch, 1);

        // try several threshold levels
        const int threshold_level = 2;
        for (int l = 0; l < threshold_level; l++)
        {
            // Use Canny instead of zero threshold level!
            // Canny helps to catch squares with gradient shading
            if (l == 0)
            {
                Canny(gray0, gray, 10, 20, 3);
                // Dilate helps to remove potential holes between edge segments
                dilate(gray, gray, Mat(), Point(-1, -1));
            }
            else
            {
                gray = gray0 >= (l + 1) * 255 / threshold_level;
            }

            // Find contours and store them in a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            // Test contours
            vector<Point> approx;
            for (size_t i = 0; i < contours.size(); i++)
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true) * 0.02, true);

                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if (approx.size() == 4 &&
                    fabs(contourArea(Mat(approx))) > 1000 &&
                    isContourConvex(Mat(approx)))
                {
                    double maxCosine = 0;
                    for (int j = 2; j < 5; j++)
                    {
                        double cosine = fabs(angle(approx[j % 4], approx[j - 2], approx[j - 1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }
                    if (maxCosine < 0.3)
                        squares.push_back(approx);
                }
            }
        }

        double largest_area = -1;
        int largest_contour_index = 0;
        for (int i = 0; i < squares.size(); i++)
        {
            double a = contourArea(squares[i], false);
            if (a > largest_area)
            {
                largest_area = a;
                largest_contour_index = i;
            }
        }

        __android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Scaning size() %d", squares.size());

        vector<Point> points;
        if (squares.size() > 0)
        {
            points = squares[largest_contour_index];
        }
        else
        {
            points.push_back(Point(0, 0));
            points.push_back(Point(width, 0));
            points.push_back(Point(0, height));
            points.push_back(Point(width, height));
        }
        return points;
    }
}
Thanks
You can do a threshold operation on the S channel of the HSV color space. https://en.wikipedia.org/wiki/HSL_and_HSV#General_approach
I just split the channels of BGR and HSV as follows; more operations are needed.
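A minimal sketch of that idea (assuming the input is a BGR image at a hypothetical path `document.jpg`; the threshold value 40 is only an illustrative starting point and would need tuning):

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat src = cv::imread("document.jpg");   // hypothetical input image
    cv::Mat hsv, saturationMask;
    std::vector<cv::Mat> channels;

    cv::cvtColor(src, hsv, cv::COLOR_BGR2HSV);
    cv::split(hsv, channels);                   // channels[1] is the S (saturation) plane

    // Threshold the saturation plane; 40 is only an illustrative value.
    cv::threshold(channels[1], saturationMask, 40, 255, cv::THRESH_BINARY);

    cv::imwrite("saturation_mask.png", saturationMask);
    return 0;
}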
I want to apply K-means in OpenCV to a region of an image that is not square or rectangular. For example, the source image is:
now I select a custom mask:
and apply K Means with K = 3:
Obviously without considering the bounds (white).
Instead, what I can do with OpenCV is K-means, but only while considering the bounds:
And that messes up my final image, because black is considered a colour.
Do you have any clue?
Thank you in advance.
Quick and dirty solution.
vector<Vec3b> points;
vector<Point> locations;
for (int y = 0; y < src.rows; y++) {
    for (int x = 0; x < src.cols; x++) {
        if ((int)mask.at<unsigned char>(y, x) != 0) {
            points.push_back(src.at<Vec3b>(y, x));
            locations.push_back(Point(x, y));
        }
    }
}

Mat kmeanPoints(points.size(), 3, CV_32F);
for (int y = 0; y < points.size(); y++) {
    for (int z = 0; z < 3; z++) {
        kmeanPoints.at<float>(y, z) = points[y][z];
    }
}

Mat labels;
Mat centers;
kmeans(kmeanPoints, 4, labels, TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 10, 0.1), 10, cv::KMEANS_PP_CENTERS, centers);

Mat final = Mat::zeros(src.size(), src.type());
Vec3b tempColor;
for (int i = 0; i < locations.size(); i++) {
    int cluster_idx = labels.at<int>(i, 0);
    tempColor[0] = centers.at<float>(cluster_idx, 0);
    tempColor[1] = centers.at<float>(cluster_idx, 1);
    tempColor[2] = centers.at<float>(cluster_idx, 2);
    final.at<Vec3b>(locations[i]) = tempColor;
}
Assuming that you have an input RGB image called img (here) and a one-channel mask called mask (here), here is the snippet to prepare your k-means computation:
int nbClasses = 3; // or whatever you want
cv::TermCriteria myCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 10, 1.0);
cv::Mat labels, centers, result;
cv::Mat data;
img.convertTo(data, CV_32F);
// reshape into 3 columns (one per channel, in BGR order) and as many rows as the total number of pixels in img
data = data.reshape(1, data.total());
If you want to apply a normal k-means (without mask):
// apply k-means
cv::kmeans(data, nbClasses, labels, myCriteria, 3, cv::KMEANS_PP_CENTERS, centers);
// reshape both to a single column of Vec3f pixels
centers = centers.reshape(3, centers.rows);
data = data.reshape(3, data.rows);
// replace pixel values with their center value
cv::Vec3f *p = data.ptr<cv::Vec3f>();
for (size_t i = 0; i < data.rows; i++)
{
    int center_id = labels.at<int>(i);
    p[i] = centers.at<cv::Vec3f>(center_id);
}
// back to 2D image
data = data.reshape(3, img.rows);
// optional conversion to uchar
data.convertTo(result, CV_8U);
The result is here.
But if you want instead to apply a masked k-means:
int nbWhitePixels = cv::countNonZero(mask);
cv::Mat dataMasked = cv::Mat(nbWhitePixels, 3, CV_32F, cv::Scalar(0));
cv::Mat maskFlatten = mask.reshape(1, mask.total());
// filter data by the mask
int idx = 0;
for (int k = 0; k < mask.total(); k++)
{
    int val = maskFlatten.at<uchar>(k, 0);
    if (val != 0)
    {
        float val0 = data.at<float>(k, 0);
        float val1 = data.at<float>(k, 1);
        float val2 = data.at<float>(k, 2);
        dataMasked.at<float>(idx, 0) = val0;
        dataMasked.at<float>(idx, 1) = val1;
        dataMasked.at<float>(idx, 2) = val2;
        idx++;
    }
}
// apply k-means
cv::kmeans(dataMasked, nbClasses, labels, myCriteria, 3, cv::KMEANS_PP_CENTERS, centers);
// reshape to a single column of Vec3f pixels
centers = centers.reshape(3, centers.rows);
dataMasked = dataMasked.reshape(3, dataMasked.rows);
data = data.reshape(3, data.rows);
// replace pixel values with their center value, only for pixels in mask
cv::Vec3f *p = data.ptr<cv::Vec3f>();
idx = 0;
for (size_t i = 0; i < data.rows; i++)
{
    if (maskFlatten.at<uchar>(i, 0) != 0)
    {
        int center_id = labels.at<int>(idx);
        p[i] = centers.at<cv::Vec3f>(center_id);
        idx++;
    }
    //else
    //    p[i] = cv::Vec3f(0, 0, 0);
}
// back to 2d, and uchar
data = data.reshape(3, img.rows);
data.convertTo(result, CV_8U);
You will now have this result.
If you leave the else part commented out, you will keep the initial pixels outside the mask, whereas if you uncomment it, those pixels will be turned black, like here.
Can anyone help me out with this?
I am trying to calculate the gradient orientation using the Sobel operator in OpenCV, from the gradients in the x and y directions. I am using the atan2 function to compute the angle in radians, which I later convert to degrees, but all the angles I am getting are between 0 and 90 degrees.
My expectation is to get angles between 0 and 360 degrees. The image I am using is grayscale. The code segment is here below.
Mat PeripheralArea;
Mat grad_x, grad_y; // this is the matrix for the gradients in x and y directions
int off_set_y = 0, off_set_x = 0;
int scale = 1, num_bins = 8, bin = 0;
int delta = -1;
int ddepth = CV_16S;

GaussianBlur(PeripheralArea, PeripheralArea, Size(3, 3), 0, 0, BORDER_DEFAULT);
Sobel(PeripheralArea, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
Sobel(PeripheralArea, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);

for (int row_y1 = 0, row_y2 = 0; row_y1 < grad_y.rows / 5, row_y2 < grad_x.rows / 5; row_y1++, row_y2++) {
    for (int col_x1 = 0, col_x2 = 0; col_x1 < grad_y.cols / 5, col_x2 < grad_x.cols / 5; col_x1++, col_x2++) {
        gradient_direction_radians = (double) atan2((double) grad_y.at<uchar>(row_y1 + off_set_y, col_x1 + off_set_x),
                                                    (double) grad_x.at<uchar>(row_y2 + off_set_y, col_x2 + off_set_x));
        gradient_direction_degrees = (int) (180 * gradient_direction_radians / 3.1415);
        gradient_direction_degrees = gradient_direction_degrees < 0
                                     ? gradient_direction_degrees + 360
                                     : gradient_direction_degrees;
    }
}
Note: the off_set_x and off_set_y variables are not part of the computation; they offset into different square blocks, for which I eventually want to compute a histogram feature vector.
You have specified that the destination depth of Sobel() is CV_16S.
Yet, when you access grad_x and grad_y, you use .at<uchar>(), implying that their elements are 8 bit unsigned quantities, when in fact they are 16 bit signed. You could use .at<short>() instead, but to me it looks like there are a number of issues with your code, not the least of which is that there is an OpenCV function that does exactly what you want.
Use cv::phase(), and replace your for loops with
cv::Mat gradient_angle_degrees;
bool angleInDegrees = true;
cv::phase(grad_x, grad_y, gradient_angle_degrees, angleInDegrees);
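One caveat (not stated above, but true of the OpenCV API): cv::phase expects floating-point inputs, so with ddepth = CV_16S the gradients would need converting first. A minimal sketch, reusing the variable names from the question:

cv::Mat grad_x_f, grad_y_f, gradient_angle_degrees;
grad_x.convertTo(grad_x_f, CV_32F);   // cv::phase requires CV_32F or CV_64F input
grad_y.convertTo(grad_y_f, CV_32F);
cv::phase(grad_x_f, grad_y_f, gradient_angle_degrees, true); // angles in [0, 360)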
I ran into the same need when I was doing some edge detection in C++.
For the orientation of the gradient I use atan2(); this standard API defines its +y and +x axes the same way we usually traverse a 2D image.
Here is a plot to show my understanding:
///////////////////////////////
// Quadrants of image:
// 3(-dx,-dy) | 4(+dx,-dy) [-pi,0]
// ------------------------->+x
// 2(-dx,+dy) | 1(+dx,+dy) [0,pi]
// v
// +y
///////////////////////////////
// Definition of arctan2():
// -135(-dx,-dy) | -45(+dx,-dy)
// ------------------------->+x
// 135(-dx,+dy) | +45(+dx,+dy)
// v
// +y
///////////////////////////////
Here is how I compute the gradient:
bool gradient(double*& magnitude, double*& orientation, double* src, int width, int height, string file) {
    if (src == NULL)
        return false;
    if (width <= 0 || height <= 0)
        return false;

    double gradient_x_correlation[3*3] = {-0.5, 0.0, 0.5,
                                          -0.5, 0.0, 0.5,
                                          -0.5, 0.0, 0.5};

    double gradient_y_correlation[3*3] = {-0.5,-0.5,-0.5,
                                           0.0, 0.0, 0.0,
                                           0.5, 0.5, 0.5};

    double *Gx = NULL;
    double *Gy = NULL;

    this->correlation(Gx, src, gradient_x_correlation, width, height, 3);
    this->correlation(Gy, src, gradient_y_correlation, width, height, 3);

    if (Gx == NULL || Gy == NULL)
        return false;

    //magnitude
    magnitude = new double[sizeof(double)*width*height];
    if (magnitude == NULL)
        return false;
    memset(magnitude, 0, sizeof(double)*width*height);

    double gx = 0.0;
    double gy = 0.0;
    double gm = 0.0;
    for (int j=0; j<height; j++) {
        for (int i=0; i<width; i++) {
            gx = pow(Gx[i+j*width],2);
            gy = pow(Gy[i+j*width],2);
            gm = sqrt(pow(Gx[i+j*width],2)+pow(Gy[i+j*width],2));
            if (gm >= 255.0) {
                return false;
            }
            magnitude[i+j*width] = gm;
        }
    }

    //orientation
    orientation = new double[sizeof(double)*width*height];
    if (orientation == NULL)
        return false;
    memset(orientation, 0, sizeof(double)*width*height);

    double ori = 0.0;
    double dtmp = 0.0;
    double ori_normalized = 0.0;
    for (int j=0; j<height; j++) {
        for (int i=0; i<width; i++) {
            gx = (Gx[i+j*width]);
            gy = (Gy[i+j*width]);
            ori = atan2(Gy[i+j*width], Gx[i+j*width])/PI*(180.0); // in degrees, (-180, +180]
            if (gx >= 0 && gy >= 0) { //[Quadrant 1]:[0,90] to be [0,63]
                if (ori < 0) {
                    printf("[Err1QUA]ori:%.1f\n", ori);
                    return false;
                }
                ori_normalized = (ori)*255.0/360.0;
                if (ori != 0.0 && dtmp != ori) {
                    printf("[Qudrant 1]orientation: %.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
                    dtmp = ori;
                }
            }
            else if (gx >= 0 && gy < 0) { //[Quadrant 4]:[270,360) equal to [-90, 0) to be [191,255]
                if (ori > 0) {
                    printf("[Err4QUA]orientation:%.1f\n", ori);
                    return false;
                }
                ori_normalized = (360.0+ori)*255.0/360.0;
                if (ori != 0.0 && dtmp != ori) {
                    printf("[Qudrant 4]orientation:%.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
                    dtmp = ori;
                }
            }
            else if (gx < 0 && gy >= 0) { //[Quadrant 2]:(90,180] to be [64,127]
                if (ori < 0) {
                    printf("[Err2QUA]orientation:%.1f\n", ori);
                    return false;
                }
                ori_normalized = (ori)*255.0/360.0;
                if (ori != 0.0 && dtmp != ori) {
                    printf("[Qudrant 2]orientation: %.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
                    dtmp = ori;
                }
            }
            else if (gx < 0 && gy < 0) { //[Quadrant 3]:(180,270) equal to (-180, -90) to be [128,190]
                if (ori > 0) {
                    printf("[Err3QUA]orientation:%.1f\n", ori);
                    return false;
                }
                ori_normalized = (360.0+ori)*255.0/360.0;
                if (ori != 0.0 && dtmp != ori) {
                    printf("[Qudrant 3]orientation:%.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
                    dtmp = ori;
                }
            }
            else {
                printf("[EXCEPTION]orientation:%.1f\n", ori);
                return false;
            }
            orientation[i+j*width] = ori_normalized;
        }
    }
    return true;
}
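As an aside, the four per-quadrant branches above all reduce to the same rule: if the atan2() result (in degrees) is negative, add 360, then scale by 255/360. A compact sketch of that mapping for a single pixel (gx and gy are its gradient components; PI as used above):

double ori = atan2(gy, gx) / PI * 180.0;     // (-180, +180]
if (ori < 0.0)
    ori += 360.0;                            // [0, 360)
double ori_normalized = ori * 255.0 / 360.0; // [0, 255) so it fits into 8 bits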
Here is how I compute the cross-correlation:
bool correlation(double*& dst, double* src, double* kernel, int width, int height, int window) {
    if (src == NULL || kernel == NULL)
        return false;
    if (width <= 0 || height <= 0 || width < window || height < window)
        return false;

    dst = new double[sizeof(double)*width*height];
    if (dst == NULL)
        return false;
    memset(dst, 0, sizeof(double)*width*height);

    int ii = 0;
    int jj = 0;
    int nn = 0;
    int mm = 0;
    double max = std::numeric_limits<double>::min();
    double min = std::numeric_limits<double>::max();
    double range = std::numeric_limits<double>::max();

    for (int j=0; j<height; j++) {
        for (int i=0; i<width; i++) {
            for (int m=0; m<window; m++) {
                for (int n=0; n<window; n++) {
                    ii = i+(n-window/2);
                    jj = j+(m-window/2);
                    nn = n;
                    mm = m;
                    if (ii >=0 && ii<width && jj>=0 && jj<height) {
                        dst[i+j*width] += src[ii+jj*width]*kernel[nn+mm*window];
                    }
                    else {
                        dst[i+j*width] += 0;
                    }
                }
            }
            if (dst[i+j*width] > max)
                max = dst[i+j*width];
            else if (dst[i+j*width] < min)
                min = dst[i+j*width];
        }
    }

    //normalize double matrix to be an uint8_t matrix
    range = max - min;
    double norm = 0.0;
    printf("correlated matrix max:%.1f, min:%.1f, range:%.1f\n", max, min, range);
    for (int j=0; j<height; j++) {
        for (int i=0; i<width; i++) {
            norm = dst[i+j*width];
            norm = 255.0*norm/range;
            dst[i+j*width] = norm;
        }
    }
    return true;
}
For testing, I use an image of a hollow rectangle; you can download it from my sample.
The gradient orientation along the hollow rectangle in my sample image moves from 0 to 360 degrees clockwise (Quadrant 1 to 2 to 3 to 4).
Here is my printout, which traces the orientation:
[Qudrant 1]orientation: 45.0 to be 31.9(31)
[Qudrant 1]orientation: 90.0 to be 63.8(63)
[Qudrant 2]orientation: 135.0 to be 95.6(95)
[Qudrant 2]orientation: 180.0 to be 127.5(127)
[Qudrant 3]orientation:-135.0 to be 159.4(159)
[Qudrant 3]orientation:-116.6 to be 172.4(172)
[Qudrant 4]orientation:-90.0 to be 191.2(191)
[Qudrant 4]orientation:-63.4 to be 210.1(210)
[Qudrant 4]orientation:-45.0 to be 223.1(223)
You can see more source code about digital image processing on my GitHub :)
I am loading a PNG in Processing. This PNG has a lot of unused pixels around the actual image. Luckily all those pixels are completely transparent. My goal is to crop the PNG to show only the image and get rid of the unused pixels. The first step would be to calculate the bounds of the image. Initially I wanted to check every pixel's alpha value and see if that pixel is the highest or lowest coordinate for the bounds, like this:
------
------
--->oo
oooooo
oooooo
Then I realized I only needed to do this until the first non-transparent pixel, and repeat it backwards for the highest coordinate bound. Like this:
------
-->ooo
oooooo
ooo<--
------
This would mean less computation for the same result. However, the code I got out of it still seems very complex. Here it is:
class Rect { // class for storing the boundaries
  int xMin, xMax, yMin, yMax;
  Rect() {
  }
}

PImage gfx;

void setup() {
  size(800, 600);
  gfx = loadImage("resources/test.png");
  Rect _bounds = calcBounds(); // first calculate the boundaries
  cropImage(_bounds);          // then crop the image using those boundaries
}

void draw() {
}

Rect calcBounds() {
  Rect _bounds = new Rect();
  boolean _coordFound = false;
  gfx.loadPixels();

  // x min bounds
  for (int i = 0; i < gfx.width; i++) { // rows
    for (int i2 = 0; i2 < gfx.height; i2++) { // columns
      if (alpha(gfx.pixels[(gfx.width * i2) + i]) != 0) {
        _bounds.xMin = i;
        _coordFound = true;
        break;
      }
    }
    if (_coordFound) {
      break;
    }
  }

  // x max bounds
  _coordFound = false;
  for (int i = gfx.width - 1; i >= 0; i--) { // rows
    for (int i2 = gfx.height - 1; i2 >= 0; i2--) { // columns
      if (alpha(gfx.pixels[(gfx.width * i2) + i]) != 0) {
        _bounds.xMax = i;
        _coordFound = true;
        break;
      }
    }
    if (_coordFound) {
      break;
    }
  }

  // y min bounds
  _coordFound = false;
  for (int i = 0; i < gfx.height; i++) { // columns
    for (int i2 = 0; i2 < gfx.width; i2++) { // rows
      if (alpha(gfx.pixels[(gfx.width * i) + i2]) != 0) {
        _bounds.yMin = i;
        _coordFound = true;
        break;
      }
    }
    if (_coordFound) {
      break;
    }
  }

  // y max bounds
  _coordFound = false;
  for (int i = gfx.height - 1; i >= 0; i--) { // columns
    for (int i2 = gfx.width - 1; i2 >= 0; i2--) { // rows
      if (alpha(gfx.pixels[(gfx.width * i) + i2]) != 0) {
        _bounds.yMax = i;
        _coordFound = true;
        break;
      }
    }
    if (_coordFound) {
      break;
    }
  }

  return _bounds;
}

void cropImage(Rect _bounds) {
  PImage _temp = createImage((_bounds.xMax - _bounds.xMin) + 1, (_bounds.yMax - _bounds.yMin) + 1, ARGB);
  _temp.copy(gfx, _bounds.xMin, _bounds.yMin, (_bounds.xMax - _bounds.xMin) + 1, (_bounds.yMax - _bounds.yMin) + 1, 0, 0, _temp.width, _temp.height);
  gfx = _temp; // now the image is cropped
}
Isn't there a more efficient/faster way to calculate the bounds of the image?
And I do still want the boundary coordinates afterwards, instead of just cutting away at the image during the calculation.
If you store the last completely empty line found for e.g. the horizontal minimum and maximum scan in a variable, you can use that to constrain your vertical scanning to only the area that has not yet been checked for being empty, instead of having to scan full columns. Depending on the amount and shape of the croppable area, that can save you quite a bit. See the schematic for a visual explanation of the modified algorithm:
By the way, in your //x min bounds scan you seem to be iterating over the width in both for loops; shouldn't it be the height in one of them? (Unless your images are all square, of course :))