cvCalibrateCamera2 returns matrix with NaN-elements - opencv

I'm trying to do a camera calibration using cvCalibrateCamera2() but run into a strange problem: the returned intrinsic_matrix and distortion_coeffs contain invalid floats (=NaN).
This is what I'm doing (the breaks are there to leave this block completely):
image_points =cvCreateMat(board_total,2,CV_32FC1); if (!image_points) break;
object_points =cvCreateMat(board_total,3,CV_32FC1); if (!object_points) break;
point_counts =cvCreateMat(1,1,CV_32SC1); if (!point_counts) break;
intrinsic_matrix =cvCreateMat(3,3,CV_32FC1); if (!intrinsic_matrix) break;
corners =new CvPoint2D32f[board_total]; if (!corners) break;
distortion_coeffs=cvCreateMat(4,1,CV_32FC1); if (!distortion_coeffs) break;
int found = cvFindChessboardCorners(gray_image, board_sz, corners,corner_count,CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS );
if (found==0) break;
It works fine until here: "gray_image" is my input image and "corners" contains the correct chessboard corner coordinates.
cvFindCornerSubPix(gray_image, corners,*corner_count, cvSize(11,11),cvSize(-1,-1), cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
for (j=0; j<board_total; j++)
{
CV_MAT_ELEM(*image_points, float,0,0) = corners[j].x;
CV_MAT_ELEM(*image_points, float,0,1) = corners[j].y;
CV_MAT_ELEM(*object_points,float,0,0) = (float) j/board_w;
CV_MAT_ELEM(*object_points,float,0,1) = (float) (j%board_w);
CV_MAT_ELEM(*object_points,float,0,2) = 0.0f;
}
CV_MAT_ELEM(*point_counts, int,0,0) = board_total;
CV_MAT_ELEM( *intrinsic_matrix, float, 0, 0 ) = 1.0f;
CV_MAT_ELEM( *intrinsic_matrix, float, 1, 1 ) = 1.0f;
cvCalibrateCamera2(object_points,image_points,point_counts,cvGetSize( gray_image ),intrinsic_matrix,distortion_coeffs,NULL,NULL,0);
Here cvCalibrateCamera2() seems to work and causes no exception, but when I try to get the results out of it, all floats "d" are invalid:
float d;
for (i=0; i<3; i++)
for (j=0; j<3; j++)
{
d=CV_MAT_ELEM(*intrinsic_matrix,float,i,j);
calib_data->intrinsic[i][j]=(int)OAPC_ROUND(d*1000000,0);
}
for (i=0; i<4; i++)
{
d=CV_MAT_ELEM(*distortion_coeffs,float,i,0);
calib_data->distortion[i]=(int)OAPC_ROUND(d*100000000,0);
}
So what is wrong here?
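As a side note (an observation on the snippet above, not part of the original post): the fill loop stores every corner into row 0 of image_points and object_points, although both matrices were created with board_total rows. A per-row fill would normally use the loop index j as the row index; a rough sketch (whether this matches the intended layout is an assumption):
for (j = 0; j < board_total; j++)
{
    // hypothetical per-row fill: one image point and one object point per corner
    CV_MAT_ELEM(*image_points,  float, j, 0) = corners[j].x;
    CV_MAT_ELEM(*image_points,  float, j, 1) = corners[j].y;
    CV_MAT_ELEM(*object_points, float, j, 0) = (float)(j / board_w);
    CV_MAT_ELEM(*object_points, float, j, 1) = (float)(j % board_w);
    CV_MAT_ELEM(*object_points, float, j, 2) = 0.0f;
}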

Related

How to calculate perimeter of a binary image using OpenCV 4.2 in C++

I want to calculate the perimeter of a white blob in a 512*512 binary image. The image will have only one blob. I used the following code earlier in OpenCV 3, but somehow it doesn't work in OpenCV 4.2: IplImage
is deprecated in the latest version, and I cannot pass a Mat object directly to the cvFindContours function. I am new to OpenCV and I don't know how it works. Other related questions regarding the perimeter are still unanswered.
To summarize, the following works in OpenCV 3 but does not work in the current OpenCV version (4.2).
int getPerimeter(unsigned char* inImagePtr, int inW, int inH)
{
int sumEven = 0; int sumOdd = 0;
int sumCorner = 0; int prevCode = 0;
//create a mat input Image
cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);
//create four connected structuring element
cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
element.data[1] = 1; element.data[3] = 1;
element.data[4] = 1; element.data[5] = 1;
element.data[7] = 1;
//erode input image
cv::Mat erodeImage;
erode(inImage, erodeImage, element);
//Invert eroded Image
cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);
//multiply with original binary Image to get the edge Image
cv::Mat edge = erodeImage.mul(inImage);
//Get chain code of the blob
CvChain* chain = 0;
CvMemStorage* storage = 0;
storage = cvCreateMemStorage(0);
auto temp = new IplImage(edge);
cvFindContours(temp, storage, (CvSeq**)(&chain), sizeof(*chain), CV_RETR_EXTERNAL, CV_CHAIN_CODE);
delete temp;
for (; chain != NULL; chain = (CvChain*)chain->h_next)
{
CvSeqReader reader;
int i, total = chain->total;
cvStartReadSeq((CvSeq*)chain, &reader, 0);
for (i = 0; i < total; i++)
{
char code;
CV_READ_SEQ_ELEM(code, reader);
if (code % 2 == 0)
sumEven++;
else
sumOdd++;
if (i > 0) {
if (code != prevCode)
sumCorner++;
}
prevCode = code;
}
}
float perimeter = (float)sumEven*0.980 + (float)sumOdd*1.406 - (float)sumCorner*0.091;
return (roundf(perimeter));
}
This worked just fine for me!
int getPerimeter(unsigned char* inImagePtr, int inW, int inH) {
// create a mat input Image
cv::Mat inImage(inH, inW, CV_8UC1, inImagePtr);
// create four connected structuring element
cv::Mat element = cv::Mat::zeros(3, 3, CV_8UC1);
element.data[1] = 1;
element.data[3] = 1;
element.data[4] = 1;
element.data[5] = 1;
element.data[7] = 1;
// erode input image
cv::Mat erodeImage;
erode(inImage, erodeImage, element);
// Invert eroded Image
cv::threshold(erodeImage, erodeImage, 0, 255, THRESH_BINARY_INV);
// multiply with original binary Image to get the edge Image
cv::Mat edge = erodeImage.mul(inImage);
vector<vector<Point>> contours;
findContours(edge, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE); // Retrieve only external contour
int preValue[2];
int nextValue[2];
int sumEven = 0;
int sumOdd = 0;
//vector<Point>::iterator itr;
for (int ii = 0; ii < contours[0].size(); ii++) {
Point pt = contours[0].at(ii);
preValue[0] = pt.x;
preValue[1] = pt.y;
if (ii != contours[0].size() - 1) {
Point pt_next = contours[0].at(ii + 1);
nextValue[0] = pt_next.x;
nextValue[1] = pt_next.y;
} else {
Point pt_next = contours[0].at(0);
nextValue[0] = pt_next.x;
nextValue[1] = pt_next.y;
}
if ((preValue[0] == nextValue[0]) or (preValue[1] == nextValue[1])) {
sumEven = sumEven + abs(nextValue[0] - preValue[0]) + abs(nextValue[1] - preValue[1]);
} else {
sumOdd = sumOdd + abs(nextValue[0] - preValue[0]);
}
}
int sumCorner = contours[0].size() - 1;
float perimeter = round(sumEven * 0.980 + sumOdd * 1.406 - sumCorner * 0.091);
return (roundf(perimeter));
}
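For completeness, a minimal usage sketch for the function above; the file name "blob.png", the 0/255 thresholding and the assumption that the loaded Mat is continuous are mine, not part of the original answer:
#include <opencv2/opencv.hpp>
#include <iostream>

int main() {
    // load the blob image as single channel and force it to a clean 0/255 binary
    cv::Mat blob = cv::imread("blob.png", cv::IMREAD_GRAYSCALE);
    cv::threshold(blob, blob, 127, 255, cv::THRESH_BINARY);
    // imread returns a continuous Mat, so the raw data pointer can be passed directly
    int perimeter = getPerimeter(blob.data, blob.cols, blob.rows);
    std::cout << "perimeter: " << perimeter << std::endl;
    return 0;
}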

How to merge a lot of square images via OpenCV?

How can I merge images like below into a single image using OpenCV (there can be any number of them both horizontally and vertically)? Is there any built-in solution to do it?
Additional pieces:
Well, it seems that I finished the puzzle:
Main steps:
Compare each pair of images (puzzle pieces) to know the relative position (findRelativePositions and getPosition).
Build a map knowing the relative positions of the pieces (buildPuzzle and builfForPiece)
Create the final collage putting each image at the correct position (final part of buildPuzzle).
Comparison between pieces A and B in step 1 is done by checking for similarity (sum of absolute differences) among:
B is NORTH to A: A first row and B last row;
B is SOUTH to A: A last row and B first row;
B is WEST to A : A last column and B first column;
B is EAST to A : A first column and B last column.
Since the images do not overlap, but we can assume that adjoining rows (columns) are quite similar, the key aspect is to use an (ad hoc) threshold to decide whether two pieces are adjacent or not. This is handled in the getPosition function, with the threshold parameter threshold.
Here is the full code. Please let me know if something is not clear.
#include <opencv2\opencv.hpp>
#include <algorithm>
#include <set>
using namespace std;
using namespace cv;
enum Direction
{
NORTH = 0,
SOUTH,
WEST,
EAST
};
int getPosition(const Mat3b& A, const Mat3b& B, double& cost)
{
Mat hsvA, hsvB;
cvtColor(A, hsvA, COLOR_BGR2HSV);
cvtColor(B, hsvB, COLOR_BGR2HSV);
int threshold = 1000;
// Check NORTH
Mat3b AN = hsvA(Range(0, 1), Range::all());
Mat3b BS = hsvB(Range(B.rows - 1, B.rows), Range::all());
Mat3b AN_BS;
absdiff(AN, BS, AN_BS);
Scalar scoreN = sum(AN_BS);
// Check SOUTH
Mat3b AS = hsvA(Range(A.rows - 1, A.rows), Range::all());
Mat3b BN = hsvB(Range(0, 1), Range::all());
Mat3b AS_BN;
absdiff(AS, BN, AS_BN);
Scalar scoreS = sum(AS_BN);
// Check WEST
Mat3b AW = hsvA(Range::all(), Range(A.cols - 1, A.cols));
Mat3b BE = hsvB(Range::all(), Range(0, 1));
Mat3b AW_BE;
absdiff(AW, BE, AW_BE);
Scalar scoreW = sum(AW_BE);
// Check EAST
Mat3b AE = hsvA(Range::all(), Range(0, 1));
Mat3b BW = hsvB(Range::all(), Range(B.cols - 1, B.cols));
Mat3b AE_BW;
absdiff(AE, BW, AE_BW);
Scalar scoreE = sum(AE_BW);
vector<double> scores{ scoreN[0], scoreS[0], scoreW[0], scoreE[0] };
int idx_min = distance(scores.begin(), min_element(scores.begin(), scores.end()));
int direction = (scores[idx_min] < threshold) ? idx_min : -1;
cost = scores[idx_min];
return direction;
}
void resolveConflicts(Mat1i& positions, Mat1d& costs)
{
for (int c = 0; c < 4; ++c)
{
// Search for duplicate pieces in each column
set<int> pieces;
set<int> dups;
for (int r = 0; r < positions.rows; ++r)
{
int label = positions(r, c);
if (label >= 0)
{
if (pieces.count(label) == 1)
{
dups.insert(label);
}
else
{
pieces.insert(label);
}
}
}
if (dups.size() > 0)
{
int min_idx = -1;
for (int duplicate : dups)
{
// Find minimum cost position
Mat1d column = costs.col(c);
min_idx = distance(column.begin(), min_element(column.begin(), column.end()));
// Keep only minimum cost position
for (int ir = 0; ir < positions.rows; ++ir)
{
int label = positions(ir, c);
if ((label == duplicate) && (ir != min_idx))
{
positions(ir, c) = -1;
}
}
}
}
}
}
void findRelativePositions(const vector<Mat3b>& pieces, Mat1i& positions)
{
positions = Mat1i(pieces.size(), 4, -1);
Mat1d costs(pieces.size(), 4, DBL_MAX);
for (int i = 0; i < pieces.size(); ++i)
{
for (int j = i + 1; j < pieces.size(); ++j)
{
double cost;
int pos = getPosition(pieces[i], pieces[j], cost);
if (pos >= 0)
{
if (costs(i, pos) > cost)
{
positions(i, pos) = j;
costs(i, pos) = cost;
switch (pos)
{
case NORTH:
positions(j, SOUTH) = i;
costs(j, SOUTH) = cost;
break;
case SOUTH:
positions(j, NORTH) = i;
costs(j, NORTH) = cost;
break;
case WEST:
positions(j, EAST) = i;
costs(j, EAST) = cost;
break;
case EAST:
positions(j, WEST) = i;
costs(j, WEST) = cost;
break;
}
}
}
}
}
resolveConflicts(positions, costs);
}
void builfForPiece(int idx_piece, set<int>& posed, Mat1i& labels, const Mat1i& positions)
{
Point pos(-1, -1);
// Find idx_piece on grid;
for (int r = 0; r < labels.rows; ++r)
{
for (int c = 0; c < labels.cols; ++c)
{
if (labels(r, c) == idx_piece)
{
pos = Point(c, r);
break;
}
}
if (pos.x >= 0) break;
}
if (pos.x < 0) return;
// Put connected pieces
for (int c = 0; c < 4; ++c)
{
int next = positions(idx_piece, c);
if (next > 0)
{
switch (c)
{
case NORTH:
labels(Point(pos.x, pos.y - 1)) = next;
posed.insert(next);
break;
case SOUTH:
labels(Point(pos.x, pos.y + 1)) = next;
posed.insert(next);
break;
case WEST:
labels(Point(pos.x + 1, pos.y)) = next;
posed.insert(next);
break;
case EAST:
labels(Point(pos.x - 1, pos.y)) = next;
posed.insert(next);
break;
}
}
}
}
Mat3b buildPuzzle(const vector<Mat3b>& pieces, const Mat1i& positions, Size sz)
{
int n_pieces = pieces.size();
set<int> posed;
set<int> todo;
for (int i = 0; i < n_pieces; ++i) todo.insert(i);
Mat1i labels(n_pieces * 2 + 1, n_pieces * 2 + 1, -1);
// Place first element in the center
todo.erase(0);
labels(Point(n_pieces, n_pieces)) = 0;
posed.insert(0);
builfForPiece(0, posed, labels, positions);
// Build puzzle starting from the already placed elements
while (todo.size() > 0)
{
auto it = todo.begin();
int next = -1;
do
{
next = *it;
++it;
} while (posed.count(next) == 0 && it != todo.end());
todo.erase(next);
builfForPiece(next, posed, labels, positions);
}
// Posed all pieces, now collage!
vector<Point> pieces_position;
Mat1b mask = labels >= 0;
findNonZero(mask, pieces_position);
Rect roi = boundingRect(pieces_position);
Mat1i lbls = labels(roi);
Mat3b collage(roi.height * sz.height, roi.width * sz.width, Vec3b(0, 0, 0));
for (int r = 0; r < lbls.rows; ++r)
{
for (int c = 0; c < lbls.cols; ++c)
{
if (lbls(r, c) >= 0)
{
Rect rect(c*sz.width, r*sz.height, sz.width, sz.height);
pieces[lbls(r, c)].copyTo(collage(rect));
}
}
}
return collage;
}
int main()
{
// Load images
vector<String> filenames;
glob("D:\\SO\\img\\puzzle*", filenames);
vector<Mat3b> pieces(filenames.size());
for (int i = 0; i < filenames.size(); ++i)
{
pieces[i] = imread(filenames[i], IMREAD_COLOR);
}
// Find Relative positions
Mat1i positions;
findRelativePositions(pieces, positions);
// Build the puzzle
Mat3b puzzle = buildPuzzle(pieces, positions, pieces[0].size());
imshow("Puzzle", puzzle);
waitKey();
return 0;
}
NOTE
No, there is no built-in solution to perform this. Image stitching won't work, since the images do not overlap.
I cannot guarantee that this works for every puzzle, but it should work for most of them.
I probably should have been working these past couple of hours instead, but this was fun :D
EDIT
Adding more puzzle pieces generated wrong results with the previous code version. This was due to the (wrong) assumption that at most one piece would be a good enough match to connect to a given piece.
Now I added a cost matrix, and only the minimum-cost piece is saved as the neighbor of a given piece.
I also added a resolveConflicts function that prevents one piece from being merged (in a non-conflicting position) with more than one piece.
This is the result after adding more pieces:
UPDATE
Considerations after increasing the number of puzzle pieces:
This solution is dependent on the input order of the pieces, since it uses a greedy approach to find neighbors.
While searching for neighbors, it's better to compare the H channel in the HSV space. I updated the code above with this improvement.
The final solution probably needs some kind of global minimization of a global cost matrix. That would make the method independent of the input order. I'll be back on this asap.
Once you have loaded these images as OpenCV Mat, you can concatenate them either vertically or horizontally using:
Mat A, B; // Images that will be concatenated
Mat H; // Here we will concatenate A and B horizontally
Mat V; // Here we will concatenate A and B vertically
hconcat(A, B, H);
vconcat(A, B, V);
If you need to concatenate more than two images, you can use these methods recursively.
By the way, I think these methods are not included in the OpenCV documentation, but I have used them in the past.
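For illustration, a minimal sketch of applying that idea to a whole grid; the helper name concatGrid and the assumption that the tiles all have the same size and are already ordered row by row are mine, not part of the answer:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;

// Hypothetical helper: build a rows x cols collage from same-sized tiles,
// ordered row by row in the input vector.
Mat concatGrid(const std::vector<Mat>& tiles, int rows, int cols)
{
    Mat grid;
    for (int r = 0; r < rows; ++r)
    {
        // concatenate one row of tiles horizontally
        Mat rowImage = tiles[r * cols].clone();
        for (int c = 1; c < cols; ++c)
        {
            Mat tmp;
            hconcat(rowImage, tiles[r * cols + c], tmp);
            rowImage = tmp;
        }
        // stack the finished row below the previous ones
        if (grid.empty())
            grid = rowImage;
        else
        {
            Mat tmp;
            vconcat(grid, rowImage, tmp);
            grid = tmp;
        }
    }
    return grid;
}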

copying pixel by pixel in openCV

I have code that copies a video frame into another image. When I copy it, somehow the angle changes.
Here's a link to a picture:
http://i24.photobucket.com/albums/c22/Klifford_Kho/wrongpixel_zpsloshtqqy.png
Mat frame;
Mat processedImage;
void copy()
{
for (int i = 0; i<400; i++)
{
for (int j = 0; j<200; j++)
{
int b = frame.at<cv::Vec3b>(i, j)[0];
int g = frame.at<cv::Vec3b>(i, j)[1];
int r = frame.at<cv::Vec3b>(i, j)[2];
processedImage.at<cv::Vec3b>(i, j)[0] = b;
processedImage.at<cv::Vec3b>(i, j)[1] = g;
processedImage.at<cv::Vec3b>(i, j)[2] = r;
}
}
}
int main()
{
VideoCapture cap(0); // get first cam
while (cap.isOpened())
{
if (!cap.read(frame)) // cam might need some warmup
continue;
processedImage = cv::Mat(frame.size().height, frame.size().width, CV_8UC1);
processedImage.setTo(cv::Scalar::all(0));
copy();
imshow("Original", frame);
imshow("Processed", processedImage);
if (waitKey(10) == 27)
break;
}
return 0;
}
P.S. I didn't use frame.cols and frame.rows in the loop conditions because that generated an error.
Here's a picture of the error:
http://i24.photobucket.com/albums/c22/Klifford_Kho/wrongpixel_zpswze5qjrr.png
It is because you create a single-channel destination image.
processedImage = cv::Mat(frame.size().height, frame.size().width, CV_8UC1);
Change CV_8UC1 to CV_8UC3; it should also help with the error mentioned at the end of the question.
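A minimal sketch of the suggested change, assuming the same globals as in the question:
// allocate a 3-channel destination so every BGR pixel of the frame has somewhere to go
processedImage = cv::Mat(frame.size().height, frame.size().width, CV_8UC3);
processedImage.setTo(cv::Scalar::all(0));
copy();
// with matching types, processedImage = frame.clone(); would also copy the whole frame in one call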

How to apply K means in a mask of an image instead the whole one

I want to apply K-means in OpenCV to a region of an image that is not a square or a rectangle. For example, the source image is:
now I select a custom mask:
and apply K Means with K = 3:
Obviously without considering the area outside the mask (white).
Instead, what I can do with OpenCV is K-means that also takes that outside area into account:
And that messes up my final image, because black is considered one colour.
Do you have any clue?
Thank you in advance.
Quick and dirty solution.
vector<Vec3b> points;
vector<Point> locations;
for( int y = 0; y < src.rows; y++) {
for( int x = 0; x < src.cols; x++) {
if ( (int)mask.at<unsigned char>(y,x) != 0 ) {
points.push_back(src.at<Vec3b>(y,x));
locations.push_back(Point(x,y));
}
}
}
Mat kmeanPoints(points.size(), 3, CV_32F);
for( int y = 0; y < points.size(); y++ ) {
for( int z = 0; z < 3; z++) {
kmeanPoints.at<float>(y, z) = points[y][z];
}
}
Mat labels;
Mat centers;
kmeans(kmeanPoints, 4, labels, TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 10, 0.1), 10, cv::KMEANS_PP_CENTERS, centers);
Mat final = Mat::zeros( src.size(), src.type() );
Vec3b tempColor;
for(int i = 0; i<locations.size(); i++) {
int cluster_idx = labels.at<int>(i,0);
tempColor[0] = centers.at<float>(cluster_idx, 0);
tempColor[1] = centers.at<float>(cluster_idx, 1);
tempColor[2] = centers.at<float>(cluster_idx, 2);
final.at<Vec3b>(locations[i]) = tempColor;
}
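The snippet assumes a BGR source src and a single-channel mask mask are already loaded (and using namespace cv, as above). A hypothetical setup, plus a small variation that keeps the original background outside the mask instead of leaving it black; the file names and the variation are mine, not part of the answer:
// hypothetical setup for the snippet above
Mat src = imread("source.jpg", IMREAD_COLOR);
Mat mask = imread("mask.png", IMREAD_GRAYSCALE);
// variation: start from a copy of the source so pixels outside the mask
// keep their original colour instead of staying black
Mat final = src.clone();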
Assuming that you have an input RGB image called img (here) and a one-channel mask called mask (here), here is the snippet to prepare your k-means computation:
int nbClasses = 3; // or whatever you want
cv::TermCriteria myCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 10, 1.0);
cv::Mat data, labels, centers, result;
img.convertTo(data, CV_32F);
// reshape into 3 columns (one per channel, in BGR order) and as many rows as the total number of pixels in img
data = data.reshape(1, data.total());
If you want to apply a normal k-means (without mask) :
// apply k-means
cv::kmeans(data, nbClasses, labels, myCriteria, 3, cv::KMEANS_PP_CENTERS, centers);
// reshape both to a single column of Vec3f pixels
centers = centers.reshape(3, centers.rows);
data = data.reshape(3, data.rows);
// replace pixel values with their center value
cv::Vec3f *p = data.ptr<cv::Vec3f>();
for (size_t i = 0; i < data.rows; i++)
{
int center_id = labels.at<int>(i);
p[i] = centers.at<cv::Vec3f>(center_id);
}
// back to 2D image
data = data.reshape(3, img.rows);
// optional conversion to uchar
data.convertTo(result, CV_8U);
The result is here.
But if you want instead to apply a masked k-means:
int nbWhitePixels = cv::countNonZero(mask);
cv::Mat dataMasked = cv::Mat(nbWhitePixels, 3, CV_32F, cv::Scalar(0));
cv::Mat maskFlatten = mask.reshape(1, mask.total());
// filter data by the mask
int idx = 0;
for (int k = 0; k < mask.total(); k++)
{
int val = maskFlatten.at<uchar>(k, 0);
if (val != 0)
{
float val0 = data.at<float>(k, 0);
float val1 = data.at<float>(k, 1);
float val2 = data.at<float>(k, 2);
dataMasked.at<float>(idx,0) = val0;
dataMasked.at<float>(idx,1) = val1;
dataMasked.at<float>(idx,2) = val2;
idx++;
}
}
// apply k-means
cv::kmeans(dataMasked, nbClasses, labels, myCriteria, 3, cv::KMEANS_PP_CENTERS, centers);
// reshape to a single column of Vec3f pixels
centers = centers.reshape(3, centers.rows);
dataMasked = dataMasked.reshape(3, dataMasked.rows);
data = data.reshape(3, data.rows);
// replace pixel values with their center value, only for pixels in mask
cv::Vec3f *p = data.ptr<cv::Vec3f>();
idx = 0;
for (size_t i = 0; i < data.rows; i++)
{
if (maskFlatten.at<uchar>(i, 0) != 0)
{
int center_id = labels.at<int>(idx);
p[i] = centers.at<cv::Vec3f>(center_id);
idx++;
}
//else
// p[i] = cv::Vec3f(0, 0, 0);
}
// back to 2d, and uchar
data = data.reshape(3, img.rows);
data.convertTo(result, CV_8U);
You will now have this result.
If you leave the else part commented out, you will keep the initial pixel values outside the mask, whereas if you uncomment it, you will turn them into black pixels, like here.

efficient cropping calculation in processing

I am loading a PNG in Processing. This PNG has a lot of unused pixels around the actual image. Luckily all those pixels are completely transparent. My goal is to crop the PNG so that it only shows the image and gets rid of the unused pixels. The first step would be to calculate the bounds of the image. Initially I wanted to check every pixel's alpha value and see if that pixel is the highest or lowest coordinate for the bounds, like this:
------
------
--->oo
oooooo
oooooo
Then I realized I only needed to do this until the first non-transparent pixel, and repeat it backwards for the highest coordinate bound, like this:
------
-->ooo
oooooo
ooo<--
------
This would mean less calculation for the same result. However, the code I came up with still seems quite complex. Here it is:
class Rect { //class for storing the boundaries
int xMin, xMax, yMin, yMax;
Rect() {
}
}
PImage gfx;
void setup() {
size(800, 600);
gfx = loadImage("resources/test.png");
Rect _bounds = calcBounds(); //first calculate the boundaries
cropImage(_bounds); //then crop the image using those boundaries
}
void draw() {
}
Rect calcBounds() {
Rect _bounds = new Rect();
boolean _coordFound = false;
gfx.loadPixels();
//x min bounds
for (int i = 0; i < gfx.width; i++) { //rows
for (int i2 = 0; i2 < gfx.height; i2++) { //columns
if (alpha(gfx.pixels[(gfx.width * i2) + i]) != 0) {
_bounds.xMin = i;
_coordFound = true;
break;
}
}
if (_coordFound) {
break;
}
}
//x max bounds
_coordFound = false;
for (int i = gfx.width - 1; i >= 0; i--) { //rows
for (int i2 = gfx.height - 1; i2 >= 0; i2--) { //columns
if (alpha(gfx.pixels[(gfx.width * i2) + i]) != 0) {
_bounds.xMax = i;
_coordFound = true;
break;
}
}
if (_coordFound) {
break;
}
}
//y min bounds
_coordFound = false;
for (int i = 0; i < gfx.height; i++) { //columns
for (int i2 = 0; i2 < gfx.width; i2++) { //rows
if (alpha(gfx.pixels[(gfx.width * i) + i2]) != 0) {
_bounds.yMin = i;
_coordFound = true;
break;
}
}
if (_coordFound) {
break;
}
}
//y max bounds
_coordFound = false;
for (int i = gfx.height - 1; i >= 0; i--) { //columns
for (int i2 = gfx.width -1; i2 >= 0; i2--) { //rows
if (alpha(gfx.pixels[(gfx.width * i) + i2]) != 0) {
_bounds.yMax = i;
_coordFound = true;
break;
}
}
if (_coordFound) {
break;
}
}
return _bounds;
}
void cropImage(Rect _bounds) {
PImage _temp = createImage((_bounds.xMax - _bounds.xMin) + 1, (_bounds.yMax - _bounds.yMin) + 1, ARGB);
_temp.copy(gfx, _bounds.xMin, _bounds.yMin, (_bounds.xMax - _bounds.xMin) + 1, (_bounds.yMax - _bounds.yMin)+ 1, 0, 0, _temp.width, _temp.height);
gfx = _temp; //now the image is cropped
}
Isn't there a more efficient/faster way to calculate the bounds of the image?
And I do still want the boundary coordinates afterwards, instead of just cutting away at the image during the calculation.
If you store the last completely empty line found during e.g. the horizontal minimum and maximum scans in a variable, you can use it to constrain the vertical scans to only the area that has not yet been checked for being empty, instead of having to scan full columns. Depending on the amount and shape of the croppable area, that can save you quite a bit - see the schematic for a visual explanation of the modified algorithm:
By the way, in your //x min bounds scan you seem to be iterating over the width in both for loops; shouldn't it be the height in one of them? (Unless your images are all square, of course :))
