Check collision in a bubble grid - iOS

I am new to game development. I am creating a bubble shooter game in Xcode using cocos2d-x.
Everything works properly, but sometimes the collision does not register: the playerBubble does not collide with the last row, only with the one beyond it. Where is the problem in my code?
bool collided = false;
int collidedX = 0;
int collidedY = 0;
CCPoint playerBubblePosition = playerBubble->getPosition();
// Scan the grid row by row, starting from the bottom-left
for (int y = GRID_HEIGHT - 1; y >= 0; y--)
{
    if (collided)
        break;
    int maxWidth = GRID_WIDTH;
    if (y % 2 != 0) // odd rows have GRID_WIDTH - 1 bubbles
    {
        maxWidth -= 1;
    }
    for (int x = 0; x < maxWidth; x++)
    {
        Bubble* bubble = m_pBubbles[x][y];
        // Skip empty cells (except in the top row, y == 0) and falling bubbles
        if ((y != 0 && bubble->getType() == EMPTY) || bubble->getFalling())
            continue;
        if (ccpDistance(bubble->getPosition(), playerBubblePosition) < BUBBLE_RADIUS)
        {
            collidedX = x;
            collidedY = y;
            collided = true;
            break;
        }
    }
}
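One thing worth checking (an assumption, since the constant definitions are not shown): with a threshold of BUBBLE_RADIUS, the shot has to overlap a grid bubble by half its size before a hit registers, so it can slip past the bubbles of the outermost row and only collide one row deeper. Two circles of equal radius touch when their centers are closer than one diameter, so the distance test would look like this:

// Sketch, assuming BUBBLE_RADIUS is the radius (not the diameter):
// two bubbles collide when their centers are less than 2 * radius apart.
const float kCollisionDistance = 2.0f * BUBBLE_RADIUS;
if (ccpDistance(bubble->getPosition(), playerBubblePosition) < kCollisionDistance)
{
    collidedX = x;
    collidedY = y;
    collided = true;
    break;
}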


Looking for a more efficient way to create a 4-directional linked list

I am trying to create a method that will link all Cell objects set up in a 2D array named CellGrid[,].
My question is: since most of the code in SetDirection() is so similar, it seems there should be a better way to achieve my goal.
(Side note: this is functional, but the execution feels "off".)
private void SetDirection()
{
    int x = 0, y = 0;
    for (int i = 0; i < Size * (Size - 1);) // setting all UP pointers
    {
        if (i == 0) { x = 0; y = 1; } // initial setup
        for (x = 0; x < Size; x++)
        {
            CellGrid[x, y].SetPointer(CellGrid[x, y - 1], Direction.Up);
            i++;
        }
        y++;
    }
    for (int i = 0; i < Size * (Size - 1);) // setting all DOWN pointers
    {
        if (i == 0) { x = 0; y = 0; } // initial setup
        for (x = 0; x < Size; x++)
        {
            CellGrid[x, y].SetPointer(CellGrid[x, y + 1], Direction.Down);
            i++;
        }
        y++;
    }
    for (int i = 0; i < Size * (Size - 1);) // setting all LEFT pointers
    {
        if (i == 0) { x = 1; y = 0; } // initial setup
        for (y = 0; y < Size; y++)
        {
            CellGrid[x, y].SetPointer(CellGrid[x - 1, y], Direction.Left);
            i++;
        }
        x++;
    }
    for (int i = 0; i < Size * (Size - 1);) // setting all RIGHT pointers
    {
        if (i == 0) { x = 0; y = 0; } // initial setup
        for (y = 0; y < Size; y++)
        {
            CellGrid[x, y].SetPointer(CellGrid[x + 1, y], Direction.Right);
            i++;
        }
        x++;
    }
}

public void SetPointer(Cell cellRef, GridBuilder.Direction dir)
{
    switch (dir)
    {
        case GridBuilder.Direction.Up:
            this.Up = cellRef;
            break;
        case GridBuilder.Direction.Down:
            this.Down = cellRef;
            break;
        case GridBuilder.Direction.Left:
            this.Left = cellRef;
            break;
        case GridBuilder.Direction.Right:
            this.Right = cellRef;
            break;
    }
}
You can indeed use one set of loops to make the links in all four directions. This is based on two ideas:
When setting a link, immediately set the link in the opposite direction between the same two cells.
When setting a link, immediately set the links between the two cells located at the mirrored positions (mirrored across the main diagonal, x <-> y).
private void SetDirection() {
    for (int i = 1; i < Size; i++) {
        for (int j = 0; j < Size; j++) {
            CellGrid[i, j].SetPointer(CellGrid[i - 1, j], Direction.Left);
            CellGrid[i - 1, j].SetPointer(CellGrid[i, j], Direction.Right);
            CellGrid[j, i].SetPointer(CellGrid[j, i - 1], Direction.Up);
            CellGrid[j, i - 1].SetPointer(CellGrid[j, i], Direction.Down);
        }
    }
}

Network Delay Problem - Complexity Analysis

Below is my solution to the Network Delay Time problem on LeetCode. It passes all test cases, but I am not able to analyse its time complexity. I believe it is O(V^2 + E), where V is the number of nodes and E the number of edges.
In this solution I do add all adjacents of each node every time, but I do not process them further if a smaller distance already exists for that node.
LeetCode question link: https://leetcode.com/problems/network-delay-time
public int networkDelayTime(int[][] times, int n, int k) {
    int[] distances = new int[n + 1];
    Arrays.fill(distances, -1);
    if (n > 0) {
        List<List<int[]>> edges = new ArrayList<List<int[]>>();
        for (int i = 0; i <= n; i++) {
            edges.add(new ArrayList<int[]>());
        }
        for (int[] time : times) {
            edges.get(time[0]).add(new int[]{time[1], time[2]});
        }
        Queue<Vertex> queue = new LinkedList<>();
        queue.add(new Vertex(k, 0));
        while (!queue.isEmpty()) {
            Vertex cx = queue.poll();
            int index = cx.index;
            int distance = cx.distance;
            // process adjacents only if distance is updated
            if (distances[index] == -1 || distances[index] > distance) {
                distances[index] = distance;
                List<int[]> adjacents = edges.get(index);
                for (int[] adjacent : adjacents) {
                    queue.add(new Vertex(adjacent[0], adjacent[1] + distance));
                }
            }
        }
    }
    // "sum" actually tracks the maximum of all shortest distances
    int sum = 0;
    for (int i = 1; i <= n; i++) {
        int distance = distances[i];
        if (distance == -1) {
            return -1;
        }
        sum = Math.max(sum, distance);
    }
    return sum;
}

public static class Vertex {
    int index;
    int distance;

    public Vertex(int i, int d) {
        index = i;
        distance = d;
    }
}
You should use a PriorityQueue instead of a LinkedList. With a FIFO queue a vertex can be polled and re-relaxed many times before its distance settles, so this behaves like queue-based Bellman-Ford (SPFA) and the worst case degrades toward O(V * E) rather than O(V^2 + E). Polling the vertex with the smallest tentative distance first turns it into Dijkstra's algorithm, where each vertex is final the first time it is processed, giving O(E log E) with a binary heap.
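For illustration, here is the same structure sketched in C++ with a min-heap (the question is Java, but the change is identical: always poll the smallest tentative distance and skip stale queue entries; the adjacency-list layout is assumed to match the question's):

#include <algorithm>
#include <functional>
#include <queue>
#include <utility>
#include <vector>

// Sketch of the same algorithm as Dijkstra with a min-heap. "edges[u]" is
// assumed to hold (neighbor, weight) pairs, built as in the question.
int networkDelayTime(const std::vector<std::vector<std::pair<int, int>>>& edges,
                     int n, int k) {
    std::vector<int> dist(n + 1, -1);                 // -1 = not finalized yet
    using Item = std::pair<int, int>;                 // (distance, vertex)
    std::priority_queue<Item, std::vector<Item>, std::greater<Item>> pq;
    pq.push({0, k});
    while (!pq.empty()) {
        auto [d, u] = pq.top();
        pq.pop();
        if (dist[u] != -1) continue;                  // stale entry: u is final
        dist[u] = d;
        for (auto [v, w] : edges[u])
            if (dist[v] == -1) pq.push({d + w, v});
    }
    int answer = 0;
    for (int i = 1; i <= n; i++) {
        if (dist[i] == -1) return -1;                 // some node unreachable
        answer = std::max(answer, dist[i]);
    }
    return answer;
}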

Computing gradient orientation in C++ using OpenCV functions

Can anyone help me out with this?
I am trying to calculate the gradient orientation using the Sobel operator in OpenCV for the gradients in the x and y directions. I use the atan2 function to compute the angle in radians, which I later convert to degrees, but all the angles I get are between 0 and 90 degrees.
I expect angles between 0 and 360 degrees. The image I am using is grayscale. The code segment is below.
Mat PeripheralArea;
Mat grad_x, grad_y; // matrices for the gradients in the x and y directions
int off_set_y = 0, off_set_x = 0;
int scale = 1, num_bins = 8, bin = 0;
int delta = -1;
int ddepth = CV_16S;
double gradient_direction_radians = 0.0;
int gradient_direction_degrees = 0;
GaussianBlur(PeripheralArea, PeripheralArea, Size(3, 3), 0, 0, BORDER_DEFAULT);
Sobel(PeripheralArea, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
Sobel(PeripheralArea, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);
for (int row_y1 = 0, row_y2 = 0; row_y1 < grad_y.rows / 5 && row_y2 < grad_x.rows / 5; row_y1++, row_y2++) {
    for (int col_x1 = 0, col_x2 = 0; col_x1 < grad_y.cols / 5 && col_x2 < grad_x.cols / 5; col_x1++, col_x2++) {
        gradient_direction_radians = (double) atan2((double) grad_y.at<uchar>(row_y1 + off_set_y, col_x1 + off_set_x),
                                                    (double) grad_x.at<uchar>(row_y2 + off_set_y, col_x2 + off_set_x));
        gradient_direction_degrees = (int) (180 * gradient_direction_radians / 3.1415);
        gradient_direction_degrees = gradient_direction_degrees < 0
                ? gradient_direction_degrees + 360
                : gradient_direction_degrees;
    }
}
Note that the off_set_x and off_set_y variables are not part of the computation; they offset into the different square blocks for which I eventually want to compute a histogram feature vector.
You have specified that the destination depth of Sobel() is CV_16S.
Yet when you access grad_x and grad_y, you use .at<uchar>(), implying that their elements are 8-bit unsigned quantities, when in fact they are 16-bit signed. You could use .at<short>() instead, but to me it looks like there are a number of issues with your code, not the least of which is that there is an OpenCV function that does exactly what you want.
Use cv::phase(), and replace your for loops with
cv::Mat gradient_angle_degrees;
bool angleInDegrees = true;
cv::phase(grad_x, grad_y, gradient_angle_degrees, angleInDegrees);
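One caveat worth checking against your OpenCV version: cv::phase() expects floating-point input arrays, so with ddepth = CV_16S the gradients need a conversion first, e.g.:

// cv::phase() wants CV_32F/CV_64F input; convert the CV_16S gradients first
cv::Mat grad_x_f, grad_y_f;
grad_x.convertTo(grad_x_f, CV_32F);
grad_y.convertTo(grad_y_f, CV_32F);

cv::Mat gradient_angle_degrees;
bool angleInDegrees = true;
cv::phase(grad_x_f, grad_y_f, gradient_angle_degrees, angleInDegrees);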
I ran into the same need when I dove into some edge detection in C++.
For the orientation of the gradient I use atan2(); this standard function defines +y and +x the same way we usually traverse a 2D image.
I plotted it to show my understanding:
///////////////////////////////
// Quadrants of image:
// 3(-dx,-dy) | 4(+dx,-dy) [-pi,0]
// ------------------------->+x
// 2(-dx,+dy) | 1(+dx,+dy) [0,pi]
// v
// +y
///////////////////////////////
// Definition of arctan2():
// -135(-dx,-dy) | -45(+dx,-dy)
// ------------------------->+x
// 135(-dx,+dy) | +45(+dx,+dy)
// v
// +y
///////////////////////////////
How I compute the gradient:
bool gradient(double*& magnitude, double*& orientation, double* src, int width, int height, string file) {
    if (src == NULL)
        return false;
    if (width <= 0 || height <= 0)
        return false;

    double gradient_x_correlation[3*3] = {-0.5, 0.0, 0.5,
                                          -0.5, 0.0, 0.5,
                                          -0.5, 0.0, 0.5};
    double gradient_y_correlation[3*3] = {-0.5,-0.5,-0.5,
                                           0.0, 0.0, 0.0,
                                           0.5, 0.5, 0.5};

    double *Gx = NULL;
    double *Gy = NULL;
    this->correlation(Gx, src, gradient_x_correlation, width, height, 3);
    this->correlation(Gy, src, gradient_y_correlation, width, height, 3);
    if (Gx == NULL || Gy == NULL)
        return false;

    // magnitude
    magnitude = new double[width*height];
    if (magnitude == NULL)
        return false;
    memset(magnitude, 0, sizeof(double)*width*height);
    double gx = 0.0;
    double gy = 0.0;
    double gm = 0.0;
    for (int j=0; j<height; j++) {
        for (int i=0; i<width; i++) {
            gx = pow(Gx[i+j*width],2);
            gy = pow(Gy[i+j*width],2);
            gm = sqrt(pow(Gx[i+j*width],2)+pow(Gy[i+j*width],2));
            if (gm >= 255.0) {
                return false;
            }
            magnitude[i+j*width] = gm;
        }
    }

    // orientation
    orientation = new double[width*height];
    if (orientation == NULL)
        return false;
    memset(orientation, 0, sizeof(double)*width*height);
    double ori = 0.0;
    double dtmp = 0.0;
    double ori_normalized = 0.0;
    for (int j=0; j<height; j++) {
        for (int i=0; i<width; i++) {
            gx = (Gx[i+j*width]);
            gy = (Gy[i+j*width]);
            ori = atan2(Gy[i+j*width], Gx[i+j*width])/PI*(180.0); // atan2 in [-pi,+pi], converted to degrees
            if (gx >= 0 && gy >= 0) { // [Quadrant 1]: [0,90] to be [0,63]
                if (ori < 0) {
                    printf("[Err1QUA]ori:%.1f\n", ori);
                    return false;
                }
                ori_normalized = (ori)*255.0/360.0;
                if (ori != 0.0 && dtmp != ori) {
                    printf("[Quadrant 1]orientation: %.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
                    dtmp = ori;
                }
            }
            else if (gx >= 0 && gy < 0) { // [Quadrant 4]: [270,360) equal to [-90,0) to be [191,255]
                if (ori > 0) {
                    printf("[Err4QUA]orientation:%.1f\n", ori);
                    return false;
                }
                ori_normalized = (360.0+ori)*255.0/360.0;
                if (ori != 0.0 && dtmp != ori) {
                    printf("[Quadrant 4]orientation:%.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
                    dtmp = ori;
                }
            }
            else if (gx < 0 && gy >= 0) { // [Quadrant 2]: (90,180] to be [64,127]
                if (ori < 0) {
                    printf("[Err2QUA]orientation:%.1f\n", ori);
                    return false;
                }
                ori_normalized = (ori)*255.0/360.0;
                if (ori != 0.0 && dtmp != ori) {
                    printf("[Quadrant 2]orientation: %.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
                    dtmp = ori;
                }
            }
            else if (gx < 0 && gy < 0) { // [Quadrant 3]: (180,270) equal to (-180,-90) to be [128,190]
                if (ori > 0) {
                    printf("[Err3QUA]orientation:%.1f\n", ori);
                    return false;
                }
                ori_normalized = (360.0+ori)*255.0/360.0;
                if (ori != 0.0 && dtmp != ori) {
                    printf("[Quadrant 3]orientation:%.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
                    dtmp = ori;
                }
            }
            else {
                printf("[EXCEPTION]orientation:%.1f\n", ori);
                return false;
            }
            orientation[i+j*width] = ori_normalized;
        }
    }
    return true;
}
How I compute the cross-correlation:
bool correlation(double*& dst, double* src, double* kernel, int width, int height, int window) {
    if (src == NULL || kernel == NULL)
        return false;
    if (width <= 0 || height <= 0 || width < window || height < window)
        return false;

    dst = new double[width*height];
    if (dst == NULL)
        return false;
    memset(dst, 0, sizeof(double)*width*height);

    int ii = 0;
    int jj = 0;
    int nn = 0;
    int mm = 0;
    // lowest(), not min(): for floating-point types min() is the smallest
    // positive value, which would break the max search on negative data
    double max = std::numeric_limits<double>::lowest();
    double min = std::numeric_limits<double>::max();
    double range = std::numeric_limits<double>::max();
    for (int j=0; j<height; j++) {
        for (int i=0; i<width; i++) {
            for (int m=0; m<window; m++) {
                for (int n=0; n<window; n++) {
                    ii = i+(n-window/2);
                    jj = j+(m-window/2);
                    nn = n;
                    mm = m;
                    if (ii >= 0 && ii < width && jj >= 0 && jj < height) {
                        dst[i+j*width] += src[ii+jj*width]*kernel[nn+mm*window];
                    }
                    else {
                        dst[i+j*width] += 0; // zero padding outside the image
                    }
                }
            }
            if (dst[i+j*width] > max)
                max = dst[i+j*width];
            else if (dst[i+j*width] < min)
                min = dst[i+j*width];
        }
    }

    // normalize the double matrix into the uint8_t range
    range = max - min;
    double norm = 0.0;
    printf("correlated matrix max:%.1f, min:%.1f, range:%.1f\n", max, min, range);
    for (int j=0; j<height; j++) {
        for (int i=0; i<width; i++) {
            norm = dst[i+j*width];
            norm = 255.0*norm/range;
            dst[i+j*width] = norm;
        }
    }
    return true;
}
For testing I use an image of a hollow rectangle; you can download it from my sample.
The orientation of the gradient along the hollow rectangle part of my sample image moves from 0 to 360 degrees clockwise (quadrant 1 to 2 to 3 to 4).
Here is my output, which traces the orientation:
[Quadrant 1]orientation: 45.0 to be 31.9(31)
[Quadrant 1]orientation: 90.0 to be 63.8(63)
[Quadrant 2]orientation: 135.0 to be 95.6(95)
[Quadrant 2]orientation: 180.0 to be 127.5(127)
[Quadrant 3]orientation:-135.0 to be 159.4(159)
[Quadrant 3]orientation:-116.6 to be 172.4(172)
[Quadrant 4]orientation:-90.0 to be 191.2(191)
[Quadrant 4]orientation:-63.4 to be 210.1(210)
[Quadrant 4]orientation:-45.0 to be 223.1(223)
You can see more source code about digital image processing on my GitHub :)

Cropping panorama image in OpenCV

I'm trying to find a simple algorithm to crop (remove the black areas of) a panorama image created with the OpenCV Stitcher module.
My idea is to calculate the innermost black points in the image, which define the cropping area, as shown in the next image:
Expected cropped result:
I've tried the two approaches below, but they don't crop the image as expected:
First Approach:
void testCropA(cv::Mat& image)
{
    cv::Mat gray;
    cvtColor(image, gray, CV_BGR2GRAY);
    Size size = gray.size();
    int type = gray.type();
    int left = 0, top = 0, right = size.width, bottom = size.height;
    cv::Mat row_zeros = Mat::zeros(1, right, type);
    cv::Mat col_zeros = Mat::zeros(bottom, 1, type);
    while (countNonZero(gray.row(top) != row_zeros) == 0) { top++; }
    while (countNonZero(gray.col(left) != col_zeros) == 0) { left++; }
    while (countNonZero(gray.row(bottom - 1) != row_zeros) == 0) { bottom--; }
    while (countNonZero(gray.col(right - 1) != col_zeros) == 0) { right--; }
    cv::Rect cropRect(left, top, right - left, bottom - top);
    image = image(cropRect);
}
Second Approach:
void testCropB(cv::Mat& image)
{
    cv::Mat gray;
    cvtColor(image, gray, CV_BGR2GRAY);
    int minCol = gray.cols;
    int minRow = gray.rows;
    int maxCol = 0;
    int maxRow = 0;
    for (int i = 0; i < gray.rows - 3; i++)
    {
        for (int j = 0; j < gray.cols; j++)
        {
            if (gray.at<uchar>(i, j) != 0)
            {
                if (i < minRow) { minRow = i; }
                if (j < minCol) { minCol = j; }
                if (i > maxRow) { maxRow = i; }
                if (j > maxCol) { maxCol = j; }
            }
        }
    }
    cv::Rect cropRect = Rect(minCol, minRow, maxCol - minCol, maxRow - minRow);
    image = image(cropRect);
}
This is my current solution. Hope it helps others:
bool checkInteriorExterior(const cv::Mat& mask, const cv::Rect& croppingMask,
                           int& top, int& bottom, int& left, int& right)
{
    // Return true if the rectangle is fine as it is
    bool result = true;
    cv::Mat sub = mask(croppingMask);
    int x = 0;
    int y = 0;

    // Count the exterior pixels on each border, and choose the side where
    // the most exterior pixels occurred for reduction (that's the heuristic)
    int top_row = 0;
    int bottom_row = 0;
    int left_column = 0;
    int right_column = 0;

    for (y = 0, x = 0; x < sub.cols; ++x)
    {
        // If there is an exterior part in the interior we have
        // to move the top side of the rect a bit to the bottom
        if (sub.at<uchar>(y, x) == 0)
        {
            result = false;
            ++top_row;
        }
    }
    for (y = (sub.rows - 1), x = 0; x < sub.cols; ++x)
    {
        // If there is an exterior part in the interior we have
        // to move the bottom side of the rect a bit to the top
        if (sub.at<uchar>(y, x) == 0)
        {
            result = false;
            ++bottom_row;
        }
    }
    for (y = 0, x = 0; y < sub.rows; ++y)
    {
        // If there is an exterior part in the interior
        if (sub.at<uchar>(y, x) == 0)
        {
            result = false;
            ++left_column;
        }
    }
    for (x = (sub.cols - 1), y = 0; y < sub.rows; ++y)
    {
        // If there is an exterior part in the interior
        if (sub.at<uchar>(y, x) == 0)
        {
            result = false;
            ++right_column;
        }
    }

    // The idea is to set `top = 1` if it's better to reduce
    // the rect at the top than anywhere else.
    if (top_row > bottom_row)
    {
        if (top_row > left_column)
        {
            if (top_row > right_column)
            {
                top = 1;
            }
        }
    }
    else if (bottom_row > left_column)
    {
        if (bottom_row > right_column)
        {
            bottom = 1;
        }
    }
    if (left_column >= right_column)
    {
        if (left_column >= bottom_row)
        {
            if (left_column >= top_row)
            {
                left = 1;
            }
        }
    }
    else if (right_column >= top_row)
    {
        if (right_column >= bottom_row)
        {
            right = 1;
        }
    }
    return result;
}
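As a side note, the cascaded comparisons above just select the single border with the most exterior pixels; the same selection can be sketched more compactly (tie-breaking differs slightly from the original, so treat this as an illustration rather than a drop-in replacement):

// Illustration only: pick the one border with the most exterior pixels.
// Requires <algorithm>; tie-breaking differs from the nested-if version.
int counts[4] = { top_row, bottom_row, left_column, right_column };
int side = (int)(std::max_element(counts, counts + 4) - counts);
switch (side)
{
    case 0: top = 1;    break;
    case 1: bottom = 1; break;
    case 2: left = 1;   break;
    case 3: right = 1;  break;
}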
bool compareX(cv::Point a, cv::Point b)
{
    return a.x < b.x;
}

bool compareY(cv::Point a, cv::Point b)
{
    return a.y < b.y;
}
void crop(cv::Mat& source)
{
    cv::Mat gray;
    source.convertTo(source, CV_8U);
    cvtColor(source, gray, cv::COLOR_RGB2GRAY);

    // Extract all the black background (and maybe some interior parts)
    cv::Mat mask = gray > 0;

    // Now extract the outer contour
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(mask, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE, cv::Point(0, 0));
    cv::Mat contourImage = cv::Mat::zeros(source.size(), CV_8UC3);

    // Find the contour with the most elements
    int maxSize = 0;
    int id = 0;
    for (int i = 0; i < contours.size(); ++i)
    {
        if (contours.at((unsigned long)i).size() > maxSize)
        {
            maxSize = (int)contours.at((unsigned long)i).size();
            id = i;
        }
    }

    // Draw the filled contour to obtain a mask with interior parts
    cv::Mat contourMask = cv::Mat::zeros(source.size(), CV_8UC1);
    drawContours(contourMask, contours, id, cv::Scalar(255), -1, 8, hierarchy, 0, cv::Point());

    // Sort the contour in x/y directions to easily find min/max and next
    std::vector<cv::Point> cSortedX = contours.at((unsigned long)id);
    std::sort(cSortedX.begin(), cSortedX.end(), compareX);
    std::vector<cv::Point> cSortedY = contours.at((unsigned long)id);
    std::sort(cSortedY.begin(), cSortedY.end(), compareY);

    int minXId = 0;
    int maxXId = (int)(cSortedX.size() - 1);
    int minYId = 0;
    int maxYId = (int)(cSortedY.size() - 1);

    cv::Rect croppingMask;
    while ((minXId < maxXId) && (minYId < maxYId))
    {
        cv::Point min(cSortedX[minXId].x, cSortedY[minYId].y);
        cv::Point max(cSortedX[maxXId].x, cSortedY[maxYId].y);
        croppingMask = cv::Rect(min.x, min.y, max.x - min.x, max.y - min.y);

        // Out-codes: if one of them is set, the rectangle size has to be reduced at that border
        int ocTop = 0;
        int ocBottom = 0;
        int ocLeft = 0;
        int ocRight = 0;
        bool finished = checkInteriorExterior(contourMask, croppingMask, ocTop, ocBottom, ocLeft, ocRight);
        if (finished == true)
        {
            break;
        }

        // Reduce the rectangle at the border if necessary
        if (ocLeft)   { ++minXId; }
        if (ocRight)  { --maxXId; }
        if (ocTop)    { ++minYId; }
        if (ocBottom) { --maxYId; }
    }

    // Crop the image with the created mask
    source = source(croppingMask);
}
I never used the Stitcher class, but I think you may get the estimated homography matrix for each pair of images. If you can obtain it easily, you can multiply it with the corners of the first original image, and likewise for the corners of the last one, to get their stitched coordinates. Then take the innermost of these extents: the largest of the left x-coordinates, the smallest of the right x-coordinates, and similarly for the top and bottom y-coordinates of each image. That gives you the coordinates of each stitched image, which is what you need for some cases of cropping.
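A minimal sketch of that idea, assuming you can get hold of a 3x3 homography H (CV_64F) mapping a source image into the panorama frame; the Stitcher class does not hand this out directly, so obtaining H is the assumption here:

#include <opencv2/opencv.hpp>
#include <vector>

// Project the four corners of one source image into panorama coordinates.
// H is the (assumed known) 3x3 homography for that image.
std::vector<cv::Point2f> projectCorners(const cv::Mat& H, const cv::Size& srcSize)
{
    std::vector<cv::Point2f> corners = {
        cv::Point2f(0.0f, 0.0f),
        cv::Point2f((float)srcSize.width, 0.0f),
        cv::Point2f((float)srcSize.width, (float)srcSize.height),
        cv::Point2f(0.0f, (float)srcSize.height)
    };
    std::vector<cv::Point2f> projected;
    cv::perspectiveTransform(corners, projected, H);
    return projected;
}

Intersecting the projected extents of all images (largest left/top edge, smallest right/bottom edge) then yields an interior rectangle free of the black border.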

Efficient cropping calculation in Processing

I am loading a PNG in Processing. This PNG has a lot of unused pixels around the actual image. Luckily all those pixels are completely transparent. My goal is to crop the PNG to show only the image and get rid of the unused pixels. The first step is to calculate the bounds of the image. Initially I wanted to check every pixel's alpha value and see if that pixel is the highest or lowest coordinate for the bounds, like this:
------
------
--->oo
oooooo
oooooo
Then I realized I only need to scan until the first non-transparent pixel, and repeat backwards for the highest coordinate bound, like this:
------
-->ooo
oooooo
ooo<--
------
This means less calculation for the same result. However, the code I got out of it still seems very complex. Here it is:
class Rect { // class for storing the boundaries
    int xMin, xMax, yMin, yMax;

    Rect() {
    }
}

PImage gfx;

void setup() {
    size(800, 600);
    gfx = loadImage("resources/test.png");
    Rect _bounds = calcBounds(); // first calculate the boundaries
    cropImage(_bounds);          // then crop the image using those boundaries
}

void draw() {
}

Rect calcBounds() {
    Rect _bounds = new Rect();
    boolean _coordFound = false;
    gfx.loadPixels();

    // x min bounds
    for (int i = 0; i < gfx.width; i++) { // columns
        for (int i2 = 0; i2 < gfx.height; i2++) { // rows
            if (alpha(gfx.pixels[(gfx.width * i2) + i]) != 0) {
                _bounds.xMin = i;
                _coordFound = true;
                break;
            }
        }
        if (_coordFound) {
            break;
        }
    }

    // x max bounds
    _coordFound = false;
    for (int i = gfx.width - 1; i >= 0; i--) { // columns
        for (int i2 = gfx.height - 1; i2 >= 0; i2--) { // rows
            if (alpha(gfx.pixels[(gfx.width * i2) + i]) != 0) {
                _bounds.xMax = i;
                _coordFound = true;
                break;
            }
        }
        if (_coordFound) {
            break;
        }
    }

    // y min bounds
    _coordFound = false;
    for (int i = 0; i < gfx.height; i++) { // rows
        for (int i2 = 0; i2 < gfx.width; i2++) { // columns
            if (alpha(gfx.pixels[(gfx.width * i) + i2]) != 0) {
                _bounds.yMin = i;
                _coordFound = true;
                break;
            }
        }
        if (_coordFound) {
            break;
        }
    }

    // y max bounds
    _coordFound = false;
    for (int i = gfx.height - 1; i >= 0; i--) { // rows
        for (int i2 = gfx.width - 1; i2 >= 0; i2--) { // columns
            if (alpha(gfx.pixels[(gfx.width * i) + i2]) != 0) {
                _bounds.yMax = i;
                _coordFound = true;
                break;
            }
        }
        if (_coordFound) {
            break;
        }
    }
    return _bounds;
}

void cropImage(Rect _bounds) {
    PImage _temp = createImage((_bounds.xMax - _bounds.xMin) + 1, (_bounds.yMax - _bounds.yMin) + 1, ARGB);
    _temp.copy(gfx, _bounds.xMin, _bounds.yMin, (_bounds.xMax - _bounds.xMin) + 1, (_bounds.yMax - _bounds.yMin) + 1, 0, 0, _temp.width, _temp.height);
    gfx = _temp; // now the image is cropped
}
Isn't there a more efficient/faster way to calculate the bounds of the image?
And I do still want the boundary coordinates afterwards, instead of just cutting away at the image during the calculation.
If you store the last completely empty line found in, e.g., the horizontal minimum and maximum scans, you can use it to constrain your vertical scans to only the area that has not yet been checked for being empty, instead of having to scan full columns. Depending on the amount and shape of the croppable area, that can save you quite a bit; see the schematic for a visual explanation of the modified algorithm.
By the way, in your //x min bounds scan you seem to be iterating over the width in both for loops; shouldn't one of them be the height? (Unless your images are all square, of course. :))
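To make the constrained scan concrete, here is a minimal sketch in C++ over a plain row-major alpha buffer (a hypothetical layout; in Processing the same idea applies to gfx.pixels with alpha()). The top and bottom bounds are found first, and the column scans then only cover rows inside [yMin, yMax]:

#include <vector>

// Bounds of the opaque region in a row-major alpha buffer (0 = transparent).
// Sketch of the constrained scan: the top/bottom passes record the bounds,
// and the left/right passes only scan rows inside [yMin, yMax].
struct Bounds { int xMin, xMax, yMin, yMax; };

Bounds calcBounds(const std::vector<unsigned char>& alpha, int w, int h)
{
    Bounds b{0, w - 1, 0, h - 1};
    auto rowHasPixel = [&](int y) {
        for (int x = 0; x < w; x++)
            if (alpha[y * w + x] != 0) return true;
        return false;
    };
    while (b.yMin < h && !rowHasPixel(b.yMin)) b.yMin++;      // top
    while (b.yMax > b.yMin && !rowHasPixel(b.yMax)) b.yMax--; // bottom
    auto colHasPixel = [&](int x) {
        for (int y = b.yMin; y <= b.yMax; y++)                // constrained rows
            if (alpha[y * w + x] != 0) return true;
        return false;
    };
    while (b.xMin < w && !colHasPixel(b.xMin)) b.xMin++;      // left
    while (b.xMax > b.xMin && !colHasPixel(b.xMax)) b.xMax--; // right
    return b;
}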
