Contour segmentation - opencv

I have a contour which consists of curved segments and straight segments. Is there any way to segment the contour into the curved and straight parts?
Here is an example of such a contour.
I would like to have a segmentation like this:
Do you have any idea how I could solve such a problem?
Thank you very much and best regards.

Yes, I got the solution with the link PSchn posted. I go through the contour points, compute the curvature, and apply a threshold (aBorder below): everything under the threshold is a "curved segment", everything else is a straight segment. Thank you for your help!
// Returns a curvature measure for every point of a (closed) contour, using
// central differences over a window of +/- tStepSize points.
// Note: the classic curvature formula divides by |r'|^3; this variant divides
// by |r''|^3 instead, so LOW values mean curved here, and straight runs come
// out large (or infinite) -- which is why the threshold test below is inverted.
vector<double> getCurvature(vector<Point> const& tContourPoints, int tStepSize)
{
    vector<double> rVecCurvature(tContourPoints.size());
    if ((int)tContourPoints.size() < tStepSize)
    {
        return rVecCurvature;
    }
    for (int i = 0; i < (int)tContourPoints.size(); i++)
    {
        const Point2f pos = tContourPoints[i];
        int iminus = i - tStepSize;
        int iplus  = i + tStepSize;
        // wrap around at both ends, since the contour is closed
        Point2f pminus = (iminus < 0)
            ? tContourPoints[iminus + (int)tContourPoints.size()]
            : tContourPoints[iminus];
        Point2f pplus = (iplus >= (int)tContourPoints.size())
            ? tContourPoints[iplus - (int)tContourPoints.size()]
            : tContourPoints[iplus];
        // first derivative (central difference)
        Point2f a1stDerivative;
        a1stDerivative.x = (pplus.x - pminus.x) / (iplus - iminus);
        a1stDerivative.y = (pplus.y - pminus.y) / (iplus - iminus);
        // second derivative; (iplus - iminus) / 2 == tStepSize
        Point2f a2ndDerivative;
        a2ndDerivative.x = (pplus.x - 2 * pos.x + pminus.x) / (tStepSize * tStepSize);
        a2ndDerivative.y = (pplus.y - 2 * pos.y + pminus.y) / (tStepSize * tStepSize);
        double adivisor = a2ndDerivative.x * a2ndDerivative.x
                        + a2ndDerivative.y * a2ndDerivative.y;
        double acurvature;
        if (std::abs(adivisor) > 10e-8)
        {
            acurvature = std::abs(a2ndDerivative.y * a1stDerivative.x
                                - a2ndDerivative.x * a1stDerivative.y)
                       / pow(adivisor, 3.0 / 2.0);
        }
        else
        {
            // nearly zero second derivative: treat as perfectly straight
            acurvature = numeric_limits<double>::infinity();
        }
        rVecCurvature[i] = acurvature;
    }
    return rVecCurvature;
}
Once I have the curvature, I define a threshold (aBorder) and walk through my contour:
acurvature = getCurvature(aContours_img[0], 50);
if (acurvature.size() > 0)
{
    // aSegmentChange = 1 --> curved segment
    // aSegmentChange = 0 --> straight segment
    if (acurvature[0] < aBorder)
    {
        aSegmentChange = 1;
    }
    else
    {
        aSegmentChange = 0;
    }
    // segment the contour
    for (int i = 0; i < (int)acurvature.size(); i++)
    {
        // grow the output vectors before writing to them
        if (aSegmentIndex >= (int)aSegments.size())
        {
            aSegments.resize(aSegmentIndex + 1);
            aCurveIndex.resize(aSegmentIndex + 1);
        }
        aSegments[aSegmentIndex].push_back(aContours_img[0][i]);
        aCurveIndex[aSegmentIndex].push_back(aSegmentChange);
        // start a new segment whenever the curvature crosses the threshold
        if (acurvature[i] < aBorder && aSegmentChange == 0)
        {
            aSegmentIndex++;
            aSegmentChange = 1;
        }
        else if (acurvature[i] > aBorder && aSegmentChange == 1)
        {
            aSegmentIndex++;
            aSegmentChange = 0;
        }
    }
}
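For anyone wiring this up: a minimal driver sketch showing how the surrounding variables could be declared. The image path, the threshold aBorder = 0.05 and the step size 50 are placeholder values to be tuned, not part of the original answer:
void segmentContourExample()
{
    // placeholder path to a binary image containing the shape
    Mat bin = imread("contour.png", IMREAD_GRAYSCALE);
    vector<vector<Point> > aContours_img;
    findContours(bin, aContours_img, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    vector<vector<Point> > aSegments;  // the points of each segment
    vector<vector<int> > aCurveIndex;  // 1 = curved, 0 = straight, per segment
    int aSegmentIndex = 0;
    int aSegmentChange = 0;
    double aBorder = 0.05;             // placeholder threshold

    vector<double> acurvature = getCurvature(aContours_img[0], 50);
    // ... run the segmentation loop from above here ...
}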

This is my contour
And this is the result of segmentation

Related

Looking for a more efficient way to create a 4-directional linked list

I am trying to create a method that will link all Cell objects set up in a 2D array named CellGrid[,].
My question is: since most of the code in SetDirection() is so similar, it seems there should be a better way to achieve my goal.
(Side note: this is functional, but the execution feels "off".)
private void SetDirection()
{
    int x = 0, y = 0;
    for (int i = 0; i < Size * (Size - 1);) // setting all UP pointers
    {
        if (i == 0) { x = 0; y = 1; } // initial setup
        for (x = 0; x < Size; x++)
        {
            CellGrid[x, y].SetPointer(CellGrid[x, y - 1], Direction.Up);
            i++;
        }
        y++;
    }
    for (int i = 0; i < Size * (Size - 1);) // setting all DOWN pointers
    {
        if (i == 0) { x = 0; y = 0; } // initial setup
        for (x = 0; x < Size; x++)
        {
            CellGrid[x, y].SetPointer(CellGrid[x, y + 1], Direction.Down);
            i++;
        }
        y++;
    }
    for (int i = 0; i < Size * (Size - 1);) // setting all LEFT pointers
    {
        if (i == 0) { x = 1; y = 0; } // initial setup
        for (y = 0; y < Size; y++)
        {
            CellGrid[x, y].SetPointer(CellGrid[x - 1, y], Direction.Left);
            i++;
        }
        x++;
    }
    for (int i = 0; i < Size * (Size - 1);) // setting all RIGHT pointers
    {
        if (i == 0) { x = 0; y = 0; } // initial setup
        for (y = 0; y < Size; y++)
        {
            CellGrid[x, y].SetPointer(CellGrid[x + 1, y], Direction.Right);
            i++;
        }
        x++;
    }
}
public void SetPointer(Cell cellRef, GridBuilder.Direction dir)
{
    switch (dir)
    {
        case GridBuilder.Direction.Up:
            this.Up = cellRef;
            break;
        case GridBuilder.Direction.Down:
            this.Down = cellRef;
            break;
        case GridBuilder.Direction.Left:
            this.Left = cellRef;
            break;
        case GridBuilder.Direction.Right:
            this.Right = cellRef;
            break;
    }
}
You can indeed use one set of loops to make the links in all four directions. This is based on two ideas:
When setting a link, immediately set the link in the opposite direction between the same two cells.
When setting a link, immediately set the link between the two cells that are located in the mirrored positions -- mirrored by the main diagonal (x <--> y).
private void SetDirection() {
    for (int i = 1; i < Size; i++) {
        for (int j = 0; j < Size; j++) {
            CellGrid[i, j].SetPointer(CellGrid[i - 1, j], Direction.Left);
            CellGrid[i - 1, j].SetPointer(CellGrid[i, j], Direction.Right);
            CellGrid[j, i].SetPointer(CellGrid[j, i - 1], Direction.Up);
            CellGrid[j, i - 1].SetPointer(CellGrid[j, i], Direction.Down);
        }
    }
}

Computing gradient orientation in c++ using opencv functions

Can anyone help me out with this?
I am trying to calculate the gradient orientation using the Sobel operator in OpenCV for the gradient in the x and y directions. I am using the atan2 function to compute the angle in radians, which I later convert to degrees, but all the angles I am getting are between 0 and 90 degrees.
My expectation is to get angles between 0 and 360 degrees. The image I am using is grayscale. The code segment is here below.
Mat PeripheralArea;
Mat grad_x, grad_y; // the matrices for the gradients in the x and y directions
int off_set_y = 0, off_set_x = 0;
int scale = 1, num_bins = 8, bin = 0;
int delta = -1;
int ddepth = CV_16S;
GaussianBlur(PeripheralArea, PeripheralArea, Size(3, 3), 0, 0, BORDER_DEFAULT);
Sobel(PeripheralArea, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
Sobel(PeripheralArea, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);
for (int row_y1 = 0, row_y2 = 0; row_y1 < grad_y.rows / 5, row_y2 < grad_x.rows / 5; row_y1++, row_y2++) {
    for (int col_x1 = 0, col_x2 = 0; col_x1 < grad_y.cols / 5, col_x2 < grad_x.cols / 5; col_x1++, col_x2++) {
        gradient_direction_radians = (double) atan2(
            (double) grad_y.at<uchar>(row_y1 + off_set_y, col_x1 + off_set_x),
            (double) grad_x.at<uchar>(row_y2 + off_set_y, col_x2 + off_set_x));
        gradient_direction_degrees = (int) (180 * gradient_direction_radians / 3.1415);
        gradient_direction_degrees = gradient_direction_degrees < 0
            ? gradient_direction_degrees + 360
            : gradient_direction_degrees;
    }
}
Note that the off_set_x and off_set_y variables are not part of the computation; they offset into the different square blocks for which I eventually want to compute a histogram feature vector.
You have specified that the destination depth of Sobel() is CV_16S.
Yet, when you access grad_x and grad_y, you use .at<uchar>(), implying that their elements are 8-bit unsigned quantities, when in fact they are 16-bit signed. You could use .at<short>() instead, but to me it looks like there are a number of issues with your code, not the least of which is that there is an OpenCV function that does exactly what you want.
Use cv::phase(), and replace your for loops with
cv::Mat gradient_angle_degrees;
bool angleInDegrees = true;
cv::phase(grad_x, grad_y, gradient_angle_degrees, angleInDegrees);
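For context, here is a minimal end-to-end sketch of that approach (the input path is a placeholder). The key detail is computing the Sobel gradients at a float depth so that cv::phase receives signed values:
#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE); // placeholder path
    if (img.empty()) return 1;
    cv::GaussianBlur(img, img, cv::Size(3, 3), 0, 0);

    cv::Mat grad_x, grad_y;
    cv::Sobel(img, grad_x, CV_32F, 1, 0, 3); // float depth keeps the sign
    cv::Sobel(img, grad_y, CV_32F, 0, 1, 3);

    cv::Mat gradient_angle_degrees;
    cv::phase(grad_x, grad_y, gradient_angle_degrees, true); // angles in [0, 360)
    return 0;
}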
I ran into the same need when I dived into some edge detection in C++.
For the orientation of the gradient I use atan2(); this standard function's +x and +y axes match how we usually traverse a 2D image (x to the right, y downward).
The diagrams below plot my understanding:
///////////////////////////////
// Quadrants of image:
// 3(-dx,-dy) | 4(+dx,-dy) [-pi,0]
// ------------------------->+x
// 2(-dx,+dy) | 1(+dx,+dy) [0,pi]
// v
// +y
///////////////////////////////
// Definition of arctan2():
// -135(-dx,-dy) | -45(+dx,-dy)
// ------------------------->+x
// 135(-dx,+dy) | +45(+dx,+dy)
// v
// +y
///////////////////////////////
How I compute the gradient:
bool gradient(double*& magnitude, double*& orientation, double* src, int width, int height, string file) {
    if (src == NULL)
        return false;
    if (width <= 0 || height <= 0)
        return false;
    double gradient_x_correlation[3*3] = {-0.5, 0.0, 0.5,
                                          -0.5, 0.0, 0.5,
                                          -0.5, 0.0, 0.5};
    double gradient_y_correlation[3*3] = {-0.5,-0.5,-0.5,
                                           0.0, 0.0, 0.0,
                                           0.5, 0.5, 0.5};
    double *Gx = NULL;
    double *Gy = NULL;
    this->correlation(Gx, src, gradient_x_correlation, width, height, 3);
    this->correlation(Gy, src, gradient_y_correlation, width, height, 3);
    if (Gx == NULL || Gy == NULL)
        return false;
    // magnitude (new[] takes an element count, not a byte count; nothrow makes
    // the NULL checks meaningful, since plain new would throw instead)
    magnitude = new (std::nothrow) double[width*height];
    if (magnitude == NULL)
        return false;
    memset(magnitude, 0, sizeof(double)*width*height);
    double gx = 0.0;
    double gy = 0.0;
    double gm = 0.0;
    for (int j=0; j<height; j++) {
        for (int i=0; i<width; i++) {
            gx = pow(Gx[i+j*width], 2);
            gy = pow(Gy[i+j*width], 2);
            gm = sqrt(gx+gy);
            // bail out if the magnitude would overflow the 8-bit range
            if (gm >= 255.0) {
                return false;
            }
            magnitude[i+j*width] = gm;
        }
    }
    // orientation
    orientation = new (std::nothrow) double[width*height];
    if (orientation == NULL)
        return false;
    memset(orientation, 0, sizeof(double)*width*height);
    double ori = 0.0;
    double dtmp = 0.0;
    double ori_normalized = 0.0;
    for (int j=0; j<height; j++) {
        for (int i=0; i<width; i++) {
            gx = Gx[i+j*width];
            gy = Gy[i+j*width];
            ori = atan2(Gy[i+j*width], Gx[i+j*width])/M_PI*180.0; // [-180,+180]; M_PI from <cmath>
            if (gx >= 0 && gy >= 0) { // [Quadrant 1]: [0,90] maps to [0,63]
                if (ori < 0) {
                    printf("[Err1QUA]ori:%.1f\n", ori);
                    return false;
                }
                ori_normalized = ori*255.0/360.0;
                if (ori != 0.0 && dtmp != ori) {
                    printf("[Quadrant 1]orientation: %.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
                    dtmp = ori;
                }
            }
            else if (gx >= 0 && gy < 0) { // [Quadrant 4]: [270,360), i.e. [-90,0), maps to [191,255]
                if (ori > 0) {
                    printf("[Err4QUA]orientation:%.1f\n", ori);
                    return false;
                }
                ori_normalized = (360.0+ori)*255.0/360.0;
                if (ori != 0.0 && dtmp != ori) {
                    printf("[Quadrant 4]orientation:%.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
                    dtmp = ori;
                }
            }
            else if (gx < 0 && gy >= 0) { // [Quadrant 2]: (90,180] maps to [64,127]
                if (ori < 0) {
                    printf("[Err2QUA]orientation:%.1f\n", ori);
                    return false;
                }
                ori_normalized = ori*255.0/360.0;
                if (ori != 0.0 && dtmp != ori) {
                    printf("[Quadrant 2]orientation: %.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
                    dtmp = ori;
                }
            }
            else if (gx < 0 && gy < 0) { // [Quadrant 3]: (180,270), i.e. (-180,-90), maps to [128,190]
                if (ori > 0) {
                    printf("[Err3QUA]orientation:%.1f\n", ori);
                    return false;
                }
                ori_normalized = (360.0+ori)*255.0/360.0;
                if (ori != 0.0 && dtmp != ori) {
                    printf("[Quadrant 3]orientation:%.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
                    dtmp = ori;
                }
            }
            else {
                printf("[EXCEPTION]orientation:%.1f\n", ori);
                return false;
            }
            orientation[i+j*width] = ori_normalized;
        }
    }
    return true;
}
How I compute the cross-correlation:
bool correlation(double*& dst, double* src, double* kernel, int width, int height, int window) {
    if (src == NULL || kernel == NULL)
        return false;
    if (width <= 0 || height <= 0 || width < window || height < window)
        return false;
    // new[] takes an element count, not a byte count
    dst = new (std::nothrow) double[width*height];
    if (dst == NULL)
        return false;
    memset(dst, 0, sizeof(double)*width*height);
    int ii = 0;
    int jj = 0;
    int nn = 0;
    int mm = 0;
    // lowest() is the most negative double; min() would be the smallest positive one
    double max = std::numeric_limits<double>::lowest();
    double min = std::numeric_limits<double>::max();
    double range = std::numeric_limits<double>::max();
    for (int j=0; j<height; j++) {
        for (int i=0; i<width; i++) {
            for (int m=0; m<window; m++) {
                for (int n=0; n<window; n++) {
                    ii = i+(n-window/2);
                    jj = j+(m-window/2);
                    nn = n;
                    mm = m;
                    if (ii >= 0 && ii < width && jj >= 0 && jj < height) {
                        dst[i+j*width] += src[ii+jj*width]*kernel[nn+mm*window];
                    }
                    else {
                        dst[i+j*width] += 0; // zero padding outside the image
                    }
                }
            }
            if (dst[i+j*width] > max)
                max = dst[i+j*width];
            else if (dst[i+j*width] < min)
                min = dst[i+j*width];
        }
    }
    // scale the double matrix by its value range (the sign is preserved,
    // so the result is not strictly confined to [0,255])
    range = max - min;
    double norm = 0.0;
    printf("correlated matrix max:%.1f, min:%.1f, range:%.1f\n", max, min, range);
    for (int j=0; j<height; j++) {
        for (int i=0; i<width; i++) {
            norm = dst[i+j*width];
            norm = 255.0*norm/range;
            dst[i+j*width] = norm;
        }
    }
    return true;
}
For my test I use an image of a hollow rectangle; you can download it from my sample.
The orientation of the gradient along the hollow rectangle moves from 0 to 360 degrees clockwise (Quadrant 1 to 2 to 3 to 4).
Here is the print trace of the orientation:
[Quadrant 1]orientation: 45.0 to be 31.9(31)
[Quadrant 1]orientation: 90.0 to be 63.8(63)
[Quadrant 2]orientation: 135.0 to be 95.6(95)
[Quadrant 2]orientation: 180.0 to be 127.5(127)
[Quadrant 3]orientation:-135.0 to be 159.4(159)
[Quadrant 3]orientation:-116.6 to be 172.4(172)
[Quadrant 4]orientation:-90.0 to be 191.2(191)
[Quadrant 4]orientation:-63.4 to be 210.1(210)
[Quadrant 4]orientation:-45.0 to be 223.1(223)
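A side note on the quadrant branches: since atan2() already returns angles in (-180, 180] degrees after the radian-to-degree conversion, the same [0,255] encoding can be written as a single normalization step. A sketch, equivalent to the branching above, given the raw gradients gx and gy:
// Map atan2's (-180, 180] degree range onto [0, 255].
double deg = atan2(gy, gx) * 180.0 / M_PI; // (-180, 180]
if (deg < 0.0)
    deg += 360.0;                          // now in [0, 360)
uint8_t encoded = (uint8_t)(deg * 255.0 / 360.0);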
You can see more source code about digital image processing on my GitHub :)

Cropping panorama image in OpenCV

I'm trying to find a simple algorithm to crop (remove the black areas of) a panorama image created with the OpenCV Stitcher module.
My idea is to calculate the innermost black points in the image, which will define the cropping area, as shown in the next image:
Expected cropped result:
I've tried the next two approaches, but they don't crop the image as expected:
First Approach:
void testCropA(cv::Mat& image)
{
    cv::Mat gray;
    cvtColor(image, gray, CV_BGR2GRAY);
    Size size = gray.size();
    int type = gray.type();
    int left = 0, top = 0, right = size.width, bottom = size.height;
    cv::Mat row_zeros = Mat::zeros(1, right, type);
    cv::Mat col_zeros = Mat::zeros(bottom, 1, type);
    while (countNonZero(gray.row(top) != row_zeros) == 0) { top++; }
    while (countNonZero(gray.col(left) != col_zeros) == 0) { left++; }
    while (countNonZero(gray.row(bottom - 1) != row_zeros) == 0) { bottom--; }
    while (countNonZero(gray.col(right - 1) != col_zeros) == 0) { right--; }
    cv::Rect cropRect(left, top, right - left, bottom - top);
    image = image(cropRect);
}
Second Approach:
void testCropB(cv::Mat& image)
{
    cv::Mat gray;
    cvtColor(image, gray, CV_BGR2GRAY);
    int minCol = gray.cols;
    int minRow = gray.rows;
    int maxCol = 0;
    int maxRow = 0;
    for (int i = 0; i < gray.rows - 3; i++)
    {
        for (int j = 0; j < gray.cols; j++)
        {
            if (gray.at<uchar>(i, j) != 0)
            {
                if (i < minRow) { minRow = i; }
                if (j < minCol) { minCol = j; }
                if (i > maxRow) { maxRow = i; }
                if (j > maxCol) { maxCol = j; }
            }
        }
    }
    cv::Rect cropRect = Rect(minCol, minRow, maxCol - minCol, maxRow - minRow);
    image = image(cropRect);
}
This is my current solution. I hope it helps others:
bool checkInteriorExterior(const cv::Mat &mask, const cv::Rect &croppingMask,
                           int &top, int &bottom, int &left, int &right)
{
    // Return true if the rectangle is fine as it is
    bool result = true;
    cv::Mat sub = mask(croppingMask);
    int x = 0;
    int y = 0;
    // Count the exterior pixels on each border of the rectangle, and choose
    // the side where the most exterior pixels occurred for reduction
    // (that's the heuristic)
    int top_row = 0;
    int bottom_row = 0;
    int left_column = 0;
    int right_column = 0;
    for (y = 0, x = 0; x < sub.cols; ++x)
    {
        // If there is an exterior part in the interior we have
        // to move the top side of the rect a bit to the bottom
        if (sub.at<uchar>(y, x) == 0)
        {
            result = false;
            ++top_row;
        }
    }
    for (y = (sub.rows - 1), x = 0; x < sub.cols; ++x)
    {
        // If there is an exterior part in the interior we have
        // to move the bottom side of the rect a bit to the top
        if (sub.at<uchar>(y, x) == 0)
        {
            result = false;
            ++bottom_row;
        }
    }
    for (y = 0, x = 0; y < sub.rows; ++y)
    {
        // If there is an exterior part in the interior
        if (sub.at<uchar>(y, x) == 0)
        {
            result = false;
            ++left_column;
        }
    }
    for (x = (sub.cols - 1), y = 0; y < sub.rows; ++y)
    {
        // If there is an exterior part in the interior
        if (sub.at<uchar>(y, x) == 0)
        {
            result = false;
            ++right_column;
        }
    }
    // The idea is to set `top = 1` if it's better to reduce
    // the rect at the top than anywhere else.
    if (top_row > bottom_row)
    {
        if (top_row > left_column && top_row > right_column)
        {
            top = 1;
        }
    }
    else if (bottom_row > left_column && bottom_row > right_column)
    {
        bottom = 1;
    }
    if (left_column >= right_column)
    {
        if (left_column >= bottom_row && left_column >= top_row)
        {
            left = 1;
        }
    }
    else if (right_column >= top_row && right_column >= bottom_row)
    {
        right = 1;
    }
    return result;
}
bool compareX(cv::Point a, cv::Point b)
{
    return a.x < b.x;
}
bool compareY(cv::Point a, cv::Point b)
{
    return a.y < b.y;
}
void crop(cv::Mat &source)
{
    cv::Mat gray;
    source.convertTo(source, CV_8U);
    cvtColor(source, gray, cv::COLOR_RGB2GRAY);
    // Extract all the black background (and maybe some interior parts)
    cv::Mat mask = gray > 0;
    // Now extract the outer contour
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(mask, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE, cv::Point(0, 0));
    cv::Mat contourImage = cv::Mat::zeros(source.size(), CV_8UC3);
    // Find the contour with the most elements
    int maxSize = 0;
    int id = 0;
    for (int i = 0; i < (int)contours.size(); ++i)
    {
        if ((int)contours.at(i).size() > maxSize)
        {
            maxSize = (int)contours.at(i).size();
            id = i;
        }
    }
    // Draw the filled contour to obtain a mask with interior parts
    cv::Mat contourMask = cv::Mat::zeros(source.size(), CV_8UC1);
    drawContours(contourMask, contours, id, cv::Scalar(255), -1, 8, hierarchy, 0, cv::Point());
    // Sort the contour in x/y directions to easily find min/max and their neighbours
    std::vector<cv::Point> cSortedX = contours.at(id);
    std::sort(cSortedX.begin(), cSortedX.end(), compareX);
    std::vector<cv::Point> cSortedY = contours.at(id);
    std::sort(cSortedY.begin(), cSortedY.end(), compareY);
    int minXId = 0;
    int maxXId = (int)(cSortedX.size() - 1);
    int minYId = 0;
    int maxYId = (int)(cSortedY.size() - 1);
    cv::Rect croppingMask;
    while ((minXId < maxXId) && (minYId < maxYId))
    {
        cv::Point min(cSortedX[minXId].x, cSortedY[minYId].y);
        cv::Point max(cSortedX[maxXId].x, cSortedY[maxYId].y);
        croppingMask = cv::Rect(min.x, min.y, max.x - min.x, max.y - min.y);
        // Out-codes: if one of them is set, the rectangle has to be reduced at that border
        int ocTop = 0;
        int ocBottom = 0;
        int ocLeft = 0;
        int ocRight = 0;
        bool finished = checkInteriorExterior(contourMask, croppingMask, ocTop, ocBottom, ocLeft, ocRight);
        if (finished)
        {
            break;
        }
        // Reduce the rectangle at the border if necessary
        if (ocLeft)   { ++minXId; }
        if (ocRight)  { --maxXId; }
        if (ocTop)    { ++minYId; }
        if (ocBottom) { --maxYId; }
    }
    // Crop the image with the created mask
    source = source(croppingMask);
}
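A usage sketch (the file names are placeholders):
cv::Mat pano = cv::imread("pano.jpg"); // placeholder: output of the Stitcher
if (!pano.empty())
{
    crop(pano); // shrinks the rectangle until no exterior pixel is left on its border
    cv::imwrite("pano_cropped.jpg", pano);
}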
I never used the Stitcher class, but I think you may be able to get the estimated homography matrix for each pair of images. If you can obtain it easily, you can multiply it with the corners of each original image (from the first to the last) to get their stitched coordinates; then compare the left/right x-coordinates and the top/bottom y-coordinates of all images to find the rectangle you need for cropping.
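A sketch of that idea, assuming the per-image 3x3 homography H is available as a CV_64F cv::Mat (the high-level Stitcher API does not hand these out directly, so H and src are hypothetical here):
// Warp the four corners of one source image into panorama coordinates.
std::vector<cv::Point2f> corners;
corners.push_back(cv::Point2f(0, 0));
corners.push_back(cv::Point2f((float)src.cols, 0));
corners.push_back(cv::Point2f((float)src.cols, (float)src.rows));
corners.push_back(cv::Point2f(0, (float)src.rows));
std::vector<cv::Point2f> warped;
cv::perspectiveTransform(corners, warped, H);
// Doing this for every input image and intersecting the x/y extents
// of the warped corners yields a rectangle covered by all images.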

opencv remap function : How to set pixel value to (0,0,0)

Function call is:
remap( src, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0,0, 0) );
map_x and map_y are defined as below:
for (int j = 0; j < src.rows; j++)
{
    for (int i = 0; i < src.cols; i++)
    {
        if (i > src.cols*0.25 && i < src.cols*0.75 && j > src.rows*0.25 && j < src.rows*0.75)
        {
            map_x.at<float>(j, i) = 2*(i - src.cols*0.25) + 0.5;
            map_y.at<float>(j, i) = 2*(j - src.rows*0.25) + 0.5;
        }
        else
        {
            map_x.at<float>(j, i) = 0;
            map_y.at<float>(j, i) = 0;
        }
    }
}
You can see the original and the resulting image. I want to ask: how can I set the outer parts of the result image (the green part) to black (r=0, g=0, b=0)?
I think you are mapping all pixels in the outer part to the original pixel at location (0,0). Try replacing the following
else {
    map_x.at<float>(j, i) = 0;
    map_y.at<float>(j, i) = 0;
}
with this
else {
    map_x.at<float>(j, i) = -1;
    map_y.at<float>(j, i) = -1;
}
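With BORDER_CONSTANT, any map entry that points outside the source image is filled with the supplied border value, so (-1, -1) produces black pixels. A minimal self-contained sketch of the whole call (src is assumed to be a loaded BGR image):
cv::Mat dst;
cv::Mat map_x(src.size(), CV_32FC1);
cv::Mat map_y(src.size(), CV_32FC1);
for (int j = 0; j < src.rows; j++)
{
    for (int i = 0; i < src.cols; i++)
    {
        bool inside = i > src.cols * 0.25 && i < src.cols * 0.75 &&
                      j > src.rows * 0.25 && j < src.rows * 0.75;
        map_x.at<float>(j, i) = inside ? 2 * (i - src.cols * 0.25f) + 0.5f : -1.f;
        map_y.at<float>(j, i) = inside ? 2 * (j - src.rows * 0.25f) + 0.5f : -1.f;
    }
}
cv::remap(src, dst, map_x, map_y, CV_INTER_LINEAR, cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));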

wrong perspective image after taking picture on accelerometer supported blackberry device

There is a perspective problem with pictures read back after being taken with the camera. When the direction is north, the picture looks like it needs to be rotated 270 degrees; when the direction is east, it should be rotated 180 degrees. But it is fine when the direction is west. I tried getMetaData().getKeyValue("orientation") on the EncodedImage to derive a proper rotation formula, but it returned an empty string. Please help me solve this problem.
Found solution here:
https://gist.github.com/3788313
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import javax.microedition.io.Connector;
import javax.microedition.io.file.FileConnection;
import net.rim.device.api.system.Bitmap;
import net.rim.device.api.system.EncodedImage;
public class ExifRotate {
/**
* Flip the image horizontally.
*/
public static final int FLIP_H = 1;
/**
* Flip the image vertically.
*/
public static final int FLIP_V = 2;
/**
* Flip the image horizontally and vertically.
*/
public static final int FLIP_HV = 3;
/**
* Rotate the image 90 degrees clockwise.
*/
public static final int FLIP_90CW = 4;
/**
* Rotate the image 90 degrees counter-clockwise.
*/
public static final int FLIP_90CCW = 5;
/**
* Rotate the image 180 degrees.
*/
public static final int FLIP_180 = 6;
private final static int read2bytes(InputStream in) throws IOException {
return in.read() << 8 | in.read();
}
private final static int readByte(InputStream in) throws IOException {
return in.read();
}
public static Bitmap readImageFromFile(String filename, int width, int height) throws IOException {
EncodedImage img = null;
byte[] data = null;
FileConnection file = null;
try {
file = (FileConnection) Connector.open(filename, Connector.READ);
int fileSize = (int) file.fileSize();
if (fileSize == 0) {
throw new IOException("File is empty");
}
data = new byte[fileSize];
InputStream input = file.openInputStream();
// read() may return fewer bytes than requested, so loop until the buffer is full
int bytesRead = 0;
while (bytesRead < fileSize) {
int n = input.read(data, bytesRead, fileSize - bytesRead);
if (n < 0) break;
bytesRead += n;
}
input.close();
img = EncodedImage.createEncodedImage(data, 0, data.length);
int orientation = -1;
if ( filename.toLowerCase().endsWith(".jpg") || filename.toLowerCase().endsWith(".jpeg")) {
ByteArrayInputStream is = new ByteArrayInputStream( data );
orientation = getRotation(is);
}
if ( orientation == 2 ) {
return rotateBitmap(img.getBitmap(), FLIP_H);
} else if ( orientation == 3 ) {
return rotateBitmap(img.getBitmap(), FLIP_180);
} else if ( orientation == 4 ) {
return rotateBitmap(img.getBitmap(), FLIP_V);
} else if ( orientation == 5 ) {
Bitmap tmp = rotateBitmap(img.getBitmap(), FLIP_H);
tmp = rotateBitmap(tmp, FLIP_90CCW);
return tmp;
} else if ( orientation == 6 ) {
return rotateBitmap(img.getBitmap(), FLIP_90CW);
} else if ( orientation == 7 ) {
Bitmap tmp = rotateBitmap(img.getBitmap(), FLIP_H);
tmp = rotateBitmap(tmp, FLIP_90CW);
return tmp;
} else if ( orientation == 8 ) {
return rotateBitmap(img.getBitmap(), FLIP_90CCW);
} else {
return img.getBitmap();
}
} finally {
if (file != null) {
try { file.close(); }
catch(Exception ex){}
}
}
}
public static int getRotation(InputStream in) throws IOException {
int [] exif_data = new int[100];
int n_flag = 0, set_flag = 0;
int is_motorola = 0;
/* Read File head, check for JPEG SOI + Exif APP1 */
for (int i = 0; i < 4; i++)
exif_data[i] = readByte(in);
if (exif_data[0] != 0xFF || exif_data[1] != 0xD8 || exif_data[2] != 0xFF || exif_data[3] != 0xE1)
return -2;
/* Get the marker parameter length count */
int length = read2bytes(in);
// exif_data = new int[length];
/* Length includes itself, so must be at least 2 */
/* Following Exif data length must be at least 6 */
if (length < 8)
return -1;
length -= 8;
/* Read Exif head, check for "Exif" */
for (int i = 0; i < 6; i++)
exif_data[i] = in.read();
if (exif_data[0] != 0x45 || exif_data[1] != 0x78 || exif_data[2] != 0x69 || exif_data[3] != 0x66 || exif_data[4] != 0 || exif_data[5] != 0)
return -1;
/* Read Exif body */
length = length > exif_data.length ? exif_data.length : length;
for (int i = 0; i < length; i++)
exif_data[i] = in.read();
if (length < 12)
return -1; /* Length of an IFD entry */
/* Discover byte order */
if (exif_data[0] == 0x49 && exif_data[1] == 0x49)
is_motorola = 0;
else if (exif_data[0] == 0x4D && exif_data[1] == 0x4D)
is_motorola = 1;
else
return -1;
/* Check Tag Mark */
if (is_motorola == 1) {
if (exif_data[2] != 0)
return -1;
if (exif_data[3] != 0x2A)
return -1;
} else {
if (exif_data[3] != 0)
return -1;
if (exif_data[2] != 0x2A)
return -1;
}
/* Get first IFD offset (offset to IFD0) */
int offset;
if (is_motorola == 1) {
if (exif_data[4] != 0)
return -1;
if (exif_data[5] != 0)
return -1;
offset = exif_data[6];
offset <<= 8;
offset += exif_data[7];
} else {
if (exif_data[7] != 0)
return -1;
if (exif_data[6] != 0)
return -1;
offset = exif_data[5];
offset <<= 8;
offset += exif_data[4];
}
if (offset > length - 2)
return -1; /* check end of data segment */
/* Get the number of directory entries contained in this IFD */
int number_of_tags;
if (is_motorola == 1) {
number_of_tags = exif_data[offset];
number_of_tags <<= 8;
number_of_tags += exif_data[offset + 1];
} else {
number_of_tags = exif_data[offset + 1];
number_of_tags <<= 8;
number_of_tags += exif_data[offset];
}
if (number_of_tags == 0)
return -1;
offset += 2;
/* Search for Orientation Tag in IFD0 */
for (;;) {
if (offset > length - 12)
return -1; /* check end of data segment */
/* Get Tag number */
int tagnum;
if (is_motorola == 1) {
tagnum = exif_data[offset];
tagnum <<= 8;
tagnum += exif_data[offset + 1];
} else {
tagnum = exif_data[offset + 1];
tagnum <<= 8;
tagnum += exif_data[offset];
}
if (tagnum == 0x0112)
break; /* found Orientation Tag */
if (--number_of_tags == 0)
return -1;
offset += 12;
}
/*
* if (set_flag==1) { Set the Orientation value if (is_motorola==1) {
* exif_data[offset+2] = 0; Format = unsigned short (2 octets)
* exif_data[offset+3] = 3; exif_data[offset+4] = 0; Number Of
* Components = 1 exif_data[offset+5] = 0; exif_data[offset+6] = 0;
* exif_data[offset+7] = 1; exif_data[offset+8] = 0; exif_data[offset+9]
* = set_flag; exif_data[offset+10] = 0; exif_data[offset+11] = 0; }
* else { exif_data[offset+2] = 3; Format = unsigned short (2 octets)
* exif_data[offset+3] = 0; exif_data[offset+4] = 1; Number Of
* Components = 1 exif_data[offset+5] = 0; exif_data[offset+6] = 0;
* exif_data[offset+7] = 0; exif_data[offset+8] = set_flag;
* exif_data[offset+9] = 0; exif_data[offset+10] = 0;
* exif_data[offset+11] = 0; } }
*/
// else {
/* Get the Orientation value */
if (is_motorola == 1) {
if (exif_data[offset + 8] != 0)
return -1;
set_flag = exif_data[offset + 9];
} else {
if (exif_data[offset + 9] != 0)
return -1;
set_flag = exif_data[offset + 8];
}
if (set_flag > 8)
return -1;
// }
/* Write out Orientation value */
System.out.println("set_flag " + set_flag);
return set_flag;
}
public static Bitmap rotateBitmap(Bitmap src, int operation) {
int width = src.getWidth();
int height = src.getHeight();
int[] inPixels = new int[width*height];
src.getARGB(inPixels, 0, width, 0, 0, width, height);
int x = 0, y = 0;
int w = width;
int h = height;
int newX = 0;
int newY = 0;
int newW = w;
int newH = h;
switch (operation) {
case FLIP_H:
newX = width - (x + w);
break;
case FLIP_V:
newY = height - (y + h);
break;
case FLIP_HV:
newW = h;
newH = w;
newX = y;
newY = x;
break;
case FLIP_90CW:
newW = h;
newH = w;
newX = height - (y + h);
newY = x;
break;
case FLIP_90CCW:
newW = h;
newH = w;
newX = y;
newY = width - (x + w);
break;
case FLIP_180:
newX = width - (x + w);
newY = height - (y + h);
break;
}
int[] newPixels = new int[newW * newH];
int index, newRow, newCol, newIndex;
if ( operation == FLIP_H ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = row;
newCol = w - col - 1;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_V ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = h - row - 1;
newCol = col;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_HV ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = col;
newCol = row;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_90CW ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = col;
newCol = h - row - 1;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_90CCW ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = w - col - 1;
newCol = row;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_180 ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = h - row - 1;
newCol = w - col - 1;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
}
Bitmap dst = new Bitmap( newW, newH );
dst.setARGB(newPixels, 0, newW, 0, 0, newW, newH);
return dst;
}
}
