Computing gradient orientation in C++ using OpenCV functions

Can anyone help me out with this?
I am trying to calculate the gradient orientation using the Sobel operator in OpenCV for the gradients in the x and y directions. I am using the atan2 function to compute the angle in radians, which I later convert to degrees, but all the angles I am getting are between 0 and 90 degrees.
My expectation is to get angles between 0 and 360 degrees. The image I am using is grayscale. The code segment is shown below.
Mat PeripheralArea;
Mat grad_x, grad_y; // this is the matrix for the gradients in x and y directions
int off_set_y = 0, off_set_x = 0;
int scale = 1, num_bins = 8, bin = 0;
int delta=-1 ;
int ddepth = CV_16S;
GaussianBlur(PeripheralArea, PeripheralArea, Size(3, 3), 0, 0, BORDER_DEFAULT);
Sobel(PeripheralArea, grad_y, ddepth, 0, 1,3,scale, delta, BORDER_DEFAULT);
Sobel(PeripheralArea, grad_x, ddepth, 1, 0,3, scale, delta, BORDER_DEFAULT);
for (int row_y1 = 0, row_y2 = 0; row_y1 < grad_y.rows / 5, row_y2 < grad_x.rows / 5; row_y1++, row_y2++) {
for (int col_x1 = 0, col_x2 = 0; col_x1 < grad_y.cols / 5, col_x2 < grad_x.cols / 5; col_x1++, col_x2++) {
gradient_direction_radians = (double) atan2((double) grad_y.at<uchar>(row_y1 + off_set_y, col_x1 + off_set_x), (double) grad_x.at<uchar>(row_y2 + off_set_y, col_x2 + off_set_x));
gradient_direction_degrees = (int) (180 * gradient_direction_radians / 3.1415);
gradient_direction_degrees = gradient_direction_degrees < 0
? gradient_direction_degrees+360
: gradient_direction_degrees;
}
}
Note that the off_set_x and off_set_y variables are not part of the computation;
they offset to different square blocks for which I eventually want to
compute a histogram feature vector.

You have specified that the destination depth of Sobel() is CV_16S.
Yet, when you access grad_x and grad_y, you use .at<uchar>(), implying that their elements are 8-bit unsigned quantities, when in fact they are 16-bit signed. You could use .at<short>() instead, but to me it looks like there are a number of issues with your code, not the least of which is that there is an OpenCV function that does exactly what you want.
Use cv::phase(), and replace your for loops with
cv::Mat gradient_angle_degrees;
bool angleInDegrees = true;
cv::phase(grad_x, grad_y, gradient_angle_degrees, angleInDegrees);
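One caveat (my addition, not from the original answer): cv::phase() expects floating-point input, while the Sobel output above is CV_16S. Convert the gradients first, or compute the Sobel derivatives with ddepth = CV_32F, for example:
// Sketch: convert the CV_16S gradients to CV_32F before calling cv::phase()
cv::Mat grad_x_f, grad_y_f;
grad_x.convertTo(grad_x_f, CV_32F);
grad_y.convertTo(grad_y_f, CV_32F);
cv::phase(grad_x_f, grad_y_f, gradient_angle_degrees, angleInDegrees);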

I ran into the same need when I dived into doing some edge detection using C++.
For the orientation of the gradient I use atan2(); this standard API defines its +y and +x axes the same way we usually traverse a 2D image.
I have plotted it below to show my understanding.
///////////////////////////////
// Quadrants of image:
// 3(-dx,-dy) | 4(+dx,-dy) [-pi,0]
// ------------------------->+x
// 2(-dx,+dy) | 1(+dx,+dy) [0,pi]
// v
// +y
///////////////////////////////
// Definition of arctan2():
// -135(-dx,-dy) | -45(+dx,-dy)
// ------------------------->+x
// 135(-dx,+dy) | +45(+dx,+dy)
// v
// +y
///////////////////////////////
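To sanity-check this convention, here is a tiny standalone snippet (my own addition, not part of the original code) that prints atan2() for one point in each quadrant:
#include <cstdio>
#include <cmath>
int main() {
    const double PI = 3.14159265358979323846;
    // atan2(dy, dx), converted to degrees, one sign pattern per image quadrant
    printf("%+.0f\n", atan2( 1.0,  1.0) * 180.0 / PI); // Quadrant 1 (+dx, +dy): +45
    printf("%+.0f\n", atan2( 1.0, -1.0) * 180.0 / PI); // Quadrant 2 (-dx, +dy): +135
    printf("%+.0f\n", atan2(-1.0, -1.0) * 180.0 / PI); // Quadrant 3 (-dx, -dy): -135
    printf("%+.0f\n", atan2(-1.0,  1.0) * 180.0 / PI); // Quadrant 4 (+dx, -dy): -45
    return 0;
}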
Here is how I compute the gradient:
bool gradient(double*& magnitude, double*& orientation, double* src, int width, int height, string file) {
if (src == NULL)
return false;
if (width <= 0 || height <= 0)
return false;
double gradient_x_correlation[3*3] = {-0.5, 0.0, 0.5,
-0.5, 0.0, 0.5,
-0.5, 0.0, 0.5};
double gradient_y_correlation[3*3] = {-0.5,-0.5,-0.5,
0.0, 0.0, 0.0,
0.5, 0.5, 0.5};
double *Gx = NULL;
double *Gy = NULL;
this->correlation(Gx, src, gradient_x_correlation, width, height, 3);
this->correlation(Gy, src, gradient_y_correlation, width, height, 3);
if (Gx == NULL || Gy == NULL)
return false;
//magnitude
magnitude = new double[width*height]; // element count, not byte count
if (magnitude == NULL)
return false;
memset(magnitude, 0, sizeof(double)*width*height);
double gx = 0.0;
double gy = 0.0;
double gm = 0.0;
for (int j=0; j<height; j++) {
for (int i=0; i<width; i++) {
gx = pow(Gx[i+j*width],2);
gy = pow(Gy[i+j*width],2);
gm = sqrt(pow(Gx[i+j*width],2)+pow(Gy[i+j*width],2));
if (gm >= 255.0) {
return false;
}
magnitude[i+j*width] = gm;
}
}
//orientation
orientation = new double[width*height]; // element count, not byte count
if (orientation == NULL)
return false;
memset(orientation, 0, sizeof(double)*width*height);
double ori = 0.0;
double dtmp = 0.0;
double ori_normalized = 0.0;
for (int j=0; j<height; j++) {
for (int i=0; i<width; i++) {
gx = (Gx[i+j*width]);
gy = (Gy[i+j*width]);
ori = atan2(Gy[i+j*width], Gx[i+j*width])/PI*(180.0); //[-pi,+pi]
if (gx >= 0 && gy >= 0) { //[Qudrant 1]:[0,90] to be [0,63]
if (ori < 0) {
printf("[Err1QUA]ori:%.1f\n", ori);
return false;
}
ori_normalized = (ori)*255.0/360.0;
if (ori != 0.0 && dtmp != ori) {
printf("[Qudrant 1]orientation: %.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
dtmp = ori;
}
}
else if (gx >= 0 && gy < 0) { //[Qudrant 4]:[270,360) equal to [-90, 0) to be [191,255]
if (ori > 0) {
printf("[Err4QUA]orientation:%.1f\n", ori);
return false;
}
ori_normalized = (360.0+ori)*255.0/360.0;
if (ori != 0.0 && dtmp != ori) {
printf("[Qudrant 4]orientation:%.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
dtmp = ori;
}
}
else if (gx < 0 && gy >= 0) { //[Qudrant 2]:(90,180] to be [64,127]
if (ori < 0) {
printf("[Err2QUA]orientation:%.1f\n", ori);
return false;
}
ori_normalized = (ori)*255.0/360.0;
if (ori != 0.0 && dtmp != ori) {
printf("[Qudrant 2]orientation: %.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
dtmp = ori;
}
}
else if (gx < 0 && gy < 0) { //[Qudrant 3]:(180,270) equal to (-180, -90) to be [128,190]
if (ori > 0) {
printf("[Err3QUA]orientation:%.1f\n", ori);
return false;
}
ori_normalized = (360.0+ori)*255.0/360.0;
if (ori != 0.0 && dtmp != ori) {
printf("[Qudrant 3]orientation:%.1f to be %.1f(%d)\n", ori, ori_normalized, (uint8_t)ori_normalized);
dtmp = ori;
}
}
else {
printf("[EXCEPTION]orientation:%.1f\n", ori);
return false;
}
orientation[i+j*width] = ori_normalized;
}
}
return true;
}
Here is how I do the cross-correlation:
bool correlation(double*& dst, double* src, double* kernel, int width, int height, int window) {
if (src == NULL || kernel == NULL)
return false;
if (width <= 0 || height <= 0 || width < window || height < window )
return false;
dst = new double[width*height]; // element count, not byte count
if (dst == NULL)
return false;
memset(dst, 0, sizeof(double)*width*height);
int ii = 0;
int jj = 0;
int nn = 0;
int mm = 0;
double max = std::numeric_limits<double>::lowest(); // lowest(), not min(): min() is the smallest positive double
double min = std::numeric_limits<double>::max();
double range = std::numeric_limits<double>::max();
for (int j=0; j<height; j++) {
for (int i=0; i<width; i++) {
for (int m=0; m<window; m++) {
for (int n=0; n<window; n++) {
ii = i+(n-window/2);
jj = j+(m-window/2);
nn = n;
mm = m;
if (ii >=0 && ii<width && jj>=0 && jj<height) {
dst[i+j*width] += src[ii+jj*width]*kernel[nn+mm*window];
}
else {
dst[i+j*width] += 0;
}
}
}
if (dst[i+j*width] > max)
max = dst[i+j*width];
else if (dst[i+j*width] < min)
min = dst[i+j*width];
}
}
//normalize double matrix to be an uint8_t matrix
range = max - min;
double norm = 0.0;
printf("correlated matrix max:%.1f, min:%.1f, range:%.1f\n", max, min, range);
for (int j=0; j<height; j++) {
for (int i=0; i<width; i++) {
norm = dst[i+j*width];
norm = 255.0*norm/range;
dst[i+j*width] = norm;
}
}
return true;
}
For testing I use an image of a hollow rectangle; you can download it from my sample.
The orientation of the gradient along the hollow rectangle in my sample image moves from 0 to 360 degrees clockwise (Quadrant 1 to 2 to 3 to 4).
Here is my printout, which traces the orientation:
[Qudrant 1]orientation: 45.0 to be 31.9(31)
[Qudrant 1]orientation: 90.0 to be 63.8(63)
[Qudrant 2]orientation: 135.0 to be 95.6(95)
[Qudrant 2]orientation: 180.0 to be 127.5(127)
[Qudrant 3]orientation:-135.0 to be 159.4(159)
[Qudrant 3]orientation:-116.6 to be 172.4(172)
[Qudrant 4]orientation:-90.0 to be 191.2(191)
[Qudrant 4]orientation:-63.4 to be 210.1(210)
[Qudrant 4]orientation:-45.0 to be 223.1(223)
You can see more source code about digital image processing on my GitHub :)

Related

Arrow segmentation in an image

I need a way to segment each arrow alone. I tried OpenCV findContours, but it either breaks an arrow up or merges it with the surrounding shapes, since the arrows share the shapes' boundaries. I tried OpenCV connected components, but in a graph like this the arrows connect almost everything into one component. I am also having trouble because the boundaries have almost the same color as the arrows, and in these kinds of images each arrow can contain different colors. Any opinions about this problem?
This is a sample diagram. I have to deal with harder diagrams like this.
OK, let's work with the new picture.
1. Binarize the arrows (and shapes):
cv::Mat imgCl = cv::imread("62uoU.jpg", cv::IMREAD_COLOR);
cv::Mat img;
cv::cvtColor(imgCl, img, cv::COLOR_BGR2GRAY);
cv::Mat mask1;
cv::threshold(img, mask1, 30, 255, cv::THRESH_BINARY_INV);
cv::Mat mask2;
cv::threshold(img, mask2, 120, 255, cv::THRESH_BINARY_INV);
cv::Mat diff;
cv::absdiff(mask1, mask2, diff);
cv::imshow("diff1", diff);
Result 1:
2. Remove rectangle shapes:
cv::Rect objRect(0, 0, diff.cols, diff.rows);
cv::Size minSize(objRect.width / 100, objRect.height / 100);
cv::Mat bin = cv::Mat(diff, objRect).clone();
for (;;)
{
cv::Rect cutRect;
if (!PosRefinement(bin, cutRect, 0.9f, minSize))
{
break;
}
cv::rectangle(bin, cutRect, cv::Scalar(0, 0, 0), cv::FILLED);
cv::rectangle(diff, cutRect, cv::Scalar(0, 0, 0), cv::FILLED);
objRect.x += cutRect.x;
objRect.y += cutRect.y;
objRect.width = cutRect.width;
objRect.height = cutRect.height;
}
cv::imshow("diff", diff);
Result 2:
3. Find lines:
std::vector<cv::Vec4i> linesP;
cv::HoughLinesP(diff, linesP, 1, CV_PI / 180, 20, 10, 5);
for (size_t i = 0; i < linesP.size(); i++)
{
cv::Vec4i l = linesP[i];
cv::line(imgCl, cv::Point(l[0], l[1]), cv::Point(l[2], l[3]), cv::Scalar(0, 0, 255), 3, cv::LINE_AA);
}
cv::imshow("img", imgCl);
Result 3:
The black arrows have been found. This solution can still be improved: find and delete text areas from the image (tesseract or cv::text::ERFilter), and add a little morphology so that the arrow tips are also covered by the Hough lines.
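For the morphology part, a hedged sketch (my own addition, reusing the diff image from step 1, applied before the Hough step):
// Sketch: close small gaps so HoughLinesP also picks up the thin arrow tips
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3, 3));
cv::morphologyEx(diff, diff, cv::MORPH_CLOSE, kernel);
// ...then run cv::HoughLinesP on diff as shown above.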
P.S. Utility function:
bool PosRefinement(
cv::Mat bin,
cv::Rect& cutRect,
double kThreshold,
cv::Size minSize
)
{
const double areaThreshold = 100;
const int radius = 5;
const int maxIters = 100;
std::vector<std::vector<cv::Point>> contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(bin, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE, cv::Point());
size_t bestCont = contours.size();
double maxArea = 0;
for (size_t i = 0; i < contours.size(); i++)
{
double area = cv::contourArea(contours[i]);
if (area > maxArea)
{
maxArea = area;
bestCont = i;
}
}
if (maxArea < areaThreshold)
{
return false;
}
cv::Moments m = cv::moments(contours[bestCont]);
cv::Point mc(cvRound(m.m10 / m.m00), cvRound(m.m01 / m.m00));
cv::Rect currRect(mc.x - radius / 2, mc.y - radius / 2, radius, radius);
auto Clamp = [](int v, int hi) -> bool
{
if (v < 0)
{
v = 0;
return true;
}
else if (hi && v > hi - 1)
{
v = hi - 1;
return true;
}
return false;
};
auto RectClamp = [&](cv::Rect& r, int w, int h) -> bool
{
return Clamp(r.x, w) || Clamp(r.x + r.width, w) || Clamp(r.y, h) || Clamp(r.y + r.height, h);
};
int stepL = radius / 2;
int stepR = radius / 2;
int stepT = radius / 2;
int stepB = radius / 2;
double k = 0;
struct State
{
double k = 0;
int stepL = 0;
int stepR = 0;
int stepT = 0;
int stepB = 0;
cv::Rect currRect;
State() = default;
State(double k_, int stepL_, int stepR_, int stepT_, int stepB_, cv::Rect currRect_)
:
k(k_),
stepL(stepL_),
stepR(stepR_),
stepT(stepT_),
stepB(stepB_),
currRect(currRect_)
{
}
bool operator==(const State& st) const
{
return (st.k == k) && (st.stepL == stepL) && (st.stepR == stepR) && (st.stepT == stepT) && (st.stepB == stepB) && (st.currRect == currRect);
}
};
const size_t statesCount = 2;
State prevStates[statesCount];
size_t stateInd = 0;
for (int it = 0; it < maxIters; ++it)
{
cv::Rect rleft(currRect.x - stepL, currRect.y, currRect.width + stepL, currRect.height);
cv::Rect rright(currRect.x, currRect.y, currRect.width + stepR, currRect.height);
cv::Rect rtop(currRect.x, currRect.y - stepT, currRect.width, currRect.height + stepT);
cv::Rect rbottom(currRect.x, currRect.y, currRect.width, currRect.height + stepB);
double kleft = 0;
double kright = 0;
double ktop = 0;
double kbottom = 0;
if (!RectClamp(rleft, bin.cols, bin.rows))
{
cv::Rect rstep(currRect.x - stepL, currRect.y, stepL, currRect.height);
if (cv::sum(bin(rstep))[0] / (255.0 * rstep.area()) > kThreshold / 2)
{
kleft = cv::sum(bin(rleft))[0] / (255.0 * rleft.area());
}
}
if (!RectClamp(rright, bin.cols, bin.rows))
{
cv::Rect rstep(currRect.x + currRect.width, currRect.y, stepR, currRect.height);
if (cv::sum(bin(rstep))[0] / (255.0 * rstep.area()) > kThreshold / 2)
{
kright = cv::sum(bin(rright))[0] / (255.0 * rright.area());
}
}
if (!RectClamp(rtop, bin.cols, bin.rows))
{
cv::Rect rstep(currRect.x, currRect.y - stepT, currRect.width, stepT);
if (cv::sum(bin(rstep))[0] / (255.0 * rstep.area()) > kThreshold / 2)
{
ktop = cv::sum(bin(rtop))[0] / (255.0 * rtop.area());
}
}
if (!RectClamp(rbottom, bin.cols, bin.rows))
{
cv::Rect rstep(currRect.x, currRect.y + currRect.height, currRect.width, stepB);
if (cv::sum(bin(rstep))[0] / (255.0 * rstep.area()) > kThreshold / 2)
{
kbottom = cv::sum(bin(rbottom))[0] / (255.0 * rbottom.area());
}
}
bool wasEnlargeX = false;
if (kleft > kThreshold)
{
currRect.x -= stepL;
currRect.width += stepL;
wasEnlargeX = true;
if (kleft > k)
{
++stepL;
}
}
else
{
if (stepL > 1)
{
--stepL;
}
currRect.x += 1;
currRect.width -= 1;
}
if (kright > kThreshold)
{
currRect.width += stepR;
wasEnlargeX = true;
if (kright > k)
{
++stepR;
}
}
else
{
if (stepR > 1)
{
--stepR;
}
currRect.width -= 1;
}
bool wasEnlargeY = false;
if (ktop > kThreshold)
{
currRect.y -= stepT;
currRect.height += stepT;
wasEnlargeY = true;
if (ktop > k)
{
++stepT;
}
}
else
{
if (stepT > 1)
{
--stepT;
}
currRect.y += 1;
currRect.height -= 1;
}
if (kbottom > kThreshold)
{
currRect.height += stepB;
wasEnlargeY = true;
if (kbottom > k)
{
++stepB;
}
}
else
{
if (stepB > 1)
{
--stepB;
}
currRect.height -= 1;
}
k = cv::sum(bin(currRect))[0] / (255.0 * currRect.area());
State currState(k, stepL, stepR, stepT, stepB, currRect);
bool repState = false;
for (size_t i = 0; i < statesCount; ++i)
{
if (prevStates[i] == currState)
{
repState = true;
break;
}
}
if (repState)
{
break;
}
else
{
prevStates[stateInd] = currState;
stateInd = (stateInd + 1 < statesCount) ? (stateInd + 1) : 0;
}
if (k < kThreshold && (stepL + stepR + stepT + stepB == 4) && !wasEnlargeX && !wasEnlargeY)
{
break;
}
}
cutRect.x = std::max(0, currRect.x - 1);
cutRect.width = currRect.width + 2;
cutRect.y = std::max(0, currRect.y - 1);
cutRect.height = currRect.height + 2;
return (cutRect.width >= minSize.width) && (cutRect.height >= minSize.height);
}
For your example it might be simpler. The picture (PNG) has 4 channels, and the 4th channel is a transparency mask. You can work with the transparency channel alone and filter the arrows by their contour measurements:
cv::Mat img = cv::imread("voXFs.png", cv::IMREAD_UNCHANGED);
std::cout << "imsize = " << img.size() << ", chans = " << img.channels() << std::endl;
cv::imshow("img", img);
std::vector<cv::Mat> chans;
cv::split(img, chans);
cv::imshow("transp", chans.back());
cv::Mat mask;
cv::threshold(chans.back(), mask, 50, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
std::vector<std::vector<cv::Point> > contours;
cv::findContours(mask, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
cv::Mat draw;
cv::cvtColor(mask, draw, cv::COLOR_GRAY2BGR);
for (size_t i = 0; i < contours.size(); ++i)
{
double area = cv::contourArea(contours[i]);
double len = cv::arcLength(contours[i], false);
double k = len / area;
if (area > 10 && len > 60 && k > 2)
{
std::cout << "area = " << area << ", len = " << len << ", k = " << k << std::endl;
cv::drawContours(draw, contours, i, cv::Scalar(255, 0, 0), 1);
}
}
cv::imshow("mask", mask);
cv::imshow("draw", draw);
cv::waitKey(0);
But for a more robust result:
Find and delete text areas from the image (tesseract or cv::text::ERFilter).
Erode the mask, find all shapes by contours, draw them filled and dilate them. Then take the bitwise-and of the mask and the result.
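A minimal sketch of that erode/contour/dilate step (my own addition, assuming the mask computed above):
// Sketch: remove the solid shapes from the mask so only the arrows remain
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(5, 5));
cv::Mat eroded;
cv::erode(mask, eroded, kernel); // thin arrows disappear, solid shapes survive
std::vector<std::vector<cv::Point>> shapeContours;
cv::findContours(eroded, shapeContours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
cv::Mat shapes = cv::Mat::zeros(mask.size(), CV_8UC1);
cv::drawContours(shapes, shapeContours, -1, cv::Scalar(255), cv::FILLED);
cv::dilate(shapes, shapes, kernel); // grow the shapes back to roughly their original size
cv::Mat notShapes, arrowsOnly;
cv::bitwise_not(shapes, notShapes);
cv::bitwise_and(mask, notShapes, arrowsOnly); // keep what is in the mask but not a shape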
The end!

Simple Image Recognition in processing.org

I'm trying to write the simplest possible image comparison function.
The idea is to have a target image and a collection of n different images.
The goal is to pick the image which is most similar to the target image.
So far my method consists of computing the Euclidean HSB distance from pixel to pixel in a resized image, and I have been trying to write a PImage function that returns the winning image. I already wrote a float function that ranks the image list from winner to loser, but I would like to skip this step to make the process much more concise.
The issue is in the PImage difference(PImage) function; the program throws an error on the line:
float x1 = brightness(imageKey.pixels[i]);
The error is ArrayIndexOutOfBoundsException
Here's the entire code:
//CLICK ON S TO SAVE FRAMES TO FOLDER
int series = 50; //
PImage[] collection = new PImage[series];
PImage imageKey,imageKey2, imageKeyHUE, imageKeySUM, imageKeySAT; //target image alias with ready operations
int imageWidth = 800;
int leftAlign = 850 ;
void setup()
{
size(1200,600);
background(255);
frameRate(random(1,10.0));
for ( int i = 0; i< collection.length; i++ )
{
collection[i] = loadImage( "Image_"+ i + ".jpg" );
}
//_____________________________________________TARGET IMAGE AND NAME TEXT
textSize(10);
fill(0);
text("target image", leftAlign, 220);
textSize(15);
text("central london", leftAlign, 240);
text("comparison methods", leftAlign, 290);
//_____________________________________________________________________BUTTONS
imageKey = loadImage("Image_0.jpg");
imageKey.resize(240, 180);
image(imageKey, leftAlign,25);
imageKeySAT= loadImage("Image_0.jpg");
imageKeySAT.resize(60,60);
imageKeySAT = saturation(imageKeySAT);
image(imageKeySAT, leftAlign+140,300);
imageKeySUM = loadImage("Image_0.jpg");
imageKeySUM.resize(60,60);
imageKeySUM = sum(imageKeySUM);
image(imageKeySUM, leftAlign+70,300);
imageKeyHUE = loadImage("Image_0.jpg");
imageKeyHUE.resize(60,60);
imageKeyHUE = hue(imageKeyHUE);
image(imageKeyHUE, leftAlign,300);
textSize(20);
text("CLICK HERE TO", leftAlign, 430);
text("STOP AT WINNER", leftAlign, 450);
}
void draw()
{
//______________________________________________SHOW IMAGES ARRAY
image(collection[int(random(0,series))],0,0);
//______________________________________________HISTOGRAMS
histogramhue();
histogramsat();
histogrambright();
//______________________________________________SUM METHOD
//float Vector_Approach(PImage sumSatin){
//}
}
void keyPressed()
{
if(key=='s') saveFrame("images/image-######.jpg");
}
PImage difference(PImage satin)
{
colorMode(HSB);
satin.loadPixels();
imageKey.loadPixels();
PImage satout = createImage(satin.width, satin.height, RGB);
satout.loadPixels();
for(int i = imageWidth; i<satout.pixels.length-imageWidth; i++)
{
float x1 = brightness(imageKey.pixels[i]);
float b0 = brightness(satin.pixels[i]);
// float y1 = brightness(satin.pixels[i+1]);
float value = x1-b0;
satout.pixels[i] = color(0,0,value);
}
satout.updatePixels();
return satout;
}
void mouseReleased(){
//______________________________________________BUTTON OVER
for ( int i = 0; i< collection.length; i++ )
if (mouseX > leftAlign && mouseX < (leftAlign + 60) && mouseY > 300 && mouseY < 360){
collection[i] = loadImage( "Image_"+ i + ".jpg" );
collection[i] = hue(collection[i]); histogramhue();
noStroke(); fill(255); rect(leftAlign,360,200,40); fill(0);
textSize(10);text("comparison by hue", leftAlign, 380);
} else if (mouseX > (leftAlign + 70) && mouseX < (leftAlign + 130) && mouseY > 300 && mouseY < 360)
{
collection[i] = loadImage( "Image_"+ i + ".jpg" );
collection[i] = sum(collection[i]);
noStroke(); fill(255); rect(leftAlign,360,200,40); fill(0);
textSize(10);text("comparison by sum", leftAlign, 380);
}else if (mouseX > (leftAlign + 140) && mouseX < (leftAlign + 200) && mouseY > 300 && mouseY < 360)
{
collection[i] = loadImage( "Image_"+ i + ".jpg" );
collection[i] = saturation(collection[i]);
noStroke(); fill(255); rect(leftAlign,360,200,40); fill(0);
textSize(10);text("comparison by saturation", leftAlign, 380);
}else if (mouseX > leftAlign && mouseX < 1200 && mouseY > 340 && mouseY < 600)
{
collection[i] = loadImage( "Image_"+ i + ".jpg" );
collection[i] = difference(collection[i]);
noStroke(); fill(255); rect(leftAlign,360,200,40); fill(0);
textSize(10);text("WINNER IMAGE!!!!", leftAlign, 380);
}else{
collection[i] = loadImage( "Image_"+ i + ".jpg" );
noStroke(); fill(255); rect(leftAlign,360,200,40); fill(0);
}
}
/* HSB PImage Methods */
//HUE -------> /** CHOSEN METHOD**/
//SATURATION -------> /** CHOSEN METHOD**/
//SUM -------> /** CHOSEN METHOD**/
PImage hue(PImage satin)
{
colorMode(HSB);
satin.loadPixels();
PImage satout = createImage(satin.width, satin.height, HSB);
satout.loadPixels();
for (int j = 0; j < satout.pixels.length; j++)
{
satout.pixels[j] = color(hue(satin.pixels[j]),255,255);
}
satout.updatePixels();
return satout;
}
PImage saturation(PImage satin)
{
colorMode(HSB);
satin.loadPixels();
PImage satout = createImage(satin.width, satin.height, RGB);
satout.loadPixels();
for (int j = 0; j < satout.pixels.length; j++)
{
satout.pixels[j] = color(saturation(satin.pixels[j]));
}
satout.updatePixels();
//colorMode(RGB);
return satout;
}
PImage sum(PImage satin)
{
colorMode(HSB);
satin.loadPixels();
PImage satout = createImage(satin.width, satin.height, RGB);
satout.loadPixels();
for(int i = imageWidth; i<satout.pixels.length-imageWidth; i++)
{
float b0 = brightness(satin.pixels[i]);
float x1 = brightness(satin.pixels[i-1]);
float y1 = brightness(satin.pixels[i-imageWidth]);
float xdiff = b0-x1;
float ydiff = b0-y1;
float value = (510 + xdiff + ydiff)/3;
satout.pixels[i] = color(0,0,value);
}
satout.updatePixels();
return satout;
}
//REFERENCE HISTOGRAM TAKEN FROM A PROGRAMMING HANDBOOK FOR VISUAL DESIGNERS AND ARTISTS BY BEN FRY ET AL
void histogramhue(){
PImage img = loadImage("Image_0.jpg");
int[] hist = new int[600];
// Calculate the histogram
for (int i = 0; i < img.width; i++) {
for (int j = 0; j < img.height; j++) {
int hue = int(hue(get(i, j)));
hist[hue]++;
}
}
int histMax = max(hist);
stroke(255,250); strokeWeight(5);
// Draw half of the histogram (skip every second value)
for (int i = 0; i < img.width; i += 20) {
int which = int(map(i, 0, img.width, 0, 255));
int y = int(map(hist[which], 0, histMax, img.height, 0));
line(i, img.height, i, y);
}}
void histogramsat(){
PImage img = loadImage("Image_0.jpg");
int[] hist = new int[600];
for (int i = 0; i < img.width; i++) {
for (int j = 0; j < img.height; j++) {
int sat = int(saturation(get(i, j)));
hist[sat]++;
}
}
int histMax = max(hist);
stroke(255,150);strokeWeight(10);
for (int i = 0; i < img.width; i += 20) {
int which = int(map(i, 0, img.width, 0, 255));
int y = int(map(hist[which], 0, histMax, img.height, 0));
line(i, img.height, i, y);
}}
void histogrambright(){
PImage img = loadImage("Image_0.jpg");
int[] hist = new int[600];
for (int i = 0; i < img.width; i++) {
for (int j = 0; j < img.height; j++) {
int bright = int(brightness(get(i, j)));
hist[bright]++;
}
}
int histMax = max(hist);
stroke(255, 150);strokeWeight(20);
for (int i = 0; i < img.width; i += 20) {
int which = int(map(i, 0, img.width, 0, 255));
int y = int(map(hist[which], 0, histMax, img.height, 0));
line(i, img.height, i, y);
}}
In isolation your function does seem to work:
PImage imageKey,testImage;
int imageWidth = 800;
int imageHeight = 600;
void setup(){
size(1600,600);
//fake imageKey
imageKey = getNoise(imageWidth,imageHeight);
//fake test image
testImage = getNoise(imageWidth,imageHeight);
image(testImage,0,0);
image(difference(testImage),800,0);
}
PImage getNoise(int width,int height){
PImage out = createImage(width,height,RGB);
for(int i = 0 ; i < out.pixels.length; i++)
out.pixels[i] = color(random(255),random(255),random(255));
out.updatePixels();
return out;
}
PImage difference(PImage satin)
{
colorMode(HSB);
satin.loadPixels();
imageKey.loadPixels();
PImage satout = createImage(satin.width, satin.height, RGB);
satout.loadPixels();
for (int i = imageWidth; i<satout.pixels.length-imageWidth; i++)
{
float x1 = brightness(imageKey.pixels[i]);
float b0 = brightness(satin.pixels[i]);
// float y1 = brightness(satin.pixels[i+1]);
float value = x1-b0;
//println(i,x1,b0,x1-b0,value);
satout.pixels[i] = color(0, 0, value);
}
satout.updatePixels();
return satout;
}
I can't test your actual setup as I don't have access to your images, but the ArrayIndexOutOfBoundsException is probably because your i counter goes beyond the number of pixels in imageKey. You can test this by checking whether i < imageKey.pixels.length before accessing the array. My guess is the images aren't the same dimensions and therefore don't have the same number of pixels.
Other notes that are going slightly off-topic:
Your difference() function is tightly coupled to the imageKey and imageWidth variables. You might want to make your functions loosely coupled so they can be reused easily in other contexts. You could start by making these two variables extra parameters/arguments of the function.
You might also want to look at the Euclidean distance between colours (in a perceptual colour space such as CIE L*a*b*). Have a look at this answer. Even though it's an OpenFrameworks answer, it should be easy to adapt to Processing's color and PVector types.
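As a rough illustration of that idea, here is a minimal sketch (my own, in OpenCV/C++ rather than Processing, using the 8-bit Lab values produced by cv::cvtColor):
#include <opencv2/opencv.hpp>
#include <cmath>
// Euclidean distance between two BGR colours after converting them to Lab
double labDistance(cv::Vec3b bgr1, cv::Vec3b bgr2)
{
    cv::Mat a(1, 1, CV_8UC3, cv::Scalar(bgr1[0], bgr1[1], bgr1[2]));
    cv::Mat b(1, 1, CV_8UC3, cv::Scalar(bgr2[0], bgr2[1], bgr2[2]));
    cv::cvtColor(a, a, cv::COLOR_BGR2Lab);
    cv::cvtColor(b, b, cv::COLOR_BGR2Lab);
    cv::Vec3b l1 = a.at<cv::Vec3b>(0, 0), l2 = b.at<cv::Vec3b>(0, 0);
    double dL = double(l1[0]) - l2[0];
    double dA = double(l1[1]) - l2[1];
    double dB = double(l1[2]) - l2[2];
    return std::sqrt(dL*dL + dA*dA + dB*dB);
}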

How to do flipping without using the inbuilt function flip in OpenCV?

Can anyone help me with this problem: how to flip an image without using the built-in flip function, i.e. flip(src image, destination image, 1 or 0), in C++ using OpenCV? I am new to this software, so please help.
OpenCV's flip function uses internal flipHoriz or flipVert functions.
static void
flipHoriz( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size size, size_t esz )
{
int i, j, limit = (int)(((size.width + 1)/2)*esz);
AutoBuffer<int> _tab(size.width*esz);
int* tab = _tab;
for( i = 0; i < size.width; i++ )
for( size_t k = 0; k < esz; k++ )
tab[i*esz + k] = (int)((size.width - i - 1)*esz + k);
for( ; size.height--; src += sstep, dst += dstep )
{
for( i = 0; i < limit; i++ )
{
j = tab[i];
uchar t0 = src[i], t1 = src[j];
dst[i] = t1; dst[j] = t0;
}
}
}
static void
flipVert( const uchar* src0, size_t sstep, uchar* dst0, size_t dstep, Size size, size_t esz )
{
const uchar* src1 = src0 + (size.height - 1)*sstep;
uchar* dst1 = dst0 + (size.height - 1)*dstep;
size.width *= (int)esz;
for( int y = 0; y < (size.height + 1)/2; y++, src0 += sstep, src1 -= sstep,
dst0 += dstep, dst1 -= dstep )
{
int i = 0;
if( ((size_t)src0|(size_t)dst0|(size_t)src1|(size_t)dst1) % sizeof(int) == 0 )
{
for( ; i <= size.width - 16; i += 16 )
{
int t0 = ((int*)(src0 + i))[0];
int t1 = ((int*)(src1 + i))[0];
((int*)(dst0 + i))[0] = t1;
((int*)(dst1 + i))[0] = t0;
t0 = ((int*)(src0 + i))[1];
t1 = ((int*)(src1 + i))[1];
((int*)(dst0 + i))[1] = t1;
((int*)(dst1 + i))[1] = t0;
t0 = ((int*)(src0 + i))[2];
t1 = ((int*)(src1 + i))[2];
((int*)(dst0 + i))[2] = t1;
((int*)(dst1 + i))[2] = t0;
t0 = ((int*)(src0 + i))[3];
t1 = ((int*)(src1 + i))[3];
((int*)(dst0 + i))[3] = t1;
((int*)(dst1 + i))[3] = t0;
}
for( ; i <= size.width - 4; i += 4 )
{
int t0 = ((int*)(src0 + i))[0];
int t1 = ((int*)(src1 + i))[0];
((int*)(dst0 + i))[0] = t1;
((int*)(dst1 + i))[0] = t0;
}
}
for( ; i < size.width; i++ )
{
uchar t0 = src0[i];
uchar t1 = src1[i];
dst0[i] = t1;
dst1[i] = t0;
}
}
}
// you can use it with a small modification as below
void myflip( InputArray _src, OutputArray _dst, int flip_mode )
{
CV_Assert( _src.dims() <= 2 );
Size size = _src.size();
if (flip_mode < 0)
{
if (size.width == 1)
flip_mode = 0;
if (size.height == 1)
flip_mode = 1;
}
if ((size.width == 1 && flip_mode > 0) ||
(size.height == 1 && flip_mode == 0) ||
(size.height == 1 && size.width == 1 && flip_mode < 0))
{
return _src.copyTo(_dst);
}
Mat src = _src.getMat();
int type = src.type();
_dst.create( size, type );
Mat dst = _dst.getMat();
size_t esz = CV_ELEM_SIZE(type);
if( flip_mode <= 0 )
flipVert( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
else
flipHoriz( src.ptr(), src.step, dst.ptr(), dst.step, src.size(), esz );
if( flip_mode < 0 )
flipHoriz( dst.ptr(), dst.step, dst.ptr(), dst.step, dst.size(), esz );
}
Assuming you have a good reason not to use the OpenCV flip function, you can write your own custom one.
For this example, I'll use CV_8UC3 images. I'll point out at the end how to expand this to different formats.
Let's first see how to flip an image on the x axis, which corresponds to cv::flip(src, dst, 1). Given a src image, each dst pixel keeps the same y coordinate, while its x coordinate maps to src.cols - 1 - x. In practice:
void flip_lr(const Mat3b& src, Mat3b& dst)
{
Mat3b _dst(src.rows, src.cols);
for (int r = 0; r < _dst.rows; ++r) {
for (int c = 0; c < _dst.cols; ++c) {
_dst(r, c) = src(r, src.cols - 1 - c);
}
}
dst = _dst;
}
Then, to flip around the y axis (corresponding to cv::flip(src, dst, 0)), dst keeps the same x coordinate, and its y maps to src.rows - 1 - y. However, you can reuse the above-mentioned function: simply transpose the matrix, apply the flip on the x axis, and then transpose back. In practice:
dst = src.t();
flip_lr(dst, dst);
dst = dst.t();
Then, to flip both axes, corresponding to cv::flip(src, dst, -1), you simply combine the flips on the x and y axes:
flip_lr(src, dst);
dst = dst.t();
flip_lr(dst, dst);
dst = dst.t();
You can wrap this functionality in a custom flip function that takes the same parameters as cv::flip:
void custom_flip(const Mat3b& src, Mat3b& dst, int code)
{
if (code > 0)
{ // Flip x axis
flip_lr(src, dst);
}
else if (code == 0)
{
// Flip y axis
dst = src.t();
flip_lr(dst, dst);
dst = dst.t();
}
else // code < 0
{
// Flip x and y axis
flip_lr(src, dst);
dst = dst.t();
flip_lr(dst, dst);
dst = dst.t();
}
}
Note that you can adapt this to different formats simply by modifying the flip_lr function, taking care to call the appropriate version inside custom_flip, which will then accept Mat instead of Mat3b.
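For instance, a minimal sketch (not part of the original answer) of a type-generic flip_lr that works on any cv::Mat by copying whole pixels of elemSize() bytes:
#include <opencv2/opencv.hpp>
#include <cstring>
void flip_lr_any(const cv::Mat& src, cv::Mat& dst)
{
    cv::Mat _dst(src.rows, src.cols, src.type());
    size_t esz = src.elemSize(); // bytes per pixel, all channels included
    for (int r = 0; r < src.rows; ++r) {
        const uchar* srow = src.ptr<uchar>(r);
        uchar* drow = _dst.ptr<uchar>(r);
        for (int c = 0; c < src.cols; ++c) {
            // pixel (r, c) of dst comes from pixel (r, src.cols - 1 - c) of src
            std::memcpy(drow + c * esz, srow + (src.cols - 1 - c) * esz, esz);
        }
    }
    dst = _dst;
}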
Full code for reference:
void flip_lr(const Mat3b& src, Mat3b& dst)
{
Mat3b _dst(src.rows, src.cols);
for (int r = 0; r < _dst.rows; ++r) {
for (int c = 0; c < _dst.cols; ++c) {
_dst(r, c) = src(r, src.cols - 1 - c);
}
}
dst = _dst;
}
void custom_flip(const Mat3b& src, Mat3b& dst, int code)
{
if (code > 0)
{ // Flip x axis
flip_lr(src, dst);
}
else if (code == 0)
{
// Flip y axis
dst = src.t();
flip_lr(dst, dst);
dst = dst.t();
}
else // code < 0
{
// Flip x and y axis
flip_lr(src, dst);
dst = dst.t();
flip_lr(dst, dst);
dst = dst.t();
}
}
int main(void)
{
Mat3b img = imread("path_to_image");
Mat3b flipped;
flip(img, flipped, -1);
Mat3b custom;
custom_flip(img, custom, -1);
imshow("OpenCV flip", flipped);
imshow("Custom flip", custom);
waitKey();
return 0;
}

Cropping panorama image in OpenCV

I'm trying to find a simple algorithm to crop (remove the black areas) of a panorama image created with the openCV Stitcher module.
My idea is to calculate the innermost black points in the image, which will define the cropping area, as shown in the next image:
Expected cropped result:
I've tried the following two approaches, but they don't crop the image as expected:
First Approach:
void testCropA(cv::Mat& image)
{
cv::Mat gray;
cvtColor(image, gray, CV_BGR2GRAY);
Size size = gray.size();
int type = gray.type();
int left = 0, top = 0, right = size.width, bottom = size.height;
cv::Mat row_zeros = Mat::zeros(1, right, type);
cv::Mat col_zeros = Mat::zeros(bottom, 1, type);
while (countNonZero(gray.row(top) != row_zeros) == 0) { top++; }
while (countNonZero(gray.col(left) != col_zeros) == 0) { left++; }
while (countNonZero(gray.row(bottom-1) != row_zeros) == 0) { bottom--; }
while (countNonZero(gray.col(right-1) != col_zeros) == 0) { right--; }
cv::Rect cropRect(left, top, right - left, bottom - top);
image = image(cropRect);
}
Second Approach:
void testCropB(cv::Mat& image)
{
cv::Mat gray;
cvtColor(image, gray, CV_BGR2GRAY);
int minCol = gray.cols;
int minRow = gray.rows;
int maxCol = 0;
int maxRow = 0;
for (int i = 0; i < gray.rows - 3; i++)
{
for (int j = 0; j < gray.cols; j++)
{
if (gray.at<char>(i, j) != 0)
{
if (i < minRow) {minRow = i;}
if (j < minCol) {minCol = j;}
if (i > maxRow) {maxRow = i;}
if (j > maxCol) {maxCol = j;}
}
}
}
cv::Rect cropRect = Rect(minCol, minRow, maxCol - minCol, maxRow - minRow);
image = image(cropRect);
}
This is my current solution. Hope it helps others:
bool checkInteriorExterior(const cv::Mat &mask, const cv::Rect &croppingMask,
int &top, int &bottom, int &left, int &right)
{
// Return true if the rectangle is fine as it is
bool result = true;
cv::Mat sub = mask(croppingMask);
int x = 0;
int y = 0;
// Count how many exterior pixels there are, and choose for reduction the
// side where most exterior pixels occurred (that's the heuristic)
int top_row = 0;
int bottom_row = 0;
int left_column = 0;
int right_column = 0;
for (y = 0, x = 0; x < sub.cols; ++x)
{
// If there is an exterior part in the interior we have
// to move the top side of the rect a bit to the bottom
if (sub.at<char>(y, x) == 0)
{
result = false;
++top_row;
}
}
for (y = (sub.rows - 1), x = 0; x < sub.cols; ++x)
{
// If there is an exterior part in the interior we have
// to move the bottom side of the rect a bit to the top
if (sub.at<char>(y, x) == 0)
{
result = false;
++bottom_row;
}
}
for (y = 0, x = 0; y < sub.rows; ++y)
{
// If there is an exterior part in the interior
if (sub.at<char>(y, x) == 0)
{
result = false;
++left_column;
}
}
for (x = (sub.cols - 1), y = 0; y < sub.rows; ++y)
{
// If there is an exterior part in the interior
if (sub.at<char>(y, x) == 0)
{
result = false;
++right_column;
}
}
// The idea is to set `top = 1` if it's better to reduce
// the rect at the top than anywhere else.
if (top_row > bottom_row)
{
if (top_row > left_column)
{
if (top_row > right_column)
{
top = 1;
}
}
}
else if (bottom_row > left_column)
{
if (bottom_row > right_column)
{
bottom = 1;
}
}
if (left_column >= right_column)
{
if (left_column >= bottom_row)
{
if (left_column >= top_row)
{
left = 1;
}
}
}
else if (right_column >= top_row)
{
if (right_column >= bottom_row)
{
right = 1;
}
}
return result;
}
bool compareX(cv::Point a, cv::Point b)
{
return a.x < b.x;
}
bool compareY(cv::Point a, cv::Point b)
{
return a.y < b.y;
}
void crop(cv::Mat &source)
{
cv::Mat gray;
source.convertTo(source, CV_8U);
cvtColor(source, gray, cv::COLOR_RGB2GRAY);
// Extract all the black background (and some interior parts maybe)
cv::Mat mask = gray > 0;
// now extract the outer contour
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(mask, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE, cv::Point(0, 0));
cv::Mat contourImage = cv::Mat::zeros(source.size(), CV_8UC3);
// Find contour with max elements
int maxSize = 0;
int id = 0;
for (int i = 0; i < contours.size(); ++i)
{
if (contours.at((unsigned long)i).size() > maxSize)
{
maxSize = (int)contours.at((unsigned long)i).size();
id = i;
}
}
// Draw filled contour to obtain a mask with interior parts
cv::Mat contourMask = cv::Mat::zeros(source.size(), CV_8UC1);
drawContours(contourMask, contours, id, cv::Scalar(255), -1, 8, hierarchy, 0, cv::Point());
// Sort contour in x/y directions to easily find min/max and next
std::vector<cv::Point> cSortedX = contours.at((unsigned long)id);
std::sort(cSortedX.begin(), cSortedX.end(), compareX);
std::vector<cv::Point> cSortedY = contours.at((unsigned long)id);
std::sort(cSortedY.begin(), cSortedY.end(), compareY);
int minXId = 0;
int maxXId = (int)(cSortedX.size() - 1);
int minYId = 0;
int maxYId = (int)(cSortedY.size() - 1);
cv::Rect croppingMask;
while ((minXId < maxXId) && (minYId < maxYId))
{
cv::Point min(cSortedX[minXId].x, cSortedY[minYId].y);
cv::Point max(cSortedX[maxXId].x, cSortedY[maxYId].y);
croppingMask = cv::Rect(min.x, min.y, max.x - min.x, max.y - min.y);
// Out-codes: if one of them is set, the rectangle size has to be reduced at that border
int ocTop = 0;
int ocBottom = 0;
int ocLeft = 0;
int ocRight = 0;
bool finished = checkInteriorExterior(contourMask, croppingMask, ocTop, ocBottom, ocLeft, ocRight);
if (finished == true)
{
break;
}
// Reduce rectangle at border if necessary
if (ocLeft)
{ ++minXId; }
if (ocRight)
{ --maxXId; }
if (ocTop)
{ ++minYId; }
if (ocBottom)
{ --maxYId; }
}
// Crop image with created mask
source = source(croppingMask);
}
I never used the Stitcher class, but I think you may be able to get the estimated homography matrix for each pair of images. If you can obtain it easily, you can multiply it by the corners of the first original image, and do the same for the corners of the last one, to get their stitched coordinates. Then take the extreme left/right x-coordinates and top/bottom y-coordinates of each image. That gives you the coordinates of each stitched image, which is what you need for some cases of cropping.
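A hedged sketch of that idea (my own, untested against the Stitcher output): assuming H is the 3x3 homography that maps one original image into panorama coordinates and w x h is that image's size, project its four corners with cv::perspectiveTransform and take the extreme values:
#include <opencv2/opencv.hpp>
#include <algorithm>
// Bounding box of one input image after warping into panorama coordinates
cv::Rect2f warpedBounds(const cv::Mat& H, float w, float h)
{
    std::vector<cv::Point2f> corners = { {0, 0}, {w, 0}, {w, h}, {0, h} };
    std::vector<cv::Point2f> projected;
    cv::perspectiveTransform(corners, projected, H);
    float minX = projected[0].x, maxX = projected[0].x;
    float minY = projected[0].y, maxY = projected[0].y;
    for (const auto& p : projected) {
        minX = std::min(minX, p.x); maxX = std::max(maxX, p.x);
        minY = std::min(minY, p.y); maxY = std::max(maxY, p.y);
    }
    return cv::Rect2f(minX, minY, maxX - minX, maxY - minY);
}
// Collecting these projected corners for every input image gives the extreme
// x/y values mentioned above, from which a crop rectangle can be chosen.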

Wrong perspective image after taking a picture on an accelerometer-supported BlackBerry device

There is a perspective issue with the image when I read a picture taken with the camera. When the device direction is north, the picture looks like it needs to be rotated 270 degrees. When the direction is east, the picture needs to be rotated 180 degrees. But it's fine when the direction is west. I tried getMetaData().getKeyValue("orientation") on the EncodedImage to work out a rotation formula, but it returned an empty string. Please help me solve this problem.
Found solution here:
https://gist.github.com/3788313
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import javax.microedition.io.Connector;
import javax.microedition.io.file.FileConnection;
import net.rim.device.api.system.Bitmap;
import net.rim.device.api.system.EncodedImage;
public class ExifRotate {
/**
* Flip the image horizontally.
*/
public static final int FLIP_H = 1;
/**
* Flip the image vertically.
*/
public static final int FLIP_V = 2;
/**
* Flip the image horizontally and vertically.
*/
public static final int FLIP_HV = 3;
/**
* Rotate the image 90 degrees clockwise.
*/
public static final int FLIP_90CW = 4;
/**
* Rotate the image 90 degrees counter-clockwise.
*/
public static final int FLIP_90CCW = 5;
/**
* Rotate the image 180 degrees.
*/
public static final int FLIP_180 = 6;
private final static int read2bytes(InputStream in) throws IOException {
return in.read() << 8 | in.read();
}
private final static int readByte(InputStream in) throws IOException {
return in.read();
}
public static Bitmap readImageFromFile(String filename, int width, int height) throws IOException {
EncodedImage img = null;
byte[] data = null;
FileConnection file = null;
try {
file = (FileConnection) Connector.open(filename, Connector.READ);
int fileSize = (int) file.fileSize();
if (fileSize == 0) {
throw new IOException("File is empty");
}
data = new byte[fileSize];
InputStream input = file.openInputStream();
input.read(data);
input.close();
img = EncodedImage.createEncodedImage(data, 0, data.length);
int orientation = -1;
if ( filename.toLowerCase().endsWith(".jpg") || filename.toLowerCase().endsWith(".jpeg")) {
ByteArrayInputStream is = new ByteArrayInputStream( data );
orientation = getRotation(is);
}
if ( orientation == 2 ) {
return rotateBitmap(img.getBitmap(), ImageUtil.FLIP_H);
} else if ( orientation == 3 ) {
return rotateBitmap(img.getBitmap(), ImageUtil.FLIP_180);
} else if ( orientation == 4 ) {
return rotateBitmap(img.getBitmap(), ImageUtil.FLIP_V);
} else if ( orientation == 5 ) {
Bitmap tmp = rotateBitmap(img.getBitmap(), ImageUtil.FLIP_H);
tmp = rotateBitmap(tmp, ImageUtil.FLIP_90CCW);
return tmp;
} else if ( orientation == 6 ) {
return rotateBitmap(img.getBitmap(), ImageUtil.FLIP_90CW);
} else if ( orientation == 7 ) {
Bitmap tmp = rotateBitmap(img.getBitmap(), ImageUtil.FLIP_H);
tmp = rotateBitmap(tmp, ImageUtil.FLIP_90CW);
return tmp;
} else if ( orientation == 8 ) {
return rotateBitmap(img.getBitmap(), ImageUtil.FLIP_90CCW);
} else {
return img.getBitmap();
}
} finally {
if (file != null) {
try { file.close(); }
catch(Exception ex){}
}
}
}
public static int getRotation(InputStream in) throws IOException {
int [] exif_data = new int[100];
int n_flag = 0, set_flag = 0;
int is_motorola = 0;
/* Read File head, check for JPEG SOI + Exif APP1 */
for (int i = 0; i < 4; i++)
exif_data[i] = readByte(in);
if (exif_data[0] != 0xFF || exif_data[1] != 0xD8 || exif_data[2] != 0xFF || exif_data[3] != 0xE1)
return -2;
/* Get the marker parameter length count */
int length = read2bytes(in);
// exif_data = new int[length];
/* Length includes itself, so must be at least 2 */
/* Following Exif data length must be at least 6 */
if (length < 8)
return -1;
length -= 8;
/* Read Exif head, check for "Exif" */
for (int i = 0; i < 6; i++)
exif_data[i] = in.read();
if (exif_data[0] != 0x45 || exif_data[1] != 0x78 || exif_data[2] != 0x69 || exif_data[3] != 0x66 || exif_data[4] != 0 || exif_data[5] != 0)
return -1;
/* Read Exif body */
length = length > exif_data.length ? exif_data.length : length;
for (int i = 0; i < length; i++)
exif_data[i] = in.read();
if (length < 12)
return -1; /* Length of an IFD entry */
/* Discover byte order */
if (exif_data[0] == 0x49 && exif_data[1] == 0x49)
is_motorola = 0;
else if (exif_data[0] == 0x4D && exif_data[1] == 0x4D)
is_motorola = 1;
else
return -1;
/* Check Tag Mark */
if (is_motorola == 1) {
if (exif_data[2] != 0)
return -1;
if (exif_data[3] != 0x2A)
return -1;
} else {
if (exif_data[3] != 0)
return -1;
if (exif_data[2] != 0x2A)
return -1;
}
/* Get first IFD offset (offset to IFD0) */
int offset;
if (is_motorola == 1) {
if (exif_data[4] != 0)
return -1;
if (exif_data[5] != 0)
return -1;
offset = exif_data[6];
offset <<= 8;
offset += exif_data[7];
} else {
if (exif_data[7] != 0)
return -1;
if (exif_data[6] != 0)
return -1;
offset = exif_data[5];
offset <<= 8;
offset += exif_data[4];
}
if (offset > length - 2)
return -1; /* check end of data segment */
/* Get the number of directory entries contained in this IFD */
int number_of_tags;
if (is_motorola == 1) {
number_of_tags = exif_data[offset];
number_of_tags <<= 8;
number_of_tags += exif_data[offset + 1];
} else {
number_of_tags = exif_data[offset + 1];
number_of_tags <<= 8;
number_of_tags += exif_data[offset];
}
if (number_of_tags == 0)
return -1;
offset += 2;
/* Search for Orientation Tag in IFD0 */
for (;;) {
if (offset > length - 12)
return -1; /* check end of data segment */
/* Get Tag number */
int tagnum;
if (is_motorola == 1) {
tagnum = exif_data[offset];
tagnum <<= 8;
tagnum += exif_data[offset + 1];
} else {
tagnum = exif_data[offset + 1];
tagnum <<= 8;
tagnum += exif_data[offset];
}
if (tagnum == 0x0112)
break; /* found Orientation Tag */
if (--number_of_tags == 0)
return -1;
offset += 12;
}
/*
* if (set_flag==1) { Set the Orientation value if (is_motorola==1) {
* exif_data[offset+2] = 0; Format = unsigned short (2 octets)
* exif_data[offset+3] = 3; exif_data[offset+4] = 0; Number Of
* Components = 1 exif_data[offset+5] = 0; exif_data[offset+6] = 0;
* exif_data[offset+7] = 1; exif_data[offset+8] = 0; exif_data[offset+9]
* = set_flag; exif_data[offset+10] = 0; exif_data[offset+11] = 0; }
* else { exif_data[offset+2] = 3; Format = unsigned short (2 octets)
* exif_data[offset+3] = 0; exif_data[offset+4] = 1; Number Of
* Components = 1 exif_data[offset+5] = 0; exif_data[offset+6] = 0;
* exif_data[offset+7] = 0; exif_data[offset+8] = set_flag;
* exif_data[offset+9] = 0; exif_data[offset+10] = 0;
* exif_data[offset+11] = 0; } }
*/
// else {
/* Get the Orientation value */
if (is_motorola == 1) {
if (exif_data[offset + 8] != 0)
return -1;
set_flag = exif_data[offset + 9];
} else {
if (exif_data[offset + 9] != 0)
return -1;
set_flag = exif_data[offset + 8];
}
if (set_flag > 8)
return -1;
// }
/* Write out Orientation value */
if (n_flag == 1)
System.out.println("set_flag " + set_flag);
else
System.out.println("set_flag " + set_flag);
return set_flag;
}
public static Bitmap rotateBitmap(Bitmap src, int operation) {
int width = src.getWidth();
int height = src.getHeight();
int[] inPixels = new int[width*height];
src.getARGB(inPixels, 0, width, 0, 0, width, height);
int x = 0, y = 0;
int w = width;
int h = height;
int newX = 0;
int newY = 0;
int newW = w;
int newH = h;
switch (operation) {
case FLIP_H:
newX = width - (x + w);
break;
case FLIP_V:
newY = height - (y + h);
break;
case FLIP_HV:
newW = h;
newH = w;
newX = y;
newY = x;
break;
case FLIP_90CW:
newW = h;
newH = w;
newX = height - (y + h);
newY = x;
break;
case FLIP_90CCW:
newW = h;
newH = w;
newX = y;
newY = width - (x + w);
break;
case FLIP_180:
newX = width - (x + w);
newY = height - (y + h);
break;
}
int[] newPixels = new int[newW * newH];
int index, newRow, newCol, newIndex;
if ( operation == FLIP_H ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = row;
newCol = w - col - 1;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_V ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = h - row - 1;
newCol = col;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_HV ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = col;
newCol = row;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_90CW ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = col;
newCol = h - row - 1;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_90CCW ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = w - col - 1;
newCol = row;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_180 ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = h - row - 1;
newCol = w - col - 1;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
}
Bitmap dst = new Bitmap( newW, newH );
dst.setARGB(newPixels, 0, newW, 0, 0, newW, newH);
return dst;
}
}
