I get this error when I try to compile my code for CS50 week 4 filter-less - clang

I am doing Week 4 Filter-less for CS50, and I get an error when I try to compile my code.
When I compile, I see this error:
/usr/bin/ld: /lib/x86_64-linux-gnu/Scrt1.o: in function `_start': (.text+0x1b): undefined reference to `main'
clang: error: linker command failed with exit code 1 (use -v to see invocation)
make: *** [<builtin>: helpers] Error 1
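For context, this particular linker message means that no main() function was part of the link: the make target here builds helpers.c on its own, and helpers.c only defines the four filter functions. A minimal sketch of a driver that would satisfy the linker (a hypothetical stand-in for CS50's filter.c, which normally supplies main() and is linked together with helpers.c, e.g. via make filter; it assumes helpers.h defines RGBTRIPLE as in the CS50 distribution) is:

/* Hypothetical driver, standing in for CS50's filter.c: it provides main()
 * and calls into helpers.c, so linking both files together, roughly
 *     clang -o filter driver.c helpers.c -lm
 * resolves the "undefined reference to `main'" error. Building helpers.c
 * alone (e.g. `make helpers`) links no main() and fails as shown above. */
#include <string.h>
#include "helpers.h"

int main(void)
{
    RGBTRIPLE image[2][2];          /* tiny dummy image */
    memset(image, 0, sizeof image); /* zero every pixel */
    grayscale(2, 2, image);         /* exercise one of the helpers */
    return 0;
}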
This is my code:
#include "helpers.h"
#include <math.h>
// Convert image to grayscale
void grayscale(int height, int width, RGBTRIPLE image[height][width])
{
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
float r = image[i][j].rgbtRed;
float g = image[i][j].rgbtGreen;
float b = image[i][j].rgbtBlue;
float grey = round((b + g + r)/3.0);
image[i][j].rgbtRed = image[i][j].rgbtGreen = image[i][j].rgbtBlue = grey;
}
}
return;
}
// Convert image to sepia
void sepia(int height, int width, RGBTRIPLE image[height][width])
{
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width / 2; j++)
{
int w = width - (j + 1);
RGBTRIPLE tmp = image[i][j];
image[i][j] = image[i][w];
image[i][w] = tmp;
}
}
return;
}
// Reflect image horizontally
void reflect(int height, int width, RGBTRIPLE image[height][width])
{
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
int b = image[i][j].rgbtBlue;
int g = image[i][j].rgbtGreen;
int r = image[i][j].rgbtRed;
int sepiaRed = round((0.393 * r) + (0.769 * g) + (0.189 * b));
int sepiaGreen = round((0.349 * r) + (0.686 * g) + (0.168 * b));
int sepiaBlue = round((0.272 * r) + (0.534 * g) + (0.131 * b));
if (sepiaRed > 255)
{
sepiaRed = 255;
}
if (sepiaGreen > 255)
{
sepiaGreen = 255;
}
if (sepiaBlue > 255)
{
sepiaBlue = 255;
}
image[i][j].rgbtBlue = sepiaBlue;
image[i][j].rgbtGreen = sepiaGreen;
image[i][j].rgbtRed = sepiaRed;
}
}
return;
}
// Blur image
void blur(int height, int width, RGBTRIPLE image[height][width])
{
RGBTRIPLE tmp[height][width];
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
tmp[i][j] = image[i][j];
}
}
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
int totalRed = 0;
int totalGreen = 0;
int totalBlue = 0;
float counter = 0.00;
for (int x = -1; x < 2; x++)
{
for (int y = -1; y < 2; y++)
{
int X = i + x;
int Y = j + y;
if (X < 0 || Y < 0 || X > (height - 1) || Y > (width - 1))
{
continue;
}
totalRed += image[X][Y].rgbtRed;
totalGreen += image[X][Y].rgbtGreen;
totalBlue += image[X][Y].rgbtBlue;
counter++;
}
tmp[i][j].rgbtRed = round(totalRed / counter);
tmp[i][j].rgbtGreen = round(totalGreen / counter);
tmp[i][j].rgbtBlue = round(totalBlue / counter);
}
}
}
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
image[i][j].rgbtRed = tmp[i][j].rgbtRed;
image[i][j].rgbtGreen = tmp[i][j].rgbtGreen;
image[i][j].rgbtBlue = tmp[i][j].rgbtBlue;
}
}
return;
}

Related

Why am I getting a wrong answer for this SPOJ question?

I still don't know whether I am allowed to discuss competitive programming doubts here; please let me know.
Can someone help me with this SPOJ question? I am getting a wrong answer:
https://www.spoj.com/problems/SPIKES/
I have tried all the test cases I could think of, and the code always gives the correct output.
Before posting it here, I also asked on the SPOJ forum, but it's been two days and no one has replied.
#include<bits/stdc++.h>
using namespace std;
// # = 35
// @ = 64
// . = 46
// s = 115
// Using Dijkstra to find the path with minimum no. of Spikes
int n,m,j;
const int N = 100;
const int INF = 9;
vector<int> g[N];
int vis[N][N];
pair<int,int> dist[N][N];
vector<pair<int,int>> movements = {
{0,1},{0,-1},{1,0},{-1,0}
};
bool isvalid(int i, int j){
return i>=0 && j>=0 && i<n && j<m;
}
void bfs(int sourcei, int sourcej){
set<pair<int,pair<int,int>>> q;
q.insert({0,{sourcei,sourcej}});
dist[sourcei][sourcej] = {0,INF};
while(q.size()>0){
auto curr_v = *q.begin();
int curr_i = (curr_v.second).first;
int curr_j = (curr_v.second).second;
int spikes = curr_v.first;
q.erase(q.begin());
if(vis[curr_i][curr_j]) continue;
vis[curr_i][curr_j] = 1;
for(auto m : movements){
int child_i = curr_i + m.first;
int child_j = curr_j + m.second;
int spikec = spikes;
if(!isvalid(child_i,child_j)) continue;
if(g[child_i][child_j] == 115) spikec = spikes+1;
if(vis[child_i][child_j]) continue;
if(g[child_i][child_j]==35) continue;
if(dist[child_i][child_j].second > spikec){
dist[child_i][child_j] = {(dist[curr_i][curr_j].first +1),spikec};
q.insert({spikec , {child_i,child_j}});
}
}
}
}
int main(){
cin>>n>>m>>j;
int start_j,start_i;
int end_j,end_i;
for (int i = 0; i < N; ++i){
for (int j = 0; j < N; ++j){
dist[i][j].second = INF;
dist[i][j].first = 0;
}
}
for (int i = 0; i < n; ++i){
for (int j = 0; j < m; ++j){
char x; cin>>x;
g[i].push_back((int)x);
if(x=='#') {
start_i = i;
start_j = j;
}
if(x=='x'){
end_i = i;
end_j = j;
}
}
}
bfs(start_i,start_j);
// for (int i = 0; i < n; ++i){
// for (int j = 0; j < m; ++j){
// cout<<dist[i][j].first<<","<<dist[i][j].second<<" ";
// }cout<<endl;
// }
if(dist[end_i][end_j].second <= (int)j/2) cout<<"SUCCESS"<<endl;
else cout<<"IMPOSSIBLE"<<endl;
return 0;
}
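For reference only (not a verdict on the submission above): "path with the minimum number of spikes" is commonly modelled as Dijkstra over grid cells, with the number of 's' cells stepped on as the distance. A small standalone sketch under that framing, using the same character conventions ('#' wall, 's' spike, '.' free) and assuming the start and target coordinates are already known:

// Standalone sketch of Dijkstra on a character grid, minimising the number
// of 's' cells stepped on; the grid layout and coordinates here are
// assumptions for illustration, not taken from the submission above.
#include <bits/stdc++.h>
using namespace std;

int minSpikes(const vector<string>& grid, int si, int sj, int ti, int tj) {
    int n = grid.size(), m = grid[0].size();
    vector<vector<int>> dist(n, vector<int>(m, INT_MAX));
    // min-heap ordered by spike count: (spikes, row, col)
    priority_queue<tuple<int,int,int>, vector<tuple<int,int,int>>, greater<>> pq;
    dist[si][sj] = 0;
    pq.push({0, si, sj});
    int di[] = {0, 0, 1, -1}, dj[] = {1, -1, 0, 0};
    while (!pq.empty()) {
        auto [d, i, j] = pq.top(); pq.pop();
        if (d > dist[i][j]) continue;               // stale queue entry
        for (int k = 0; k < 4; k++) {
            int ni = i + di[k], nj = j + dj[k];
            if (ni < 0 || nj < 0 || ni >= n || nj >= m) continue;
            if (grid[ni][nj] == '#') continue;      // wall
            int nd = d + (grid[ni][nj] == 's');     // stepping on a spike costs 1
            if (nd < dist[ni][nj]) {
                dist[ni][nj] = nd;
                pq.push({nd, ni, nj});
            }
        }
    }
    return dist[ti][tj];                            // INT_MAX if unreachable
}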

OpenCV Error: Assertion failed, mat.hpp line 548

First, the following code gave me an error:
int KK = 10;
int colors[KK];
Then I declared KK as const int instead of int, and that fixed the error.
I'm using the OpenCV library, but I'm getting an error somewhere. When i and j are 713 and 48 there is no error, but in the next cycle, when i and j are 713 and 49, I get this error: (OpenCV Error: Assertion failed (dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] && (unsigned)(i1*DataType<_Tp>::channels) < (unsigned)(size.p[1]*channels()) && ((((sizeof(size_t)<<28)|0x8442211) >> ((DataType<_Tp>::depth) & ((1 << 3) - 1))*4) & 15) == elemSize1()) in cv::Mat::at, file c:\opencv\build\include\opencv2\core\mat.hpp, line 548)
The error occurs on this line:
clustered.at<float>(i,j) = (float)(colors[bestLabels.at<int>(1,z)]);
I share the detailed code below
int KK = 10;
int colors[KK];
Mat p;
Mat bestLabels, centers;
vector<Mat> bgr, bgrBN;
split(frame_color, bgr);
Mat mask, clustered;
Mat clusteredAll = Mat::zeros(frame_color.rows, frame_color.cols, CV_32F);
for (int k = 0; k < contoursN.size(); k++)
{
mask = Mat::zeros(frame_color.rows, frame_color.cols, CV_8U);
drawContours(mask, contoursN, k, CV_RGB(255,255,255), CV_FILLED);
clustered = Mat::zeros(frame_color.rows, frame_color.cols, CV_32F);
int A = 0;
for (int i = 0; i < frame_color.rows; i++)
for (int j = 0; j < frame_color.cols; j++)
if (mask.at<uchar>(i,j) != 0)
A++;
if (A > 20)
{
p = Mat::zeros(A, G, CV_32F);
double moy = 0;
int z = 0;
for (int i = 0; i < frame_color.rows; i++)
{
for (int j = 0; j < frame_color.cols; j++)
{
if (mask.at<uchar>(i,j) != 0)
{
p.at<float>(z,0) = bgr[0].data[i*frame_color.cols+j] / 255.0;
p.at<float>(z,1) = bgr[1].data[i*frame_color.cols+j] / 255.0;
p.at<float>(z,2) = bgr[2].data[i*frame_color.cols+j] / 255.0;
z++;
moy = moy + frame.at<uchar>(i,j);
}
}
}
moy = moy/z;
double var = 0;
for (int i = 0; i < frame_color.rows; i++)
for (int j = 0; j < frame_color.cols; j++)
if (mask.at<uchar>(i,j) != 0)
var = var+(frame.at<uchar>(i,j) - moy)*(frame.at<uchar>(i,j) - moy);
var = var/(z*z);
int K = 1 + log(1+(A/A0)+(var/var0));
kmeans(p, K, bestLabels, TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0), 3, KMEANS_PP_CENTERS, centers);
for(int i=0; i<KK; i++)
colors[i] = 255/(i+1);
z = 0;
for (int i = 0; i < frame_color.rows; i++)
{
for (int j = 0; j < frame_color.cols; j++)
{
if (mask.at<uchar>(i,j) != 0)
{
clustered.at<float>(i,j) = (float)(colors[bestLabels.at<int>(1,z)]);
z++;
}
}
}
clusteredAll = clusteredAll + clustered;
clustered.convertTo(clustered, CV_8U);
}
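Not the original pipeline, but a self-contained reference for how kmeans lays out its labels: bestLabels comes back as a single-column CV_32S matrix with one row per sample, so the label for sample z lives at bestLabels.at<int>(z, 0) (row index varies, column stays 0). A minimal sketch, assuming a recent OpenCV build:

// Minimal self-contained sketch (not the code above): run kmeans on a small
// sample matrix and read the labels back one row per sample.
#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    cv::Mat samples(6, 3, CV_32F);      // 6 samples, 3 features each
    cv::randu(samples, 0.0f, 1.0f);     // fill with random values in [0,1)

    cv::Mat bestLabels, centers;
    cv::kmeans(samples, 2, bestLabels,
               cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::MAX_ITER, 10, 1.0),
               3, cv::KMEANS_PP_CENTERS, centers);

    // bestLabels is 6 x 1, CV_32S: one integer label per sample row,
    // so it is indexed as (z, 0), not (1, z).
    for (int z = 0; z < samples.rows; z++)
        std::cout << "sample " << z << " -> cluster "
                  << bestLabels.at<int>(z, 0) << std::endl;
    return 0;
}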

Boxing Detected Objects in OpenCV

I am currently implementing a connected components algorithm and the last step of the algorithm requires me to enclose the objects I found in a box. I have attempted to enclose an object in a box and this is the result:
As you can see, some of them seem to be enclosed in a box. Some of the box lines are not visible unless I stretch the window of the imshow function's output, and some of them have color when I expected a line with a shade of gray.
My question is: is the object really getting enclosed? I remember that when I ran similar code of mine on a different OS, the colored lines were not seen at all, but they are visible on my computer. Additionally, why are some of the lines a different color, given that I was expecting a shade of gray?
Mat src, src_gray;
Mat dst, detected_edges;
const char* window_name = "THRESHOLDED IMAGE";
/**
* @function connectedComponent
*/
static void connectedComponent(int, void*)
{
Mat test; //dummy
Mat sub;
int newObject = 0;
int zeroTest = 0, nonZero = 0;
int arr[5] = {0,0,0,0,0};
/// Reduce noise with a kernel 3x3
blur( src_gray,detected_edges, Size(3,3) ); //filtering out of noise
namedWindow("INITIAL", WINDOW_NORMAL);
imshow("INITIAL",detected_edges);
resizeWindow("INITIAL", 300, 300);
threshold(detected_edges, detected_edges, 0,255, THRESH_BINARY | THRESH_OTSU);
int** newSub = new int*[detected_edges.rows];
for(int i = 0; i < detected_edges.rows; i++)
newSub[i] = new int[detected_edges.cols];
for(int i = 0; i < detected_edges.rows; i++){
for(int j = 0; j < detected_edges.cols; j++){
newSub[i][j] = 0;
}
}
/*INITIAL MARKING LOOP*/
for(int i = 0; i < detected_edges.rows; i++){
for(int j = 0; j < detected_edges.cols; j++){
if(detected_edges.at<uchar>(i,j) == 0){
if(i-1 < 0 && j-1 < 0){
newObject = newObject + 1; //no values
newSub[i][j] = newObject;
}else if(i-1 >= 0 && j-1 < 0){
if(newSub[i-1][j] != 0){
newSub[i][j] = newSub[i-1][j]; //only up has value
}else{
newObject = newObject + 1; //no values
newSub[i][j] = newObject;
}
}else if(i-1 < 0 && j-1 >= 0){
if(newSub[i-1][j] != 0){
newSub[i][j] = newSub[i-1][j]; //only left has value
}else{
newObject = newObject + 1; //no values
newSub[i][j] = newObject;
}
}else{
if(newSub[i-1][j] == 0 && newSub[i][j-1] == 0){
newObject = newObject + 1; //no values
newSub[i][j] = newObject;
}else if(newSub[i-1][j] == newSub[i][j-1]){ //same value
newSub[i][j] = newSub[i-1][j];
}else if((newSub[i-1][j] != 0 && newSub[i][j-1] == 0)){
newSub[i][j] = newSub[i-1][j]; //only up has value
}else if(newSub[i-1][j] == 0 && newSub[i][j-1] != 0 ){
newSub[i][j] = newSub[i][j-1]; //only left has value
}else if(newSub[i-1][j] != newSub[i][j-1]){
newSub[i][j] = newSub[i-1][j]; //different values follow upper's value
}
}
}
}
}
int a = 1;
int maxRows = detected_edges.rows;
int maxCols = detected_edges.cols;
/*CONNECTING PIXELS RIGHT-BOTTOM*/
while(a < newObject){
int update = 0;
for(int i = 0; i < maxRows; i++){
for(int j = 0; j < maxCols; j++){
if(newSub[i][j] == a){
if(i+1 < maxRows && j+1 < maxCols){
if(newSub[i][j+1] > a){ //both points allowed
int value = newSub[i][j+1]; //get the value I need to replace
for(int h = 0; h < maxRows; h++){
for(int k = 0; k < maxCols; k++){
if(newSub[h][k] == value){ //replace all instances of that value
newSub[h][k] = a;
}
}
}
update = 1;
}
if(newSub[i+1][j] > a){ //both points allowed
int value = newSub[i+1][j]; //get the value I need to replace
for(int h = 0; h < maxRows; h++){
for(int k = 0; k < maxCols; k++){
if(newSub[h][k] == value){
newSub[h][k] = a; //replace all instances of that value
}
}
}
update = 1;
}
}else if(i+1 > maxRows && j+1 < maxCols){
if(newSub[i][j+1] > a){ //bottom is not allowed
int value = newSub[i][j+1]; //get the value I need to replace
for(int h = 0; h < maxRows; h++){
for(int k = 0; k < maxCols; k++){
if(newSub[h][k] == value){
newSub[h][k] = a; //replace all instances of that value
}
}
}
update = 1;
}
}else if(i+1 < maxRows && j+1 > maxCols){
if(newSub[i+1][j] > a){ //right is not allowed
int value = newSub[i+1][j]; //get the value I need to replace
for(int h = 0; h < maxRows; h++){
for(int k = 0; k < maxCols; k++){
if(newSub[h][k] == value){ //replace all instances of that value
newSub[h][k] = a;
}
}
}
update = 1;
}
}
}
}
}
a++;
}
/*CONNECTING PIXELS LEFT-TOP*/
a = newObject;
while(a > 0){
int update = 0;
for(int i = maxRows-1; i > 0; i--){
for(int j = maxCols-1; j > 0 ; j--){
if(newSub[i][j] == a){
if(i-1 >= 0 && j-1 >= 0){
if(newSub[i][j-1] > a){ //both points allowed
int value = newSub[i][j-1]; //get the value I need to replace
for(int h = 0; h < maxRows; h++){
for(int k = 0; k < maxCols; k++){
if(newSub[h][k] == value){
newSub[h][k] = a;
}
}
}
update = 1;
}
if(newSub[i-1][j] > a){
int value = newSub[i-1][j]; //get the value I need to replace
for(int h = 0; h < maxRows; h++){
for(int k = 0; k < maxCols; k++){
if(newSub[h][k] == value){ //replace all instances of that value
newSub[h][k] = a;
}
}
}
update = 1;
}
}else if(i-1 >= 0 && j-1 < 0){
if(newSub[i][j-1] > a){ //left is not allowed
int value = newSub[i][j-1]; //get the value I need to replace
for(int h = 0; h < maxRows; h++){
for(int k = 0; k < maxCols; k++){ //replace all instances of that value
if(newSub[h][k] == value){
newSub[h][k] = a;
}
}
}
update = 1;
}
}else if(i-1 < 0 && j-1 >= 0){
if(newSub[i-1][j] > a){ //top is not allowed
int value = newSub[i-1][j]; //get the value I need to replace
for(int h = 0; h < maxRows; h++){
for(int k = 0; k < maxCols; k++){ //replace all instances of that value
if(newSub[h][k] == value){
newSub[h][k] = a;
}
}
}
update = 1;
}
}
}
}
}
a--;
}
for(int i = 0; i < maxRows; i++){
for(int j = 0; j < maxCols; j++){
int check = 0;
if(newSub[i][j] != 0){
for(int k = 0; k < 5; k++){
if(newSub[i][j] == arr[k]){ //check if there is an instance of the value in the given array of values
check = 1;
break;
}
}
if(check == 0){
for(int r = 0; r < 5; r++){
if(arr[r] == 0){
arr[r] = newSub[i][j]; //if new value is found add to array
break;
}
}
}
}
}
}
/*
I HAVE AN ARRAY CONTAINING ALL VALUES
**/
src.copyTo( sub, detected_edges);
sub = Scalar::all(0);
/*SET AN INTENSITY FOR CORRESPONDING VALUES*/
int intensity = 50;
a = 0;
while(a < 5){
int update = 0;
for(int i = 0; i < maxRows; i++){
for(int j = 0; j < maxCols; j++){
if(newSub[i][j] == arr[a]){
sub.at<uchar>(i,j) = intensity;
}
}
}
a++;
intensity = intensity + 50;
}
a = 250;
/*GETTING MIN-MAX COORDINATES*/
while(a >= 50){
int setter = 0;
int minRow = 0;
int minCol = 0;
int maxRow = 0;
int maxCol = 0;
for(int i = 0; i < maxRows; i++){
for(int j = 0; j < maxCols; j++){
if(sub.at<uchar>(i,j) == a){
if(setter == 0){
minRow = i;
minCol = j;
maxRow = i;
maxCol = j;
setter = 1;
}else{
if(i <= minRow){
minRow = i;
}
else{
if(i > maxRow){
maxRow = i;
}
}
if(j <= minCol){
minCol = j;
}
else{
if(j > maxCol){
maxCol = j;
}
}
}
}
}
}
/*THIS IS WHERE I MAKE MY BOUNDING BOX*/
for(int i = minRow; i < maxRow; i++){
sub.at<uchar>(i,minCol) = 255; //set up the horizontal lines
sub.at<uchar>(i,maxCol) = 255;
}
for(int i = minCol; i < maxCol; i++){
sub.at<uchar>(minRow,i) = 255; //set up the vertical lines
sub.at<uchar>(maxRow,i) = 255;
}
a = a - 50;
}
dst = Scalar::all(0);
src.copyTo( dst, detected_edges);
imshow( window_name, dst );
namedWindow("FINAL", WINDOW_NORMAL);
imshow("FINAL",sub); //final output
resizeWindow("FINAL", 300, 300);
for(int i = 0; i < detected_edges.rows; i++)
delete[] newSub[i];
delete[] newSub;
}
/**
* @function main
*/
int main( int, char** argv )
{
/// Load an image
src = imread( argv[1] );
if( src.empty() )
{ return -1; }
/// Create a matrix of the same type and size as src (for dst)
dst.create( src.size(), src.type() );
/// Convert the image to grayscale
cvtColor( src, src_gray, COLOR_BGR2GRAY ); //grayscale for one channel for easy computation
/// Create a window
namedWindow( window_name, WINDOW_NORMAL );
resizeWindow(window_name, 300,300);
/// Show the image
connectedComponent(0, 0);
/// Wait until user exit program by pressing a key
waitKey(0);
return 0;
}
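As a point of comparison (this is not the asker's algorithm): when everything stays single-channel CV_8U, boxes drawn with a gray Scalar stay gray. A short sketch using OpenCV's built-in labelling plus cv::rectangle, assuming OpenCV 3 or later:

// Reference sketch, not the original algorithm: label a binary CV_8U image
// with connectedComponentsWithStats and draw one gray 1-pixel box per component.
#include <opencv2/imgproc.hpp>

void drawBoxes(const cv::Mat& binary, cv::Mat& out)
{
    cv::Mat labels, stats, centroids;
    int n = cv::connectedComponentsWithStats(binary, labels, stats, centroids);

    out = binary.clone();                    // stays single-channel, so 255 is a shade of gray
    for (int i = 1; i < n; i++)              // label 0 is the background
    {
        cv::Rect box(stats.at<int>(i, cv::CC_STAT_LEFT),
                     stats.at<int>(i, cv::CC_STAT_TOP),
                     stats.at<int>(i, cv::CC_STAT_WIDTH),
                     stats.at<int>(i, cv::CC_STAT_HEIGHT));
        cv::rectangle(out, box, cv::Scalar(255), 1);
    }
}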

Wrong perspective image after taking a picture on an accelerometer-supported BlackBerry device

There is a perspective issue when I read a picture taken with the camera. When the direction is north, the picture looks like it needs to be rotated 270 degrees. When the direction is east, the picture needs to be rotated 180 degrees. But it's fine when the direction is west. I tried getMetaData().getKeyValue("orientation") on the EncodedImage to produce a correct rotation formula, but it returned an empty string. Please help me figure out how to solve this problem.
I found the solution here:
https://gist.github.com/3788313
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import javax.microedition.io.Connector;
import javax.microedition.io.file.FileConnection;
import net.rim.device.api.system.Bitmap;
import net.rim.device.api.system.EncodedImage;
public class ExifRotate {
/**
* Flip the image horizontally.
*/
public static final int FLIP_H = 1;
/**
* Flip the image vertically.
*/
public static final int FLIP_V = 2;
/**
* Flip the image horizontally and vertically.
*/
public static final int FLIP_HV = 3;
/**
* Rotate the image 90 degrees clockwise.
*/
public static final int FLIP_90CW = 4;
/**
* Rotate the image 90 degrees counter-clockwise.
*/
public static final int FLIP_90CCW = 5;
/**
* Rotate the image 180 degrees.
*/
public static final int FLIP_180 = 6;
private final static int read2bytes(InputStream in) throws IOException {
return in.read() << 8 | in.read();
}
private final static int readByte(InputStream in) throws IOException {
return in.read();
}
public static Bitmap readImageFromFile(String filename, int width, int height) throws IOException {
EncodedImage img = null;
byte[] data = null;
FileConnection file = null;
try {
file = (FileConnection) Connector.open(filename, Connector.READ);
int fileSize = (int) file.fileSize();
if (fileSize == 0) {
throw new IOException("File is empty");
}
data = new byte[fileSize];
InputStream input = file.openInputStream();
input.read(data);
input.close();
img = EncodedImage.createEncodedImage(data, 0, data.length);
int orientation = -1;
if ( filename.toLowerCase().endsWith(".jpg") || filename.toLowerCase().endsWith(".jpeg")) {
ByteArrayInputStream is = new ByteArrayInputStream( data );
orientation = getRotation(is);
}
if ( orientation == 2 ) {
return rotateBitmap(img.getBitmap(), ImageUtil.FLIP_H);
} else if ( orientation == 3 ) {
return rotateBitmap(img.getBitmap(), ImageUtil.FLIP_180);
} else if ( orientation == 4 ) {
return rotateBitmap(img.getBitmap(), ImageUtil.FLIP_V);
} else if ( orientation == 5 ) {
Bitmap tmp = rotateBitmap(img.getBitmap(), ImageUtil.FLIP_H);
tmp = rotateBitmap(tmp, ImageUtil.FLIP_90CCW);
return tmp;
} else if ( orientation == 6 ) {
return rotateBitmap(img.getBitmap(), ImageUtil.FLIP_90CW);
} else if ( orientation == 7 ) {
Bitmap tmp = rotateBitmap(img.getBitmap(), ImageUtil.FLIP_H);
tmp = rotateBitmap(tmp, ImageUtil.FLIP_90CW);
return tmp;
} else if ( orientation == 8 ) {
return rotateBitmap(img.getBitmap(), ImageUtil.FLIP_90CCW);
} else {
return img.getBitmap();
}
} finally {
if (file != null) {
try { file.close(); }
catch(Exception ex){}
}
}
}
public static int getRotation(InputStream in) throws IOException {
int [] exif_data = new int[100];
int n_flag = 0, set_flag = 0;
int is_motorola = 0;
/* Read File head, check for JPEG SOI + Exif APP1 */
for (int i = 0; i < 4; i++)
exif_data[i] = readByte(in);
if (exif_data[0] != 0xFF || exif_data[1] != 0xD8 || exif_data[2] != 0xFF || exif_data[3] != 0xE1)
return -2;
/* Get the marker parameter length count */
int length = read2bytes(in);
// exif_data = new int[length];
/* Length includes itself, so must be at least 2 */
/* Following Exif data length must be at least 6 */
if (length < 8)
return -1;
length -= 8;
/* Read Exif head, check for "Exif" */
for (int i = 0; i < 6; i++)
exif_data[i] = in.read();
if (exif_data[0] != 0x45 || exif_data[1] != 0x78 || exif_data[2] != 0x69 || exif_data[3] != 0x66 || exif_data[4] != 0 || exif_data[5] != 0)
return -1;
/* Read Exif body */
length = length > exif_data.length ? exif_data.length : length;
for (int i = 0; i < length; i++)
exif_data[i] = in.read();
if (length < 12)
return -1; /* Length of an IFD entry */
/* Discover byte order */
if (exif_data[0] == 0x49 && exif_data[1] == 0x49)
is_motorola = 0;
else if (exif_data[0] == 0x4D && exif_data[1] == 0x4D)
is_motorola = 1;
else
return -1;
/* Check Tag Mark */
if (is_motorola == 1) {
if (exif_data[2] != 0)
return -1;
if (exif_data[3] != 0x2A)
return -1;
} else {
if (exif_data[3] != 0)
return -1;
if (exif_data[2] != 0x2A)
return -1;
}
/* Get first IFD offset (offset to IFD0) */
int offset;
if (is_motorola == 1) {
if (exif_data[4] != 0)
return -1;
if (exif_data[5] != 0)
return -1;
offset = exif_data[6];
offset <<= 8;
offset += exif_data[7];
} else {
if (exif_data[7] != 0)
return -1;
if (exif_data[6] != 0)
return -1;
offset = exif_data[5];
offset <<= 8;
offset += exif_data[4];
}
if (offset > length - 2)
return -1; /* check end of data segment */
/* Get the number of directory entries contained in this IFD */
int number_of_tags;
if (is_motorola == 1) {
number_of_tags = exif_data[offset];
number_of_tags <<= 8;
number_of_tags += exif_data[offset + 1];
} else {
number_of_tags = exif_data[offset + 1];
number_of_tags <<= 8;
number_of_tags += exif_data[offset];
}
if (number_of_tags == 0)
return -1;
offset += 2;
/* Search for Orientation Tag in IFD0 */
for (;;) {
if (offset > length - 12)
return -1; /* check end of data segment */
/* Get Tag number */
int tagnum;
if (is_motorola == 1) {
tagnum = exif_data[offset];
tagnum <<= 8;
tagnum += exif_data[offset + 1];
} else {
tagnum = exif_data[offset + 1];
tagnum <<= 8;
tagnum += exif_data[offset];
}
if (tagnum == 0x0112)
break; /* found Orientation Tag */
if (--number_of_tags == 0)
return -1;
offset += 12;
}
/*
* if (set_flag==1) { Set the Orientation value if (is_motorola==1) {
* exif_data[offset+2] = 0; Format = unsigned short (2 octets)
* exif_data[offset+3] = 3; exif_data[offset+4] = 0; Number Of
* Components = 1 exif_data[offset+5] = 0; exif_data[offset+6] = 0;
* exif_data[offset+7] = 1; exif_data[offset+8] = 0; exif_data[offset+9]
* = set_flag; exif_data[offset+10] = 0; exif_data[offset+11] = 0; }
* else { exif_data[offset+2] = 3; Format = unsigned short (2 octets)
* exif_data[offset+3] = 0; exif_data[offset+4] = 1; Number Of
* Components = 1 exif_data[offset+5] = 0; exif_data[offset+6] = 0;
* exif_data[offset+7] = 0; exif_data[offset+8] = set_flag;
* exif_data[offset+9] = 0; exif_data[offset+10] = 0;
* exif_data[offset+11] = 0; } }
*/
// else {
/* Get the Orientation value */
if (is_motorola == 1) {
if (exif_data[offset + 8] != 0)
return -1;
set_flag = exif_data[offset + 9];
} else {
if (exif_data[offset + 9] != 0)
return -1;
set_flag = exif_data[offset + 8];
}
if (set_flag > 8)
return -1;
// }
/* Write out Orientation value */
if (n_flag == 1)
System.out.println("set_flag " + set_flag);
else
System.out.println("set_flag " + set_flag);
return set_flag;
}
public static Bitmap rotateBitmap(Bitmap src, int operation) {
int width = src.getWidth();
int height = src.getHeight();
int[] inPixels = new int[width*height];
src.getARGB(inPixels, 0, width, 0, 0, width, height);
int x = 0, y = 0;
int w = width;
int h = height;
int newX = 0;
int newY = 0;
int newW = w;
int newH = h;
switch (operation) {
case FLIP_H:
newX = width - (x + w);
break;
case FLIP_V:
newY = height - (y + h);
break;
case FLIP_HV:
newW = h;
newH = w;
newX = y;
newY = x;
break;
case FLIP_90CW:
newW = h;
newH = w;
newX = height - (y + h);
newY = x;
break;
case FLIP_90CCW:
newW = h;
newH = w;
newX = y;
newY = width - (x + w);
break;
case FLIP_180:
newX = width - (x + w);
newY = height - (y + h);
break;
}
int[] newPixels = new int[newW * newH];
int index, newRow, newCol, newIndex;
if ( operation == FLIP_H ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = row;
newCol = w - col - 1;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_V ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = h - row - 1;
newCol = col;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_HV ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = col;
newCol = row;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_90CW ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = col;
newCol = h - row - 1;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_90CCW ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = w - col - 1;
newCol = row;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
} else if ( operation == FLIP_180 ) {
for (int row = 0; row < h; row++) {
for (int col = 0; col < w; col++) {
index = row * width + col;
newRow = h - row - 1;
newCol = w - col - 1;
newIndex = newRow * newW + newCol;
newPixels[newIndex] = inPixels[index];
}
}
}
Bitmap dst = new Bitmap( newW, newH );
dst.setARGB(newPixels, 0, newW, 0, 0, newW, newH);
return dst;
}
}
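A rough usage note for the class above (the file URL is a placeholder, the last two arguments are not used by readImageFromFile as written, and the rotate calls reference ImageUtil.FLIP_*, which presumably points at the same constants defined at the top of this class in the original gist):

// Hypothetical call site; the path below is a placeholder and the
// IOException thrown by readImageFromFile still has to be handled.
Bitmap corrected = ExifRotate.readImageFromFile(
        "file:///SDCard/BlackBerry/pictures/photo.jpg", 0, 0);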

BlackBerry image processing (sketch effect and charcoal effect)

I am new to BlackBerry.
I am trying to convert a normal image into a sketch effect. I have code to do that on Android.
I have tried to implement it on BlackBerry, but I am unable to get output. Here are the Android code and my BlackBerry code.
This is the Android code:
public class ConvolutionMatrix
{
public static final int SIZE = 3;
public double[][] Matrix;
public double Factor = 1;
public double Offset = 1;
public ConvolutionMatrix(int size) {
Matrix = new double[size][size];
}
public void setAll(double value) {
for (int x = 0; x < SIZE; ++x) {
for (int y = 0; y < SIZE; ++y) {
Matrix[x][y] = value;
}
}
}
public void applyConfig(double[][] config) {
for(int x = 0; x < SIZE; ++x) {
for(int y = 0; y < SIZE; ++y) {
Matrix[x][y] = config[x][y];
}
}
}
public static Bitmap computeConvolution3x3(Bitmap src, ConvolutionMatrix matrix) {
int width = src.getWidth();
int height = src.getHeight();
Bitmap result = Bitmap.createBitmap(width, height, src.getConfig());
int A, R, G, B;
int sumR, sumG, sumB;
int[][] pixels = new int[SIZE][SIZE];
for(int y = 0; y < height - 2; ++y) {
for(int x = 0; x < width - 2; ++x) {
// get pixel matrix
for(int i = 0; i < SIZE; ++i) {
for(int j = 0; j < SIZE; ++j) {
pixels[i][j] = src.getPixel(x + i, y + j);
}
}
// get alpha of center pixel
A = Color.alpha(pixels[1][1]);
// init color sum
sumR = sumG = sumB = 0;
// get sum of RGB on matrix
for(int i = 0; i < SIZE; ++i) {
for(int j = 0; j < SIZE; ++j) {
sumR += (Color.red(pixels[i][j]) * matrix.Matrix[i][j]);
sumG += (Color.green(pixels[i][j]) * matrix.Matrix[i][j]);
sumB += (Color.blue(pixels[i][j]) * matrix.Matrix[i][j]);
}
}
// get final Red
R = (int)(sumR / matrix.Factor + matrix.Offset);
if(R < 0) { R = 0; }
else if(R > 255) { R = 255; }
// get final Green
G = (int)(sumG / matrix.Factor + matrix.Offset);
if(G < 0) { G = 0; }
else if(G > 255) { G = 255; }
// get final Blue
B = (int)(sumB / matrix.Factor + matrix.Offset);
if(B < 0) { B = 0; }
else if(B > 255) { B = 255; }
// apply new pixel
result.setPixel(x + 1, y + 1, Color.argb(A, R, G, B));
}
}
// final image
return result;
}
}
And the following is the BlackBerry code that I have tried:
public ConvolutionMatrix(int size) {
Matrix = new double[size][size];
}
public void setAll(double value) {
for (int x = 0; x < SIZE; ++x) {
for (int y = 0; y < SIZE; ++y) {
Matrix[x][y] = value;
}
}
}
public void applyConfig(double[][] config) {
for(int x = 0; x < SIZE; ++x) {
for(int y = 0; y < SIZE; ++y) {
Matrix[x][y] = config[x][y];
}
}
}
public static Bitmap computeConvolution3x3(Bitmap src, ConvolutionMatrix matrix) {
int width = src.getWidth();
int height = src.getHeight();
int A, R, G, B;
int sumR, sumG, sumB;
int[] argb= new int[width*height];
int[][]newargb=new int[width][height];
src.getARGB(argb, 0, width, 0, 0, width, height);
for(int y=0;y<=height;y++)
{
for (int x=0;x<=width;x++)
{
System.out.println(""+x);
System.out.println(""+y);
newargb[y][x]=argb[width*y+x];
}
}
int[][] pixels = new int[SIZE][SIZE];
for(int y = 0; y < height - 2; ++y) {
for(int x = 0; x < width - 2; ++x) {
// get pixel matrix
for(int i = 0; i < SIZE; ++i) {
for(int j = 0; j < SIZE; ++j) {
pixels[i][j] = newargb[x + i][ y + j];
}
}
A=pixels[1][1];
sumR = sumG = sumB = 0;
for(int i = 0; i < SIZE; ++i) {
for(int j = 0; j < SIZE; ++j) {
A =pixels[i][j] >> 24;
R =pixels[i][j]>> 16 & 0xFF;
G =pixels[i][j] >> 8 & 0xFF;
B =pixels[i][j] & 0xFF;
sumR += (R * matrix.Matrix[i][j]);
sumG += (G * matrix.Matrix[i][j]);
sumB += (B * matrix.Matrix[i][j]);
}
}
// get final Red
R = (int)(sumR / matrix.Factor + matrix.Offset);
if(R < 0) { R = 0; }
else if(R > 255) { R = 255; }
// get final Green
G = (int)(sumG / matrix.Factor + matrix.Offset);
if(G < 0) { G = 0; }
else if(G > 255) { G = 255; }
// get final Blue
B = (int)(sumB / matrix.Factor + matrix.Offset);
if(B < 0) { B = 0; }
else if(B > 255) { B = 255; }
for(int i = 0; i < SIZE; ++i) {
for(int j = 0; j < SIZE; ++j) {
pixels[i][j]=(A << 24) | (R << 16) | (G << 8) | B;
newargb[x ][ y]=pixels[i][j];
}
}
}
}
for(int y=0;y<=height;y++)
{
for (int x=0;x<=width;x++)
{
argb[width*y+x]=newargb[y][x];
}
}
src.setARGB(argb, 0, width, 0, 0, width, height);
return src;
}
}
Common code for both Android and BlackBerry:
public Bitmap EmbossImage(Bitmap src) {
System.out.println("In Emboss Effect Image method");
double[][] SharpConfig = new double[][] {
{ 0 , -1, 0 },
{ -1, 5, -1 },
{ 0 , -1, 0 }
};
ConvolutionMatrix convMatrix = new ConvolutionMatrix(3);
convMatrix.setAll(0);
convMatrix.applyConfig(SharpConfig);
convMatrix.Factor = 1;
convMatrix.offset=130;
return ConvolutionMatrix.computeConvolution3x3(src, convMatrix);
}
I have found the answer to my own question. I found convolution-related code on this site:
android image processing.
See the comments section on that linked page.
It is Android code. Just make it compatible with BlackBerry by changing the Color class methods.
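For what it's worth, the "changing Color class methods" step usually comes down to the same ARGB bit-shifts the BlackBerry attempt above already uses; a small stand-in sketch (assuming standard 32-bit ARGB packing, which is what Bitmap.getARGB produces) could be:

// Sketch of BlackBerry-side stand-ins for android.graphics.Color, assuming
// standard 32-bit ARGB packing (the same layout Bitmap.getARGB returns).
final class Colors {
    static int alpha(int argb) { return (argb >> 24) & 0xFF; }
    static int red(int argb)   { return (argb >> 16) & 0xFF; }
    static int green(int argb) { return (argb >>  8) & 0xFF; }
    static int blue(int argb)  { return  argb        & 0xFF; }
    static int argb(int a, int r, int g, int b) {
        return (a << 24) | (r << 16) | (g << 8) | b;
    }
    private Colors() { }
}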
