BlackBerry Bitmap Rotation

I tried to rotate a Bitmap in BlackBerry in two ways:
1 -
public static Bitmap rotateImage(Bitmap oldB, int angle) {
    int w = oldB.getWidth();
    int h = oldB.getHeight();
    double angRad = (angle % 360) * (Math.PI / 180);
    Bitmap newB = new Bitmap(w, h);
    int[] oldD = new int[w * h];
    int[] newD = new int[w * h];
    oldB.getARGB(oldD, 0, w, 0, 0, w, h);
    int axisX = w / 2;
    int axisY = h / 2;
    for (int x = 0; x < oldD.length; x++) {
        int oldX = x % w;
        int oldY = x / w;
        int op = oldX - axisX;
        int adj = oldY - axisY;
        double oldT = MathUtilities.atan2(op, adj);
        double rad = Math.sqrt((op * op) + (adj * adj));
        double newT = oldT + angRad;
        int newX = (int) MathUtilities.round((rad * Math.sin(newT)) + (double) axisX);
        int newY = (int) MathUtilities.round((rad * Math.cos(newT)) + (double) axisY);
        if (newX < 0 || newY < 0 || newX >= w || newY >= h) {
            newD[x] = 0x00000000;
        } else {
            newD[x] = oldD[(newY * w) + newX];
        }
    }
    newB.setARGB(newD, 0, w, 0, 0, w, h);
    return newB;
}
2 - the second way, using drawTexturedPath:
------ the function
private void drawRotatedBitmap(Graphics graphics, Bitmap bm, int angle,
        int x, int y) {
    int w = bm.getWidth();
    int h = bm.getHeight();
    double a = Math.toRadians(angle);
    int x1 = (int) (x - h * Math.sin(a));
    int y1 = (int) (y + h * Math.cos(a));
    int x2 = (int) (x1 + w * Math.cos(a));
    int y2 = (int) (y1 + w * Math.sin(a));
    int x3 = (int) (x + w * Math.cos(a));
    int y3 = (int) (y + w * Math.sin(a));
    int xPts[] = { x, x1, x2, x3 };
    int yPts[] = { y, y1, y2, y3 };
    int fAngle = Fixed32.toFP(angle);
    int dvx = Fixed32.cosd(fAngle);
    int dux = -Fixed32.sind(fAngle);
    int dvy = Fixed32.sind(fAngle);
    int duy = Fixed32.cosd(fAngle);
    graphics.drawTexturedPath(xPts, yPts, null, null, 0, 0, dux, dvx, duy, dvy, bm);
}
------ How to invoke
Graphics graphics = Graphics.create(circleBmp);
drawRotatedBitmap(graphics, sourceBmp, 45, 0, 0); // the second argument (the Bitmap to draw) is missing in the original post; sourceBmp is a placeholder
circleBitmapField.setBitmap(circleBmp);
The first way is too slow, and the second way draws the bitmap in the wrong position.
Can anyone help me fix either of them, or suggest another way to rotate a bitmap that is fast and accurate?
Thanks for the help.

You need to use the ImageManipulator class. Find a how-to document here.
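If you would rather keep the pure-Java first approach, note that almost all of its cost is the per-pixel atan2/sqrt/sin/cos calls. The same inverse mapping needs only a couple of multiply-adds per pixel if you precompute the sine and cosine of the angle once. A sketch of that idea, using the same Bitmap API as the question (untested, variable names illustrative):

public static Bitmap rotateImageFast(Bitmap oldB, int angle) {
    int w = oldB.getWidth();
    int h = oldB.getHeight();
    // Precompute the inverse rotation once instead of calling
    // atan2/sqrt/sin/cos for every pixel.
    double rad = Math.toRadians(angle);
    double cos = Math.cos(rad);
    double sin = Math.sin(rad);
    int axisX = w / 2;
    int axisY = h / 2;
    int[] oldD = new int[w * h];
    int[] newD = new int[w * h];
    oldB.getARGB(oldD, 0, w, 0, 0, w, h);
    for (int y = 0; y < h; y++) {
        for (int x = 0; x < w; x++) {
            int dx = x - axisX;
            int dy = y - axisY;
            // Inverse mapping: which source pixel lands on this destination pixel?
            int srcX = (int) (dx * cos + dy * sin) + axisX;
            int srcY = (int) (-dx * sin + dy * cos) + axisY;
            newD[y * w + x] = (srcX < 0 || srcY < 0 || srcX >= w || srcY >= h)
                    ? 0x00000000 : oldD[srcY * w + srcX];
        }
    }
    Bitmap newB = new Bitmap(w, h);
    newB.setARGB(newD, 0, w, 0, 0, w, h);
    return newB;
}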

Related

Histogram based on image as vector graphic

I would like to transform histograms based on images to vector graphics.
This could be a start:
function preload() {
  img = loadImage("https://upload.wikimedia.org/wikipedia/commons/thumb/3/36/Cirrus_sky_panorama.jpg/1200px-Cirrus_sky_panorama.jpg");
}

function setup() {
  createCanvas(400, 400);
  background(255);
  img.resize(0, 200);
  var maxRange = 256
  colorMode(HSL, maxRange);
  image(img, 0, 0);
  var histogram = new Array(maxRange);
  for (i = 0; i <= maxRange; i++) {
    histogram[i] = 0
  }
  loadPixels();
  for (var x = 0; x < img.width; x += 5) {
    for (var y = 0; y < img.height; y += 5) {
      var loc = (x + y * img.width) * 4;
      var h = pixels[loc];
      var s = pixels[loc + 1];
      var l = pixels[loc + 2];
      var a = pixels[loc + 3];
      b = int(l);
      histogram[b]++
    }
  }
  image(img, 0, 0);
  stroke(300, 100, 80)
  push()
  translate(10, 0)
  for (x = 0; x <= maxRange; x++) {
    index = histogram[x];
    y1 = int(map(index, 0, max(histogram), height, height - 300));
    y2 = height
    xPos = map(x, 0, maxRange, 0, width - 20)
    line(xPos, y1, xPos, y2);
  }
  pop()
}
<script src="https://cdn.jsdelivr.net/npm/p5@1.4.1/lib/p5.js"></script>
But as a result I would need downloadable vector graphic files: closed shapes without any gaps between them. It should look like this, for example:
<svg viewBox="0 0 399.84 200"><polygon points="399.84 200 399.84 192.01 361.91 192.01 361.91 182.87 356.24 182.87 356.24 183.81 350.58 183.81 350.58 184.74 344.91 184.74 344.91 188.19 339.87 188.19 339.87 189.89 334.6 189.89 334.6 185.29 328.93 185.29 328.93 171.11 323.26 171.11 323.26 172.55 317.59 172.55 317.59 173.99 311.92 173.99 311.92 179.42 306.88 179.42 306.88 182.03 301.21 182.03 301.21 183.01 295.54 183.01 295.54 179.04 289.87 179.04 289.87 175.67 284.21 175.67 284.21 182.03 278.54 182.03 278.54 176 273.5 176 273.5 172.42 267.83 172.42 267.83 179.42 262.79 179.42 262.79 182.03 257.12 182.03 257.12 183.01 251.45 183.01 251.45 178.63 245.78 178.63 245.78 175.21 240.11 175.21 240.11 182.03 234.86 182.03 234.86 150.42 229.2 150.42 229.2 155.98 223.53 155.98 223.53 158.06 217.86 158.06 217.86 167.44 212.19 167.44 212.19 162.58 206.52 162.58 206.52 155.98 200.85 155.98 200.85 158.06 195.18 158.06 195.18 167.44 189.51 167.44 189.51 177.46 183.84 177.46 183.84 166.93 178.17 166.93 178.17 153.69 172.5 153.69 172.5 155.87 166.82 155.87 166.82 158.05 161.78 158.05 161.78 155.63 156.11 155.63 156.11 160.65 150.84 160.65 150.84 146.59 145.17 146.59 145.17 109.63 139.49 109.63 139.49 113.67 133.82 113.67 133.82 61.48 128.15 61.48 128.15 80.59 123.11 80.59 123.11 93.23 117.44 93.23 117.44 97.97 111.76 97.97 111.76 78.07 106.09 78.07 106.09 61.66 100.42 61.66 100.42 93.23 94.75 93.23 94.75 98.51 89.7 98.51 89.7 85.4 84.03 85.4 84.03 111.03 78.99 111.03 78.99 120.57 73.32 120.57 73.32 124.14 67.65 124.14 67.65 23.48 61.97 23.48 61.97 0 56.3 0 56.3 120.57 50.63 120.57 50.63 167.01 45.38 167.01 45.38 170.83 39.71 170.83 39.71 172.26 34.03 172.26 34.03 178.7 28.36 178.7 28.36 175.36 22.69 175.36 22.69 170.83 17.02 170.83 17.02 172.26 11.34 172.26 11.34 178.7 5.67 178.7 5.67 103.85 0 103.85 0 200 399.84 200"/></svg>
Does anyone have an idea how to program that? It doesn't necessarily need to be based on p5.js, but that would be cool.
Closing Gaps
In order to have a gapless histogram, you need to meet the following condition:
numberOfBars * barWidth === totalWidth
Right now you are using the p5 line() function to draw your bars. You have not explicitly set the width of your bars, so it uses the default value of 1px wide.
We know that the numberOfBars in your code is always maxRange which is 256.
Right now the total width of your histogram is width - 20, where width is set to 400 by createCanvas(400, 400). So the totalWidth is 380.
256 * 1 !== 380
If you have 256 pixels of bars in a 380 pixel space then there are going to be gaps!
We need to change the barWidth and/or the totalWidth to balance the equation.
For example, you can change your canvas size to 276 (256 + your 20px margin) and the gaps disappear!
createCanvas(276, 400);
However this is not an appropriate solution because now your image is cropped and your pixel data is wrong. But actually...it was already wrong before!
Sampling Pixels
When you call the global loadPixels() function in p5.js you are loading all of the pixels for the whole canvas. This includes the white areas outside of your image.
for (var x = 0; x < img.width; x += 5) {
  for (var y = 0; y < img.height; y += 5) {
    var loc = (x + y * img.width) * 4;
It is a 1-dimensional array, so your approach of limiting the x and y values here is not giving you the correct position. Your loc variable needs to use the width of the entire canvas rather than the width of just the image, since the pixels array includes the entire canvas.
var loc = (x + y * width) * 4;
Alternatively, you can look at just the pixels of the image by using img.loadPixels() and img.pixels.
img.loadPixels();
for (var x = 0; x < img.width; x += 5) {
  for (var y = 0; y < img.height; y += 5) {
    var loc = (x + y * img.width) * 4;
    var h = img.pixels[loc];
    var s = img.pixels[loc + 1];
    var l = img.pixels[loc + 2];
    var a = img.pixels[loc + 3];
    b = int(l);
    histogram[b]++;
  }
}
The pixel values are always returned in RGBA regardless of the colorMode. So your third channel value is actually the blue, not the lightness. You can make use of the p5.js lightness() function to compute the lightness from the RGBA.
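For reference, HSL lightness is simply the average of the largest and smallest RGB channels. A minimal sketch of the computation in plain Java (scaled to 0-100, like p5's default HSL lightness range):

static double lightness(int r, int g, int b) {
    // HSL lightness = (max + min) / 2, with channels normalized to [0, 1]
    double max = Math.max(r, Math.max(g, b)) / 255.0;
    double min = Math.min(r, Math.min(g, b)) / 255.0;
    return (max + min) / 2.0 * 100.0;
}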
Updated Code
The actual lightness histogram looks dumb because 100% dwarfs all of the other bars.
function preload() {
  img = loadImage("https://upload.wikimedia.org/wikipedia/commons/thumb/3/36/Cirrus_sky_panorama.jpg/1200px-Cirrus_sky_panorama.jpg");
}

function setup() {
  const barCount = 100;
  const imageHeight = 200;
  createCanvas(400, 400);
  background(255);
  colorMode(HSL, barCount - 1);
  img.resize(0, imageHeight);
  imageMode(CENTER);
  image(img, width / 2, imageHeight / 2);
  img.loadPixels();
  const histogram = new Array(barCount).fill(0);
  for (let x = 0; x < img.width; x += 5) {
    for (let y = 0; y < img.height; y += 5) {
      const loc = (x + y * img.width) * 4;
      const r = img.pixels[loc];
      const g = img.pixels[loc + 1];
      const b = img.pixels[loc + 2];
      const a = img.pixels[loc + 3];
      const barIndex = floor(lightness([r, g, b, a]));
      histogram[barIndex]++;
    }
  }
  fill(300, 100, 80);
  strokeWeight(0);
  const maxCount = max(histogram);
  const barWidth = width / barCount;
  const histogramHeight = height - imageHeight;
  for (let i = 0; i < barCount; i++) {
    const count = histogram[i];
    const y1 = round(map(count, 0, maxCount, height, imageHeight));
    const y2 = height;
    const x1 = i * barWidth;
    const x2 = x1 + barWidth;
    rect(x1, y1, barWidth, height - y1);
  }
}
<script src="https://cdn.jsdelivr.net/npm/p5@1.4.1/lib/p5.js"></script>
But the blue channel histogram looks pretty good!
function preload() {
  img = loadImage("https://upload.wikimedia.org/wikipedia/commons/thumb/3/36/Cirrus_sky_panorama.jpg/1200px-Cirrus_sky_panorama.jpg");
}

function setup() {
  const barCount = 100;
  const imageHeight = 200;
  createCanvas(400, 400);
  background(255);
  img.resize(0, imageHeight);
  imageMode(CENTER);
  image(img, width / 2, imageHeight / 2);
  img.loadPixels();
  const histogram = new Array(barCount).fill(0);
  for (let x = 0; x < img.width; x += 5) {
    for (let y = 0; y < img.height; y += 5) {
      const loc = (x + y * img.width) * 4;
      const r = img.pixels[loc];
      const g = img.pixels[loc + 1];
      const b = img.pixels[loc + 2];
      const a = img.pixels[loc + 3];
      const barIndex = floor(barCount * b / 255);
      histogram[barIndex]++;
    }
  }
  fill(100, 100, 300);
  strokeWeight(0);
  const maxCount = max(histogram);
  const barWidth = width / barCount;
  const histogramHeight = height - imageHeight;
  for (let i = 0; i < barCount; i++) {
    const count = histogram[i];
    const y1 = round(map(count, 0, maxCount, height, imageHeight));
    const y2 = height;
    const x1 = i * barWidth;
    const x2 = x1 + barWidth;
    rect(x1, y1, barWidth, height - y1);
  }
}
<script src="https://cdn.jsdelivr.net/npm/p5@1.4.1/lib/p5.js"></script>
Just to add to Linda's excellent answer (+1), you can use p5.svg to render to SVG using p5.js:
let histogram;

function setup() {
  createCanvas(660, 210, SVG);
  background(255);
  noStroke();
  fill("#ed225d");
  // make an array of 256 random values in the (0, 255) range
  histogram = Array.from({length: 256}, () => int(random(255)));
  //console.log(histogram);
  // plot the histogram
  barPlot(histogram, 0, 0, width, height);
  // change shape rendering so bars appear connected
  document.querySelector('g').setAttribute('shape-rendering', 'crispEdges');
  // save the plot
  save("histogram.svg");
}

function barPlot(values, x, y, plotWidth, plotHeight) {
  let numValues = values.length;
  // calculate the width of each bar in the plot
  let barWidth = plotWidth / numValues;
  // calculate min/max value (to map height)
  let minValue = min(values);
  let maxValue = max(values);
  // for each value
  for (let i = 0; i < numValues; i++) {
    // map the value to the plot height
    let barHeight = map(values[i], minValue, maxValue, 0, plotHeight);
    // render each bar, offsetting y
    rect(x + (i * barWidth),
         y + (plotHeight - barHeight),
         barWidth, barHeight);
  }
}
<script src="https://unpkg.com/p5@1.3.1/lib/p5.js"></script>
<script src="https://unpkg.com/p5.js-svg@1.0.7"></script>
(In the p5 editor, or when testing locally, a save dialog should pop up.
If you use the browser's Developer Tools to inspect the bar chart, it should confirm it's an SVG, not a <canvas/>.)
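If you need the single closed <polygon> shape shown in the question rather than individual bars, the SVG string can also be assembled by hand. A sketch in plain Java (names are illustrative; the same string building works in any language) that walks across the bars and emits one gapless outline:

static String histogramToSvgPolygon(int[] counts, double width, double height) {
    int n = counts.length;
    double barW = width / n; // gapless: n * barW == width by construction
    int maxCount = 1;
    for (int c : counts) maxCount = Math.max(maxCount, c);
    StringBuilder pts = new StringBuilder();
    pts.append("0,").append(height); // start at the bottom-left corner
    for (int i = 0; i < n; i++) {
        double y = height - (double) counts[i] / maxCount * height;
        pts.append(" ").append(i * barW).append(",").append(y);       // vertical step
        pts.append(" ").append((i + 1) * barW).append(",").append(y); // bar top
    }
    pts.append(" ").append(width).append(",").append(height); // bottom-right corner
    // the polygon closes itself back to the start along the bottom edge
    return "<svg viewBox=\"0 0 " + width + " " + height + "\">"
            + "<polygon points=\"" + pts + "\"/></svg>";
}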

RenderScript's allocation output returns a black Bitmap

A few days ago I started learning RenderScript. I managed to create some simple image-processing filters, e.g. grayscale and color change.
Now I'm working on a Canny edge filter, with no success.
Question: Why does the ImageView display a black image, and how do I solve it?
I'm using the implementation of the Canny edge filter made by arekolek on GitHub.
Optional: Can I compute it faster?
I ended up with all the code written in one method, runEdgeFilter(...), which runs when I click the image on my device, to make sure I'm not messing with the imageView anywhere else. The code I use so far:
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.support.v8.renderscript.*;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.view.View;
import android.widget.ImageView;

public class MainActivity extends AppCompatActivity {
    private static final float THRESHOLD_MULT_LOW = 0.66f * 0.00390625f;
    private static final float THRESHOLD_MULT_HIGH = 1.33f * 0.00390625f;
    private ImageView imageView;
    private Bitmap img;
    private boolean setThresholds = true;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        imageView = (ImageView) findViewById(R.id.imageView);
        img = BitmapFactory.decodeResource(getResources(), R.drawable.test_img_no_dpi2);
        imageView.setImageBitmap(img);
    }

    public void imageClicked(View view) {
        runEdgeFilter(img, this);
    }

    private void runEdgeFilter(Bitmap image, Context context) {
        int width = image.getWidth();
        int height = image.getHeight();
        RenderScript rs = RenderScript.create(context);
        Allocation allocationIn = Allocation.createFromBitmap(rs, image);
        Type.Builder tb;
        tb = new Type.Builder(rs, Element.F32(rs)).setX(width).setY(height);
        Allocation allocationBlurred = Allocation.createTyped(rs, tb.create());
        Allocation allocationMagnitude = Allocation.createTyped(rs, tb.create());
        tb = new Type.Builder(rs, Element.I32(rs)).setX(width).setY(height);
        Allocation allocationDirection = Allocation.createTyped(rs, tb.create());
        Allocation allocationEdge = Allocation.createTyped(rs, tb.create());
        tb = new Type.Builder(rs, Element.I32(rs)).setX(256);
        Allocation allocationHistogram = Allocation.createTyped(rs, tb.create());
        tb = new Type.Builder(rs, Element.RGBA_8888(rs)).setX(width).setY(height);
        Allocation allocationOut = Allocation.createTyped(rs, tb.create());
        ScriptC_edge edgeFilter = new ScriptC_edge(rs);
        ScriptIntrinsicHistogram histogram = ScriptIntrinsicHistogram.create(rs, Element.U8(rs));
        histogram.setOutput(allocationHistogram);
        edgeFilter.invoke_set_histogram(allocationHistogram);
        edgeFilter.invoke_set_blur_input(allocationIn);
        edgeFilter.invoke_set_compute_gradient_input(allocationBlurred);
        edgeFilter.invoke_set_suppress_input(allocationMagnitude, allocationDirection);
        edgeFilter.invoke_set_hysteresis_input(allocationEdge);
        edgeFilter.invoke_set_thresholds(0.2f, 0.6f);
        histogram.forEach_Dot(allocationIn);
        int[] histogramOutput = new int[256];
        allocationHistogram.copyTo(histogramOutput);
        if (setThresholds) {
            int median = width * height / 2;
            for (int i = 0; i < 256; ++i) {
                median -= histogramOutput[i];
                if (median < 1) {
                    edgeFilter.invoke_set_thresholds(i * THRESHOLD_MULT_LOW, i * THRESHOLD_MULT_HIGH);
                    break;
                }
            }
        }
        edgeFilter.forEach_blur(allocationBlurred);
        edgeFilter.forEach_compute_gradient(allocationMagnitude);
        edgeFilter.forEach_suppress(allocationEdge);
        edgeFilter.forEach_hysteresis(allocationOut);
        allocationOut.copyTo(image);
        allocationIn.destroy();
        allocationMagnitude.destroy();
        allocationBlurred.destroy();
        allocationDirection.destroy();
        allocationEdge.destroy();
        allocationHistogram.destroy();
        allocationOut.destroy();
        histogram.destroy();
        edgeFilter.destroy();
        rs.destroy();
        imageView.setImageBitmap(image);
    }
}
The RenderScript, edge.rs:
#pragma version(1)
#pragma rs java_package_name(com.lukasz.edgeexamplers)
#pragma rs_fp_relaxed

#include "rs_debug.rsh"

static rs_allocation raw, magnitude, blurred, direction, candidates;
static float low, high;
static const uint32_t zero = 0;

void set_blur_input(rs_allocation u8_buf) {
    raw = u8_buf;
}

void set_compute_gradient_input(rs_allocation f_buf) {
    blurred = f_buf;
}

void set_suppress_input(rs_allocation f_buf, rs_allocation i_buf) {
    magnitude = f_buf;
    direction = i_buf;
}

void set_hysteresis_input(rs_allocation i_buf) {
    candidates = i_buf;
}

void set_thresholds(float l, float h) {
    low = l;
    high = h;
}

inline static float getElementAt_uchar_to_float(rs_allocation a, uint32_t x,
        uint32_t y) {
    return rsGetElementAt_uchar(a, x, y) / 255.0f;
}

static rs_allocation histogram;

void set_histogram(rs_allocation h) {
    histogram = h;
}

uchar4 __attribute__((kernel)) addhisto(uchar in, uint32_t x, uint32_t y) {
    int px = (x - 100) / 2;
    if (px > -1 && px < 256) {
        int v = log((float) rsGetElementAt_int(histogram, (uint32_t) px)) * 30;
        int py = (400 - y);
        if (py > -1 && v > py) {
            in = 255;
        }
        if (py == -1) {
            in = 255;
        }
    }
    uchar4 out = { in, in, in, 255 };
    return out;
}

uchar4 __attribute__((kernel)) copy(uchar in) {
    uchar4 out = { in, in, in, 255 };
    return out;
}

uchar4 __attribute__((kernel)) blend(uchar4 in, uint32_t x, uint32_t y) {
    uchar r = rsGetElementAt_uchar(raw, x, y);
    uchar4 out = { r, r, r, 255 };
    return max(out, in);
}

float __attribute__((kernel)) blur(uint32_t x, uint32_t y) {
    float pixel = 0;
    pixel += 2 * getElementAt_uchar_to_float(raw, x - 2, y - 2);
    pixel += 4 * getElementAt_uchar_to_float(raw, x - 1, y - 2);
    pixel += 5 * getElementAt_uchar_to_float(raw, x, y - 2);
    pixel += 4 * getElementAt_uchar_to_float(raw, x + 1, y - 2);
    pixel += 2 * getElementAt_uchar_to_float(raw, x + 2, y - 2);
    pixel += 4 * getElementAt_uchar_to_float(raw, x - 2, y - 1);
    pixel += 9 * getElementAt_uchar_to_float(raw, x - 1, y - 1);
    pixel += 12 * getElementAt_uchar_to_float(raw, x, y - 1);
    pixel += 9 * getElementAt_uchar_to_float(raw, x + 1, y - 1);
    pixel += 4 * getElementAt_uchar_to_float(raw, x + 2, y - 1);
    pixel += 5 * getElementAt_uchar_to_float(raw, x - 2, y);
    pixel += 12 * getElementAt_uchar_to_float(raw, x - 1, y);
    pixel += 15 * getElementAt_uchar_to_float(raw, x, y);
    pixel += 12 * getElementAt_uchar_to_float(raw, x + 1, y);
    pixel += 5 * getElementAt_uchar_to_float(raw, x + 2, y);
    pixel += 4 * getElementAt_uchar_to_float(raw, x - 2, y + 1);
    pixel += 9 * getElementAt_uchar_to_float(raw, x - 1, y + 1);
    pixel += 12 * getElementAt_uchar_to_float(raw, x, y + 1);
    pixel += 9 * getElementAt_uchar_to_float(raw, x + 1, y + 1);
    pixel += 4 * getElementAt_uchar_to_float(raw, x + 2, y + 1);
    pixel += 2 * getElementAt_uchar_to_float(raw, x - 2, y + 2);
    pixel += 4 * getElementAt_uchar_to_float(raw, x - 1, y + 2);
    pixel += 5 * getElementAt_uchar_to_float(raw, x, y + 2);
    pixel += 4 * getElementAt_uchar_to_float(raw, x + 1, y + 2);
    pixel += 2 * getElementAt_uchar_to_float(raw, x + 2, y + 2);
    pixel /= 159;
    return pixel;
}

float __attribute__((kernel)) compute_gradient(uint32_t x, uint32_t y) {
    float gx = 0;
    gx -= rsGetElementAt_float(blurred, x - 1, y - 1);
    gx -= rsGetElementAt_float(blurred, x - 1, y) * 2;
    gx -= rsGetElementAt_float(blurred, x - 1, y + 1);
    gx += rsGetElementAt_float(blurred, x + 1, y - 1);
    gx += rsGetElementAt_float(blurred, x + 1, y) * 2;
    gx += rsGetElementAt_float(blurred, x + 1, y + 1);
    float gy = 0;
    gy += rsGetElementAt_float(blurred, x - 1, y - 1);
    gy += rsGetElementAt_float(blurred, x, y - 1) * 2;
    gy += rsGetElementAt_float(blurred, x + 1, y - 1);
    gy -= rsGetElementAt_float(blurred, x - 1, y + 1);
    gy -= rsGetElementAt_float(blurred, x, y + 1) * 2;
    gy -= rsGetElementAt_float(blurred, x + 1, y + 1);
    int d = ((int) round(atan2pi(gy, gx) * 4.0f) + 4) % 4;
    rsSetElementAt_int(direction, d, x, y);
    return hypot(gx, gy);
}

int __attribute__((kernel)) suppress(uint32_t x, uint32_t y) {
    int d = rsGetElementAt_int(direction, x, y);
    float g = rsGetElementAt_float(magnitude, x, y);
    if (d == 0) {
        // horizontal, check left and right
        float a = rsGetElementAt_float(magnitude, x - 1, y);
        float b = rsGetElementAt_float(magnitude, x + 1, y);
        return a < g && b < g ? 1 : 0;
    } else if (d == 2) {
        // vertical, check above and below
        float a = rsGetElementAt_float(magnitude, x, y - 1);
        float b = rsGetElementAt_float(magnitude, x, y + 1);
        return a < g && b < g ? 1 : 0;
    } else if (d == 1) {
        // NW-SE
        float a = rsGetElementAt_float(magnitude, x - 1, y - 1);
        float b = rsGetElementAt_float(magnitude, x + 1, y + 1);
        return a < g && b < g ? 1 : 0;
    } else {
        // NE-SW
        float a = rsGetElementAt_float(magnitude, x + 1, y - 1);
        float b = rsGetElementAt_float(magnitude, x - 1, y + 1);
        return a < g && b < g ? 1 : 0;
    }
}

static const int NON_EDGE = 0b000;
static const int LOW_EDGE = 0b001;
static const int MED_EDGE = 0b010;
static const int HIG_EDGE = 0b100;

inline static int getEdgeType(uint32_t x, uint32_t y) {
    int e = rsGetElementAt_int(candidates, x, y);
    float g = rsGetElementAt_float(magnitude, x, y);
    if (e == 1) {
        if (g < low)
            return LOW_EDGE;
        if (g > high)
            return HIG_EDGE;
        return MED_EDGE;
    }
    return NON_EDGE;
}

uchar4 __attribute__((kernel)) hysteresis(uint32_t x, uint32_t y) {
    uchar4 white = { 255, 255, 255, 255 };
    uchar4 red = { 255, 0, 0, 255 };
    uchar4 black = { 0, 0, 0, 255 };
    int type = getEdgeType(x, y);
    if (type) {
        if (type & LOW_EDGE) {
            return black;
        }
        if (type & HIG_EDGE) {
            //rsDebug("wh : x=", x);
            //rsDebug("wh : y=", y);
            return white;
        }
        // it's medium, check nearest neighbours
        type = getEdgeType(x - 1, y - 1);
        type |= getEdgeType(x, y - 1);
        type |= getEdgeType(x + 1, y - 1);
        type |= getEdgeType(x - 1, y);
        type |= getEdgeType(x + 1, y);
        type |= getEdgeType(x - 1, y + 1);
        type |= getEdgeType(x, y + 1);
        type |= getEdgeType(x + 1, y + 1);
        if (type & HIG_EDGE) {
            //rsDebug("wh : x=", x);
            //rsDebug("wh : y=", y);
            return white;
        }
        if (type & MED_EDGE) {
            // check further
            type = getEdgeType(x - 2, y - 2);
            type |= getEdgeType(x - 1, y - 2);
            type |= getEdgeType(x, y - 2);
            type |= getEdgeType(x + 1, y - 2);
            type |= getEdgeType(x + 2, y - 2);
            type |= getEdgeType(x - 2, y - 1);
            type |= getEdgeType(x + 2, y - 1);
            type |= getEdgeType(x - 2, y);
            type |= getEdgeType(x + 2, y);
            type |= getEdgeType(x - 2, y + 1);
            type |= getEdgeType(x + 2, y + 1);
            type |= getEdgeType(x - 2, y + 2);
            type |= getEdgeType(x - 1, y + 2);
            type |= getEdgeType(x, y + 2);
            type |= getEdgeType(x + 1, y + 2);
            type |= getEdgeType(x + 2, y + 2);
            if (type & HIG_EDGE) {
                //rsDebug("wh : x=", x);
                //rsDebug("wh : y=", y);
                return white;
            }
        }
    }
    return black;
}
After some debugging I found that:
uchar4 __attribute__((kernel)) hysteresis(uint32_t x, uint32_t y) {...}
returns white and black pixels, so I think the RenderScript itself works properly.
The output is the same type (uchar4) as in my previous RenderScript filters, which I assigned to a Bitmap with success.
I have no idea what I've done wrong.
Also my logcat prints:
V/RenderScript_jni: RS compat mode
V/RenderScript_jni: Unable to load libRSSupportIO.so, USAGE_IO not supported
V/RenderScript_jni: Unable to load BLAS lib, ONLY BNNM will be supported: java.lang.UnsatisfiedLinkError: Couldn't load blasV8 from loader dalvik.system.PathClassLoader[dexPath=/data/app/com.lukasz.edgeexamplers-20.apk,libraryPath=/data/app-lib/com.lukasz.edgeexamplers-20]: findLibrary returned null
E/RenderScript: Couldn't load libRSSupportIO.so
in every program that uses RenderScript, but the other programs work even with these warnings.
Update #1
As @Stephen Hines mentioned, there was an issue with reading out of bounds. I think I fixed it for now (without touching the RenderScript) by changing these lines:
edgeFilter.forEach_blur(allocationBlurred);
edgeFilter.forEach_compute_gradient(allocationMagnitude);
edgeFilter.forEach_suppress(allocationEdge);
edgeFilter.forEach_hysteresis(allocationOut);
into:
Script.LaunchOptions sLaunchOpt = new Script.LaunchOptions();
sLaunchOpt.setX(2, width - 3);
sLaunchOpt.setY(2, height - 3);
edgeFilter.forEach_blur(allocationBlurred, sLaunchOpt);
edgeFilter.forEach_compute_gradient(allocationMagnitude, sLaunchOpt);
edgeFilter.forEach_suppress(allocationEdge, sLaunchOpt);
edgeFilter.forEach_hysteresis(allocationOut, sLaunchOpt);
But my problem is still not solved. The output is black, as before.

What's the best way to fit one or more good lines to a set of points in an image using RANSAC with OpenCV?

What's the best way to fit one or more good lines to a set of points in an image using RANSAC with OpenCV?
Is RANSAC the most efficient way to fit a line?
RANSAC is not the most efficient, but it handles a large number of outliers better. Here is how to do it using OpenCV.
A useful structure:
struct SLine
{
    SLine() :
        numOfValidPoints(0),
        params(-1.f, -1.f, -1.f, -1.f)
    {}
    cv::Vec4f params; // (cos(t), sin(t), X0, Y0)
    int numOfValidPoints;
};
Total least squares is used to fit the final line to the inliers of a successful pair:
cv::Vec4f TotalLeastSquares(
    std::vector<cv::Point>& nzPoints,
    std::vector<int> ptOnLine)
{
    // if there are enough inliers, calculate the model
    float x = 0, y = 0, x2 = 0, y2 = 0, xy = 0, w = 0;
    float dx2, dy2, dxy;
    float t;
    for (size_t i = 0; i < nzPoints.size(); ++i)
    {
        x += ptOnLine[i] * nzPoints[i].x;
        y += ptOnLine[i] * nzPoints[i].y;
        x2 += ptOnLine[i] * nzPoints[i].x * nzPoints[i].x;
        y2 += ptOnLine[i] * nzPoints[i].y * nzPoints[i].y;
        xy += ptOnLine[i] * nzPoints[i].x * nzPoints[i].y;
        w += ptOnLine[i];
    }
    x /= w;
    y /= w;
    x2 /= w;
    y2 /= w;
    xy /= w;
    // covariance matrix
    dx2 = x2 - x * x;
    dy2 = y2 - y * y;
    dxy = xy - x * y;
    t = (float) atan2(2 * dxy, dx2 - dy2) / 2;
    cv::Vec4f line;
    line[0] = (float) cos(t);
    line[1] = (float) sin(t);
    line[2] = (float) x;
    line[3] = (float) y;
    return line;
}
The actual RANSAC
SLine LineFitRANSAC(
    float t, // distance from the main line
    float p, // chance of hitting a valid pair
    float e, // percentage of outliers
    int T,   // number of expected minimum inliers
    std::vector<cv::Point>& nzPoints)
{
    int s = 2; // number of points required by the model
    int N = (int)ceilf(log(1 - p) / log(1 - pow(1 - e, s))); // number of independent trials
    std::vector<SLine> lineCandidates;
    std::vector<int> ptOnLine(nzPoints.size()); // is inlier
    cv::RNG rng((uint64)-1);
    SLine line;
    for (int i = 0; i < N; i++)
    {
        // pick two points
        int idx1 = (int)rng.uniform(0, (int)nzPoints.size());
        int idx2 = (int)rng.uniform(0, (int)nzPoints.size());
        cv::Point p1 = nzPoints[idx1];
        cv::Point p2 = nzPoints[idx2];
        // points too close - discard
        if (cv::norm(p1 - p2) < t)
        {
            continue;
        }
        // line equation -> (y1 - y2)X + (x2 - x1)Y + x1y2 - x2y1 = 0
        float a = static_cast<float>(p1.y - p2.y);
        float b = static_cast<float>(p2.x - p1.x);
        float c = static_cast<float>(p1.x * p2.y - p2.x * p1.y);
        // normalize them
        float scale = 1.f / sqrt(a * a + b * b);
        a *= scale;
        b *= scale;
        c *= scale;
        // count inliers
        int numOfInliers = 0;
        for (size_t j = 0; j < nzPoints.size(); ++j) // renamed from i to avoid shadowing the trial counter
        {
            cv::Point& p0 = nzPoints[j];
            float rho = std::fabs(a * p0.x + b * p0.y + c); // fabs, not integer abs
            bool isInlier = rho < t;
            if (isInlier) numOfInliers++;
            ptOnLine[j] = isInlier;
        }
        if (numOfInliers < T)
        {
            continue;
        }
        line.params = TotalLeastSquares(nzPoints, ptOnLine);
        line.numOfValidPoints = numOfInliers;
        lineCandidates.push_back(line);
    }
    int bestLineIdx = 0;
    int bestLineScore = 0;
    for (size_t i = 0; i < lineCandidates.size(); i++)
    {
        if (lineCandidates[i].numOfValidPoints > bestLineScore)
        {
            bestLineIdx = i;
            bestLineScore = lineCandidates[i].numOfValidPoints;
        }
    }
    if (lineCandidates.empty())
    {
        return SLine();
    }
    else
    {
        return lineCandidates[bestLineIdx];
    }
}
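For reference, the trial count N at the top of LineFitRANSAC is the standard RANSAC formula N = log(1 - p) / log(1 - (1 - e)^s): the smallest number of random samples needed so that, with probability p, at least one sample of s points is outlier-free. A quick sketch of the arithmetic (plain Java, values illustrative):

static int ransacTrials(double p, double e, int s) {
    // p: desired probability that at least one sample is all inliers
    // e: expected outlier ratio; s: points per sample (2 for a line)
    return (int) Math.ceil(Math.log(1 - p) / Math.log(1 - Math.pow(1 - e, s)));
}
// e.g. ransacTrials(0.99, 0.5, 2) == 17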
Take a look at the least mean squares method. It's faster and simpler than RANSAC.
Also take a look at OpenCV's fitLine method.
RANSAC performs better when you have a lot of outliers in your data, or a complex hypothesis.

Set origin at image center for warpPerspective in OpenCV [duplicate]

I am trying to specify a different origin for the warpPerspective() function than the usual (0, 0), in order to apply the transform independently of the support image size. I added a CvPoint parameter to the original code, but I can't find where to use these coordinates. I tried to use them in the computation of X0, Y0 and W0, but that didn't work; it only shifted the transformed image in the resulting image. Any idea?
Here is the code:
void warpPerspective( const Mat& src, Mat& dst, const Mat& M0, Size dsize,
                      int flags, int borderType, const Scalar& borderValue, CvPoint origin )
{
    dst.create( dsize, src.type() );
    const int BLOCK_SZ = 32;
    short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ];
    double M[9];
    Mat _M(3, 3, CV_64F, M);
    int interpolation = flags & INTER_MAX;
    if( interpolation == INTER_AREA )
        interpolation = INTER_LINEAR;
    CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 );
    M0.convertTo(_M, _M.type());
    if( !(flags & WARP_INVERSE_MAP) )
        invert(_M, _M);
    int x, y, x1, y1, width = dst.cols, height = dst.rows;
    int bh0 = std::min(BLOCK_SZ/2, height);
    int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width);
    bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height);
    for( y = 0; y < height; y += bh0 )
    {
        for( x = 0; x < width; x += bw0 )
        {
            int bw = std::min( bw0, width - x);
            int bh = std::min( bh0, height - y);
            Mat _XY(bh, bw, CV_16SC2, XY), _A;
            Mat dpart(dst, Rect(x, y, bw, bh));
            for( y1 = 0; y1 < bh; y1++ )
            {
                short* xy = XY + y1*bw*2;
                double X0 = M[0]*x + M[1]*(y + y1) + M[2];
                double Y0 = M[3]*x + M[4]*(y + y1) + M[5];
                double W0 = M[6]*x + M[7]*(y + y1) + M[8];
                if( interpolation == INTER_NEAREST )
                    for( x1 = 0; x1 < bw; x1++ )
                    {
                        double W = W0 + M[6]*x1;
                        W = W ? 1./W : 0;
                        int X = saturate_cast<int>((X0 + M[0]*x1)*W);
                        int Y = saturate_cast<int>((Y0 + M[3]*x1)*W);
                        xy[x1*2] = (short)X;
                        xy[x1*2+1] = (short)Y;
                    }
                else
                {
                    short* alpha = A + y1*bw;
                    for( x1 = 0; x1 < bw; x1++ )
                    {
                        double W = W0 + M[6]*x1;
                        W = W ? INTER_TAB_SIZE/W : 0;
                        int X = saturate_cast<int>((X0 + M[0]*x1)*W);
                        int Y = saturate_cast<int>((Y0 + M[3]*x1)*W);
                        xy[x1*2] = (short)(X >> INTER_BITS);
                        xy[x1*2+1] = (short)(Y >> INTER_BITS);
                        alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE +
                                            (X & (INTER_TAB_SIZE-1)));
                    }
                }
            }
            if( interpolation == INTER_NEAREST )
                remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
            else
            {
                Mat _A(bh, bw, CV_16U, A);
                remap( src, dpart, _XY, _A, interpolation, borderType, borderValue );
            }
        }
    }
}
OK, I found it myself! There are two things to do:
compute the destination dimensions in the source reference frame, and do the remap using those dimensions;
offset the computed point coordinates by the origin.
Here is the code thus transformed:
void warpPerspective( const Mat& src, Mat& dst, const Mat& M0, Size dsize,
                      int flags, int borderType, const Scalar& borderValue, CvPoint origin )
{
    dst.create( dsize, src.type() );
    const int BLOCK_SZ = 32;
    short XY[BLOCK_SZ*BLOCK_SZ*2], A[BLOCK_SZ*BLOCK_SZ];
    double M[9];
    Mat _M(3, 3, CV_64F, M);
    int interpolation = flags & INTER_MAX;
    if( interpolation == INTER_AREA )
        interpolation = INTER_LINEAR;
    CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 );
    M0.convertTo(_M, _M.type());
    if( !(flags & WARP_INVERSE_MAP) )
        invert(_M, _M);
    int x, xDest, y, yDest, x1, y1, width = dst.cols, height = dst.rows;
    int bh0 = std::min(BLOCK_SZ/2, height);
    int bw0 = std::min(BLOCK_SZ*BLOCK_SZ/bh0, width);
    bh0 = std::min(BLOCK_SZ*BLOCK_SZ/bw0, height);
    for( y = -origin.y, yDest = 0; y < height; y += bh0, yDest += bh0 )
    {
        for( x = -origin.x, xDest = 0; x < width; x += bw0, xDest += bw0 )
        {
            int bw = std::min( bw0, width - x);
            int bh = std::min( bh0, height - y);
            // to avoid dimensions errors
            if (bw <= 0 || bh <= 0)
                break;
            Mat _XY(bh, bw, CV_16SC2, XY), _A;
            Mat dpart(dst, Rect(xDest, yDest, bw, bh));
            for( y1 = 0; y1 < bh; y1++ )
            {
                short* xy = XY + y1*bw*2;
                double X0 = M[0]*x + M[1]*(y + y1) + M[2];
                double Y0 = M[3]*x + M[4]*(y + y1) + M[5];
                double W0 = M[6]*x + M[7]*(y + y1) + M[8];
                if( interpolation == INTER_NEAREST )
                    for( x1 = 0; x1 < bw; x1++ )
                    {
                        double W = W0 + M[6]*x1;
                        W = W ? 1./W : 0;
                        int X = saturate_cast<int>((X0 + M[0]*x1)*W);
                        int Y = saturate_cast<int>((Y0 + M[3]*x1)*W);
                        xy[x1*2] = (short)X;
                        xy[x1*2+1] = (short)Y;
                    }
                else
                {
                    short* alpha = A + y1*bw;
                    for( x1 = 0; x1 < bw; x1++ )
                    {
                        double W = W0 + M[6]*x1;
                        W = W ? INTER_TAB_SIZE/W : 0;
                        int X = saturate_cast<int>((X0 + M[0]*x1)*W);
                        int Y = saturate_cast<int>((Y0 + M[3]*x1)*W);
                        xy[x1*2] = (short)(X >> INTER_BITS) + origin.x;
                        xy[x1*2+1] = (short)(Y >> INTER_BITS) + origin.y;
                        alpha[x1] = (short)((Y & (INTER_TAB_SIZE-1))*INTER_TAB_SIZE +
                                            (X & (INTER_TAB_SIZE-1)));
                    }
                }
            }
            if( interpolation == INTER_NEAREST )
                remap( src, dpart, _XY, Mat(), interpolation, borderType, borderValue );
            else
            {
                Mat _A(bh, bw, CV_16U, A);
                remap( src, dpart, _XY, _A, interpolation, borderType, borderValue );
            }
        }
    }
}
with this function:
CvPoint transformPoint(const CvPoint pointToTransform, const CvMat* matrix) {
    double coordinates[3] = {pointToTransform.x, pointToTransform.y, 1};
    CvMat originVector = cvMat(3, 1, CV_64F, coordinates);
    CvMat transformedVector = cvMat(3, 1, CV_64F, coordinates);
    cvMatMul(matrix, &originVector, &transformedVector);
    CvPoint outputPoint = cvPoint((int)(cvmGet(&transformedVector, 0, 0) / cvmGet(&transformedVector, 2, 0)),
                                  (int)(cvmGet(&transformedVector, 1, 0) / cvmGet(&transformedVector, 2, 0)));
    return outputPoint;
}
A much simpler and cleaner solution is to modify the perspective transformation. You can do a translation which moves the origin to the desired position, then do the perspective transformation and finally do the inverse translation.
Here is a small example program in Python that rotates an image by 45 degrees around the point (100, 100):
import cv2
import numpy as np

def translation_mat(dx, dy):
    return np.array([1, 0, dx, 0, 1, dy, 0, 0, 1]).reshape((3, 3))

def main():
    img = cv2.imread(r"pigeon.png", cv2.IMREAD_GRAYSCALE)
    # a simple rotation by 45 degrees
    rot = np.array([np.sin(np.pi/4), -np.cos(np.pi/4), 0, np.cos(np.pi/4), np.sin(np.pi/4), 0, 0, 0, 1]).reshape((3, 3))
    t1 = translation_mat(-100, -100)
    t2 = translation_mat(100, 100)
    rot_shifted = t2.dot(rot.dot(t1))
    size = (img.shape[1], img.shape[0])
    img1 = cv2.warpPerspective(img, rot, size)
    img2 = cv2.warpPerspective(img, rot_shifted, size)
    cv2.imshow("Original image", img)
    cv2.imshow("Rotated around (0,0)", img1)
    cv2.imshow("Rotated around (100, 100)", img2)
    cv2.waitKey(0)

if __name__ == '__main__':
    main()
Note that you read the order of transformations from right to left:
rot_shifted = t2.dot(rot.dot(t1))
will apply t1 first, then rot, and then t2.
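The same composition can be written out by hand if you are not using numpy; here is a sketch in plain Java of building the rotate-around-a-point matrix (pure 3x3 arithmetic, no library calls assumed):

static double[][] mul(double[][] a, double[][] b) {
    double[][] r = new double[3][3];
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++)
            for (int k = 0; k < 3; k++)
                r[i][j] += a[i][k] * b[k][j];
    return r;
}

static double[][] rotateAround(double cx, double cy, double theta) {
    double[][] t1  = {{1, 0, -cx}, {0, 1, -cy}, {0, 0, 1}}; // move (cx, cy) to the origin
    double[][] rot = {{Math.cos(theta), -Math.sin(theta), 0},
                      {Math.sin(theta),  Math.cos(theta), 0},
                      {0, 0, 1}};
    double[][] t2  = {{1, 0, cx}, {0, 1, cy}, {0, 0, 1}};   // move the origin back
    return mul(t2, mul(rot, t1));                           // applied right to left: t1, rot, t2
}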
For those of you looking for this piece in Python, here's a start. I'm not 100% sure it works, as I've stripped some optimizations from it. Also, there is an issue with linear interpolation; I simply didn't use it, but you might want to take a closer look if you do.
import cv2
import numpy as np

def warp_perspective(src, M, (width, height), (origin_x, origin_y),
                     flags=cv2.INTER_NEAREST, borderMode=cv2.BORDER_CONSTANT,
                     borderValue=0, dst=None):
    """
    Implementation in Python using base code from
    http://stackoverflow.com/questions/4279008/specify-an-origin-to-warpperspective-function-in-opencv-2-x
    Note there is an issue with linear interpolation.
    """
    B_SIZE = 32
    if dst == None:
        dst = np.zeros((height, width, 3), dtype=src.dtype)
    # Set interpolation mode.
    interpolation = flags & cv2.INTER_MAX
    if interpolation == cv2.INTER_AREA:
        raise Exception('Area interpolation is not supported!')
    # Prepare matrix.
    M = M.astype(np.float64)
    if not(flags & cv2.WARP_INVERSE_MAP):
        M = cv2.invert(M)[1]
    M = M.flatten()
    x_dst = y_dst = 0
    for y in xrange(-origin_y, height, B_SIZE):
        for x in xrange(-origin_x, width, B_SIZE):
            print (x, y)
            # Block dimensions.
            bw = min(B_SIZE, width - x_dst)
            bh = min(B_SIZE, height - y_dst)
            # To avoid dimension errors.
            if bw <= 0 or bh <= 0:
                break
            # View of the destination array.
            dpart = dst[y_dst:y_dst+bh, x_dst:x_dst+bw]
            # Original code used a view of the array here, but we're using numpy arrays.
            XY = np.zeros((bh, bw, 2), dtype=np.int16)
            A = np.zeros((bh, bw), dtype=np.uint16)
            for y1 in xrange(bh):
                X0 = M[0]*x + M[1]*(y + y1) + M[2]
                Y0 = M[3]*x + M[4]*(y + y1) + M[5]
                W0 = M[6]*x + M[7]*(y + y1) + M[8]
                if interpolation == cv2.INTER_NEAREST:
                    for x1 in xrange(bw):
                        W = np.float64(W0 + M[6]*x1)
                        if W != 0:
                            W = np.float64(1.0)/W
                        X = np.int32((X0 + M[0]*x1)*W)
                        Y = np.int32((Y0 + M[3]*x1)*W)
                        XY[y1, x1][0] = np.int16(X)
                        XY[y1, x1][1] = np.int16(Y)
                else:
                    for x1 in xrange(bw):
                        W = np.float64(W0 + M[6]*x1)
                        if W != 0:
                            W = cv2.INTER_TAB_SIZE/W
                        X = np.int32((X0 + M[0]*x1)*W)
                        Y = np.int32((Y0 + M[3]*x1)*W)
                        XY[y1, x1][0] = np.int16((X >> cv2.INTER_BITS) + origin_x)
                        XY[y1, x1][1] = np.int16((Y >> cv2.INTER_BITS) + origin_y)
                        A[y1, x1] = np.int16(((Y & (cv2.INTER_TAB_SIZE-1))*cv2.INTER_TAB_SIZE + (X & (cv2.INTER_TAB_SIZE-1))))
            if interpolation == cv2.INTER_NEAREST:
                cv2.remap(src, XY, None, interpolation, dst=dpart,
                          borderMode=borderMode, borderValue=borderValue)
            else:
                cv2.remap(src, XY, A, interpolation, dst=dpart,
                          borderMode=borderMode, borderValue=borderValue)
            x_dst += B_SIZE
        x_dst = 0
        y_dst += B_SIZE
    return dst
