Pixi.js crashes when rendering a 700 by 700 grid (WebGL)

I want to display a 700 by 700 grid, but Pixi.js crashes while rendering it. The same code works for a 100 by 100 grid.
var app = new PIXI.Application({
width: window.innerWidth,
height: window.innerHeight,
backgroundColor: 0x2c3e50
});
document.body.appendChild(app.view);
const PE_COLOR = 0xe2b692; // 0xf5912f;
const PE_WIDTH = 20;
const PE_HEIGHT = 20;
const VIZ_AREA_VIEWPORT_SIZE = 750;
const VIZ_AREA_FABRIC_VIEWPORT_INIT_RATIO = 2;
const VIZ_AREA_VIEWPORT_MAX_SCALE = 20;
const VIZ_AREA_VIEWPORT_MIN_SCALE = 0.25;
const VIZ_AREA_WIDTH_GAP = 0.5;
const VIZ_AREA_HEIGHT_GAP = 0.5;
const ROUTER_OFFSET = 6;
const ROUTER_RADIUS = 3;
const PE_BORDER_COLOR = 0x0f0f0f;
const ROUTER_COLOR = 0x1d58a6; //0x1F1F1F;
const CE_COLOR = 0x1d5866;
const CE_OFFSET = 12;
const CE_SIZE = 4;
const width = 700;
const height = 700;
const box = new PIXI.Graphics();
app.stage.addChild(box);
for (let i = 0; i < width; ++i) {
for (let j = 0; j < height; ++j) {
const x = i * PE_WIDTH + i * VIZ_AREA_WIDTH_GAP;
const y = j * PE_HEIGHT + j * VIZ_AREA_HEIGHT_GAP;
box.lineStyle(1, PE_BORDER_COLOR, 0.4);
box.beginFill(PE_COLOR, 0.8);
box.drawRoundedRect(x, y, PE_WIDTH, PE_HEIGHT, 2);
box.endFill();
box.lineStyle(1, ROUTER_COLOR, 0.2);
box.beginFill(ROUTER_COLOR, 0.4);
box.drawCircle(x + ROUTER_OFFSET, y + ROUTER_OFFSET, ROUTER_RADIUS);
box.endFill();
box.lineStyle(1, CE_COLOR, 0.2);
box.beginFill(CE_COLOR, 0.4);
box.drawRect(x + CE_OFFSET, y + CE_OFFSET, CE_SIZE, CE_SIZE);
box.endFill();
}
}
The code above draws the 700 by 700 grid.
This is what the output looks like when I try it with a 100 by 100 grid:
Any tips on how to fix the crash?

I was able to improve performance by using a GraphicsGeometry object to create the Graphics objects. The relevant code is shown below.
const box = new PIXI.Graphics();
box.lineStyle(1, PE_BORDER_COLOR, 0.4);
box.beginFill(PE_COLOR, 0.8);
box.drawRoundedRect(0, 0, PE_WIDTH, PE_HEIGHT, 2);
box.endFill();
box.lineStyle(1, ROUTER_COLOR, 0.2);
box.beginFill(ROUTER_COLOR, 0.4);
box.drawCircle(ROUTER_OFFSET, ROUTER_OFFSET, ROUTER_RADIUS);
box.endFill();
box.lineStyle(1, CE_COLOR, 0.2);
box.beginFill(CE_COLOR, 0.4);
box.drawRect(CE_OFFSET, CE_OFFSET, CE_SIZE, CE_SIZE);
box.endFill();
for (let i = 0; i < width; ++i) {
for (let j = 0; j < height; ++j) {
const x = i * PE_WIDTH + i * VIZ_AREA_WIDTH_GAP;
const y = j * PE_HEIGHT + j * VIZ_AREA_HEIGHT_GAP;
const boxG = new PIXI.Graphics(box.geometry);
boxG.x = x;
boxG.y = y;
app.stage.addChild(boxG);
}
}
In the code above, the tile is drawn once into a template Graphics object, and its GraphicsGeometry is shared by every Graphics instance that is then placed at its specific position. The geometry is therefore built and uploaded only once instead of once per cell.
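If sharing one GraphicsGeometry is still too heavy, another option (just a sketch reusing the constants above, untested against the original setup; tileTexture and container are names I made up) is to bake the tile into a texture once and draw it with sprites, which Pixi batches very cheaply. PIXI.ParticleContainer is designed for large numbers of simple sprites:
// Bake the template Graphics into a texture once.
const tileTexture = app.renderer.generateTexture(box);
// ParticleContainer trades per-child features (masks, filters) for speed.
const container = new PIXI.ParticleContainer(width * height);
app.stage.addChild(container);
for (let i = 0; i < width; ++i) {
  for (let j = 0; j < height; ++j) {
    const sprite = new PIXI.Sprite(tileTexture);
    sprite.x = i * (PE_WIDTH + VIZ_AREA_WIDTH_GAP);
    sprite.y = j * (PE_HEIGHT + VIZ_AREA_HEIGHT_GAP);
    container.addChild(sprite);
  }
}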

Related

Histogram based on image as vector graphic

I would like to transform histograms based on images to vector graphics.
This could be a start:
function preload() {
img = loadImage("https://upload.wikimedia.org/wikipedia/commons/thumb/3/36/Cirrus_sky_panorama.jpg/1200px-Cirrus_sky_panorama.jpg");
}
function setup() {
createCanvas(400, 400);
background(255);
img.resize(0, 200);
var maxRange = 256
colorMode(HSL, maxRange);
image(img, 0, 0);
var histogram = new Array(maxRange);
for (i = 0; i <= maxRange; i++) {
histogram[i] = 0
}
loadPixels();
for (var x = 0; x < img.width; x += 5) {
for (var y = 0; y < img.height; y += 5) {
var loc = (x + y * img.width) * 4;
var h = pixels[loc];
var s = pixels[loc + 1];
var l = pixels[loc + 2];
var a = pixels[loc + 3];
b = int(l);
histogram[b]++
}
}
image(img, 0, 0);
stroke(300, 100, 80)
push()
translate(10, 0)
for (x = 0; x <= maxRange; x++) {
index = histogram[x];
y1 = int(map(index, 0, max(histogram), height, height - 300));
y2 = height
xPos = map(x, 0, maxRange, 0, width - 20)
line(xPos, y1, xPos, y2);
}
pop()
}
<script src="https://cdn.jsdelivr.net/npm/p5#1.4.1/lib/p5.js"></script>
But I would need downloadable vector graphic files as results, with closed shapes and no gaps between the bars. It should look like this, for example:
<svg viewBox="0 0 399.84 200"><polygon points="399.84 200 399.84 192.01 361.91 192.01 361.91 182.87 356.24 182.87 356.24 183.81 350.58 183.81 350.58 184.74 344.91 184.74 344.91 188.19 339.87 188.19 339.87 189.89 334.6 189.89 334.6 185.29 328.93 185.29 328.93 171.11 323.26 171.11 323.26 172.55 317.59 172.55 317.59 173.99 311.92 173.99 311.92 179.42 306.88 179.42 306.88 182.03 301.21 182.03 301.21 183.01 295.54 183.01 295.54 179.04 289.87 179.04 289.87 175.67 284.21 175.67 284.21 182.03 278.54 182.03 278.54 176 273.5 176 273.5 172.42 267.83 172.42 267.83 179.42 262.79 179.42 262.79 182.03 257.12 182.03 257.12 183.01 251.45 183.01 251.45 178.63 245.78 178.63 245.78 175.21 240.11 175.21 240.11 182.03 234.86 182.03 234.86 150.42 229.2 150.42 229.2 155.98 223.53 155.98 223.53 158.06 217.86 158.06 217.86 167.44 212.19 167.44 212.19 162.58 206.52 162.58 206.52 155.98 200.85 155.98 200.85 158.06 195.18 158.06 195.18 167.44 189.51 167.44 189.51 177.46 183.84 177.46 183.84 166.93 178.17 166.93 178.17 153.69 172.5 153.69 172.5 155.87 166.82 155.87 166.82 158.05 161.78 158.05 161.78 155.63 156.11 155.63 156.11 160.65 150.84 160.65 150.84 146.59 145.17 146.59 145.17 109.63 139.49 109.63 139.49 113.67 133.82 113.67 133.82 61.48 128.15 61.48 128.15 80.59 123.11 80.59 123.11 93.23 117.44 93.23 117.44 97.97 111.76 97.97 111.76 78.07 106.09 78.07 106.09 61.66 100.42 61.66 100.42 93.23 94.75 93.23 94.75 98.51 89.7 98.51 89.7 85.4 84.03 85.4 84.03 111.03 78.99 111.03 78.99 120.57 73.32 120.57 73.32 124.14 67.65 124.14 67.65 23.48 61.97 23.48 61.97 0 56.3 0 56.3 120.57 50.63 120.57 50.63 167.01 45.38 167.01 45.38 170.83 39.71 170.83 39.71 172.26 34.03 172.26 34.03 178.7 28.36 178.7 28.36 175.36 22.69 175.36 22.69 170.83 17.02 170.83 17.02 172.26 11.34 172.26 11.34 178.7 5.67 178.7 5.67 103.85 0 103.85 0 200 399.84 200"/></svg>
Does anyone have an idea how to program that? It doesn't necessarily need to be based on p5.js, but that would be nice.
Closing Gaps
In order to have a gapless histogram, you need to meet the following condition:
numberOfBars * barWidth === totalWidth
Right now you are using the p5 line() function to draw your bars. You have not explicitly set the width of your bars, so they use the default stroke weight of 1px.
We know that the numberOfBars in your code is always maxRange which is 256.
Right now the total width of your histogram is width - 20, where width is set to 400 by createCanvas(400, 400). So the totalWidth is 380.
256 * 1 !== 380
If you have 256 pixels of bars in a 380 pixel space then there are going to be gaps!
We need to change the barWidth and/or the totalWidth to balance the equation.
For example, you can change your canvas size to 276 (256 + your 20px margin) and the gaps disappear!
createCanvas(276, 400);
However, this is not an appropriate solution, because now your image is cropped and your pixel data is wrong. But actually... it was already wrong before!
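Another way to balance the equation, and what the updated code further down does, is to keep the 400px canvas and widen each bar instead. A sketch of the idea, dropped into the question's drawing loop (xPos, y1 and y2 come from that loop):
// numberOfBars * barWidth === totalWidth, so solve for barWidth
const totalWidth = width - 20;          // the 380px drawing area from the original code
const barWidth = totalWidth / maxRange; // ~1.48px per bar
noStroke();
rect(xPos, y1, barWidth, y2 - y1);      // a filled bar instead of a 1px line()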
Sampling Pixels
When you call the global loadPixels() function in p5.js you are loading all of the pixels for the whole canvas. This includes the white areas outside of your image.
for (var x = 0; x < img.width; x += 5) {
for (var y = 0; y < img.height; y += 5) {
var loc = (x + y * img.width) * 4;
The pixels array is 1-dimensional, so limiting the x and y values here does not give you the correct position. Your loc variable needs to use the width of the entire canvas rather than the width of just the image, since the pixels array covers the entire canvas.
var loc = (x + y * width) * 4;
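For example, with the 400-pixel-wide canvas (and assuming a pixel density of 1), the sample at (x, y) = (10, 2) starts at index (10 + 2 * 400) * 4 = 3240 in the pixels array.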
Alternatively, you can look at just the pixels of the image by using img.loadPixels() and img.pixels.
img.loadPixels();
for (var x = 0; x < img.width; x += 5) {
for (var y = 0; y < img.height; y += 5) {
var loc = (x + y * img.width) * 4;
var h = img.pixels[loc];
var s = img.pixels[loc + 1];
var l = img.pixels[loc + 2];
var a = img.pixels[loc + 3];
b = int(l);
histogram[b]++;
}
}
The pixel values are always returned in RGBA regardless of the colorMode. So your third channel value is actually the blue, not the lightness. You can make use of the p5.js lightness() function to compute the lightness from the RGBA.
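For reference, HSL lightness is just the average of the largest and smallest RGB channels, so a plain JavaScript sketch of the computation (on a 0–255 scale, independent of p5's colorMode) looks like this:
// HSL lightness = (max channel + min channel) / 2
function rgbLightness(r, g, b) {
  return (Math.max(r, g, b) + Math.min(r, g, b)) / 2;
}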
Updated Code
The actual lightness histogram looks underwhelming because the 100% bin dwarfs all of the other bars.
function preload() {
img = loadImage("https://upload.wikimedia.org/wikipedia/commons/thumb/3/36/Cirrus_sky_panorama.jpg/1200px-Cirrus_sky_panorama.jpg");
}
function setup() {
const barCount = 100;
const imageHeight = 200;
createCanvas(400, 400);
background(255);
colorMode(HSL, barCount - 1);
img.resize(0, imageHeight);
imageMode(CENTER);
image(img, width / 2, imageHeight / 2);
img.loadPixels();
const histogram = new Array(barCount).fill(0);
for (let x = 0; x < img.width; x += 5) {
for (let y = 0; y < img.height; y += 5) {
const loc = (x + y * img.width) * 4;
const r = img.pixels[loc];
const g = img.pixels[loc + 1];
const b = img.pixels[loc + 2];
const a = img.pixels[loc + 3];
const barIndex = floor(lightness([r, g, b, a]));
histogram[barIndex]++;
}
}
fill(300, 100, 80);
strokeWeight(0);
const maxCount = max(histogram);
const barWidth = width / barCount;
const histogramHeight = height - imageHeight;
for (let i = 0; i < barCount; i++) {
const count = histogram[i];
const y1 = round(map(count, 0, maxCount, height, imageHeight));
const y2 = height;
const x1 = i * barWidth;
const x2 = x1 + barWidth;
rect(x1, y1, barWidth, height - y1);
}
}
<script src="https://cdn.jsdelivr.net/npm/p5#1.4.1/lib/p5.js"></script>
But the blue channel histogram looks pretty good!
function preload() {
img = loadImage("https://upload.wikimedia.org/wikipedia/commons/thumb/3/36/Cirrus_sky_panorama.jpg/1200px-Cirrus_sky_panorama.jpg");
}
function setup() {
const barCount = 100;
const imageHeight = 200;
createCanvas(400, 400);
background(255);
img.resize(0, imageHeight);
imageMode(CENTER);
image(img, width / 2, imageHeight / 2);
img.loadPixels();
const histogram = new Array(barCount).fill(0);
for (let x = 0; x < img.width; x += 5) {
for (let y = 0; y < img.height; y += 5) {
const loc = (x + y * img.width) * 4;
const r = img.pixels[loc];
const g = img.pixels[loc + 1];
const b = img.pixels[loc + 2];
const a = img.pixels[loc + 3];
const barIndex = floor(barCount * b / 256); // divide by 256 (not 255) so b === 255 still maps to the last bin
histogram[barIndex]++;
}
}
fill(100, 100, 300);
strokeWeight(0);
const maxCount = max(histogram);
const barWidth = width / barCount;
const histogramHeight = height - imageHeight;
for (let i = 0; i < barCount; i++) {
const count = histogram[i];
const y1 = round(map(count, 0, maxCount, height, imageHeight));
const y2 = height;
const x1 = i * barWidth;
const x2 = x1 + barWidth;
rect(x1, y1, barWidth, height - y1);
}
}
<script src="https://cdn.jsdelivr.net/npm/p5#1.4.1/lib/p5.js"></script>
Just to add to Linda's excellent answer (+1), you can use p5.svg to render to SVG using p5.js:
let histogram;
function setup() {
createCanvas(660, 210, SVG);
background(255);
noStroke();
fill("#ed225d");
// make an array of 256 random values in the (0, 255) range
histogram = Array.from({length: 256}, () => int(random(255)));
//console.log(histogram);
// plot the histogram
barPlot(histogram, 0, 0, width, height);
// change shape rendering so bars appear connected
document.querySelector('g').setAttribute('shape-rendering','crispEdges');
// save the plot
save("histogram.svg");
}
function barPlot(values, x, y, plotWidth, plotHeight){
let numValues = values.length;
// calculate the width of each bar in the plot
let barWidth = plotWidth / numValues;
// calculate min/max value (to map height)
let minValue = min(values);
let maxValue = max(values);
// for each value
for(let i = 0 ; i < numValues; i++){
// map the value to the plot height
let barHeight = map(values[i], minValue, maxValue, 0, plotHeight);
// render each bar, offseting y
rect(x + (i * barWidth),
y + (plotHeight - barHeight),
barWidth, barHeight);
}
}
<script src="https://unpkg.com/p5#1.3.1/lib/p5.js"></script>
<script src="https://unpkg.com/p5.js-svg#1.0.7"></script>
(In the p5 editor, or when testing locally, a save dialog should pop up. If you use the browser's Developer Tools to inspect the bar chart, it should confirm that it's an SVG, not a <canvas/>.)

Is a sharpness filter available in Konva.js, and if so, how do I use it?

https://konvajs.org/api/Konva.Filters.html
A sharpness filter is not listed at this link.
Konva doesn't have such a filter in its core, so you have to implement it yourself as a custom filter. See the custom filters docs.
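For context, a custom filter is just a function that receives the node's cached ImageData and mutates it in place. A minimal illustrative sketch (the Darken filter and the node variable are made up for this example):
// A trivial custom filter: darken every pixel by 20%.
function Darken(imageData) {
  const d = imageData.data;
  for (let i = 0; i < d.length; i += 4) {
    d[i] *= 0.8;     // R
    d[i + 1] *= 0.8; // G
    d[i + 2] *= 0.8; // B (alpha at i + 3 is left untouched)
  }
}
node.filters([Darken]);
node.cache(); // filters only apply to cached nodes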
I tried to use that sharpen implementation: https://gist.github.com/mikecao/65d9fc92dc7197cb8a7c
// noprotect
const stage = new Konva.Stage({
container: 'container',
width: window.innerWidth,
height: window.innerHeight
});
const layer = new Konva.Layer();
stage.add(layer);
function Sharpen(srcData) {
const mix = 1;
const w = srcData.width;
const h = srcData.height;
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');
var x, sx, sy, r, g, b, a, dstOff, srcOff, wt, cx, cy, scy, scx,
weights = [0, -1, 0, -1, 5, -1, 0, -1, 0],
katet = Math.round(Math.sqrt(weights.length)),
half = (katet * 0.5) | 0,
dstData = ctx.createImageData(w, h),
dstBuff = dstData.data,
srcBuff = srcData.data,
y = h;
while (y--) {
x = w;
while (x--) {
sy = y;
sx = x;
dstOff = (y * w + x) * 4;
r = 0;
g = 0;
b = 0;
a = 0;
for (cy = 0; cy < katet; cy++) {
for (cx = 0; cx < katet; cx++) {
scy = sy + cy - half;
scx = sx + cx - half;
if (scy >= 0 && scy < h && scx >= 0 && scx < w) {
srcOff = (scy * w + scx) * 4;
wt = weights[cy * katet + cx];
r += srcBuff[srcOff] * wt;
g += srcBuff[srcOff + 1] * wt;
b += srcBuff[srcOff + 2] * wt;
a += srcBuff[srcOff + 3] * wt;
}
}
}
dstBuff[dstOff] = r * mix + srcBuff[dstOff] * (1 - mix);
dstBuff[dstOff + 1] = g * mix + srcBuff[dstOff + 1] * (1 - mix);
dstBuff[dstOff + 2] = b * mix + srcBuff[dstOff + 2] * (1 - mix);
dstBuff[dstOff + 3] = srcBuff[dstOff + 3];
}
}
for(var i = 0; i < dstData.data.length; i++) {
srcData.data[i] = dstData.data[i];
}
}
Konva.Image.fromURL('https://i.imgur.com/ktWThtZ.png', img => {
img.setAttrs({filters: [Sharpen]});
img.cache();
layer.add(img);
layer.draw();
});
Demo: https://jsbin.com/tejalusano/1/edit?html,js,output

Image lens distortion correction

I am using an Aptina 5MP sensor with a fish-eye lens to capture images.
I am using the following algorithm to correct the lens distortion:
http://www.tannerhelland.com/4743/simple-algorithm-correcting-lens-distortion/
This is not correcting the image properly.
Any help will be appreciated.
//code----
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
#include <math.h>
using namespace cv;
using namespace std;
// globals
Mat src, dst;
Mat map_x, map_y;
#define REMAP_WINDOW "Remap Circle"
void make_circle_map(float , float , float , float );
int main(int argc, char** argv) {
// load image
src = imread(argv[1], 1);
float qvDepth = atof(argv[2]);
float fixStrength = atof(argv[3]);
float fixZoom = atof(argv[4]);
float lensRadius = atof(argv[5]);
// create destination and the maps
dst.create(src.size(), src.type());
map_x.create(src.size(), CV_32FC1);
map_y.create(src.size(), CV_32FC1);
// create window
// namedWindow(REMAP_WINDOW, CV_WINDOW_AUTOSIZE);
make_circle_map(qvDepth, fixStrength, fixZoom, lensRadius);
remap(src, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0,0, 0));
//imshow(REMAP_WINDOW, dst);
imwrite("got1.jpg",dst);
// while(27 != waitKey()) {
// just wait
// }
// cvDestroyWindow(REMAP_WINDOW);
return 0;
}
void make_circle_map(float qvDepth, float fixStrength, float fixZoom, float lensRadius ) {
//ApplyLensCorrection(double fixStrength, double fixZoom, double lensRadius, long long edgeHandling, long long superSamplingAmount
cout<<"qvDepth :"<<qvDepth<<" fixStrength :"<<fixStrength<<" fixZoom :"<<fixZoom<<" lensRadius :"<<lensRadius<<endl;
//float qvDepth = 32;//24;
//float fixStrength = 4.5; // has to utilized further
//float fixZoom = 0.5;
//float lensRadius =2;
//Calculate the center of the image
//double midX = 0;
//double midY = 0;
long tWidth = 1944;
long tHeight = 2580;
// the center
double midX = (double)src.cols/2;
double midY = (double)src.rows/2;
//Rotation values
double theta = 0;
double sRadius = 0;
double sRadius2 = 0;
double sDistance = 0;
double radius = 0;
double j = 0;
double k = 0;
//X and Y values, remapped around a center point of (0, 0)
double nX = 0;
double nY = 0;
double QuickVal =0;
float ssX;
float ssY;
//Source X and Y values, which may or may not be used as part of a bilinear interpolation function
double srcX = 0;
double srcY = 0;
sRadius = sqrt(tWidth * tWidth + tHeight * tHeight) / 2;
cout<<"sRadius :"<<sRadius<<endl;
double refDistance = 0;//modified 0 to 2
if (fixStrength == 0)
{
fixStrength = 0.00000001;
}
refDistance = sRadius * 2 / fixStrength;
sRadius = sRadius * (lensRadius / 100);
sRadius2 = sRadius * sRadius;
cout<<"refDistance :"<<refDistance<<" sRadius :"<<sRadius<<" sRadius2 :"<<sRadius2<<endl;
float sampleIndex =1; //has to be changed in future
for (int x = 0; x <= tWidth; x++)
{
QuickVal = x * qvDepth;
for (int y = 0; y <= tHeight; y++)
{
//Remap the coordinates around a center point of (0, 0)
nX = x - midX;
nY = y - midY;
//Offset the pixel amount by the supersampling lookup table
for(int ii = 1; ii<4;ii++){
j = nX + ii;
k = nY + ii;
//Calculate distance automatically
sDistance = (j * j) + (k * k);
//cout<<"nx :"<<nX<<" ny :"<<nY<<" j :"<<j<<" k :"<<k<<" sDistance :"<<sDistance<<" sRadius2 :"<<sRadius2<<endl;
if (sDistance <= sRadius2)
{
sDistance = sqrt(sDistance);
radius = sDistance / refDistance;
if (radius == 0)
{
theta = 1;
}
else
{
theta = atan(radius) / radius;
}
//srcX = midX + theta * j * fixZoom;
//srcY = midY + theta * k * fixZoom;
map_x.at<float>(x,y) = midX + cos(fabs(theta)) * j * fixZoom;
map_y.at<float>(x,y) = midY + sin(fabs(theta)) * k * fixZoom;
}
else
{
map_x.at<float>(x,y) = x + cos(fabs(theta)) ;//* fixZoom;//x;
map_y.at<float>(x,y) = y + sin(fabs(theta)) ;//* fixZoom;//y;
}
}
}
}
}
Replace the lines that compute map_x and map_y with the following:
map_x.at<float>(x,y) = midX + theta * j * fixZoom;
map_y.at<float>(x,y) = midY + theta * k * fixZoom;
}
else
{
map_x.at<float>(x,y) = x ;//* fixZoom;//x;
map_y.at<float>(x,y) = y ;//* fixZoom;//y;
Then run the executable with the arguments [image name], BPP, correction parameter, zoom parameter, applied ratio, e.g.:
./lensdistortcorrect image.jpg 24 6.2 2.2 100

Angle and Scale Invariant template matching using OpenCV

The function rotates the template image from 0 to 180 (or up to 360) degrees to search for all related matches (at all angles) in the source image, even at different scales.
The function was written with the OpenCV C interface. When I tried to port it to the OpenCV C++ interface, I got a lot of errors. Could someone please help me port it to the OpenCV C++ interface?
void TemplateMatch()
{
int i, j, x, y, key;
double minVal;
char windowNameSource[] = "Original Image";
char windowNameDestination[] = "Result Image";
char windowNameCoefficientOfCorrelation[] = "Coefficient of Correlation Image";
CvPoint minLoc;
CvPoint tempLoc;
IplImage *sourceImage = cvLoadImage("template_source.jpg", CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
IplImage *templateImage = cvLoadImage("template.jpg", CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
IplImage *graySourceImage = cvCreateImage(cvGetSize(sourceImage), IPL_DEPTH_8U, 1);
IplImage *grayTemplateImage =cvCreateImage(cvGetSize(templateImage),IPL_DEPTH_8U,1);
IplImage *binarySourceImage = cvCreateImage(cvGetSize(sourceImage), IPL_DEPTH_8U, 1);
IplImage *binaryTemplateImage = cvCreateImage(cvGetSize(templateImage), IPL_DEPTH_8U, 1);
IplImage *destinationImage = cvCreateImage(cvGetSize(sourceImage), IPL_DEPTH_8U, 3);
cvCopy(sourceImage, destinationImage);
cvCvtColor(sourceImage, graySourceImage, CV_RGB2GRAY);
cvCvtColor(templateImage, grayTemplateImage, CV_RGB2GRAY);
cvThreshold(graySourceImage, binarySourceImage, 200, 255, CV_THRESH_OTSU );
cvThreshold(grayTemplateImage, binaryTemplateImage, 200, 255, CV_THRESH_OTSU);
int templateHeight = templateImage->height;
int templateWidth = templateImage->width;
float templateScale = 0.5f;
for(i = 2; i <= 3; i++)
{
int tempTemplateHeight = (int)(templateWidth * (i * templateScale));
int tempTemplateWidth = (int)(templateHeight * (i * templateScale));
IplImage *tempBinaryTemplateImage = cvCreateImage(cvSize(tempTemplateWidth, tempTemplateHeight), IPL_DEPTH_8U, 1);
// W - w + 1, H - h + 1
IplImage *result = cvCreateImage(cvSize(sourceImage->width - tempTemplateWidth + 1, sourceImage->height - tempTemplateHeight + 1), IPL_DEPTH_32F, 1);
cvResize(binaryTemplateImage, tempBinaryTemplateImage, CV_INTER_LINEAR);
float degree = 20.0f;
for(j = 0; j <= 9; j++)
{
IplImage *rotateBinaryTemplateImage = cvCreateImage(cvSize(tempBinaryTemplateImage->width, tempBinaryTemplateImage->height), IPL_DEPTH_8U, 1);
//cvShowImage(windowNameSource, tempBinaryTemplateImage);
//cvWaitKey(0);
for(y = 0; y < tempTemplateHeight; y++)
{
for(x = 0; x < tempTemplateWidth; x++)
{
rotateBinaryTemplateImage->imageData[y * tempTemplateWidth + x] = 255;
}
}
for(y = 0; y < tempTemplateHeight; y++)
{
for(x = 0; x < tempTemplateWidth; x++)
{
float radian = (float)j * degree * CV_PI / 180.0f;
int scale = y * tempTemplateWidth + x;
int rotateY = - sin(radian) * ((float)x - (float)tempTemplateWidth / 2.0f) + cos(radian) * ((float)y - (float)tempTemplateHeight / 2.0f) + tempTemplateHeight / 2;
int rotateX = cos(radian) * ((float)x - (float)tempTemplateWidth / 2.0f) + sin(radian) * ((float)y - (float)tempTemplateHeight / 2.0f) + tempTemplateWidth / 2;
if(rotateY < tempTemplateHeight && rotateX < tempTemplateWidth && rotateY >= 0 && rotateX >= 0)
rotateBinaryTemplateImage->imageData[scale] = tempBinaryTemplateImage->imageData[rotateY * tempTemplateWidth + rotateX];
}
}
//cvShowImage(windowNameSource, rotateBinaryTemplateImage);
//cvWaitKey(0);
cvMatchTemplate(binarySourceImage, rotateBinaryTemplateImage, result, CV_TM_SQDIFF_NORMED);
//cvMatchTemplate(binarySourceImage, rotateBinaryTemplateImage, result, CV_TM_SQDIFF);
cvMinMaxLoc(result, &minVal, NULL, &minLoc, NULL, NULL);
printf(": %f%%\n", (int)(i * 0.5 * 100), j * 20, (1 - minVal) * 100);
if(minVal < 0.065) // 1 - 0.065 = 0.935 : 93.5%
{
tempLoc.x = minLoc.x + tempTemplateWidth;
tempLoc.y = minLoc.y + tempTemplateHeight;
cvRectangle(destinationImage, minLoc, tempLoc, CV_RGB(0, 255, 0), 1, 8, 0);
}
}
//cvShowImage(windowNameSource, result);
//cvWaitKey(0);
cvReleaseImage(&tempBinaryTemplateImage);
cvReleaseImage(&result);
}
// cvShowImage(windowNameSource, sourceImage);
// cvShowImage(windowNameCoefficientOfCorrelation, result);
cvShowImage(windowNameDestination, destinationImage);
key = cvWaitKey(0);
cvReleaseImage(&sourceImage);
cvReleaseImage(&templateImage);
cvReleaseImage(&graySourceImage);
cvReleaseImage(&grayTemplateImage);
cvReleaseImage(&binarySourceImage);
cvReleaseImage(&binaryTemplateImage);
cvReleaseImage(&destinationImage);
cvDestroyWindow(windowNameSource);
cvDestroyWindow(windowNameDestination);
cvDestroyWindow(windowNameCoefficientOfCorrelation);
}
RESULT:
Template image:
Result image:
The function above puts rectangles around the perfect matches (angle- and scale-invariant) in this image.
Now I have been trying to port the code to the C++ interface. If anyone needs more details, please let me know.
C++ Port of above code:
Mat TemplateMatch(Mat sourceImage, Mat templateImage){
double minVal;
Point minLoc;
Point tempLoc;
Mat graySourceImage = Mat(sourceImage.size(),CV_8UC1);
Mat grayTemplateImage = Mat(templateImage.size(),CV_8UC1);
Mat binarySourceImage = Mat(sourceImage.size(),CV_8UC1);
Mat binaryTemplateImage = Mat(templateImage.size(),CV_8UC1);
Mat destinationImage = Mat(sourceImage.size(),CV_8UC3);
sourceImage.copyTo(destinationImage);
cvtColor(sourceImage, graySourceImage, CV_BGR2GRAY);
cvtColor(templateImage, grayTemplateImage, CV_BGR2GRAY);
threshold(graySourceImage, binarySourceImage, 200, 255, CV_THRESH_OTSU );
threshold(grayTemplateImage, binaryTemplateImage, 200, 255, CV_THRESH_OTSU);
int templateHeight = templateImage.rows;
int templateWidth = templateImage.cols;
float templateScale = 0.5f;
for(int i = 2; i <= 3; i++){
int tempTemplateHeight = (int)(templateWidth * (i * templateScale));
int tempTemplateWidth = (int)(templateHeight * (i * templateScale));
Mat tempBinaryTemplateImage = Mat(Size(tempTemplateWidth,tempTemplateHeight),CV_8UC1);
Mat result = Mat(Size(sourceImage.cols - tempTemplateWidth + 1,sourceImage.rows - tempTemplateHeight + 1),CV_32FC1);
resize(binaryTemplateImage,tempBinaryTemplateImage,Size(tempBinaryTemplateImage.cols,tempBinaryTemplateImage.rows),0,0,INTER_LINEAR);
float degree = 20.0f;
for(int j = 0; j <= 9; j++){
Mat rotateBinaryTemplateImage = Mat(Size(tempBinaryTemplateImage.cols, tempBinaryTemplateImage.rows), CV_8UC1);
for(int y = 0; y < tempTemplateHeight; y++){
for(int x = 0; x < tempTemplateWidth; x++){
rotateBinaryTemplateImage.data[y * tempTemplateWidth + x] = 255;
}
}
for(int y = 0; y < tempTemplateHeight; y++){
for(int x = 0; x < tempTemplateWidth; x++){
float radian = (float)j * degree * CV_PI / 180.0f;
int scale = y * tempTemplateWidth + x;
int rotateY = - sin(radian) * ((float)x - (float)tempTemplateWidth / 2.0f) + cos(radian) * ((float)y - (float)tempTemplateHeight / 2.0f) + tempTemplateHeight / 2;
int rotateX = cos(radian) * ((float)x - (float)tempTemplateWidth / 2.0f) + sin(radian) * ((float)y - (float)tempTemplateHeight / 2.0f) + tempTemplateWidth / 2;
if(rotateY < tempTemplateHeight && rotateX < tempTemplateWidth && rotateY >= 0 && rotateX >= 0)
rotateBinaryTemplateImage.data[scale] = tempBinaryTemplateImage.data[rotateY * tempTemplateWidth + rotateX];
}
}
matchTemplate(binarySourceImage, rotateBinaryTemplateImage, result, CV_TM_SQDIFF_NORMED);
minMaxLoc(result, &minVal, 0, &minLoc, 0, Mat());
cout<<(int)(i * 0.5 * 100)<<" , "<< j * 20<<" , "<< (1 - minVal) * 100<<endl;
if(minVal < 0.065){ // 1 - 0.065 = 0.935 : 93.5%
tempLoc.x = minLoc.x + tempTemplateWidth;
tempLoc.y = minLoc.y + tempTemplateHeight;
rectangle(destinationImage, minLoc, tempLoc, CV_RGB(0, 255, 0), 1, 8, 0);
}
}
}
return destinationImage;
}

Mean image: difference between two functions

I want to process an image so that each pixel value is the mean of its own value and its 4 neighbours.
I created two different functions:
Mat meanImage(cv::Mat& inputImage)
{
Mat output;
Mat kernel(3,3,CV_32F,0.0);
kernel.at<float>(0,1) = 0.2;
kernel.at<float>(1,0) = 0.2;
kernel.at<float>(1,1) = 0.2;
kernel.at<float>(1,2) = 0.2;
kernel.at<float>(2,1) = 0.2;
filter2D(inputImage,output,-1,kernel);
return output;
}
and:
Mat meanImage2(Mat& inputImage)
{
Mat temp;
Mat output(inputImage.rows,inputImage.cols,inputImage.type());
copyMakeBorder(inputImage,temp,1,1,1,1,BORDER_REPLICATE);
CV_Assert(output.isContinuous());
CV_Assert(temp.isContinuous());
const int len = output.rows * output.cols * output.channels();
const int rowLenTemp = temp.cols * temp.channels();
const int twoRowLenTemp = 2 * rowLenTemp;
const int rowLen = output.cols * output.channels();
uchar* outPtr = output.ptr<uchar>(0);
uchar* tempPtr = temp.ptr<uchar>(0);
for(int i = 0; i < len; ++i)
{
const int a = 6 * (i / rowLen) + 3;
outPtr[i] = (tempPtr[i+rowLenTemp+a] + tempPtr[i+a] +
tempPtr[i+rowLenTemp+a+3] + tempPtr[i+rowLenTemp+a-3] +
tempPtr[i+twoRowLenTemp+a]) / 5;
}
return output;
}
I've assumed that the result will be the same. So I've compared images:
Mat diff;
compare(meanImg1,meanImg2,diff,CMP_NE);
printf("Difference: %d\n",countNonZero(diff));
imshow("diff",diff);
And I get a lot of differences. What is the difference between these functions?
Edit:
Here is the difference for the Lena image (taken from Lena):
Beware that when you sum the pixels, you are adding unsigned chars and you may overflow.
Test your code by casting these pixel values to int:
outPtr[i] = ((int)tempPtr[i+rowLenTemp+a] + (int)tempPtr[i+a] +
(int)tempPtr[i+rowLenTemp+a+3] + (int)tempPtr[i+rowLenTemp+a-3] +
(int)tempPtr[i+twoRowLenTemp+a]) / 5;
Edit: I would rather write it like this (assuming the image type is uchar with 3 channels):
for (int r = 0; r < output.rows; r++)
{
uchar* previousRow = temp.ptr<uchar>(r) + 3;
uchar* currentRow = temp.ptr<uchar>(r+1) + 3;
uchar* nextRow = temp.ptr<uchar>(r+2) + 3;
uchar* outRow = output.ptr<uchar>(r);
for (int c = 0; c < 3*output.cols; c++)
{
int value = (int)previousRow[c] +
(int)currentRow[c-3] + (int)currentRow [c] + (int)currentRow[c+3] +
(int)nextRow [c];
outRow[c] = value / 5;
}
}
