Is the Sharpness filter available in Konva.js, and if so, how do I use it? - konvajs

https://konvajs.org/api/Konva.Filters.html
The sharpness filter is not listed at this link.

Konva doesn't have such a filter in its core, so you have to implement it manually.
For that use case you can write your own custom filter; see the custom filters docs.
I tried the sharpen implementation from this gist: https://gist.github.com/mikecao/65d9fc92dc7197cb8a7c
// noprotect
const stage = new Konva.Stage({
container: 'container',
width: window.innerWidth,
height: window.innerHeight
});
const layer = new Konva.Layer();
stage.add(layer);
// Custom Konva filter: 3x3 sharpen convolution over the cached ImageData.
function Sharpen(srcData) {
const mix = 1; // 1 = fully sharpened, 0 = original image
const w = srcData.width;
const h = srcData.height;
const canvas = document.createElement('canvas');
const ctx = canvas.getContext('2d');
var x, sx, sy, r, g, b, a, dstOff, srcOff, wt, cx, cy, scy, scx,
weights = [0, -1, 0, -1, 5, -1, 0, -1, 0], // standard 3x3 sharpen kernel
katet = Math.round(Math.sqrt(weights.length)), // kernel side length (3)
half = (katet * 0.5) | 0, // kernel radius
dstData = ctx.createImageData(w, h),
dstBuff = dstData.data,
srcBuff = srcData.data,
y = h;
while (y--) {
x = w;
while (x--) {
sy = y;
sx = x;
dstOff = (y * w + x) * 4;
r = 0;
g = 0;
b = 0;
a = 0;
for (cy = 0; cy < katet; cy++) {
for (cx = 0; cx < katet; cx++) {
scy = sy + cy - half;
scx = sx + cx - half;
if (scy >= 0 && scy < h && scx >= 0 && scx < w) {
srcOff = (scy * w + scx) * 4;
wt = weights[cy * katet + cx];
r += srcBuff[srcOff] * wt;
g += srcBuff[srcOff + 1] * wt;
b += srcBuff[srcOff + 2] * wt;
a += srcBuff[srcOff + 3] * wt;
}
}
}
dstBuff[dstOff] = r * mix + srcBuff[dstOff] * (1 - mix);
dstBuff[dstOff + 1] = g * mix + srcBuff[dstOff + 1] * (1 - mix);
dstBuff[dstOff + 2] = b * mix + srcBuff[dstOff + 2] * (1 - mix);
dstBuff[dstOff + 3] = srcBuff[dstOff + 3];
}
}
// write the result back into the source ImageData (Konva filters work in place)
for(var i = 0; i < dstData.data.length; i++) {
srcData.data[i] = dstData.data[i];
}
}
Konva.Image.fromURL('https://i.imgur.com/ktWThtZ.png', img => {
img.setAttrs({filters: [Sharpen]});
img.cache();
layer.add(img);
layer.draw();
});
Demo: https://jsbin.com/tejalusano/1/edit?html,js,output

Related

WebGL High GPU Usage

I am drawing thousands of colored quads using WebGL (no framework). On my laptop, around 80k quads move smoothly at 60 fps, but beyond 80k the frame rate starts oscillating regularly: a few frames at 30 fps, then one frame at 60 fps. When I checked Chrome's performance tools, I noticed that the GPU is taking too much time.
This is how the Chrome performance tool looks when I run 100k quads.
This is my example with no moving quads. The dynamic version shows the same effect, but the static one demonstrates the problem better since there is no JS overhead.
My code is here:
var objects = [];
var MAX_COUNT = 10000;
var projectionMatrix;
var gl;
var positionVertexBuffer;
var colorVertexBuffer;
var indicesBuffer;
{
gl = document.getElementById("renderCanvas").getContext("webgl", {preserveDrawingBuffer: false});
gl.disable(gl.STENCIL_TEST);
gl.disable(gl.DEPTH_TEST);
document.getElementById("renderCanvas").onclick = createObjects;
createObjects();
requestAnimationFrame(updateScreen);
}
function createObjects () {
projectionMatrix = new Float32Array([
0.0033333333333333335,0,0,
0,-0.0033333333333333335,0,
0,0,1
]);
var rObject = {};
rObject.projectionMatrix = projectionMatrix;
createPrograms(rObject);
createAttributes(rObject);
createMoveObjects(rObject);
rObject.id = "id_" + objects.length ;
objects.push(rObject);
}
function createMoveObjects (outObject) {
outObject.points = [];
var k = 0;
for (var i = 0; i < MAX_COUNT; i++) {
var x = (Math.random() * 600) - 300;
var y = (Math.random() * 600) - 300;
var vx = (Math.random() * 10) - 5;
var vy = (Math.random() * 10) - 5;
var size = 30 + Math.random() * 1;
var w = 26 / 2;
var h = 37 / 2;
var p = {w:w, h:h, x:x, y:y, vx:vx, vy:vy, size:size};
outObject.points.push(p);
}
}
var shaderProgram;
function createPrograms(outObject) {
var vertexShader = gl.createShader(gl.VERTEX_SHADER);
gl.shaderSource(vertexShader, document.getElementById("vertexShader").textContent );
gl.compileShader(vertexShader);
if ( !gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS) ) {
let finfo = gl.getShaderInfoLog( vertexShader );
console.log("Vertex Shader Fail" , finfo);
}
var fragmentShader = gl.createShader(gl.FRAGMENT_SHADER);
gl.shaderSource(fragmentShader, document.getElementById("fragmentShader").textContent);
gl.compileShader(fragmentShader);
if ( !gl.getShaderParameter(fragmentShader, gl.COMPILE_STATUS) ) {
let finfo = gl.getShaderInfoLog( fragmentShader );
console.log("Fragment Shader Fail" , finfo);
}
shaderProgram = gl.createProgram();
gl.attachShader(shaderProgram, vertexShader);
gl.attachShader(shaderProgram, fragmentShader);
gl.linkProgram(shaderProgram);
var pmlocation = gl.getUniformLocation(shaderProgram,"projectionMatrix");
gl.useProgram(shaderProgram);
gl.uniformMatrix3fv(pmlocation, false , outObject.projectionMatrix);
outObject.projectionMatrixLocation = pmlocation;
outObject.shaderProgram = shaderProgram;
}
function createAttributes(outObject) {
var vertices = new Float32Array(MAX_COUNT * 8);
var colors = new Float32Array(MAX_COUNT * 12);
var indices = new Uint16Array(6 * MAX_COUNT);
var index = 0;
// 6 indices (two triangles) per quad of 4 vertices
for (var i = 0; i < indices.length; i+=6) {
indices[i ] = index;
indices[i + 1] = index + 1;
indices[i + 2] = index + 2;
indices[i + 3] = index + 1;
indices[i + 4] = index + 3;
indices[i + 5] = index + 2;
index += 4;
}
var r,g,b;
for (var i = 0; i < colors.length; i+=12) {
r = Math.random();
g = Math.random();
b = Math.random();
colors[i] = r;
colors[i + 1] = g;
colors[i + 2] = b;
colors[i + 3] = r;
colors[i + 4] = g;
colors[i + 5] = b;
colors[i + 6] = r;
colors[i + 7] = g;
colors[i + 8] = b;
colors[i + 9] = r;
colors[i + 10] = g;
colors[i + 11] = b;
}
var k = 0;
var w = 26 / 2;
var h = 37 / 2;
var x,y;
// one quad (8 floats) per iteration
for (var i = 0; i < vertices.length / 8; i++) {
x = (Math.random() * 600) - 300;
y = (Math.random() * 600) - 300;
vertices[k] = -w + x; vertices[k + 1] = h + y;
vertices[k + 2] = -w + x; vertices[k + 3] = -h + y;
vertices[k + 4] = w + x; vertices[k + 5] = h + y;
vertices[k + 6] = w + x; vertices[k + 7] = -h + y;
k +=8;
}
positionVertexBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, positionVertexBuffer);
gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW);
positionVertexBuffer.location = gl.getAttribLocation(shaderProgram,"position");
gl.vertexAttribPointer(positionVertexBuffer.location,2 ,gl.FLOAT, false, 0,0);
gl.enableVertexAttribArray(positionVertexBuffer.location);
colorVertexBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorVertexBuffer);
gl.bufferData(gl.ARRAY_BUFFER, colors, gl.STATIC_DRAW);
colorVertexBuffer.location = gl.getAttribLocation(shaderProgram,"color");
gl.vertexAttribPointer(colorVertexBuffer.location,3 ,gl.FLOAT, false, 0,0);
gl.enableVertexAttribArray(colorVertexBuffer.location);
indicesBuffer = gl.createBuffer();
gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, indicesBuffer);
gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, indices, gl.STATIC_DRAW);
outObject.positionVertexBuffer = positionVertexBuffer;
outObject.colorVertexBuffer = colorVertexBuffer;
outObject.indicesBuffer = indicesBuffer;
outObject.vertices = vertices;
outObject.indices = indices;
outObject.colors = colors;
outObject.colorVertexLocation = colorVertexBuffer.location;
outObject.positionVertexLocation = positionVertexBuffer.location;
}
function updateAllPoints() {
var points;
var p;
for (var i = 0; i < objects.length; i++) {
points = objects[i].points;
var k = 0;
for (var j = 0; j < points.length; j++) {
p = points[j];
p.x += p.vx;
p.y += p.vy;
if(p.x >= 300){
p.x = 300;
p.vx *= -1;
} else if(p.x <= -300) {
p.x = -300;
p.vx *= -1;
} else if(p.y >= 300){
p.y = 300;
p.vy *= -1;
} else if(p.y <= -300) {
p.y = -300;
p.vy *= -1;
}
var vertices = objects[i].vertices;
vertices[k] = -p.w + p.x; vertices[k + 1] = p.h + p.y;
vertices[k + 2] = -p.w + p.x; vertices[k + 3] = -p.h + p.y;
vertices[k + 4] = p.w + p.x; vertices[k + 5] = p.h + p.y;
vertices[k + 6] = p.w + p.x; vertices[k + 7] = -p.h + p.y;
k +=8;
}
}
}
function renderScene() {
// updateAllPoints();
var totalDraw = 0;
gl.clearColor(0.3,0.3,0.3,1);
gl.clear(gl.COLOR_BUFFER_BIT);
var rO;
for (var i = 0; i < objects.length; i++) {
rO = objects[i];
drawObjects(rO);
totalDraw += MAX_COUNT;
}
document.getElementById("objectCounter").innerHTML = totalDraw + " Objects"
}
function drawObjects (rO) {
gl.useProgram(rO.shaderProgram);
gl.bindBuffer(gl.ARRAY_BUFFER, rO.positionVertexBuffer);
// gl.bufferSubData(gl.ARRAY_BUFFER, 0, rO.vertices);
gl.vertexAttribPointer(rO.positionVertexLocation,2 ,gl.FLOAT, false, 0,0);
gl.bindBuffer(gl.ARRAY_BUFFER, rO.colorVertexBuffer);
gl.vertexAttribPointer(rO.colorVertexLocation,3 ,gl.FLOAT, false, 0,0);
gl.drawElements(gl.TRIANGLES,MAX_COUNT * 6 , gl.UNSIGNED_SHORT, 0);
}
function updateScreen() {
if(gl){
renderScene();
requestAnimationFrame(updateScreen);
}
}
<script id="vertexShader" type="x-shader/x-vertex">
uniform mat3 projectionMatrix;
attribute vec2 position;
attribute vec3 color;
varying vec3 colorData;
void main() {
colorData = color;
vec3 newPos = vec3(position.x, position.y, 1.0 ) * projectionMatrix;
gl_Position = vec4(newPos , 1.0);
}
</script>
<script id="fragmentShader" type="x-shader/x-fragment">
precision lowp float;
uniform sampler2D uSampler;
varying vec3 colorData;
void main() {
gl_FragColor = vec4(colorData, 1.0);
}
</script>
<canvas id="renderCanvas" width="200" height="200"></canvas>
<div id="objectCounter">10000 Objects</div>
<div>Every click adds 10K squares</div>
I also checked other examples and found PixiJS's Bunnymark test, where you can run 120k bunnies at 60 fps with no GPU overhead.
Compared with the Bunnymark test, my GPU was taking too much time and I didn't know why. I optimized it (or I think I did), but the problem persisted.
It turned out to be because I left the antialias context attribute at its default, which is apparently true. I can't believe I didn't notice.
This code worked for me:
canvasDom.getContext("webgl", {antialias : false});

What are slices in OpenGL?

In the code below, why do we need slices, and what is it for?
//https://github.com/danginsburg/opengles-book-samples/blob/604a02cc84f9cc4369f7efe93d2a1d7f2cab2ba7/iPhone/Common/esUtil.h#L110
int esGenSphere(int numSlices, float radius, float **vertices,
float **texCoords, uint16_t **indices, int *numVertices_out) {
int numParallels = numSlices / 2;
int numVertices = (numParallels + 1) * (numSlices + 1);
int numIndices = numParallels * numSlices * 6;
float angleStep = (2.0f * ES_PI) / ((float) numSlices);
if (vertices != NULL) {
*vertices = malloc(sizeof(float) * 3 * numVertices);
}
if (texCoords != NULL) {
*texCoords = malloc(sizeof(float) * 2 * numVertices);
}
if (indices != NULL) {
*indices = malloc(sizeof(uint16_t) * numIndices);
}
for (int i = 0; i < numParallels + 1; i++) {
for (int j = 0; j < numSlices + 1; j++) {
int vertex = (i * (numSlices + 1) + j) * 3;
if (vertices) {
(*vertices)[vertex + 0] = radius * sinf(angleStep * (float)i) * sinf(angleStep * (float)j);
(*vertices)[vertex + 1] = radius * cosf(angleStep * (float)i);
(*vertices)[vertex + 2] = radius * sinf(angleStep * (float)i) * cosf(angleStep * (float)j);
}
if (texCoords) {
int texIndex = (i * (numSlices + 1) + j) * 2;
(*texCoords)[texIndex + 0] = (float)j / (float)numSlices;
(*texCoords)[texIndex + 1] = 1.0f - ((float)i / (float)numParallels);
}
}
}
// Generate the indices
if (indices != NULL) {
uint16_t *indexBuf = (*indices);
for (int i = 0; i < numParallels ; i++) {
for (int j = 0; j < numSlices; j++) {
*indexBuf++ = i * (numSlices + 1) + j;
*indexBuf++ = (i + 1) * (numSlices + 1) + j;
*indexBuf++ = (i + 1) * (numSlices + 1) + (j + 1);
*indexBuf++ = i * (numSlices + 1) + j;
*indexBuf++ = (i + 1) * (numSlices + 1) + (j + 1);
*indexBuf++ = i * (numSlices + 1) + (j + 1);
}
}
}
if (numVertices_out) {
*numVertices_out = numVertices;
}
return numIndices;
}
That code generates a sphere mesh that looks like this:
Source: https://commons.wikimedia.org/wiki/File:Sphere_wireframe_10deg_6r.svg CC BY 3.0
As you can see in the picture, there are horizontal parallel lines, and vertical lines which all meet at the poles. The horizontal lines are typically called parallels, whereas the vertical ones are called meridians. The author of that code apparently didn't know the latter term, so they called them "slices" instead.

Converting from RGB to the Lαβ color space and back to RGB using OpenCV

I am currently trying to convert colors between the RGB (red, green, blue) color space and the Lαβ color space, based on the details in this paper.
My difficulty is in reversing the conversion process: the result is not the same as the initial RGB Mat. I think I am missing something in the type casts between Mats, but I can't tell what it is!
Here is my code:
Mat DetectTrackFace::RGB2LAlphBeta(Mat &src)
{
Mat dest;
Mat L_AlphBeta(src.rows, src.cols, CV_32FC3);
//cvtColor(src,dest,CV_BGR2XYZ);
float X,Y,Z,L,M,S,_L,Alph,Beta;
int R,G,B;
for(int i = 0; i < src.rows; i++)
{
for(int j = 0; j < src.cols; j++)
{
B = src.at<Vec3b>(i, j)[0];
G = src.at<Vec3b>(i, j)[1];
R = src.at<Vec3b>(i, j)[2];
X = ( 0.4124 * R ) + ( 0.3576 * G ) + ( 0.1805 * B);
Y = ( 0.2126 * R ) + ( 0.7152 * G ) + ( 0.0722 * B);
Z = ( 0.0193 * R ) + ( 0.1192 * G ) + ( 0.9505 * B);
L = (0.3897 * X) + (0.6890 * Y) + (-0.0787 * Z);
M = (-0.2298 * X) + (1.1834* Y) + (0.0464 * Z);
S = (0.0000 * X) + (0.0000 * Y) + (1.0000 * Z);
//for handling log
if(L == 0.0000) L=1.0000;
if(M == 0.0000) M = 1.0000;
if( S == 0.0000) S = 1.0000;
//LMS to Lab
_L = (1.0 / sqrt(3.0)) *((1.0000 * log10(L)) + (1.0000 * log10(M)) + (1.0000 * log10(S)));
Alph =(1.0 / sqrt(6.0)) * ((1.0000 * log10(L)) + (1.0000 * log10(M)) + (-2.0000 * log10(S)));
Beta = (1.0 / sqrt(2.0)) * ((1.0000 * log10(L)) + (-1.0000 * log10(M)) + (-0.0000 * log10(S)));
L_AlphBeta.at<Vec3f>(i, j)[0] = _L;
L_AlphBeta.at<Vec3f>(i, j)[1] = Alph;
L_AlphBeta.at<Vec3f>(i, j)[2] = Beta;
}
}
return L_AlphBeta;
}
Mat DetectTrackFace::LAlphBeta2RGB(Mat &src)
{
Mat XYZ(src.rows, src.cols, src.type());
Mat BGR(src.rows, src.cols, CV_8UC3);
float X,Y,Z,L,M,S,_L,Alph,Beta, B,G,R;
for(int i = 0; i < src.rows; i++)
{
for(int j = 0; j < src.cols; j++)
{
_L = src.at<Vec3f>(i, j)[0]*1.7321;
Alph = src.at<Vec3f>(i, j)[1]*2.4495;
Beta = src.at<Vec3f>(i, j)[2]*1.4142;
/*Inv_Transform_logLMS2lab =
0.33333 0.16667 0.50000
0.33333 0.16667 -0.50000
0.33333 -0.33333 0.00000*/
L = (0.33333*_L) + (0.16667 * Alph) + (0.50000 * Beta);
M = (0.33333 * _L) + (0.16667 * Alph) + (-0.50000 * Beta);
S = (0.33333 * _L) + (-0.33333 * Alph) + (0.00000* Beta);
L = pow(10 , L);
if(L == 1) L=0;
M = pow(10 , M);
if(M == 1) M=0;
S = pow(10 , S);
if(S == 1) S=0;
/*Inv_Transform_XYZ2LMS
1.91024 -1.11218 0.20194
0.37094 0.62905 0.00001
0.00000 0.00000 1.00000*/
X = (1.91024 *L ) + (-1.11218 * M ) +(0.20194 * S);
Y = (0.37094 * L ) + (0.62905 * M ) +(0.00001 * S);
Z = (0.00000 * L) + (0.00000 * M ) +(1.00000 * S);
/*Inv_Transform_RGB2XYZ
3.240625 -1.537208 -0.498629
-0.968931 1.875756 0.041518
0.055710 -0.204021 1.056996*/
R = ( 3.240625 * X) + ( -1.537208 * Y) + ( -0.498629 * Z);
G = ( -0.968931 * X) + ( 1.875756 * Y) + ( 0.041518 * Z);
B = ( 0.055710 * X) + ( -0.204021 * Y) + ( 1.056996 * Z);
if(R>255) R = 255;
if(G>255) G = 255;
if(B>255) B = 255;
if(R<0) R = 0;
if(G<0) G = 0;
if(B<0) B = 0;
if(R > 255 || G > 255 || B > 255 || R < 0 || G < 0 || B<0)
cout<<"R = "<<R<<" G = "<<G <<" B = "<<B<<endl;
BGR.at<Vec3b>(i, j)[0] = (uchar)B;
BGR.at<Vec3b>(i, j)[1] = (uchar)G;
BGR.at<Vec3b>(i, j)[2] = (uchar)R;
}
}
//normalize(BGR,BGR, 255, 0, NORM_MINMAX, CV_8UC3 );
return BGR;
}
You have float to uchar truncation errors in the function LAlphBeta2RGB here:
BGR.at<Vec3b>(i, j)[0] = (uchar)B;
BGR.at<Vec3b>(i, j)[1] = (uchar)G;
BGR.at<Vec3b>(i, j)[2] = (uchar)R;
You can solve this using:
BGR(i, j)[0] = uchar(cvRound(B));
BGR(i, j)[1] = uchar(cvRound(G));
BGR(i, j)[2] = uchar(cvRound(R));
However, you shouldn't need to handle the conversion explicitly: you can use saturate_cast to handle it for you. You can declare the R, G, B variables as uchar:
uchar B, G, R;
and perform the conversion as:
R = saturate_cast<uchar>((3.240625 * X) + (-1.537208 * Y) + (-0.498629 * Z));
G = saturate_cast<uchar>((-0.968931 * X) + (1.875756 * Y) + (0.041518 * Z));
B = saturate_cast<uchar>((0.055710 * X) + (-0.204021 * Y) + (1.056996 * Z));
and then assign as:
BGR(i, j)[0] = B;
BGR(i, j)[1] = G;
BGR(i, j)[2] = R;
Or skip the R, G, B temporaries entirely:
BGR(i, j)[2] = saturate_cast<uchar>((3.240625 * X) + (-1.537208 * Y) + (-0.498629 * Z));
BGR(i, j)[1] = saturate_cast<uchar>((-0.968931 * X) + (1.875756 * Y) + (0.041518 * Z));
BGR(i, j)[0] = saturate_cast<uchar>((0.055710 * X) + (-0.204021 * Y) + (1.056996 * Z));
Here is the full code. I took the liberty of using Mat_ instead of Mat for the function arguments, to avoid using at<type>() to access pixel values. In fact, you are already assuming that the inputs of your functions are CV_8UC3 and CV_32FC3, respectively.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
Mat RGB2LAlphBeta(Mat3b &src)
{
Mat3f L_AlphBeta(src.rows, src.cols);
//cvtColor(src,dest,CV_BGR2XYZ);
float X, Y, Z, L, M, S, _L, Alph, Beta;
int R, G, B;
for (int i = 0; i < src.rows; i++)
{
for (int j = 0; j < src.cols; j++)
{
B = src(i, j)[0];
G = src(i, j)[1];
R = src(i, j)[2];
X = (0.4124 * R) + (0.3576 * G) + (0.1805 * B);
Y = (0.2126 * R) + (0.7152 * G) + (0.0722 * B);
Z = (0.0193 * R) + (0.1192 * G) + (0.9505 * B);
L = (0.3897 * X) + (0.6890 * Y) + (-0.0787 * Z);
M = (-0.2298 * X) + (1.1834* Y) + (0.0464 * Z);
S = (0.0000 * X) + (0.0000 * Y) + (1.0000 * Z);
//for handling log
if (L == 0.0000) L = 1.0000;
if (M == 0.0000) M = 1.0000;
if (S == 0.0000) S = 1.0000;
//LMS to Lab
_L = (1.0 / sqrt(3.0)) *((1.0000 * log10(L)) + (1.0000 * log10(M)) + (1.0000 * log10(S)));
Alph = (1.0 / sqrt(6.0)) * ((1.0000 * log10(L)) + (1.0000 * log10(M)) + (-2.0000 * log10(S)));
Beta = (1.0 / sqrt(2.0)) * ((1.0000 * log10(L)) + (-1.0000 * log10(M)) + (-0.0000 * log10(S)));
L_AlphBeta(i, j)[0] = _L;
L_AlphBeta(i, j)[1] = Alph;
L_AlphBeta(i, j)[2] = Beta;
}
}
return L_AlphBeta;
}
Mat LAlphBeta2RGB(Mat3f &src)
{
Mat3f XYZ(src.rows, src.cols);
Mat3b BGR(src.rows, src.cols);
float X, Y, Z, L, M, S, _L, Alph, Beta;
for (int i = 0; i < src.rows; i++)
{
for (int j = 0; j < src.cols; j++)
{
_L = src(i, j)[0] * 1.7321;
Alph = src(i, j)[1] * 2.4495;
Beta = src(i, j)[2] * 1.4142;
/*Inv_Transform_logLMS2lab =
0.33333 0.16667 0.50000
0.33333 0.16667 -0.50000
0.33333 -0.33333 0.00000*/
L = (0.33333*_L) + (0.16667 * Alph) + (0.50000 * Beta);
M = (0.33333 * _L) + (0.16667 * Alph) + (-0.50000 * Beta);
S = (0.33333 * _L) + (-0.33333 * Alph) + (0.00000* Beta);
L = pow(10, L);
if (L == 1) L = 0;
M = pow(10, M);
if (M == 1) M = 0;
S = pow(10, S);
if (S == 1) S = 0;
/*Inv_Transform_XYZ2LMS
1.91024 -1.11218 0.20194
0.37094 0.62905 0.00001
0.00000 0.00000 1.00000*/
X = (1.91024 *L) + (-1.11218 * M) + (0.20194 * S);
Y = (0.37094 * L) + (0.62905 * M) + (0.00001 * S);
Z = (0.00000 * L) + (0.00000 * M) + (1.00000 * S);
/*Inv_Transform_RGB2XYZ
3.240625 -1.537208 -0.498629
-0.968931 1.875756 0.041518
0.055710 -0.204021 1.056996*/
BGR(i, j)[2] = saturate_cast<uchar>((3.240625 * X) + (-1.537208 * Y) + (-0.498629 * Z));
BGR(i, j)[1] = saturate_cast<uchar>((-0.968931 * X) + (1.875756 * Y) + (0.041518 * Z));
BGR(i, j)[0] = saturate_cast<uchar>((0.055710 * X) + (-0.204021 * Y) + (1.056996 * Z));
}
}
//normalize(BGR,BGR, 255, 0, NORM_MINMAX, CV_8UC3 );
return BGR;
}
int main()
{
Mat3b img = imread("path_to_image");
Mat3f labb = RGB2LAlphBeta(img);
Mat3b rgb = LAlphBeta2RGB(labb);
Mat3b diff;
absdiff(img, rgb, diff);
// Check if all pixels are equals
cout << ((sum(diff) == Scalar(0, 0, 0, 0)) ? "Equals" : "Different");
return 0;
}

Converting an ARGB 8888 image into YUV 420 SP on Android causes a greenish image

Hello, I am trying to convert an ARGB 8888 image into YUV 420 SP on Android, and I am getting a totally greenish and compressed image. Please help me with the code if I am not doing it the correct way.
The code is as below.
Image(Context context) {
// This Constructor is used to initialize height and width of screen
screenHeight = 800;//m1.heightPixels;
screenWidth = 480;//m1.widthPixels;
bufferSize = 4 * screenHeight * screenWidth;
buffer = new byte[bufferSize];
newarrs =new byte[bufferSize];
log("constructor width:- " + screenWidth + " height:- " + screenHeight);
}
public void capture() {
// Take the Data from frame buffer and store in buffer
log("capture Screen");
BufferedInputStream bis = null;
try {
// log("in try");
bis = new BufferedInputStream(new FileInputStream("/data/fb0.raw"));
readSize = bis.read(buffer, 0, bufferSize);
bis.close();
}
catch (Exception e) {
// log("in catch");
e.printStackTrace();
}
encodeYUV420(buffer);
byte[] arr = resize1(buffer);
FileOutputStream fos;
try {
File f = Files.getImageFile();
fos = new FileOutputStream(f);
fos.write(arr);
fos.close();
} catch (Exception e) {
}
}
private byte[] resize1(byte[] buffer) {
final int RATIO = 4;
byte[][][] newBuff = new byte[screenWidth][screenHeight][4];
int pos1 = 0;
for (int i = 0; i < screenWidth; i++) {
for (int j = 0; j < screenHeight; j++) {
newBuff[i][j][0] = buffer[pos1++];
newBuff[i][j][1] = buffer[pos1++];
newBuff[i][j][2] = buffer[pos1++];
newBuff[i][j][3] = buffer[pos1++];
}
}
byte[] buffer1 = new byte[buffer.length*3 / (RATIO * RATIO)];
int pos2 = 0;
int i = 0, j = 0;
for (i = 0; i < screenWidth; i++) {
for (j = 0; j < screenHeight; j++) {
try {
if (i % RATIO == 0 && j % RATIO == 0) {
buffer1[pos2++] = newBuff[i][j][0];
buffer1[pos2++] = newBuff[i][j][1];
buffer1[pos2++] = newBuff[i][j][2];
buffer1[pos2++] = newBuff[i][j][3];
}
} catch (Exception e) {
log(" i " + i + " j " + j);
}
}
}
log(" valuesof i " + i + " j " + j);
if (pos2 == buffer.length / (RATIO * RATIO))
log("S size:- " + pos2);
else
log("F size:- " + pos2);
return buffer1;
}
private byte[] encodeYUV420(byte[] argb) {
byte[] yuv420sp = new byte[(screenHeight * screenWidth * 3) / 2];
final int frameSize = screenWidth * screenHeight;
int yIndex = 0;
int uIndex = frameSize;
int vIndex = frameSize + (frameSize / 4);
int R, G, B;
int Y, U, V;
int index = 0;
for (int j = 0; j < screenHeight; j++) {
for (int i = 0; i < screenWidth; i++) {
int pp = (j * screenWidth + i) * 4;
//a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && i % 2 == 0) {
yuv420sp[uIndex++] = (byte) ((U<0) ? 0 : ((U > 255) ? 255 : U));
yuv420sp[vIndex++] = (byte) ((V<0) ? 0 : ((V > 255) ? 255 : V));
}
}
return yuv420sp;
}
Update:
Screenshot illustrating the problem:
I have made some changes and it now looks somewhat like the original image, but it is not clear or legible. Can I get some ideas on how to make it almost as good as the original image?
Image(Context context) {
// This Constructor is used to initialize height and width of screen
screenHeight = 800;//m1.heightPixels;
screenWidth = 480;//m1.widthPixels;
bufferSize = 4 * screenHeight * screenWidth;
buffer = new byte[bufferSize];
newarrs =new byte[bufferSize];
log("constructor width:- " + screenWidth + " height:- " + screenHeight);
}
public void capture() {
// Take the Data from frame buffer and store in buffer
log("capture Screen");
BufferedInputStream bis = null;
try {
// log("in try");
bis = new BufferedInputStream(new FileInputStream("/data/fb0.raw"));
readSize = bis.read(buffer, 0, bufferSize);
bis.close();
}
catch (Exception e) {
// log("in catch");
e.printStackTrace();
}
encodeYUV420(buffer);
byte[] arr = resize1(buffer);
FileOutputStream fos;
try {
File f = Files.getImageFile();
fos = new FileOutputStream(f);
fos.write(arr);
fos.close();
} catch (Exception e) {
}
}
private byte[] resize1(byte[] buffer) {
final int RATIO = 4;
byte[][][] newBuff = new byte[screenWidth][screenHeight][4];
int pos1 = 0;
for (int i = 0; i < screenWidth; i++) {
for (int j = 0; j < screenHeight; j++) {
newBuff[i][j][0] = buffer[pos1++];
newBuff[i][j][1] = buffer[pos1++];
newBuff[i][j][2] = buffer[pos1++];
newBuff[i][j][3] = buffer[pos1++];
}
}
byte[] buffer1 = new byte[buffer.length*3 / (RATIO * RATIO)];
int pos2 = 0;
int i = 0, j = 0;
for (i = 0; i < screenWidth; i++) {
for (j = 0; j < screenHeight; j++) {
try {
if (i % RATIO == 0 && j % RATIO == 0) {
buffer1[pos2++] = newBuff[i][j][0];
buffer1[pos2++] = newBuff[i][j][1];
buffer1[pos2++] = newBuff[i][j][2];
buffer1[pos2++] = newBuff[i][j][3];
}
} catch (Exception e) {
log(" i " + i + " j " + j);
}
}
}
log(" valuesof i " + i + " j " + j);
if (pos2 == buffer.length / (RATIO * RATIO))
log("S size:- " + pos2);
else
log("F size:- " + pos2);
return buffer1;
}
private byte[] encodeYUV420(byte[] argb) {
byte[] yuv420sp = new byte[(screenHeight * screenWidth * 3) / 2];
final int frameSize = screenWidth * screenHeight;
int yIndex = 0;
int uvIndex=frameSize;
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < screenHeight; j++) {
for (int i = 0; i < screenWidth; i++) {
int pp = (j * screenWidth + i) * 4;
R = argb[pp+ 0];
G = argb[pp + 1];
B = argb[pp + 2];
a = argb[pp + 3];
Y = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && i % 2 == 0) {
yuv420sp[uvIndex++] = (byte)((U<0) ? 0 : ((U > 255) ? 255 : U));
yuv420sp[uvIndex++] = (byte)((V<0) ? 0 : ((V > 255) ? 255 : V));
}
}
}
return yuv420sp;
}
You're not storing the YUV data correctly. According to this document, YUV420SP data is stored in two planes, one containing the Y data, and another containing the interleaved U and V data:
| Y_0 | Y_1 | Y_2 | Y_3 | Y_4 | ... | Y_w-2 | Y_w-1 | /* h rows */
| Y_w | Y_w+1 | Y_w+2 | ...
:
:
| U_0 | V_0 | U_2 | V_2 | U_4 | ... | U_w-2 | V_w-2 | /* h/2 rows */
| U_2w | V_2w | U_2w+2| ...
:
:
Your code seems to be storing the U and V data in separate planes:
int uIndex = frameSize;
int vIndex = frameSize + (frameSize / 4);

Trouble Rendering a Sphere in WebGL and Typescript

I've ported over some C code that renders a sphere in OpenGL for a WebGL/TypeScript project I'm working on; however, it's not rendering correctly. I've compared the indices and vertices between the C and TS versions and they appear to match. The code is as follows:
constructor(ctx: WebGLRenderingContext, stacks:number,
slices:number, scale: number){
var vertices: number[] = [];
var normals: number[] = [];
var indices: number[] = [];
var ii: number;
var jj: number;
var v: number;
var u: number;
normals.push(0, 0, 1);
vertices.push(0, 0, scale);
for (ii = 0; ii < slices; ++ii) {
indices.push(0);
indices.push(ii + 1);
}
indices.push(0);
indices.push(1);
for (ii = 1; ii < stacks; ++ii) {
v = ii / stacks;
for (jj = 0; jj < slices; ++jj) {
u = jj / slices;
normals.push.apply(normals, this.shapeNormal(u, v));
vertices.push.apply(vertices, this.shapeVertex(scale, u, v));
indices.push((ii - 1) * slices + (jj + 1));
var index_offset: number = ((ii + 1) === stacks) ? 0 : jj;
var second: number = ii * slices + (index_offset + 1);
//console.log("Offset: " + String(index_offset) + " Value: " + String(second));
indices.push(second);
}
indices.push((ii - 1) * slices + 1);
indices.push(ii * slices + 1);
}
normals.push(0, 0, -1);
vertices.push(0, 0, -scale);
//console.log("Theoretical vertices: " + String(3 * (2 + slices * (stacks - 1))));
//initialise vbos
console.log("Vertices: " + String(vertices.length / 3));
for(var l = 0; l < vertices.length; l += 3)
console.log(vertices[l].toFixed(6) + " " + vertices[l+1].toFixed(6) + " " + vertices[l+2].toFixed(6));
this.vertices = new VertexBufferObject(ctx, 3, vertices.length / 3);
//console.log("Normals: " + String(normals.length));
this.normals = new VertexBufferObject(ctx, 3, normals.length / 3);
console.log("Indices: " + String(indices.length) + " " + indices.toString());
this.indices = new VertexBufferObject(ctx, 1, indices.length);
//populate vbo
ctx.enableVertexAttribArray(0);
ctx.bindBuffer(ctx.ARRAY_BUFFER, this.vertices.buffer);
ctx.bufferData(ctx.ARRAY_BUFFER, new Float32Array(vertices), ctx.STATIC_DRAW);
ctx.enableVertexAttribArray(1);
ctx.bindBuffer(ctx.ARRAY_BUFFER, this.normals.buffer);
ctx.bufferData(ctx.ARRAY_BUFFER, new Float32Array(normals), ctx.STATIC_DRAW);
ctx.bindBuffer(ctx.ELEMENT_ARRAY_BUFFER, this.indices.buffer);
ctx.bufferData(ctx.ELEMENT_ARRAY_BUFFER, new Uint16Array(indices),
ctx.STATIC_DRAW);
ctx.bindBuffer(ctx.ARRAY_BUFFER, null);
ctx.bindBuffer(ctx.ELEMENT_ARRAY_BUFFER, null);
ctx.disableVertexAttribArray(0);
ctx.disableVertexAttribArray(1);
this.ctx = ctx;
}
private shapeVertex(r: number, u: number, v: number): number[] {
/* Use maths rather than physics spherical coordinate convention */
var theta: number = u * 2.0 * Math.PI;
var phi: number = v * Math.PI;
var vert: number[] = [
r * Math.cos(theta) * Math.sin(phi),
r * Math.sin(theta) * Math.sin(phi),
r * Math.cos(phi)
];
return vert;
}
private shapeNormal(u: number, v: number): number[] {
/* Use maths rather than physics spherical coordinate convention */
var theta: number = u * 2.0 * Math.PI;
var phi: number = v * Math.PI;
var norm: number[] = [
Math.cos(theta) * Math.sin(phi),
Math.sin(theta) * Math.sin(phi),
Math.cos(phi)
];
var mag: number = Math.sqrt(norm[0] * norm[0] + norm[1] * norm[1] + norm[2] * norm[2]);
norm[0] /= mag;
norm[1] /= mag;
norm[2] /= mag;
return norm;
}
public draw(shaderProgram: ShaderProgram): void {
//bind and draw vbo's
this.ctx.enableVertexAttribArray(0);
this.ctx.bindBuffer(this.ctx.ARRAY_BUFFER, this.vertices.buffer);
this.ctx.vertexAttribPointer(shaderProgram.attributes.position,
this.vertices.itemSize, this.ctx.FLOAT, false, 0, 0);
this.ctx.enableVertexAttribArray(1);
this.ctx.bindBuffer(this.ctx.ARRAY_BUFFER, this.normals.buffer);
this.ctx.vertexAttribPointer(shaderProgram.attributes.normal,
this.normals.itemSize, this.ctx.FLOAT, false, 0, 0);
this.ctx.bindBuffer(this.ctx.ELEMENT_ARRAY_BUFFER, this.indices.buffer);
this.ctx.drawElements(this.ctx.TRIANGLES, this.indices.numItems,
this.ctx.UNSIGNED_SHORT, 0);
this.ctx.bindBuffer(this.ctx.ELEMENT_ARRAY_BUFFER, null);
this.ctx.bindBuffer(this.ctx.ARRAY_BUFFER, null);
this.ctx.disableVertexAttribArray(0);
this.ctx.disableVertexAttribArray(1);
}
and a screenshot of the result:
Broken Sphere
Thank you in advance
As TypeScript is just a superset of JavaScript, your problem is probably related to how JavaScript handles your computations.
I'm not sure about your code, as you didn't provide the original C source.
Assuming your code is correct, you may be encountering a floating-point approximation error.
