Store modified pixel back into texture memory

Hi, I am trying to implement an image re-targeting algorithm using OpenGL ES 2.0. This algorithm first uses a Sobel filter and then an image difference to detect edges and motion, respectively. It then uses these values to build a matrix that determines which pixels can be removed from the image. So far I have implemented the Sobel filter and the image difference in a fragment shader. Once I have the edge map and the motion map, I add the two values and normalize the result to between 0 and 1. Now I need to run a Sobel filter again, this time on the normalized matrix. But I don't know whether I can store it back into texture memory so that I can load it as a texture again. Or should I store these values in a matrix the same size as the image? How do I do that using GLSL? (I am a beginner at GLSL; this might be a stupid question.)
Here is the fragment shader:
precision mediump float;

varying vec2 coordVar;
uniform sampler2D s_baseMap;
uniform sampler2D s_lightMap;

void main()
{
    vec4 baseColor = texture2D(s_baseMap, coordVar);
    vec4 lightColor = texture2D(s_lightMap, coordVar);

    // ###### TEMPORAL SALIENCY MAP ######
    float basemono  = baseColor.r  * 0.299 + baseColor.g  * 0.587 + baseColor.b  * 0.114;
    float lightmono = lightColor.r * 0.299 + lightColor.g * 0.587 + lightColor.b * 0.114;
    float diffmap = basemono - lightmono;

    // ###### SPATIAL SALIENCY MAP ######
    float pixelHSobel;
    float pixelVSobel;
    float pixel;
    mat3 sobelHoriz = mat3(
         0.125,  0.250,  0.125,
         0.000,  0.000,  0.000,
        -0.125, -0.250, -0.125);
    mat3 sobelVert = mat3(
        -0.125, 0.0, 0.125,
        -0.250, 0.0, 0.250,
        -0.125, 0.0, 0.125);
    float stepH = 1.0 / 800.0;
    float stepV = 1.0 / 600.0;
    float tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9;

    // ###### Horizontal Sobel ######
    tmp1 = texture2D(s_baseMap, coordVar.st + vec2(-stepH,  stepV)).r * sobelHoriz[0][0];
    tmp3 = texture2D(s_baseMap, coordVar.st + vec2( stepH,  stepV)).r * sobelHoriz[2][0];
    tmp4 = texture2D(s_baseMap, coordVar.st + vec2(-stepH,  0.0)).r * sobelHoriz[0][1];
    tmp6 = texture2D(s_baseMap, coordVar.st + vec2( stepH,  0.0)).r * sobelHoriz[2][1];
    tmp7 = texture2D(s_baseMap, coordVar.st + vec2(-stepH, -stepV)).r * sobelHoriz[0][2];
    tmp9 = texture2D(s_baseMap, coordVar.st + vec2( stepH, -stepV)).r * sobelHoriz[2][2];
    pixelHSobel = tmp1 + tmp3 + tmp4 + tmp6 + tmp7 + tmp9 + 0.5;

    // ###### Vertical Sobel ######
    tmp1 = texture2D(s_baseMap, coordVar.st + vec2(-stepH,  stepV)).r * sobelVert[0][0];
    tmp2 = texture2D(s_baseMap, coordVar.st + vec2( 0.0,    stepV)).r * sobelVert[1][0];
    tmp3 = texture2D(s_baseMap, coordVar.st + vec2( stepH,  stepV)).r * sobelVert[2][0];
    tmp4 = texture2D(s_baseMap, coordVar.st + vec2(-stepH,  0.0)).r * sobelVert[0][1];
    tmp5 = texture2D(s_baseMap, coordVar.st + vec2( 0.0,    0.0)).r * sobelVert[1][1];
    tmp6 = texture2D(s_baseMap, coordVar.st + vec2( stepH,  0.0)).r * sobelVert[2][1];
    tmp7 = texture2D(s_baseMap, coordVar.st + vec2(-stepH, -stepV)).r * sobelVert[0][2];
    tmp8 = texture2D(s_baseMap, coordVar.st + vec2( 0.0,   -stepV)).r * sobelVert[1][2];
    tmp9 = texture2D(s_baseMap, coordVar.st + vec2( stepH, -stepV)).r * sobelVert[2][2];
    pixelVSobel = tmp1 + tmp2 + tmp3 + tmp4 + tmp5 + tmp6 + tmp7 + tmp8 + tmp9 + 0.5;

    pixel = sqrt(pixelVSobel * pixelVSobel + pixelHSobel * pixelHSobel);
    if (pixel <= 0.715)
    {
        pixel = 0.0;
    }
    else
    {
        pixel = 1.0;
    }

    // ###### IMPORTANCE MATRIX ######
    float impmap = (pixel + diffmap) / 2.0;

    // ###### display ######
    gl_FragColor = vec4(impmap, impmap, impmap, 1.0);
}
I now need to somehow run a Sobel filter on the impmap image.

Using a framebuffer object (FBO), your Sobel filter output is written to a texture instead of the on-screen framebuffer. You can then bind this texture in place of your original image and apply another filter pass to it.
So the order of operations would be roughly:
1. Bind the original image as the texturing source.
2. Bind the target texture as the FBO color attachment.
3. Render the original image through the Sobel shader into the target FBO.
4. Unbind the target texture from the FBO color attachment and bind it as the texturing source.
5. Bind a secondary target texture as the FBO color attachment.
6. Go to 3; repeat as often as needed.
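To make the second pass concrete, here is a minimal GLSL sketch (illustrative only; s_impMap is a hypothetical name for the texture the first pass rendered into). The second Sobel is just another fragment shader, and only the bound input texture changes between passes:
precision mediump float;

varying vec2 coordVar;
uniform sampler2D s_impMap; // hypothetical: the texture the importance map was rendered into

void main()
{
    float stepH = 1.0 / 800.0; // same texel offsets as the first pass
    float stepV = 1.0 / 600.0;

    // 3x3 Sobel taps on the importance map (it is grayscale, so .r suffices)
    float tl = texture2D(s_impMap, coordVar + vec2(-stepH,  stepV)).r;
    float t  = texture2D(s_impMap, coordVar + vec2( 0.0,    stepV)).r;
    float tr = texture2D(s_impMap, coordVar + vec2( stepH,  stepV)).r;
    float l  = texture2D(s_impMap, coordVar + vec2(-stepH,  0.0)).r;
    float r  = texture2D(s_impMap, coordVar + vec2( stepH,  0.0)).r;
    float bl = texture2D(s_impMap, coordVar + vec2(-stepH, -stepV)).r;
    float b  = texture2D(s_impMap, coordVar + vec2( 0.0,   -stepV)).r;
    float br = texture2D(s_impMap, coordVar + vec2( stepH, -stepV)).r;

    float gx = (tr + 2.0 * r + br) - (tl + 2.0 * l + bl);
    float gy = (tl + 2.0 * t + tr) - (bl + 2.0 * b + br);

    gl_FragColor = vec4(vec3(length(vec2(gx, gy))), 1.0);
}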

Related

iOS 12 CIKernel Filters CRASH

I am using the following CIColorKernel code to generate customFilter:
kernel vec4 customFilter(__sample image, __sample noise, float time, float inputNoise) {
    vec2 uv = destCoord() / 1280.0;
    float d = length(uv - vec2(0.5, 0.5));
    float blur = inputNoise;
    float myTime = time * 1.0;
    vec2 myuv = vec2(uv.x + sin((uv.y + sin(myTime))
                         * abs(sin(myTime) + sin(2.0 * myTime) + sin(0.3 * myTime)
                             + sin(1.4 * myTime) + cos(0.7 * myTime) + cos(1.3 * myTime)) * 4.0) * 0.02,
                     uv.y);
    vec2 finalUV = myuv * 1280.0;
    vec3 col;
    col.r = sample(image, samplerTransform(image, finalUV)).r;
    col.g = sample(image, samplerTransform(image, finalUV)).g;
    col.b = sample(image, samplerTransform(image, finalUV)).b;
    // scanlines
    float scanline = sin(uv.y * 1280.0 * 400.0) * 0.08;
    col -= scanline;
    // vignette
    col *= 1.0 - d * 0.5;
    return vec4(col, 1.0);
}
This piece of code works fine on iOS 10 / iOS 11 devices. However, it generates a weird crash on iOS 12 devices:
[CIKernelPool] 16:40: ERROR: parameter has unexpected type 'vec4' (should be a sampler type)
col.r = sample(image, samplerTransform(image, finalUV)).r;
[CIKernelPool] 17:40: ERROR: parameter has unexpected type 'vec4' (should be a sampler type)
col.g = sample(image, samplerTransform(image, finalUV)).g;
[CIKernelPool] 18:40: ERROR: parameter has unexpected type 'vec4' (should be a sampler type)
col.b = sample(image, samplerTransform(image, finalUV)).b;
This seems to happen in every CIColorKernel that uses __sample. Using sampler in place of __sample and converting the CIColorKernel to a CIKernel avoids the crash, but it doesn't generate the expected result.
As the error states, you are supplying the wrong type of object to
sample(image, samplerTransform(image, finalUV)).r
Here image is of type __sample, but sample() actually requires a sampler type. A CIColorKernel expects __sample parameters, so what you need is to use a CIKernel instead of a CIColorKernel. Then you can declare sampler parameters in your kernel:
kernel vec4 customFilter(sampler image, sampler noise, float time, float inputNoise) { ... }
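For reference, here is a sketch of the fully converted kernel built on that signature (the body is unchanged apart from the parameter types; the unused blur variable is dropped):
kernel vec4 customFilter(sampler image, sampler noise, float time, float inputNoise) {
    vec2 uv = destCoord() / 1280.0;
    float d = length(uv - vec2(0.5, 0.5));
    float myTime = time * 1.0;
    vec2 myuv = vec2(uv.x + sin((uv.y + sin(myTime))
                         * abs(sin(myTime) + sin(2.0 * myTime) + sin(0.3 * myTime)
                             + sin(1.4 * myTime) + cos(0.7 * myTime) + cos(1.3 * myTime)) * 4.0) * 0.02,
                     uv.y);
    vec2 finalUV = myuv * 1280.0;
    // sample() now receives the sampler it expects
    vec3 col = sample(image, samplerTransform(image, finalUV)).rgb;
    float scanline = sin(uv.y * 1280.0 * 400.0) * 0.08;
    col -= scanline;
    col *= 1.0 - d * 0.5; // vignette
    return vec4(col, 1.0);
}
If the converted kernel compiles but still doesn't produce the expected result, one thing to check is the ROI callback passed to applyWithExtent:roiCallback:arguments:. This kernel reads pixels away from destCoord(), so returning only the destination rect from the ROI callback would starve the sampler of the displaced samples.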

Fragment shader and Vertex Shader in Metal

My vertex shader is:
vertex VertexOutBezier bezier_vertex(constant BezierParameters *allParams [[buffer(0)]],
                                     // constant GlobalParameters& globalParams [[buffer(1)]],
                                     uint vertexId [[vertex_id]],
                                     uint instanceId [[instance_id]])
{
    float t = (float) vertexId / 300;
    rint(t);
    BezierParameters params = allParams[instanceId];
    float lineWidth = (1 - (((float) (vertexId % 2)) * 2.0)) * params.lineThickness;
    float2 a = params.a;
    float2 b = params.b;
    float nt = 1.0f - t;
    float nt_2 = nt * nt;
    float nt_3 = nt_2 * nt;
    float t_2 = t * t;
    float t_3 = t_2 * t;
    float2 point = a * nt_3 + params.p1 * nt_2 * t + params.p2 * nt * t_2 + b * t_3;
    float2 tangent = -3.0 * a * nt_2 + params.p1 * (1.0 - 4.0 * t + 3.0 * t_2) + params.p2 * (2.0 * t - 3.0 * t_2) + 3 * b * t_2;
    tangent = normalize(float2(-tangent.y, tangent.x));
    VertexOutBezier vo;
    vo.pos.xy = point + (tangent * (lineWidth / 3.0f));
    vo.pos.zw = float2(0, 1);
    vo.color = params.color;
    return vo;
}
My fragment shader is:
fragment float4 bezier_fragment(VertexOutBezier params [[stage_in]],
                                texture2d<float> texture [[texture(0)]])
{
    constexpr sampler defaultSampler;
    float4 canvasColor = texture.sample(defaultSampler, params.pos.xy);
    return canvasColor;
}
Here I expect to get the pixel color of the texture, but I only get a single color; it does not vary with the fragment's position. Even when I do the following in the fragment shader, I get a single color that does not vary with the coordinates:
fragment float4 bezier_fragment(VertexOutBezier params [[stage_in]],
                                texture2d<float> texture [[texture(0)]])
{
    constexpr sampler defaultSampler;
    float4 canvasColor = params.color * params.pos.x;
    return canvasColor;
}
If I do this in the vertex shader, I do get a color that varies with the x position:
vo.pos.xy = point + (tangent * (lineWidth / 3.0f));
vo.pos.zw = float2(0, 1);
vo.color = params.color * vo.pos.x;
What is the issue in the fragment shader? I cannot get the coordinates from the vertex shader.
Please make sure the VertexOutBezier.pos.xy value is normalized (0 ~ 1.0): the default sampler only accepts normalized texture coordinates. If pos is the [[position]] output, it arrives in the fragment shader in window (pixel) coordinates, so the sample position is always beyond those bounds and gets clamped to a single edge color. Either pass a separate normalized UV out of the vertex shader, or divide the coordinates by the render target size before sampling.

WEBGL Fluid simulation

I am trying to get a fluid simulation working in WebGL, using http://meatfighter.com/fluiddynamics/GPU_Gems_Chapter_38.pdf as a resource. I have implemented everything, but I feel like multiple things aren't working correctly. I added boundaries, but they seem to have no effect, which makes me suspicious about how well pressure and advection are working. When I display the divergence, I get very little around where I am moving the object, as well as where the velocity hits the edge (boundary), but the pressure I get is completely empty. I calculate pressure using the diffusion shader, as described in the linked resource.
I know the code I am posting is a little confusing due to the nature of the subject. I can supply pictures of, or links to, the simulation if that would help.
--EDIT--
After some more investigation, I believe the problem is related to my advection function (or at least one of the problems is). I am unsure how to fix it, though.
Instead of posting all of my code, the general process I follow is:
advect velocity
diffuse velocity
add velocity
calculate divergence
compute pressure
subtract gradient
For diffusing velocity and computing pressure I only do 10 iterations, because that's all my computer can handle with my implementation (I will optimize once I get it working), but I feel like computing the pressure and subtracting its gradient are not having any effect.
Here are the shaders I am using:
//advection
uniform vec2 res; //The width and height of our screen
uniform sampler2D velocity; //input velocity
uniform sampler2D quantity; //quantity to advect

void main() {
    vec2 pixel = gl_FragCoord.xy / res.xy;
    float i0, j0, i1, j1;
    float x, y, s0, s1, t0, t1, dxt0, dyt0;
    float dt = 1.0 / 60.0;
    float Nx = res.x - 1.0;
    float Ny = res.y - 1.0;
    float i = pixel.x;
    float j = pixel.y;
    dxt0 = dt;
    dyt0 = dt;
    x = gl_FragCoord.x - dxt0 * (texture2D(velocity, pixel).x);
    y = gl_FragCoord.y - dyt0 * (texture2D(velocity, pixel).y);
    i0 = x - 0.5;
    i1 = x + 0.5;
    j0 = y - 0.5;
    j1 = y + 0.5;
    s1 = x - i0;
    s0 = 1.0 - s1;
    t1 = y - j0;
    t0 = 1.0 - t1;
    float p1 = (t0 * texture2D(quantity, vec2(i0, j0) / res.xy).r);
    float p2 = (t1 * texture2D(quantity, vec2(i0, j1) / res.xy).r);
    float p3 = (t0 * texture2D(quantity, vec2(i1, j0) / res.xy).r);
    float p4 = (t1 * texture2D(quantity, vec2(i1, j1) / res.xy).r);
    float total1 = s0 * (p1 + p2);
    float total2 = s1 * (p3 + p4);
    gl_FragColor.r = total1 + total2;
    p1 = (t0 * texture2D(quantity, vec2(i0, j0) / res.xy).g);
    p2 = (t1 * texture2D(quantity, vec2(i0, j1) / res.xy).g);
    p3 = (t0 * texture2D(quantity, vec2(i1, j0) / res.xy).g);
    p4 = (t1 * texture2D(quantity, vec2(i1, j1) / res.xy).g);
    total1 = s0 * (p1 + p2);
    total2 = s1 * (p3 + p4);
    gl_FragColor.g = total1 + total2;
}
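One thing that stands out in the advection above: since i0 = x - 0.5 and s1 = x - i0, the weights s0, s1, t0 and t1 always come out to exactly 0.5, so the shader averages its four samples rather than interpolating between them. For comparison, here is a minimal semi-Lagrangian sketch that lets the hardware do the bilinear interpolation instead (this assumes the quantity texture uses LINEAR filtering and that velocity is stored in pixels per second):
uniform vec2 res;           //The width and height of our screen
uniform sampler2D velocity;
uniform sampler2D quantity;

void main() {
    float dt = 1.0 / 60.0;
    vec2 pixel = gl_FragCoord.xy / res.xy;
    // trace backwards along the velocity field, in pixel units
    vec2 backPos = gl_FragCoord.xy - dt * texture2D(velocity, pixel).xy;
    // hardware LINEAR filtering performs the bilinear interpolation
    gl_FragColor = texture2D(quantity, backPos / res.xy);
}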
//diffusion shader starts here
uniform vec2 res; //The width and height of our screen
uniform sampler2D x; //Our input texture
uniform sampler2D b;
uniform float alpha;
uniform float rBeta;

void main() {
    float xPixel = 1.0 / res.x;
    float yPixel = 1.0 / res.y;
    vec2 pixel = gl_FragCoord.xy / res.xy;
    gl_FragColor = texture2D(b, pixel);
    vec4 leftColor  = texture2D(x, vec2(pixel.x - xPixel, pixel.y));
    vec4 rightColor = texture2D(x, vec2(pixel.x + xPixel, pixel.y));
    vec4 upColor    = texture2D(x, vec2(pixel.x, pixel.y - yPixel));
    vec4 downColor  = texture2D(x, vec2(pixel.x, pixel.y + yPixel));
    gl_FragColor.r = (gl_FragColor.r * alpha + leftColor.r + rightColor.r + upColor.r + downColor.r) * rBeta;
    gl_FragColor.g = (gl_FragColor.g * alpha + leftColor.g + rightColor.g + upColor.g + downColor.g) * rBeta;
    gl_FragColor.b = (gl_FragColor.b * alpha + leftColor.b + rightColor.b + upColor.b + downColor.b) * rBeta;
}
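(For reference, as I read the GPU Gems chapter, the same Jacobi shader serves both solves and only the inputs differ: for diffusion, x and b are both the velocity texture, with alpha = dx^2 / (nu * dt) and rBeta = 1 / (4 + alpha); for the pressure solve, x is the pressure texture being iterated, b is the divergence, alpha = -dx^2 and rBeta = 0.25, with dx measured in grid cells.)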
//gradient
uniform vec2 res; //The width and height of our screen
uniform sampler2D velocity; //Our input velocity
uniform sampler2D pressure; //Our input pressure

void main() {
    float xPixel = 1.0 / res.x;
    float yPixel = 1.0 / res.y;
    vec2 pixel = gl_FragCoord.xy / res.xy;
    vec4 leftColor  = texture2D(pressure, vec2(pixel.x - xPixel, pixel.y));
    vec4 rightColor = texture2D(pressure, vec2(pixel.x + xPixel, pixel.y));
    vec4 upColor    = texture2D(pressure, vec2(pixel.x, pixel.y - yPixel));
    vec4 downColor  = texture2D(pressure, vec2(pixel.x, pixel.y + yPixel));
    vec2 gradient = xPixel / 2.0 * vec2((rightColor.x - leftColor.x), (upColor.y - downColor.y));
    // subtract the pressure gradient from the velocity
    gl_FragColor = texture2D(velocity, pixel);
    gl_FragColor.xy -= gradient;
}
//divergence
uniform vec2 res; //The width and height of our screen
uniform sampler2D velocity; //Our input texture

void main() {
    float xPixel = 1.0 / res.x;
    float yPixel = 1.0 / res.y;
    vec2 pixel = gl_FragCoord.xy / res.xy;
    vec4 leftColor  = texture2D(velocity, vec2(pixel.x - xPixel, pixel.y));
    vec4 rightColor = texture2D(velocity, vec2(pixel.x + xPixel, pixel.y));
    vec4 upColor    = texture2D(velocity, vec2(pixel.x, pixel.y - yPixel));
    vec4 downColor  = texture2D(velocity, vec2(pixel.x, pixel.y + yPixel));
    float div = xPixel / 2.0 * ((rightColor.x - leftColor.x) + (upColor.y - downColor.y));
    gl_FragColor = vec4(div);
}
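In the GPU Gems chapter, the divergence and gradient passes use halfrdx = 0.5 / gridscale with distances measured in grid cells, so with a grid spacing of one texel the factor would be 0.5 rather than xPixel / 2.0 (which equals 0.5 / res.x and scales the result down by a factor of res.x; that would be consistent with the tiny divergence and empty pressure described above). A sketch of the divergence pass with that scaling follows; this is my reading of the chapter, not a tested fix:
//divergence (sketch)
uniform vec2 res; //The width and height of our screen
uniform sampler2D velocity;

void main() {
    float xPixel = 1.0 / res.x;
    float yPixel = 1.0 / res.y;
    vec2 pixel = gl_FragCoord.xy / res.xy;
    vec4 leftColor  = texture2D(velocity, vec2(pixel.x - xPixel, pixel.y));
    vec4 rightColor = texture2D(velocity, vec2(pixel.x + xPixel, pixel.y));
    vec4 downColor  = texture2D(velocity, vec2(pixel.x, pixel.y - yPixel));
    vec4 upColor    = texture2D(velocity, vec2(pixel.x, pixel.y + yPixel));
    // halfrdx = 0.5 when the grid spacing is one texel
    float div = 0.5 * ((rightColor.x - leftColor.x) + (upColor.y - downColor.y));
    gl_FragColor = vec4(div);
}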

GPU Accelerated Harris Corner Detection

I am trying to use GLSL to implement Harris corner detection, but it does not work properly (I am guessing). First off, it doesn't detect all corners, and it detects a lot of points that are not corners. Another big problem is that the threshold is very specific to each image. Maybe that is normal with a Harris detector?
All help is appreciated.
Shader Passes:
1st: Standard passthrough.
2nd: I turn the image into a grayscale image.
3rd: Sobel filtering the image and passing on the x and y gradient intensities and the product of the x and y intensities.
uniform sampler2D texture;
varying vec2 vUV;

void main() {
    vec2 uv = vUV;
    // Offset used to get access to neighbours
    float w = 1.0 / 800.0;
    float h = 1.0 / 600.0;
    vec3 temp;
    vec3 sum = vec3(0.0);
    // Sobel - Edge Detection
    // y gradient
    vec3 texel0 = texture2D(texture, uv + vec2(-w, h)).xyz;
    vec3 texel1 = texture2D(texture, uv + vec2(-w, 0)).xyz;
    vec3 texel2 = texture2D(texture, uv + vec2(-w, -h)).xyz;
    vec3 texel6 = texture2D(texture, uv + vec2(w, h)).xyz;
    vec3 texel7 = texture2D(texture, uv + vec2(w, 0)).xyz;
    vec3 texel8 = texture2D(texture, uv + vec2(w, -h)).xyz;
    vec3 vertEdge = 1.0 * texel0 + (2.0 * texel1) + 1.0 * texel2 -
                    (1.0 * texel6 + (2.0 * texel7) + 1.0 * texel8);
    // x gradient
    vec3 texe0 = texture2D(texture, uv + vec2(-w, h)).xyz;
    vec3 texe1 = texture2D(texture, uv + vec2(0, h)).xyz;
    vec3 texe2 = texture2D(texture, uv + vec2(w, h)).xyz;
    vec3 texe6 = texture2D(texture, uv + vec2(-w, -h)).xyz;
    vec3 texe7 = texture2D(texture, uv + vec2(0, -h)).xyz;
    vec3 texe8 = texture2D(texture, uv + vec2(w, -h)).xyz;
    vec3 horizEdge = 1.0 * texe0 + (2.0 * texe1) + 1.0 * texe2 -
                     (1.0 * texe6 + (2.0 * texe7) + 1.0 * texe8);
    // Gradient intensity values
    float iy = (vertEdge.r + vertEdge.g + vertEdge.b) / 3.0;
    float ix = (horizEdge.r + horizEdge.g + horizEdge.b) / 3.0;
    // Absolute value to handle negative values
    iy = abs(iy);
    ix = abs(ix);
    float gradProduct = ix * iy;
    gl_FragColor = vec4(ix, iy, gradProduct, 0.0);
}
Not the best looking code - just want it to work for now
4th and 5th: Standard Gaussian Blur
6th: Calculating the Harris response.
If it is a corner, I paint that pixel magenta.
uniform sampler2D texture;
varying vec2 vUV;

void main() {
    vec2 uv = vUV;
    float w = 1.0 / 800.0;
    float h = 1.0 / 600.0;
    float threshold = 0.05; // plays the role of the Harris constant k
    vec4 gradientInfo = texture2D(texture, uv);
    /************** Harris Response *************************
    R is calculated as R = det(M) - k * (trace(M))^2, which leads to
    R = Ix^2 * Iy^2 - Ixy^2 - k * (Ix^2 + Iy^2)^2
    Ix  = X-gradient intensity
    Iy  = Y-gradient intensity
    Ixy = product of the X- and Y-gradient intensities
    *********************************************************/
    float R = pow(gradientInfo.r, 2.0) * pow(gradientInfo.g, 2.0)
              - pow(gradientInfo.b, 2.0)
              - threshold * pow((pow(gradientInfo.r, 2.0) + pow(gradientInfo.g, 2.0)), 2.0);
    vec4 test;
    //if(R > 0.000000000005)
    if (R > 0.0000000000750) {
        // Extremely small values; ugly solution for now to be able to use R in max suppression
        test = vec4(1.0, 0.0, 1.0, R * 1000000000.0);
    }
    else
        test = vec4(vec3(gradientInfo.xyz), 0.0);
    gl_FragColor = vec4(test);
}
Results
Result on a simple square
Result on a more complex figure with the same R and Threshold
And the result when the response check is multiplied by 1000. It doesn't really seem to work.
Below is the code for the maximum suppression.
uniform sampler2D texture;  // Harris response from the previous pass
uniform sampler2D texture2; // second input, used for non-corner pixels
varying vec2 vUV;

void main() {
    vec2 uv = vUV;
    float vOffset = 1.0 / 800.0;
    float hOffset = 1.0 / 600.0;
    vec4 neighbourPixels[9];
    vec3 result;
    int check = 0;
    vec3 previous = texture2D(texture2, uv).xyz;
    vec4 current = texture2D(texture, uv);
    float temp = current.a;
    vec4 neighbourArray[25];
    if (current.a > 0.0) {
        for (int i = -2; i < 3; i++) {
            for (int j = -2; j < 3; j++) {
                if (temp < texture2D(texture, vUV.xy + vec2(i, j) * vec2(vOffset, hOffset)).a) {
                    //result = vec3(1.0, 0.0, 1.0);
                    check = 1;
                    break;
                }
            }
            if (check == 1) {
                break;
            }
        }
        if (check == 1) {
            result = vec3(1.0, 0.0, 0.0);
        }
        else {
            result = vec3(0.0, 1.0, 1.0);
        }
    }
    else {
        result = previous.xyz;
    }
    gl_FragColor = vec4(result, 0.0);
}
You need to look for local maxima in the Harris response, rather than just thresholding. Dilate the response with a 3x3 or 5x5 box, then find the pixels where the original response and the dilated version are equal; a sketch follows below.
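A minimal sketch of that test as a single fragment shader pass (illustrative; it assumes, as in the code above, that the Harris response is stored in the alpha channel of the texture input):
uniform sampler2D texture; // Harris response in the alpha channel, as above
varying vec2 vUV;

void main() {
    float w = 1.0 / 800.0;
    float h = 1.0 / 600.0;
    vec4 current = texture2D(texture, vUV);
    // 3x3 dilation: the maximum response in the neighbourhood
    float dilated = 0.0;
    for (int i = -1; i <= 1; i++) {
        for (int j = -1; j <= 1; j++) {
            dilated = max(dilated, texture2D(texture, vUV + vec2(float(i) * w, float(j) * h)).a);
        }
    }
    // keep a pixel only if it is the local maximum of the response
    if (current.a > 0.0 && current.a == dilated) {
        gl_FragColor = vec4(1.0, 0.0, 1.0, 1.0); // corner: magenta
    } else {
        gl_FragColor = vec4(current.rgb, 0.0);
    }
}
Doing the dilation in its own pass (ping-ponged like the blur) makes a 5x5 or larger window cheaper, but the single-pass version is the easiest way to check whether thresholding was the problem.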

Using CIEdgeWork Filters in iOS

I am using Core Image filters and am trying to apply the CIEdgeWork filter. When the filter is applied, the image turns black. Am I initializing the CIFilter correctly?
CIFilter *edgeWork = [CIFilter filterWithName:@"CIEdgeWork"
                                keysAndValues:kCIInputImageKey, filterPreviewImage,
                                              @"inputRadius", [NSNumber numberWithFloat:3.0],
                                nil];
CIEdgeWork is not available in Core Image on iOS as of iOS 5.x, so it's no surprise that you're seeing a black image when trying to use it.
However, you can use the GPUImageSketchFilter or GPUImageThresholdEdgeDetection from my GPUImage framework to pull off this same effect. You can see the result of the first filter in this answer. The latter filter might be closer to the actual effect that Apple supplies via CIEdgeWork, given that they seem to binarize the resulting edge detected image.
CIEdgeWork and CILineOverlay are now available as of iOS 9: CIEdgeWork.
You can also use a Core Image Sobel sketch filter based on GPUImageSketchFilter: FWKSketchFilter.
Its kernel:
kernel vec4 sketch(sampler image, float strength) {
    vec2 d = destCoord();
    vec2 bottomLeftTextureCoordinate  = samplerTransform(image, d + vec2(-1.0, -1.0));
    vec2 topRightTextureCoordinate    = samplerTransform(image, d + vec2(1.0, 1.0));
    vec2 topLeftTextureCoordinate     = samplerTransform(image, d + vec2(-1.0, 1.0));
    vec2 bottomRightTextureCoordinate = samplerTransform(image, d + vec2(1.0, -1.0));
    vec2 leftTextureCoordinate   = samplerTransform(image, d + vec2(-1.0, 0.0));
    vec2 rightTextureCoordinate  = samplerTransform(image, d + vec2(1.0, 0.0));
    vec2 bottomTextureCoordinate = samplerTransform(image, d + vec2(0.0, -1.0));
    vec2 topTextureCoordinate    = samplerTransform(image, d + vec2(0.0, 1.0));
    float bottomLeftIntensity  = sample(image, bottomLeftTextureCoordinate).r;
    float topRightIntensity    = sample(image, topRightTextureCoordinate).r;
    float topLeftIntensity     = sample(image, topLeftTextureCoordinate).r;
    float bottomRightIntensity = sample(image, bottomRightTextureCoordinate).r;
    float leftIntensity   = sample(image, leftTextureCoordinate).r;
    float rightIntensity  = sample(image, rightTextureCoordinate).r;
    float bottomIntensity = sample(image, bottomTextureCoordinate).r;
    float topIntensity    = sample(image, topTextureCoordinate).r;
    float h = -topLeftIntensity - 2.0 * topIntensity - topRightIntensity + bottomLeftIntensity + 2.0 * bottomIntensity + bottomRightIntensity;
    float v = -bottomLeftIntensity - 2.0 * leftIntensity - topLeftIntensity + bottomRightIntensity + 2.0 * rightIntensity + topRightIntensity;
    float mag = 1.0 - (length(vec2(h, v)) * strength);
    return vec4(vec3(mag), 1.0);
}
