I'd like to write a simple raytracer that displays a shaded sphere using the fragment shader.
I wrote the code below to at least display a circle, but it does not display anything. I assume the maths is correct, since it is a simple quadratic formula:
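For reference, the coefficients in iSphere come from substituting the ray P = o + t*dir into the sphere equation dot(P - c, P - c) = r*r and expanding, with l = o - c:

dot(dir,dir)*t^2 + 2*dot(dir,l)*t + (dot(l,l) - r*r) = 0

which is exactly a, b, c in the code.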
struct Sphere
{
vec3 center;
float radius;
};
struct Light
{
vec3 pos;
vec3 color;
float intensity;
};
struct Ray
{
vec3 orig;
vec3 dir;
};
bool quadratic(float a, float b, float c, out float s1, out float s2)
{
float delta = (b*b) - (4.0*a*c);
if(delta < 0.0)
{
return false;
}
if(delta == 0.0)
{
s1 = s2 = (-b / (2.0*a));
return true;
}
else
{
s1 = (-b-sqrt(delta))/(2.0*a);
s2 = (-b+sqrt(delta))/(2.0*a);
return true;
}
}
bool iSphere(Ray r, Sphere s, out float t)
{
vec3 l = r.orig - s.center;
float a = dot(r.dir, r.dir);
float b = 2.0*dot(r.dir,l);
float c = dot(l,l) - (s.radius*s.radius);
float s1, s2;
if(quadratic(a,b,c,s1,s2) == true)
{
t = min(s1,s2);
return true;
}
return false;
}
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
vec2 uv = fragCoord.xy / iResolution.xy;
/////////////////////////////////////////
/////////DECLARE SCENE///////////////////
/////////////////////////////////////////
Sphere s;
s.center = vec3(0.0, 0.0, -3.0);
s.radius = 1.0;
Light l;
l.pos = vec3(0.0, 5.0, -3.0);
l.color = vec3(1.0, 1.0, 1.0);
l.intensity = 2.0;
/////////////////////////////////////////
////////////CAST THE RAY/////////////////
/////////////////////////////////////////
Ray r;
r.orig = vec3(0.0, 2.0, -3.0);
r.dir = vec3(-1.0+2.0*uv, -1.0);
/////////////////////////////////////////
////////////COMPUTE INTERSECTION/////////
/////////////////////////////////////////
float t;
if(iSphere(r,s,t) == true)
{
fragColor = vec4(1,0,0,1);
}
else
{
fragColor = vec4(1,1,0,1);
}
}
I'm having a hard time understanding why this is not working...
Any ideas?
This is not a good answer :)
Your intersection test is valid.
I changed the ray origin's z from negative to positive, and I can see a sphere:
r.orig = vec3(0.0, 2.0, 3.0);
P = o + t*dir
With the original origin the sphere center sits at the same z as the camera, directly below it, outside the roughly ±45° frustum spanned by dir = vec3(-1.0+2.0*uv, -1.0); moving the origin to z = 3.0 places the sphere in front of the camera.
I'm also learning raytracing. If you don't mind sharing your Shadertoy account, leave a comment or something like that; I can follow your progress and we can learn together.
Here is the Shadertoy (I did some code cleanup, we are a good team).
I've been porting Shadertoy shaders to Metal in order to learn how to write Metal shaders. I don't think I'm doing it correctly, as I have been writing every one of my shaders as a compute shader rather than as vertex/fragment shaders. This has worked for quite a few of the shaders I've ported, almost 20. However, some ports are extremely slow, and others use functions that aren't available.
Here is one of the shaders that is tripping me up:
https://www.shadertoy.com/view/4t2SRh
The fwidth() call in render() and mainImage() is not allowed within a Metal compute shader. The Metal Shading Language does have fwidth(), but it can only be called within a fragment shader.
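For reference, fwidth is defined in terms of screen-space partial derivatives, which only exist while neighboring fragments are shaded in lockstep (hence the fragment-shader-only restriction):

fwidth(p) = abs(dFdx(p)) + abs(dFdy(p))   // dfdx/dfdy in MSL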
Here is my attempt at porting to a compute shader:
#include <metal_stdlib>
using namespace metal;
float float_mod(float f1, float f2) {
return f1-f2 * floor(f1/f2);
}
float sdfCircle(float2 center, float radius, float2 coord )
{
float2 offset = coord - center;
return sqrt((offset.x * offset.x) + (offset.y * offset.y)) - radius;
}
float sdfEllipse(float2 center, float a, float b, float2 coord)
{
float a2 = a * a;
float b2 = b * b;
return (b2 * (coord.x - center.x) * (coord.x - center.x) +
a2 * (coord.y - center.y) * (coord.y - center.y) - a2 * b2)/(a2 * b2);
}
float sdfLine(float2 p0, float2 p1, float width, float2 coord)
{
float2 dir0 = p1 - p0;
float2 dir1 = coord - p0;
float h = clamp(dot(dir0, dir1)/dot(dir0, dir0), 0.0, 1.0);
return (length(dir1 - dir0 * h) - width * 0.5);
}
float sdfUnion( const float a, const float b )
{
return min(a, b);
}
float sdfDifference( const float a, const float b)
{
return max(a, -b);
}
float sdfIntersection( const float a, const float b )
{
return max(a, b);
}
float anti(float d) {
return fwidth(d) * 1.0;
}
float4 render(float d, float3 color, float stroke)
{
//stroke = fwidth(d) * 2.0;
float anti = fwidth(d) * 1.0;
float4 strokeLayer = float4(float3(0.05), 1.0-smoothstep(-anti, anti, d - stroke));
float4 colorLayer = float4(color, 1.0-smoothstep(-anti, anti, d));
if (stroke < 0.000001) {
return colorLayer;
}
return float4(mix(strokeLayer.rgb, colorLayer.rgb, colorLayer.a), strokeLayer.a);
}
kernel void compute(texture2d<float, access::write> output [[texture(0)]],
texture2d<float, access::sample> input [[texture(1)]],
constant float &timer [[buffer(0)]],
uint2 gid [[thread_position_in_grid]])
{
float4 fragColor;
int width = output.get_width();
int height = output.get_height();
float2 resolution = float2(width,height);
float2 uv = float2(gid) / resolution;
float size = min(resolution.x, resolution.y);
float pixSize = 1.0 / size;
float stroke = pixSize * 1.5;
float2 center = float2(0.5, 0.5 * resolution.y/resolution.x);
float a = sdfEllipse(float2(0.5, center.y*2.0-0.34), 0.25, 0.25, uv);
float b = sdfEllipse(float2(0.5, center.y*2.0+0.03), 0.8, 0.35, uv);
b = sdfIntersection(a, b);
float4 layer1 = render(b, float3(0.32, 0.56, 0.53), fwidth(b) * 2.0);
// Draw strips
float4 layer2 = layer1;
float t, r0, r1, r2, e, f;
float2 sinuv = float2(uv.x, (sin(uv.x*40.0)*0.02 + 1.0)*uv.y);
for (float i = 0.0; i < 10.0; i++) {
t = float_mod(timer + 0.3 * i, 3.0) * 0.2;
r0 = (t - 0.15) / 0.2 * 0.9 + 0.1;
r1 = (t - 0.15) / 0.2 * 0.1 + 0.9;
r2 = (t - 0.15) / 0.2 * 0.15 + 0.85;
e = sdfEllipse(float2(0.5, center.y*2.0+0.37-t*r2), 0.7*r0, 0.35*r1, sinuv);
f = sdfEllipse(float2(0.5, center.y*2.0+0.41-t), 0.7*r0, 0.35*r1, sinuv);
f = sdfDifference(e, f);
f = sdfIntersection(f, b);
float4 layer = render(f, float3(1.0, 0.81, 0.27), 0.0);
layer2 = mix(layer2, layer, layer.a);
}
// Draw the handle
float bottom = 0.08;
float handleWidth = 0.01;
float handleRadius = 0.04;
float d = sdfCircle(float2(0.5-handleRadius+0.5*handleWidth, bottom), handleRadius, uv);
float c = sdfCircle(float2(0.5-handleRadius+0.5*handleWidth, bottom), handleRadius-handleWidth, uv);
d = sdfDifference(d, c);
c = uv.y - bottom;
d = sdfIntersection(d, c);
c = sdfLine(float2(0.5, center.y*2.0-0.05), float2(0.5, bottom), handleWidth, uv);
d = sdfUnion(d, c);
c = sdfCircle(float2(0.5, center.y*2.0-0.05), 0.01, uv);
d = sdfUnion(c, d);
c = sdfCircle(float2(0.5-handleRadius*2.0+handleWidth, bottom), handleWidth*0.5, uv);
d = sdfUnion(c, d);
float4 layer0 = render(d, float3(0.404, 0.298, 0.278), stroke);
float2 p = (2.0*float2(gid).xy-resolution.xy)/min(resolution.y,resolution.x);
float3 bcol = float3(1.0,0.8,0.7-0.07*p.y)*(1.0-0.25*length(p));
fragColor = float4(bcol, 1.0);
fragColor.rgb = mix(fragColor.rgb, layer0.rgb, layer0.a);
fragColor.rgb = mix(fragColor.rgb, layer1.rgb, layer1.a);
fragColor.rgb = mix(fragColor.rgb, layer2.rgb, layer2.a);
fragColor.rgb = pow(fragColor.rgb, float3(1.0/2.2));
output.write(fragColor,gid);
}
This doesn't compile, as fwidth() is not available. If I do get rid of fwidth(), it compiles, but of course it doesn't draw the right thing.
I was wondering if there is a better way to port this to a fragment/vertex shader so that I can use MSL's fwidth()? Or is writing it as a compute shader fine, and should I find a different way around using fwidth()?
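One workaround I'm considering, sketched here in GLSL-style syntax that translates almost 1:1 to MSL: since a compute shader has no hardware derivatives, evaluate the field at neighboring pixels and take the finite differences manually. sdf() below is a placeholder for whatever field needs the width, not a function from the shader above:

// Placeholder field, just so the sketch is self-contained; substitute the
// real distance function being rendered.
float sdf(vec2 uv)
{
    return length(uv - vec2(0.5)) - 0.25;
}

// Manual finite differences standing in for the missing hardware derivatives.
// pixel is the size of one pixel in UV space (1.0 / resolution).
float fwidthApprox(vec2 uv, vec2 pixel)
{
    float d0 = sdf(uv);
    float dx = sdf(uv + vec2(pixel.x, 0.0)) - d0; // ~ dFdx
    float dy = sdf(uv + vec2(0.0, pixel.y)) - d0; // ~ dFdy
    return abs(dx) + abs(dy);                     // fwidth = |dFdx| + |dFdy|
}

The obvious cost is the extra evaluations of the field per pixel, which may matter for the ports that are already slow.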
I am a beginner with WebGL programming and still have more to learn.
I have found an existing fragment shader that contains this function:
float shadow(vec3 origin, vec3 ray) {
float tSphere0 = intersectSphere(origin, ray, sphereCenter0, sphereRadius0);
if(tSphere0 < 1.0) return 0.0;
float tSphere1 = intersectSphere(origin, ray, sphereCenter1, sphereRadius1);
if(tSphere1 < 1.0) return 0.0;
float tSphere2 = intersectSphere(origin, ray, sphereCenter2, sphereRadius2);
if(tSphere2 < 1.0) return 0.0;
float tSphere3 = intersectSphere(origin, ray, sphereCenter3, sphereRadius3);
if(tSphere3 < 1.0) return 0.0;
return 1.0;
}
My questions are: Will that function always return 1.0?
What does that function mean?
This is the intersectSphere function:
float intersectSphere(vec3 origin, vec3 ray, vec3 sphereCenter, float sphereRadius) {
vec3 toSphere = origin - sphereCenter;
float a = dot(ray, ray);
float b = 2.0 * dot(toSphere, ray);
float c = dot(toSphere, toSphere) - sphereRadius*sphereRadius;
float discriminant = b*b - 4.0*a*c;
if(discriminant > 0.0) {
float t = (-b - sqrt(discriminant)) / (2.0 * a);
if(t > 0.0) return t;
}
return 10000.0;
}
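From reading other raytracers, I think the function is meant to be called like this (hitPoint, normal, and lightPos are my own illustrative names, not from the shader):

// The shadow ray is the *unnormalized* segment from the surface point to the
// light, so t is measured in fractions of that distance. intersectSphere()
// returns 10000.0 on a miss, so t < 1.0 holds only when a sphere lies
// between the point and the light, and shadow() then returns 0.0.
vec3 toLight = lightPos - hitPoint;
float lit = shadow(hitPoint + normal * 0.001, toLight); // offset avoids self-intersection
// lit is 1.0 when nothing blocks the light, 0.0 when the point is in shadow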
I'm trying to create a displacement-map CIKernel for iOS 8 that shifts the pixels horizontally by the map's R channel and vertically by its G channel.
The map pixel coordinates must be picked relative to the source image size: mapPixel = ((dest.x / source.width) * map.width, (dest.y / source.height) * map.height)
The input image I test with is 2048 x 2048, and the map is red-green Perlin noise at 2560 x 2560.
In Quartz Composer the CIKernel works almost as expected, except that the map is not applied to the whole image.
kernel vec4 coreImageKernel(sampler image, sampler displaceMap, float scaleX, float scaleY)
{
vec2 destination = destCoord();
vec2 imageSize = samplerSize(image);
float xPercent = destination.x / imageSize.x;
float yPercent = destination.y / imageSize.y;
vec2 mapSize = samplerSize(displaceMap);
vec2 mapCoord = vec2(mapSize.x * xPercent, mapSize.y * yPercent);
vec4 mapPixel = sample(displaceMap, mapCoord);
float ratioShiftX = ((mapPixel.x) * 2.0) - 1.0;
float ratioShiftY = ((mapPixel.y) * 2.0) - 1.0;
vec2 pixelShift = vec2(ratioShiftX * scaleX, ratioShiftY * scaleY);
return sample(image, destination - pixelShift);
}
Here's what the filter function looks like:
function __image main(__image image, __image displaceMap, __number scaleX, __number scaleY) {
return coreImageKernel.apply(image.definition, null, image, displaceMap, scaleX, scaleY);
}
But when I load the CIKernel in a CIFilter, the result is far from what I see in Quartz Composer.
Here's what my apply function looks like in the CIFilter
override var outputImage:CIImage? {
if let inputImage = inputImage {
if let inputMap = inputMap {
let args = [inputImage as AnyObject, inputMap as AnyObject, inputScaleX, inputScaleY]
return CIDisplacementMapFilter.kernel?.applyWithExtent(inputImage.extent, roiCallback: {
(index, rect) in
if index == 0 {
return rect
}
return CGRectInfinite
}, arguments: args)
}
}
return nil
}
I'm guessing the ROI is wrong and the sampler is tiled, but I can't figure it out.
As it turns out, the kernel was wrong.
Here's a kernel that does the job. The key is samplerTransform, which converts working-space coordinates into each sampler's own coordinate space instead of assuming both images share the destination's coordinates:
kernel vec4 displace(sampler source, sampler map, float scaleX, float scaleY)
{
vec2 d = destCoord();
vec4 mapPixel = sample(map, samplerTransform(map, d));
float shiftX = ((mapPixel.x * 2.0) - 1.0) * scaleX;
float shiftY = ((mapPixel.y * 2.0) - 1.0) * scaleY;
vec2 s = samplerTransform(source, d + vec2(shiftX, shiftY));
return sample(source, s);
}
This is the same kernel in Metal:
#include <metal_stdlib>
using namespace metal;
#include <CoreImage/CoreImage.h>
extern "C" {
namespace coreimage {
float4 displaceFilterKernel(sampler source, sampler map, float scaleX, float scaleY)
{
float2 d = map.coord();
float4 mapPixel = map.sample(d);
float shiftX = ((mapPixel.x * 2.0) - 1.0) * scaleX;
float shiftY = ((mapPixel.y * 2.0) - 1.0) * scaleY;
float2 s = float2(d.x, 1.0 - d.y) + float2(shiftX, shiftY);
return source.sample(s);
}
}
}
I'm currently trying to get this bokeh shader to work with GPUImage: http://blenderartists.org/forum/showthread.php?237488-GLSL-depth-of-field-with-bokeh-v2-4-(update)
This is what I've got at the moment:
precision mediump float;
varying highp vec2 textureCoordinate;
varying highp vec2 textureCoordinate2;
uniform sampler2D inputImageTexture;
uniform sampler2D inputImageTexture2;
uniform float inputImageTextureWidth;
uniform float inputImageTextureHeight;
#define PI 3.14159265
float width = inputImageTextureWidth; //texture width
float height = inputImageTextureHeight; //texture height
vec2 texel = vec2(1.0/width,1.0/height);
//uniform variables from external script
uniform float focalDepth; //focal distance value in meters, but you may use autofocus option below
uniform float focalLength; //focal length in mm
uniform float fstop; //f-stop value
bool showFocus = false; //show debug focus point and focal range (red = focal point, green = focal range)
float znear = 0.1; //camera clipping start
float zfar = 5.0; //camera clipping end
//------------------------------------------
//user variables
int samples = 3; //samples on the first ring
int rings = 3; //ring count
bool manualdof = false; //manual dof calculation
float ndofstart = 1.0; //near dof blur start
float ndofdist = 2.0; //near dof blur falloff distance
float fdofstart = 1.0; //far dof blur start
float fdofdist = 3.0; //far dof blur falloff distance
float CoC = 0.03;//circle of confusion size in mm (35mm film = 0.03mm)
bool vignetting = false; //use optical lens vignetting?
float vignout = 1.3; //vignetting outer border
float vignin = 0.0; //vignetting inner border
float vignfade = 22.0; //f-stops till vignete fades
bool autofocus = false; //use autofocus in shader? disable if you use external focalDepth value
vec2 focus = vec2(0.5, 0.5); // autofocus point on screen (0.0,0.0 - left lower corner, 1.0,1.0 - upper right)
float maxblur = 1.0; //clamp value of max blur (0.0 = no blur,1.0 default)
float threshold = 0.5; //highlight threshold;
float gain = 2.0; //highlight gain;
float bias = 0.5; //bokeh edge bias
float fringe = 0.7; //bokeh chromatic aberration/fringing
bool noise = false; //use noise instead of pattern for sample dithering
float namount = 0.0001; //dither amount
bool depthblur = false; //blur the depth buffer?
float dbsize = 1.25; //depthblursize
/*
next part is experimental
not looking good with small sample and ring count
looks okay starting from samples = 4, rings = 4
*/
bool pentagon = false; //use pentagon as bokeh shape?
float feather = 0.4; //pentagon shape feather
//------------------------------------------
float penta(vec2 coords) //pentagonal shape
{
float scale = float(rings) - 1.3;
vec4 HS0 = vec4( 1.0, 0.0, 0.0, 1.0);
vec4 HS1 = vec4( 0.309016994, 0.951056516, 0.0, 1.0);
vec4 HS2 = vec4(-0.809016994, 0.587785252, 0.0, 1.0);
vec4 HS3 = vec4(-0.809016994,-0.587785252, 0.0, 1.0);
vec4 HS4 = vec4( 0.309016994,-0.951056516, 0.0, 1.0);
vec4 HS5 = vec4( 0.0 ,0.0 , 1.0, 1.0);
vec4 one = vec4( 1.0 );
vec4 P = vec4((coords),vec2(scale, scale));
vec4 dist = vec4(0.0);
float inorout = -4.0;
dist.x = dot( P, HS0 );
dist.y = dot( P, HS1 );
dist.z = dot( P, HS2 );
dist.w = dot( P, HS3 );
dist = smoothstep( -feather, feather, dist );
inorout += dot( dist, one );
dist.x = dot( P, HS4 );
dist.y = HS5.w - abs( P.z );
dist = smoothstep( -feather, feather, dist );
inorout += dist.x;
return clamp( inorout, 0.0, 1.0 );
}
float bdepth(vec2 coords) //blurring depth
{
float d = 0.0;
float kernel[9];
vec2 offset[9];
vec2 wh = vec2(texel.x, texel.y) * dbsize;
offset[0] = vec2(-wh.x,-wh.y);
offset[1] = vec2( 0.0, -wh.y);
offset[2] = vec2( wh.x, -wh.y);
offset[3] = vec2(-wh.x, 0.0);
offset[4] = vec2( 0.0, 0.0);
offset[5] = vec2( wh.x, 0.0);
offset[6] = vec2(-wh.x, wh.y);
offset[7] = vec2( 0.0, wh.y);
offset[8] = vec2( wh.x, wh.y);
kernel[0] = 1.0/16.0; kernel[1] = 2.0/16.0; kernel[2] = 1.0/16.0;
kernel[3] = 2.0/16.0; kernel[4] = 4.0/16.0; kernel[5] = 2.0/16.0;
kernel[6] = 1.0/16.0; kernel[7] = 2.0/16.0; kernel[8] = 1.0/16.0;
for( int i=0; i<9; i++ )
{
float tmp = texture2D(inputImageTexture2, coords + offset[i]).r;
d += tmp * kernel[i];
}
return d;
}
vec3 color(vec2 coords,float blur) //processing the sample
{
vec3 col = vec3(0.0);
col.r = texture2D(inputImageTexture, coords + vec2(0.0,1.0)*texel*fringe*blur).r;
col.g = texture2D(inputImageTexture, coords + vec2(-0.866,-0.5)*texel*fringe*blur).g;
col.b = texture2D(inputImageTexture, coords + vec2(0.866,-0.5)*texel*fringe*blur).b;
vec3 lumcoeff = vec3(0.299,0.587,0.114);
float lum = dot(col.rgb, lumcoeff);
float thresh = max((lum-threshold)*gain, 0.0);
return col+mix(vec3(0.0),col,thresh*blur);
}
vec2 rand(vec2 coord) //generating noise/pattern texture for dithering
{
float noiseX = ((fract(1.0-coord.s*(width/2.0))*0.25)+(fract(coord.t*(height/2.0))*0.75))*2.0-1.0;
float noiseY = ((fract(1.0-coord.s*(width/2.0))*0.75)+(fract(coord.t*(height/2.0))*0.25))*2.0-1.0;
if (noise)
{
noiseX = clamp(fract(sin(dot(coord ,vec2(12.9898,78.233))) * 43758.5453),0.0,1.0)*2.0-1.0;
noiseY = clamp(fract(sin(dot(coord ,vec2(12.9898,78.233)*2.0)) * 43758.5453),0.0,1.0)*2.0-1.0;
}
return vec2(noiseX,noiseY);
}
vec3 debugFocus(vec3 col, float blur, float depth)
{
float edge = 0.002*depth; //distance based edge smoothing
float m = clamp(smoothstep(0.0,edge,blur),0.0,1.0);
float e = clamp(smoothstep(1.0-edge,1.0,blur),0.0,1.0);
col = mix(col,vec3(1.0,1.0,0.0),(1.0-m)*0.6);
col = mix(col,vec3(0.0,1.0,1.0),((1.0-e)-(1.0-m))*0.2);
return col;
}
float linearize(float depth)
{
return -zfar * znear / (depth * (zfar - znear) - zfar);
}
float vignette()
{
float dist = distance(textureCoordinate.xy, vec2(0.5,0.5));
dist = smoothstep(vignout+(fstop/vignfade), vignin+(fstop/vignfade), dist);
return clamp(dist,0.0,1.0);
}
void main()
{
//scene depth calculation
float depth = linearize(texture2D(inputImageTexture2, textureCoordinate2.xy).x);
if (depthblur)
{
depth = linearize(bdepth(textureCoordinate2.xy));
}
//focal plane calculation
float fDepth = focalDepth;
if (autofocus)
{
fDepth = linearize(texture2D(inputImageTexture2, focus).x);
}
//dof blur factor calculation
float blur = 0.0;
if (manualdof)
{
float a = depth-fDepth; //focal plane
float b = (a-fdofstart)/fdofdist; //far DoF
float c = (-a-ndofstart)/ndofdist; //near Dof
blur = (a>0.0)?b:c;
}
else
{
float f = focalLength; //focal length in mm
float d = fDepth*1000.0; //focal plane in mm
float o = depth*1000.0; //depth in mm
float a = (o*f)/(o-f);
float b = (d*f)/(d-f);
float c = (d-f)/(d*fstop*CoC);
blur = abs(a-b)*c;
}
blur = clamp(blur,0.0,1.0);
// calculation of pattern for ditering
vec2 noise = rand(textureCoordinate.xy)*namount*blur;
// getting blur x and y step factor
float w = (1.0/width)*blur*maxblur+noise.x;
float h = (1.0/height)*blur*maxblur+noise.y;
// calculation of final color
vec3 col = vec3(0.0);
if(blur < 0.05) //some optimization thingy
{
col = texture2D(inputImageTexture, textureCoordinate.xy).rgb;
}
else
{
col = texture2D(inputImageTexture, textureCoordinate.xy).rgb;
float s = 1.0;
int ringsamples;
for (int i = 1; i <= rings; i += 1)
{
ringsamples = i * samples;
for (int j = 0 ; j < ringsamples ; j += 1)
{
float step = PI*2.0 / float(ringsamples);
float pw = (cos(float(j)*step)*float(i));
float ph = (sin(float(j)*step)*float(i));
float p = 1.0;
if (pentagon)
{
p = penta(vec2(pw,ph));
}
col += color(textureCoordinate.xy + vec2(pw*w,ph*h),blur)*mix(1.0,(float(i))/(float(rings)),bias)*p;
s += 1.0*mix(1.0,(float(i))/(float(rings)),bias)*p;
}
}
col /= s; //divide by sample count
}
if (showFocus)
{
col = debugFocus(col, blur, depth);
}
if (vignetting)
{
col *= vignette();
}
gl_FragColor.rgb = col;
gl_FragColor.a = 1.0;
}
This is my bokeh filter, a subclass of GPUImageTwoInputFilter:
@implementation GPUImageBokehFilter
- (id)init;
{
NSString *fragmentShaderPathname = [[NSBundle mainBundle] pathForResource:@"BokehShader" ofType:@"fsh"];
NSString *fragmentShaderString = [NSString stringWithContentsOfFile:fragmentShaderPathname encoding:NSUTF8StringEncoding error:nil];
if (!(self = [super initWithFragmentShaderFromString:fragmentShaderString]))
{
return nil;
}
focalDepthUniform = [filterProgram uniformIndex:@"focalDepth"];
focalLengthUniform = [filterProgram uniformIndex:@"focalLength"];
fStopUniform = [filterProgram uniformIndex:@"fstop"];
[self setFocalDepth:1.0];
[self setFocalLength:35.0];
[self setFStop:2.2];
return self;
}
#pragma mark -
#pragma mark Accessors
- (void)setFocalDepth:(float)focalDepth {
_focalDepth = focalDepth;
[self setFloat:_focalDepth forUniform:focalDepthUniform program:filterProgram];
}
- (void)setFocalLength:(float)focalLength {
_focalLength = focalLength;
[self setFloat:_focalLength forUniform:focalLengthUniform program:filterProgram];
}
- (void)setFStop:(CGFloat)fStop {
_fStop = fStop;
[self setFloat:_fStop forUniform:fStopUniform program:filterProgram];
}
@end
And finally, this is how I use said filter:
@implementation ViewController {
GPUImageBokehFilter *bokehFilter;
GPUImagePicture *bokehMap;
UIImage *inputImage;
}
- (void)viewDidLoad
{
[super viewDidLoad];
inputImage = [UIImage imageNamed:@"stones"];
bokehMap = [[GPUImagePicture alloc] initWithImage:[UIImage imageNamed:@"bokehmask"]];
_backgroundImage.image = inputImage;
bokehFilter = [[GPUImageBokehFilter alloc] init];
[self processImage];
}
- (IBAction)dataInputUpdated:(id)sender {
[self processImage];
}
- (void)processImage {
dispatch_async(dispatch_get_global_queue( DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
GPUImagePicture *gpuPicture = [[GPUImagePicture alloc] initWithImage:inputImage];
[gpuPicture addTarget:bokehFilter];
[gpuPicture processImage];
[bokehMap addTarget:bokehFilter];
[bokehMap processImage];
[bokehFilter useNextFrameForImageCapture];
[bokehFilter setFloat:inputImage.size.width forUniformName:@"inputImageTextureWidth"];
[bokehFilter setFloat:inputImage.size.height forUniformName:@"inputImageTextureHeight"];
UIImage *blurredImage = [bokehFilter imageFromCurrentFramebuffer];
dispatch_async(dispatch_get_main_queue(), ^{
[self displayNewImage:blurredImage];
});
});
}
- (void)displayNewImage:(UIImage*)newImage {
[UIView transitionWithView:_backgroundImage
duration:.6f
options:UIViewAnimationOptionTransitionCrossDissolve
animations:^{
_backgroundImage.image = newImage;
} completion:nil];
}
...
The first image is the one I'm trying to blur, the second one is a random gradient to test the shader's depth map thingy:
When I start the app on my iPhone, I get this:
After moving the slider (which triggers the dataInputUpdated: method), I get this:
While that admittedly looks much better than the first image, I still have some problems with this:
There's a diagonal noisy line (inside the red lines I put on the picture) that appears to be unblurred.
The top left of the image is blurry, even though it shouldn't be.
Why do I get this weird behavior? Shouldn't the shader output be the same every time?
Also, how do I get it to respect the depth map? My GLSL shader knowledge is very limited, so please be patient.
The diagonal artifact appears to be caused by your test gradient. You can see that it occurs at about the same place where your gradient goes completely white. Try spreading out the gradient so it only reaches 1.0 or 0.0 at the very corners of the image.
It's a pretty big question, and I can't give a full answer because I would really need to test the thing out.
But a few points: the final image that you put up is hard to work with. Because the image has been upscaled so much, I can't tell whether it's actually blurred or whether it just appears blurry because of the resolution. Regardless, the amount of blur you're getting (compared to the original link you provided) suggests that something isn't working in the shader.
Another thing that concerns me is the //some optimization thingy comment you've got in there. That kind of branch is exactly what can produce an ugly line in your final output: saying that you won't have any blur when blur < 0.05 isn't necessarily something you can do. I would expect a nasty artifact where the shader transitions from the blurred path into the 'optimized' part.
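For example, here is a hedged sketch (not from the original shader, just reusing its names) that blends the two paths instead of branching, so there is no seam at the threshold:

vec3 sharp = texture2D(inputImageTexture, textureCoordinate.xy).rgb;
// col holds the ring-sampled blur result; fade it in as blur crosses the old cutoff
col = mix(sharp, col, smoothstep(0.0, 0.05, blur));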
Hope that sheds some light, and good luck!
Have you tried enabling showFocus? This should show the focal point in red and the focal range in green, which should help with debugging. You could also try enabling autofocus to make sure the centre of the image is in focus, because at the moment it's not obvious which distance should be in focus, given that the linearize function changes coordinate systems. After that, try tweaking fstop to get the desired amount of blur. You will probably also find that you need more than samples = 3 and rings = 3 to produce a smooth bokeh effect.
Your answers helped me get on the right track, and after a few hours of fiddling with my code and the shader, I managed to fix all the bugs. Here's what caused them and how I fixed them:
The ugly diagonal line was caused by the linearize() method, so I removed it and made the shader use the depth map's R value directly, without processing it first.
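In shader terms, the change amounts to this (using the shader's existing names):

// before: float depth = linearize(texture2D(inputImageTexture2, textureCoordinate2.xy).x);
float depth = texture2D(inputImageTexture2, textureCoordinate2.xy).r; // use the R value directly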
The blue-ish image I got from the shader was caused by my own incompetence. These two lines had to be put before the calls to processImage:
[bokehFilter setFloat:inputImage.size.width forUniformName:@"inputImageTextureWidth"];
[bokehFilter setFloat:inputImage.size.height forUniformName:@"inputImageTextureHeight"];
In hindsight, it's obvious why I only got results the second time I used the shader. After fixing those bugs, I went on to optimize it a bit to keep the execution time as low as possible, and now I can tell it to render 8 samples/4 rings and it does so in less than a second. Here's what that looks like:
Thanks for the answers, everyone; I probably wouldn't have fixed those bugs without you.
Using the amazing GPUImage framework, I'm trying to create a custom filter: a custom fragment shader that is passed some color vectors as uniforms and processes each fragment, substituting a chosen color with the corresponding one from the uniforms. I made this work using Quartz, but since I'm taking my first steps in the OpenGL world with this framework, I'd like to give GPU processing a try.
The fragment shader I made seems to work fine, but there is a problem in the output. I'm posting just a sample for debugging purposes:
varying highp vec2 textureCoordinate;
uniform sampler2D inputImageTexture;
bool compareVectors (lowp vec3 x,lowp vec3 y){
bool result;
if (x.r != y.r) {
return result = false;
}
if (x.b != y.b) {
return result = false;
}
if (x.g != y.g ) {
return result = false;
}
return result = true;
}
void main()
{
lowp vec3 tc = vec3(0.0, 0.0, 0.0);
lowp vec4 pixcol = texture2D(inputImageTexture, textureCoordinate).rgba;
lowp vec3 sampleColors[3];
lowp vec3 newColors[3];
sampleColors[0] = vec3(0.2, 0.2, 0.2);
sampleColors[1] = vec3(0.0, 0.0, 0.0);
sampleColors[2] = vec3(1.0, 1.0, 1.0);
newColors[0] = vec3(0.4, 0.4, 1.0);
newColors[1] = vec3(0.3, 0.4, 1.0);
newColors[2] = vec3(0.6, 0.7, 0.5);
if (pixcol.a >= 0.2) {
if (compareVectors (sampleColors[0],pixcol.rgb))
tc = newColors[0];
else if (compareVectors (sampleColors[1],pixcol.rgb))
tc = newColors[1];
else if (compareVectors (sampleColors[2],pixcol.rgb))
tc = newColors[2];
else
tc = pixcol.rgb;
}
else
tc = pixcol.rgb;
gl_FragColor = vec4(tc.rgb, pixcol.a);
}
The resulting image has a lot of artifacts. It looks pixelated on screen and is not written correctly to disk. Here are some screenshots.
The first image is the original, the second is a screenshot of the filtered image on the iPhone screen, and the third is the filtered image written to disk.
Digging into this, I remembered that texels and pixels aren't the same thing, so I'm probably not mapping them correctly. I'd like a 1:1 position ratio, and that's probably not happening. How can I achieve that? Thanks, Andrea
As stated in the comment by Brad, the solution was simply to compare the texel color against a range of values instead of testing for exact equality. This is due to floating-point precision (I feel stupid writing it now; it was pretty obvious). The starting image was made with fixed, controlled colors, but since the image gets sampled, the values probably no longer match the starting image exactly.
Here is the correct fragment shader:
varying highp vec2 textureCoordinate;
uniform sampler2D inputImageTexture;
bool compareVectors (lowp vec3 sample,lowp vec3 texel){
bool result;
if ( abs(texel.r-sample.r) > 0.1 ) {
return result = false;
}
if ( abs(texel.g-sample.g) > 0.1 ) {
return result = false;
}
if ( abs(texel.b-sample.b) > 0.1 ) {
return result = false;
}
return result = true;
}
void main()
{
lowp vec3 tc = vec3(0.0, 0.0, 0.0);
lowp vec4 pixcol = texture2D(inputImageTexture, textureCoordinate).rgba;
lowp vec3 sampleColors[3];
lowp vec3 newColors[3];
sampleColors[0] = vec3(0.5, 0.5, 0.5);
sampleColors[1] = vec3(0.0, 0.0, 0.0);
sampleColors[2] = vec3(1.0, 1.0, 1.0);
newColors[0] = vec3(0.4, 0.4, 1.0);
newColors[1] = vec3(0.3, 0.4, 1.0);
newColors[2] = vec3(0.6, 0.7, 0.5);
if (pixcol.a >= 0.2) {
if (compareVectors (sampleColors[0],pixcol.rgb))
tc = newColors[0];
else if (compareVectors (sampleColors[1],pixcol.rgb))
tc = newColors[1];
else if (compareVectors (sampleColors[2],pixcol.rgb))
tc = newColors[2];
else
tc = pixcol.rgb;
}
else
tc = pixcol.rgb;
gl_FragColor = vec4(tc.rgb, pixcol.a);
}
I'd like to thank Brad, who found the answer. Hope this helps.