I am trying to use GLSL to implement Harris Corner Detection, but it does not seem to work properly. First off, it doesn't detect all corners, and it detects a lot of points that are not corners; another big problem is that the threshold is very specific to each image. Maybe that is normal with a Harris detector?
All help is appreciated.
Shader Passes:
1st: Standard passthrough.
2nd: I turn the image into a grayscale image.
3rd: Sobel filtering the image and passing the x and y gradient intensities and the product of the x and y intensities.
uniform sampler2D texture;
varying vec2 vUV;
void main() {
vec2 uv = vUV;
// Offset used to get access to neighbours
float w = 1.0/800.0;
float h = 1.0/600.0;
vec3 temp;
vec3 sum = vec3(0.0);
// Sobel - Edge Detection
// y gradient
vec3 texel0 = texture2D(texture, uv + vec2(-w, h)).xyz;
vec3 texel1 = texture2D(texture, uv + vec2(-w, 0)).xyz;
vec3 texel2 = texture2D(texture, uv + vec2(-w, -h)).xyz;
vec3 texel6 = texture2D(texture, uv + vec2(w, h)).xyz;
vec3 texel7 = texture2D(texture, uv + vec2(w, 0)).xyz;
vec3 texel8 = texture2D(texture, uv + vec2(w, -h)).xyz;
vec3 vertEdge = 1.0 * texel0 + (2.0*texel1) + 1.0 * texel2 -
(1.0 * texel6 + (2.0*texel7) + 1.0 * texel8);
// x gradient
vec3 texe0 = texture2D(texture, uv + vec2(-w,h)).xyz;
vec3 texe1 = texture2D(texture, uv + vec2(0, h)).xyz;
vec3 texe2 = texture2D(texture, uv + vec2(w, h)).xyz;
vec3 texe6 = texture2D(texture, uv + vec2(-w,-h)).xyz;
vec3 texe7 = texture2D(texture, uv + vec2(0,-h)).xyz;
vec3 texe8 = texture2D(texture, uv + vec2(w,-h)).xyz;
vec3 horizEdge = 1.0 * texe0 + (2.0*texe1) + 1.0 * texe2 -
(1.0 * texe6 + (2.0*texe7) + 1.0 * texe8);
// Gradient intensity values
float iy = (vertEdge.r + vertEdge.g + vertEdge.b) /3.0;
float ix = (horizEdge.r + horizEdge.g + horizEdge.b) /3.0;
// Take the absolute value to handle negative gradients
iy = abs(iy);
ix = abs(ix);
float gradProduct = ix * iy;
gl_FragColor = vec4(ix, iy, gradProduct, 0.0);
}
Not the best looking code - just want it to work for now
4th and 5th: Standard Gaussian Blur
6th: Calculating Harris Response.
If it is a corner, I paint that pixel magenta.
void main() {
vec2 uv = vUV;
float w = 1.0/800.0;
float h = 1.0/600.0;
float threshold = 0.05;
vec4 gradientInfo = texture2D(texture, uv);
/************** Harris Response **********************
R is calculated as R = det(M) - k * trace(M)^2, which expands to
R = Ix^2 * Iy^2 - Ixy^2 - k * (Ix^2 + Iy^2)^2
Ix  = X-gradient intensity
Iy  = Y-gradient intensity
Ixy = product of the X- and Y-gradient intensities
*********************************************************/
float R = pow(gradientInfo.r,2.0)*pow(gradientInfo.g,2.0)
- pow(gradientInfo.b,2.0)
- threshold * pow((pow(gradientInfo.r,2.0)+pow(gradientInfo.g,2.0)),2.0);
vec4 test;
//if(R > 0.000000000005)
if(R > 0.0000000000750){
// Extremely small values; ugly solution for now to be able to use R in the max suppression pass
test = vec4(1.0, 0.0, 1.0, R*1000000000.0);
}
else
test = vec4(vec3(gradientInfo.xyz),0.0);
gl_FragColor = vec4( test);
}
Results
Result on a simple square
Result on a more complex figure with the same R and Threshold
And the result when the response check is multiplied by 1000. It doesn't really seem to work.
Below is the code for the maximum suppression.
void main() {
vec2 uv = vUV;
float vOffset = 1.0/800.0;
float hOffset = 1.0/600.0;
vec4 neighbourPixels[9];
vec3 result;
int check = 0;
vec3 previous = texture2D(texture2, uv).xyz;
vec4 current = texture2D(texture, uv);
float temp = current.a;
vec4 neighbourArray[25];
if(current.a > 0.0){
for(int i = -2; i<3;i++){
for(int j = -2; j<3;j++){
if(temp < texture2D(texture, vUV.xy+vec2(i,j)*vec2(vOffset,hOffset)).a ){
//result = vec3(1.0,0.0,1.0);
check = 1;
break;
}
}
if(check==1){
break;
}
}
if(check==1){
result = vec3(1.0,0.0,0.0);
}
else{
result = vec3(0.0,1.0,1.0);
}
}
else{
result = previous.xyz;
}
gl_FragColor = vec4( result, 0.0);
}
You need to look for local maxima in the Harris response rather than just thresholding. Dilate the response with a 3x3 or 5x5 box, then find the pixels where the original response and the dilated version are equal.
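For illustration, here is a minimal sketch of that approach, assuming the previous pass writes the raw Harris response R into the red channel and reusing the 1/800 and 1/600 texel offsets from the shaders above; the 0.05 threshold is only a placeholder to tune:
uniform sampler2D texture;
varying vec2 vUV;
void main() {
    float w = 1.0/800.0;
    float h = 1.0/600.0;
    // Dilate: take the maximum response over the 3x3 neighbourhood.
    float center = texture2D(texture, vUV).r;
    float dilated = center;
    for (int i = -1; i <= 1; i++) {
        for (int j = -1; j <= 1; j++) {
            float r = texture2D(texture, vUV + vec2(float(i)*w, float(j)*h)).r;
            dilated = max(dilated, r);
        }
    }
    // A corner is a pixel whose response equals the dilated (locally
    // maximal) response and also clears a global threshold.
    if (center >= dilated && center > 0.05) {
        gl_FragColor = vec4(1.0, 0.0, 1.0, 1.0); // magenta corner marker
    } else {
        gl_FragColor = vec4(vec3(center), 1.0);
    }
}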
How can I apply a mask like this to get an effect such as bokeh? I need to blur the edges of the mask and apply it to the image texture. How do I do that?
Vertex shader:
attribute vec4 a_Position;
void main()
{
gl_Position = a_Position;
}
Fragment shader:
precision lowp float;
uniform sampler2D u_Sampler; // textureSampler
uniform sampler2D u_Mask; // maskSampler
uniform vec3 iResolution;
vec4 blur(sampler2D source, vec2 size, vec2 uv) {
vec4 C = vec4(0.0);
float width = 1.0 / size.x;
float height = 1.0 / size.y;
float divisor = 0.0;
for (float x = -25.0; x <= 25.0; x++)
{
C += texture2D(source, uv + vec2(x * width, 0.0));
C += texture2D(source, uv + vec2(0.0, x * height));
divisor++;
}
C*=0.5;
return vec4(C.r / divisor, C.g / divisor, C.b / divisor, 1.0);
}
void main()
{
vec2 uv = gl_FragCoord.xy / iResolution.xy;
vec4 videoColor = texture2D(u_Sampler, uv);
vec4 maskColor = texture2D(u_Mask, uv);
gl_FragColor = blur(u_Sampler, iResolution.xy, uv);
}
Instead of writing the blurred color straight to gl_FragColor, mix the blurred and original colors using the mask:
vec4 blurColor = blur(u_Sampler, iResolution.xy, uv);
gl_FragColor = mix(blurColor, videoColor, maskColor.r);
But FYI, it's not common to blur in one pass like you have. It's more common to blur in one direction (horizontally), then blur the result of that vertically, and then mix the results: blurredTexture, videoTexture, mask.
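As a rough sketch of that two-pass approach, reusing the question's u_Sampler and iResolution names, the horizontal pass could look like this; the vertical pass is the same shader with vec2(0.0, x * height) offsets, run on the output of the horizontal pass:
precision lowp float;
uniform sampler2D u_Sampler;
uniform vec3 iResolution;
void main()
{
    vec2 uv = gl_FragCoord.xy / iResolution.xy;
    float width = 1.0 / iResolution.x;
    vec4 sum = vec4(0.0);
    float divisor = 0.0;
    // Accumulate samples along the horizontal direction only.
    for (float x = -25.0; x <= 25.0; x++)
    {
        sum += texture2D(u_Sampler, uv + vec2(x * width, 0.0));
        divisor++;
    }
    gl_FragColor = vec4(sum.rgb / divisor, 1.0);
}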
I'm trying to convert this particular shader to CIKernel code.
https://www.shadertoy.com/view/4scBRH
I've got this so far:
kernel vec4 thresholdFilter(__sample image, float time)
{
vec2 uv = destCoord();
float amount = sin(time) * 0.1;
amount *= 0.3;
float split = 1. - fract(time / 2.0);
float scanOffset = 0.01;
vec2 uv1 = vec2(uv.x + amount, uv.y);
vec2 uv2 = vec2(uv.x, uv.y + amount);
if (uv.y > split) {
uv.x += scanOffset;
uv1.x += scanOffset;
uv2.x += scanOffset;
}
float r = sample(image, uv1).r;
float g = sample(image, uv).g;
float b = sample(image, uv2).b;
float a = 1.0;
vec3 outPutPixel = sample(image, samplerTransform(image, uv)).rgb;
return vec4(outPutPixel, 1.0);
}
The output of this code is not even close to the ShaderToy output.
I am trying to get a fluid simulation to work using WebGL using http://meatfighter.com/fluiddynamics/GPU_Gems_Chapter_38.pdf as a resource. I have implemented everything but I feel like there are multiple things that aren't working correctly. I added boundaries but it seems like they are having no effect, which makes me suspicious about how much pressure and advection are working. I displayed the divergence and I get very little around where I am moving the object around as well as when the velocity hits the edge (boundary), but the pressure that I get is completely empty. I calculate pressure using the diffusion shader as described in the linked resource.
I know the code I am posting is a little confusing due to the nature of what it is about. I can supply any pictures/links to the simulation if that would help.
--EDIT--
After some more investigation I believe the problem is related to my advection function, or is at least part of the problem. I am unsure how to fix it, though.
Instead of posting all of my code, the general process I follow is:
advect velocity
diffuse velocity
add velocity
calculate divergence
compute pressure
subtract gradient
For diffusing velocity and computing pressure I only do 10 iterations, because that's all my computer can handle with my implementation (I will optimize once I get it working), but I feel like the pressure computation and gradient subtraction are not having any effect.
Here are the shaders I am using:
//advection
uniform vec2 res;//The width and height of our screen
uniform sampler2D velocity;//input velocity
uniform sampler2D quantity;//quantity to advect
void main() {
vec2 pixel = gl_FragCoord.xy / res.xy;
float i0, j0, i1, j1;
float x, y, s0, s1, t0, t1, dxt0, dyt0;
float dt = 1.0/60.0;
float Nx = res.x -1.0;
float Ny = res.y -1.0;
float i = pixel.x;
float j = pixel.y;
dxt0 = dt ;
dyt0 = dt ;
x = gl_FragCoord.x - dxt0 * (texture2D(velocity, pixel).x );
y = gl_FragCoord.y - dyt0 * (texture2D(velocity, pixel).y );
i0=x-0.5;
i1=x+0.5;
j0=y-0.5;
j1=y+0.5;
s1 = x-i0;
s0 = 1.0-s1;
t1 = y-j0;
t0 = 1.0-t1;
float p1 = (t0 * texture2D(quantity, vec2(i0,j0)/res.xy).r);
float p2 = (t1 * texture2D(quantity, vec2(i0,j1)/res.xy).r);
float p3 = (t0 * texture2D(quantity, vec2(i1,j0)/res.xy).r);
float p4 = (t1 * texture2D(quantity, vec2(i1,j1)/res.xy).r);
float total1 = s0 * (p1 + p2);
float total2 = s1 * (p3 + p4);
gl_FragColor.r = total1 + total2;
p1 = (t0 * texture2D(quantity, vec2(i0,j0)/res.xy).g);
p2 = (t1 * texture2D(quantity, vec2(i0,j1)/res.xy).g);
p3 = (t0 * texture2D(quantity, vec2(i1,j0)/res.xy).g);
p4 = (t1 * texture2D(quantity, vec2(i1,j1)/res.xy).g);
total1 = s0 * (p1 + p2);
total2 = s1 * (p3 + p4);
gl_FragColor.g = total1 + total2;
}
//diffusion shader starts here
uniform vec2 res;//The width and height of our screen
uniform sampler2D x;//Our input texture
uniform sampler2D b;
uniform float alpha;
uniform float rBeta;
void main() {
float xPixel = 1.0/res.x;
float yPixel = 1.0/res.y;
vec2 pixel = gl_FragCoord.xy / res.xy;
gl_FragColor = texture2D( b, pixel );
vec4 leftColor = texture2D(x,vec2(pixel.x-xPixel,pixel.y));
vec4 rightColor = texture2D(x,vec2(pixel.x+xPixel,pixel.y));
vec4 upColor = texture2D(x,vec2(pixel.x,pixel.y-yPixel));
vec4 downColor = texture2D(x,vec2(pixel.x,pixel.y+yPixel));
gl_FragColor.r = (gl_FragColor.r * alpha +leftColor.r + rightColor.r + upColor.r + downColor.r) * rBeta;
gl_FragColor.g = (gl_FragColor.g * alpha +leftColor.g + rightColor.g + upColor.g + downColor.g)* rBeta;
gl_FragColor.b = (gl_FragColor.b * alpha +leftColor.b + rightColor.b + upColor.b + downColor.b)* rBeta;
}
//gradient
uniform vec2 res;//The width and height of our screen
uniform sampler2D velocity;//Our input velocity
uniform sampler2D pressure;//Our input pressure
void main() {
float xPixel = 1.0/res.x;
float yPixel = 1.0/res.y;
vec2 pixel = gl_FragCoord.xy / res.xy;
vec4 leftColor = texture2D(pressure, vec2(pixel.x-xPixel,pixel.y));
vec4 rightColor = texture2D(pressure, vec2(pixel.x+xPixel,pixel.y));
vec4 upColor = texture2D(pressure, vec2(pixel.x,pixel.y-yPixel));
vec4 downColor = texture2D(pressure, vec2(pixel.x,pixel.y+yPixel));
vec2 gradient = xPixel/2.0 * vec2((rightColor.x - leftColor.x), (upColor.y - downColor.y));
//Subtract the pressure gradient from the velocity
gl_FragColor = texture2D(velocity, pixel) ;
gl_FragColor.xy -= gradient;
}
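//divergence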
uniform vec2 res;//The width and height of our screen
uniform sampler2D velocity;//Our input texture
void main() {
float xPixel = 1.0/res.x;
float yPixel = 1.0/res.y;
vec2 pixel = gl_FragCoord.xy / res.xy;
vec4 leftColor = texture2D(velocity, vec2(pixel.x-xPixel,pixel.y));
vec4 rightColor = texture2D(velocity, vec2(pixel.x+xPixel,pixel.y));
vec4 upColor = texture2D(velocity, vec2(pixel.x,pixel.y-yPixel));
vec4 downColor = texture2D(velocity, vec2(pixel.x,pixel.y+yPixel));
float div = xPixel/2.0 * ((rightColor.x - leftColor.x) + (upColor.y - downColor.y));
//Central-difference divergence of the velocity
gl_FragColor = vec4(div);
}
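The boundary pass mentioned in the question is not shown; for reference, the one-pixel-border pass described in the GPU Gems chapter looks roughly like the sketch below, where field, offset, and scale are illustrative names: offset points one texel toward the interior, and scale is -1.0 for velocity (no-slip) or 1.0 for pressure (pure Neumann).
uniform vec2 res;//The width and height of our screen
uniform sampler2D field;//velocity or pressure texture
uniform vec2 offset;//e.g. (1.0, 0.0) along the left edge
uniform float scale;//-1.0 for velocity, 1.0 for pressure
void main() {
    //Copy the neighbouring interior value, scaled, onto the border pixel
    vec2 pixel = (gl_FragCoord.xy + offset) / res.xy;
    gl_FragColor = scale * texture2D(field, pixel);
}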
Stumped trying to adjust the hue of a specific channel (or perhaps, more specifically, a specific range of colors, in this case reds). Looking at the hue filter, I thought I might get somewhere by commenting out the green and blue modifiers so the changes affect only the red channel:
precision highp float;
varying highp vec2 textureCoordinate;
uniform sampler2D inputImageTexture;
uniform mediump float hueAdjust;
const highp vec4 kRGBToYPrime = vec4 (0.299, 0.587, 0.114, 0.0);
const highp vec4 kRGBToI = vec4 (0.595716, -0.274453, -0.321263, 0.0);
const highp vec4 kRGBToQ = vec4 (0.211456, -0.522591, 0.31135, 0.0);
const highp vec4 kYIQToR = vec4 (1.0, 0.9563, 0.6210, 0.0);
const highp vec4 kYIQToG = vec4 (1.0, -0.2721, -0.6474, 0.0);
const highp vec4 kYIQToB = vec4 (1.0, -1.1070, 1.7046, 0.0);
void main ()
{
// Sample the input pixel
highp vec4 color = texture2D(inputImageTexture, textureCoordinate);
// Convert to YIQ
highp float YPrime = dot (color, kRGBToYPrime);
highp float I = dot (color, kRGBToI);
highp float Q = dot (color, kRGBToQ);
// Calculate the hue and chroma
highp float hue = atan (Q, I);
highp float chroma = sqrt (I * I + Q * Q);
// Make the user's adjustments
hue += (-hueAdjust); //why negative rotation?
// Convert back to YIQ
Q = chroma * sin (hue);
I = chroma * cos (hue);
// Convert back to RGB
highp vec4 yIQ = vec4 (YPrime, I, Q, 0.0);
color.r = dot (yIQ, kYIQToR);
// --> color.g = dot (yIQ, kYIQToG);
// --> color.b = dot (yIQ, kYIQToB);
// Save the result
gl_FragColor = color;
}
But that just leaves the photo either grey/blue and washed-out or purplish green. Am I on the right track? If not, how can I modify this filter to affect individual channels while leaving the others intact?
Some examples:
Original, and the effect I'm trying to achieve:
(The second image is almost unnoticeably different; however, the red channel's hue has been made slightly pinker. I need to be able to adjust it between pink <-> orange.)
But here's what I get with B and G commented out:
(Left side: <0º, right side: >0º)
It looks to me like it's not affecting the hue of the reds in the way I'd like it to; possibly I'm approaching this incorrectly, or if I'm on the right track, this code isn't correctly adjusting the red channel hue?
(I also tried to achieve this effect using the GPUImageColorMatrixFilter, but I didn't get very far with it).
Edit: here's my current iteration of the shader using @VB_overflow's code + GPUImage wrapper, which is functionally affecting the input image in a way similar to what I'm aiming for:
#import "GPUImageSkinToneFilter.h"
@implementation GPUImageSkinToneFilter
NSString *const kGPUImageSkinToneFragmentShaderString = SHADER_STRING
(
varying highp vec2 textureCoordinate;
uniform sampler2D inputImageTexture;
// [-1;1] <=> [pink;orange]
uniform highp float skinToneAdjust; // will make reds more pink
// Other parameters
uniform mediump float skinHue;
uniform mediump float skinHueThreshold;
uniform mediump float maxHueShift;
uniform mediump float maxSaturationShift;
// RGB <-> HSV conversion, thanks to http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl
highp vec3 rgb2hsv(highp vec3 c)
{
highp vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0);
highp vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g));
highp vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r));
highp float d = q.x - min(q.w, q.y);
highp float e = 1.0e-10;
return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x);
}
// HSV <-> RGB conversion, thanks to http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl
highp vec3 hsv2rgb(highp vec3 c)
{
highp vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
highp vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
}
// Main
void main ()
{
// Sample the input pixel
highp vec4 colorRGB = texture2D(inputImageTexture, textureCoordinate);
// Convert color to HSV, extract hue
highp vec3 colorHSV = rgb2hsv(colorRGB.rgb);
highp float hue = colorHSV.x;
// check how far from skin hue
highp float dist = hue - skinHue;
if (dist > 0.5)
dist -= 1.0;
if (dist < -0.5)
dist += 1.0;
dist = abs(dist)/0.5; // normalized to [0,1]
// Apply Gaussian like filter
highp float weight = exp(-dist*dist*skinHueThreshold);
weight = clamp(weight, 0.0, 1.0);
// We want more orange, so increase saturation
if (skinToneAdjust > 0.0)
colorHSV.y += skinToneAdjust * weight * maxSaturationShift;
// we want more pinks, so decrease hue
else
colorHSV.x += skinToneAdjust * weight * maxHueShift;
// final color
highp vec3 finalColorRGB = hsv2rgb(colorHSV.rgb);
// display
gl_FragColor = vec4(finalColorRGB, 1.0);
}
);
#pragma mark -
#pragma mark Initialization and teardown
@synthesize skinToneAdjust;
@synthesize skinHue;
@synthesize skinHueThreshold;
@synthesize maxHueShift;
@synthesize maxSaturationShift;
- (id)init
{
if(! (self = [super initWithFragmentShaderFromString:kGPUImageSkinToneFragmentShaderString]) )
{
return nil;
}
skinToneAdjustUniform = [filterProgram uniformIndex:@"skinToneAdjust"];
skinHueUniform = [filterProgram uniformIndex:@"skinHue"];
skinHueThresholdUniform = [filterProgram uniformIndex:@"skinHueThreshold"];
maxHueShiftUniform = [filterProgram uniformIndex:@"maxHueShift"];
maxSaturationShiftUniform = [filterProgram uniformIndex:@"maxSaturationShift"];
self.skinHue = 0.05;
self.skinHueThreshold = 50.0;
self.maxHueShift = 0.14;
self.maxSaturationShift = 0.25;
return self;
}
#pragma mark -
#pragma mark Accessors
- (void)setSkinToneAdjust:(CGFloat)newValue
{
skinToneAdjust = newValue;
[self setFloat:newValue forUniform:skinToneAdjustUniform program:filterProgram];
}
- (void)setSkinHue:(CGFloat)newValue
{
skinHue = newValue;
[self setFloat:newValue forUniform:skinHueUniform program:filterProgram];
}
- (void)setSkinHueThreshold:(CGFloat)newValue
{
skinHueThreshold = newValue;
[self setFloat:newValue forUniform:skinHueThresholdUniform program:filterProgram];
}
- (void)setMaxHueShift:(CGFloat)newValue
{
maxHueShift = newValue;
[self setFloat:newValue forUniform:maxHueShiftUniform program:filterProgram];
}
- (void)setMaxSaturationShift:(CGFloat)newValue
{
maxSaturationShift = newValue;
[self setFloat:newValue forUniform:maxSaturationShiftUniform program:filterProgram];
}
@end
I made an example on ShaderToy. Use the latest Chrome to see it; on my side it does not work in Firefox or IE because it uses a video as input.
After some experiments, it seems to me that to make red hues more "pink" you need to decrease the hue, but to get more "orange" you need to increase the saturation.
In the code I convert to HSV instead of YIQ because it is faster, makes tweaking saturation possible, and still allows tweaking the hue. Also, the HSV components are in a [0, 1] interval, so there is no need to handle radians.
So here is how it is done:
You choose a reference hue or color (in your case, a red hue).
The shader computes the "distance" from the current pixel's hue to the reference hue.
Based on this distance, decrease the hue if you want pink, or increase the saturation if you want orange.
It is important to note that hue behaves differently than saturation and value: it should be treated as an angle (more info here).
The reference hue can be hardcoded, chosen by the user (by color-picking the image), or found by analysing the image content.
There are many different possible ways to compute the distance; in the example I chose to use the angular distance between hues.
You also need to apply some kind of filtering after computing the distance to "select" only the closest colors, like the Gaussian-like function used here.
Here is the code, without the ShaderToy stuff:
precision highp float;
varying highp vec2 textureCoordinate;
uniform sampler2D inputImageTexture;
// [-1;1] <=> [pink;orange]
const float EFFECT_AMOUNT = -0.25; // will make reds more pink
// Other parameters
const float SKIN_HUE = 0.05;
const float SKIN_HUE_TOLERANCE = 50.0;
const float MAX_HUE_SHIFT = 0.04;
const float MAX_SATURATION_SHIFT = 0.25;
// RGB <-> HSV conversion, thanks to http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl
vec3 rgb2hsv(vec3 c)
{
vec4 K = vec4(0.0, -1.0 / 3.0, 2.0 / 3.0, -1.0);
vec4 p = mix(vec4(c.bg, K.wz), vec4(c.gb, K.xy), step(c.b, c.g));
vec4 q = mix(vec4(p.xyw, c.r), vec4(c.r, p.yzx), step(p.x, c.r));
float d = q.x - min(q.w, q.y);
float e = 1.0e-10;
return vec3(abs(q.z + (q.w - q.y) / (6.0 * d + e)), d / (q.x + e), q.x);
}
// HSV <-> RGB conversion, thanks to http://lolengine.net/blog/2013/07/27/rgb-to-hsv-in-glsl
vec3 hsv2rgb(vec3 c)
{
vec4 K = vec4(1.0, 2.0 / 3.0, 1.0 / 3.0, 3.0);
vec3 p = abs(fract(c.xxx + K.xyz) * 6.0 - K.www);
return c.z * mix(K.xxx, clamp(p - K.xxx, 0.0, 1.0), c.y);
}
// Main
void main ()
{
// Sample the input pixel
vec4 colorRGB = texture2D(inputImageTexture, textureCoordinate);
// get effect amount to apply
float skin_tone_shift = EFFECT_AMOUNT;
// Convert color to HSV, extract hue
vec3 colorHSV = rgb2hsv(colorRGB.rgb);
float hue = colorHSV.x;
// check how far from skin hue
float dist = hue - SKIN_HUE;
if (dist > 0.5)
dist -= 1.0;
if (dist < -0.5)
dist += 1.0;
dist = abs(dist)/0.5; // normalized to [0,1]
// Apply Gaussian like filter
float weight = exp(-dist*dist*SKIN_HUE_TOLERANCE);
weight = clamp(weight, 0.0, 1.0);
// We want more orange, so increase saturation
if (skin_tone_shift > 0.0)
colorHSV.y += skin_tone_shift * weight * MAX_SATURATION_SHIFT;
// we want more pinks, so decrease hue
else
colorHSV.x += skin_tone_shift * weight * MAX_HUE_SHIFT;
// final color
vec3 finalColorRGB = hsv2rgb(colorHSV.rgb);
// display
gl_FragColor = vec4(finalColorRGB, 1.0);
}
More Orange:
More Pink:
--EDIT--
It seems to me that you are not setting the uniform values in your Objective-C code. If you forget, the shader will get zero for all of them.
The code should look like this:
- (id)init
{
if(! (self = [super initWithFragmentShaderFromString:kGPUImageSkinToneFragmentShaderString]) )
{
return nil;
}
skinToneAdjustUniform = [filterProgram uniformIndex:@"skinToneAdjust"];
[self setFloat:0.5 forUniform:skinToneAdjustUniform program:filterProgram]; // here 0.5 so should increase saturation
skinHueUniform = [filterProgram uniformIndex:@"skinHue"];
self.skinHue = 0.05;
[self setFloat:self.skinHue forUniform:skinHueUniform program:filterProgram];
skinHueToleranceUniform = [filterProgram uniformIndex:@"skinHueTolerance"];
self.skinHueTolerance = 50.0;
[self setFloat:self.skinHueTolerance forUniform:skinHueToleranceUniform program:filterProgram];
maxHueShiftUniform = [filterProgram uniformIndex:@"maxHueShift"];
self.maxHueShift = 0.04;
[self setFloat:self.maxHueShift forUniform:maxHueShiftUniform program:filterProgram];
maxSaturationShiftUniform = [filterProgram uniformIndex:@"maxSaturationShift"];
self.maxSaturationShift = 0.25;
[self setFloat:self.maxSaturationShift forUniform:maxSaturationShiftUniform program:filterProgram];
return self;
}
@end
I know how to do this in OpenGL with the code below:
glDisable(GL_LIGHTING);
glEnable(GL_COLOR_MATERIAL);
How do I simulate that in WebGL?
Here is a Blinn-Phong shader which emulates the OpenGL fixed-function pipeline for per-vertex lighting. By default it is equivalent to having glEnable(GL_COLOR_MATERIAL) and glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE) enabled. You can emulate glColorMaterial by setting the uniforms: a value of 1.0 makes that material parameter track the current color instead of the value set by glMaterial.
#version 120
////////////////////////////////////////////////////////////////////////////////
// http://www.glprogramming.com/red/chapter05.html //
// //
// color = (matEmission + globalAmbient * matAmbient) + //
// AttenuationFactor( 1.0 / ( Kc + Kl*d + Kq*d^2 ) ) * //
// [ (lightAmbient * matAmbient) + //
// (max(N.L,0) * lightDiffuse * matDiffuse) + //
// (max(N.H,0)^matShininess * lightSpecular * matSpecular) ] //
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Uniforms //
////////////////////////////////////////////////////////////////////////////////
uniform float uColorMaterialAmbient = 1.0;
uniform float uColorMaterialDiffuse = 1.0;
uniform float uColorMaterialEmission = 0.0;
uniform float uColorMaterialSpecular = 0.0;
////////////////////////////////////////////////////////////////////////////////
// Main //
////////////////////////////////////////////////////////////////////////////////
void main(void)
{
vec4 matAmbient = mix(gl_FrontMaterial.ambient, gl_Color, uColorMaterialAmbient);
vec4 matDiffuse = mix(gl_FrontMaterial.diffuse, gl_Color, uColorMaterialDiffuse);
vec4 matEmission = mix(gl_FrontMaterial.emission, gl_Color, uColorMaterialEmission);
vec4 matSpecular = mix(gl_FrontMaterial.specular, gl_Color, uColorMaterialSpecular);
// Transform normal into eye space. gl_NormalMatrix is the transpose of the
// inverse of the upper leftmost 3x3 of gl_ModelViewMatrix.
vec3 eyeNormal = normalize(gl_NormalMatrix * gl_Normal);
// Calculate emission and global ambient light
vec4 emissionAmbient = matEmission + (gl_LightModel.ambient * matAmbient);
// Calculate ambient
vec4 lightAmbient = gl_LightSource[0].ambient * matAmbient;
// Transform the vertex into eye space
vec4 eyeVertex = gl_ModelViewMatrix * gl_Vertex;
vec3 eyeLightDir = gl_LightSource[0].position.xyz - eyeVertex.xyz;
float dist = length(eyeLightDir);
eyeLightDir = normalize(eyeLightDir);
// No attenuation for a directional light
float attenuationFactor = 1.0 / (gl_LightSource[0].constantAttenuation
+ gl_LightSource[0].linearAttenuation * dist
+ gl_LightSource[0].quadraticAttenuation * dist * dist);
// Calculate lambert term
float NdotL = max(dot(eyeNormal, eyeLightDir), 0.0);
// Calculate diffuse
vec4 lightDiffuse = NdotL * (gl_LightSource[0].diffuse * matDiffuse);
// Calculate specular
vec4 lightSpecular = vec4(0.0);
if ( NdotL > 0.0 )
{
float NdotHV = max(dot(eyeNormal, gl_LightSource[0].halfVector.xyz), 0.0);
lightSpecular = pow(NdotHV, gl_FrontMaterial.shininess) * (gl_LightSource[0].specular * matSpecular);
}
gl_FrontColor = emissionAmbient + attenuationFactor * (lightAmbient + lightDiffuse + lightSpecular);
gl_Position = ftransform();
}
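A matching fragment shader is not shown above; since all of the lighting is computed per vertex, a minimal one simply outputs the interpolated color:
#version 120
void main(void)
{
    gl_FragColor = gl_Color;
}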