How to apply Custom Shader Program with Fabric.js - webgl

To apply a custom rain-drop shader program in fabric.js (the core library) and call the rain-drop effect from a separate program, I have done the following:
1) I downloaded the fabric dependency from npm, version 5.3.0.
2) Since I need to implement the rain-drop program inside fabric.js, I created a source file, rain_drop.class.js; below is the code snippet:
/**
 * Rain Drop Effect class
 */
(function(global) {
  'use strict';

  var fabric = global.fabric || (global.fabric = { }),
      filters = fabric.Image.filters,
      createClass = fabric.util.createClass;

  filters.RainDrop = createClass(filters.BaseFilter, {

    /**
     * Filter type
     * @param {String} type
     * @default
     */
    type: 'RainDrop',

    fragmentSource: "#define MAX_RADIUS 2\n precision highp float;\n #define DOUBLE_HASH 0\n #define HASHSCALE1 .1031\n #define HASHSCALE3 vec3(.1031, .1030, .0973)\n #define RANSIZE 0.7 \n #define RAINSPEED 0.3 \n \n uniform float iTime;\n uniform sampler2D uTexture;\n varying vec2 vTexCoord;\n \n float hash12(vec2 p)\n {\n vec3 p3 = fract(vec3(p.xyx) * HASHSCALE1);\n p3 += dot(p3, p3.yzx + 19.19);\n return fract((p3.x + p3.y) * p3.z);\n }\n \n vec2 hash22(vec2 p)\n {\n vec3 p3 = fract(vec3(p.xyx) * HASHSCALE3);\n p3 += dot(p3, p3.yzx+19.19);\n return fract((p3.xx+p3.yz)*p3.zy);\n \n }\n \n void main(){\n \n vec3 iResolution; \n iResolution = vec3(1.0, 1.0, 1.0); \n float resolution = 10. * exp2(-3.*RANSIZE);\n vec2 uv = gl_FragCoord.xy / iResolution.y * resolution;\n vec2 uv2 = gl_FragCoord.xy / iResolution.xy* resolution;\n vec2 p0 = floor(uv);\n \n vec2 circles = vec2(0.);\n for (int j = -MAX_RADIUS; j <= MAX_RADIUS; ++j)\n {\n for (int i = -MAX_RADIUS; i <= MAX_RADIUS; ++i)\n {\n vec2 pi = p0 + vec2(i, j);\n #if DOUBLE_HASH\n vec2 hsh = hash22(pi);\n #else\n vec2 hsh = pi;\n #endif\n vec2 p = pi + hash22(hsh);\n \n float t = fract(RAINSPEED*iTime + hash12(hsh));\n vec2 v = p - uv;\n float d = length(v) - (float(MAX_RADIUS) + 1.)*t;\n \n float h = 1e-3;\n float d1 = d - h;\n float d2 = d + h;\n float p1 = sin(31.*d1) * smoothstep(-0.6, -0.3, d1) * smoothstep(0., -0.3, d1);\n float p2 = sin(31.*d2) * smoothstep(-0.6, -0.3, d2) * smoothstep(0., -0.3, d2);\n circles += 0.5 * normalize(v) * ((p2 - p1) / (2. * h) * (1. - t) * (1. - t));\n }\n }\n circles /= float((MAX_RADIUS*2+1)*(MAX_RADIUS*2+1));\n \n float intensity = mix(0.01, 0.15, smoothstep(0.1, 0.6, abs(fract(0.05*iTime + 0.5)*2.-1.)));\n vec3 n = vec3(circles, sqrt(1. - dot(circles, circles)));\n vec3 color = texture2D(uTexture, uv2/resolution - intensity*n.xy).rgb + 5.*pow(clamp(dot(n, normalize(vec3(1., 0.7, 0.5))), 0., 1.), 6.);\n gl_FragColor = vec4(color, 1.0);\n }",

    /**
     * iTime value, from 1 to 10.
     * @param {Number} iTime
     */
    iTime: 0,

    /**
     * Describe the property that is the filter parameter
     * @param {String} m
     * @default
     */
    mainParameter: 'iTime',

    /**
     * Apply the RainDrop operation to a Uint8ClampedArray representing the pixels of an image.
     *
     * @param {Object} options
     * @param {ImageData} options.imageData The Uint8ClampedArray to be filtered.
     */
    applyTo2d: function(options) {
      if (this.iTime === 0) {
        return;
      }
    },

    /**
     * Return WebGL uniform locations for this filter's shader.
     *
     * @param {WebGLRenderingContext} gl The GL canvas context used to compile this filter's shader.
     * @param {WebGLShaderProgram} program This filter's compiled shader program.
     */
    getUniformLocations: function(gl, program) {
      return {
        iTime: gl.getUniformLocation(program, 'iTime'),
      };
    },

    /**
     * Send data from this filter to its shader program's uniforms.
     *
     * @param {WebGLRenderingContext} gl The GL canvas context used to compile this filter's shader.
     * @param {Object} uniformLocations A map of string uniform names to WebGLUniformLocation objects
     */
    sendUniformData: function(gl, uniformLocations) {
      gl.uniform1f(uniformLocations.iTime, Math.random());
    },
  });

  /**
   * Returns filter instance from an object representation
   * @static
   * @param {Object} object Object to create an instance from
   * @param {function} [callback] to be invoked after filter creation
   * @return {fabric.Image.filters.RainDrop} Instance of fabric.Image.filters.RainDrop
   */
  fabric.Image.filters.RainDrop.fromObject = fabric.Image.filters.BaseFilter.fromObject;

})(typeof exports !== 'undefined' ? exports : this);
3) Adding the rain_drop effect in 'src/build.js' through the line below:
ifSpecifiedInclude('image_filters', 'src/filters/rain_drop.class.js')
4) Now I need to recompile fabric through npm run build, which invokes the command below from package.json:
"build": "node build.js modules=ALL requirejs exclude=gestures,accessors,erasing",
5) The build produces fabric.min.js under the 'fabric/dist' folder, which I can then use as a library in a separate program.
6) Calling the added filter this way:
var filter = new fabric.Image.filters.RainDrop({
iTime: 2.04
});
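For context, here is a minimal sketch of how I wire the filter to an image (standard fabric.js filter plumbing; the requestAnimationFrame loop, 'photo.jpg', and the existing fabric.Canvas named canvas are illustrative assumptions, and it presumes sendUniformData forwards this.iTime rather than Math.random()):

fabric.Image.fromURL('photo.jpg', function (img) {
  // attach the custom filter like any built-in fabric filter
  var rain = new fabric.Image.filters.RainDrop({ iTime: 2.04 });
  img.filters.push(rain);
  img.applyFilters();
  canvas.add(img);

  // hypothetical animation loop: advance the shader clock every frame
  (function tick(now) {
    rain.iTime = now / 1000;
    img.applyFilters();
    canvas.requestRenderAll();
    requestAnimationFrame(tick);
  })(0);
});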
Here is the correct output that I need to achieve, but I am getting the wrong output for the rain-drop effect.
I know something is wrong with the shader program, so if anyone with good knowledge of this domain can jump in and share their thoughts, please do.
Also, sorry for the inadequate question formatting; if anyone can improve the formatting, that would be helpful as well.
Thanks

Related

Rendering Lines with Bezier Curves in DX11

I want to input control points through the tessellation stages and output them as bent lines.
I expand the lines into billboarded quads in the geometry shader.
Right now I input a bunch of random vertices with a control-point count of 4.
I assume the bending is done in the domain shader, so I use one of the UV coordinates unique to that stage as the t value for a Bezier function that takes in four world-position coordinates.
However, the lines remain straight, and I don't know what I am missing.
My code looks like this:
Domain Shader:
float3 bezier(float3 p0, float3 p1, float3 p2, float3 p3, float u)
{
    float B0 = (1. - u) * (1. - u) * (1. - u);
    float B1 = 3. * u * (1. - u) * (1. - u);
    float B2 = 3. * u * u * (1. - u);
    float B3 = u * u * u;
    float3 p = B0 * p0 + B1 * p1 + B2 * p2 + B3 * p3;
    return p;
}

float t = uv.x;
float3 pos = bezier(inp[0].worldPos, inp[1].worldPos, inp[2].worldPos, inp[3].worldPos, t);
Could the problem be that the vertex points I input do not form curves? Right now I just take a mesh, such as a plane, and take the vertices from there.
The detail factor in the hull shader is 16; the density factor varies with distance.
I don't know what else is relevant; if you need more information, let me know. I hope I made the question clear. I have googled it but can't seem to find the error in my own code.
See the SimpleBezier sample:
float4 BernsteinBasis(float t)
{
    float invT = 1.0f - t;
    return float4(invT * invT * invT,
                  3.0f * t * invT * invT,
                  3.0f * t * t * invT,
                  t * t * t);
}

float4 dBernsteinBasis(float t)
{
    float invT = 1.0f - t;
    return float4(-3 * invT * invT,
                  3 * invT * invT - 6 * t * invT,
                  6 * t * invT - 3 * t * t,
                  3 * t * t);
}

float3 EvaluateBezier(const OutputPatch<HS_OUTPUT, OUTPUT_PATCH_SIZE> BezPatch,
                      float4 BasisU,
                      float4 BasisV)
{
    float3 value = float3(0, 0, 0);
    value  = BasisV.x * (BezPatch[0].pos  * BasisU.x + BezPatch[1].pos  * BasisU.y + BezPatch[2].pos  * BasisU.z + BezPatch[3].pos  * BasisU.w);
    value += BasisV.y * (BezPatch[4].pos  * BasisU.x + BezPatch[5].pos  * BasisU.y + BezPatch[6].pos  * BasisU.z + BezPatch[7].pos  * BasisU.w);
    value += BasisV.z * (BezPatch[8].pos  * BasisU.x + BezPatch[9].pos  * BasisU.y + BezPatch[10].pos * BasisU.z + BezPatch[11].pos * BasisU.w);
    value += BasisV.w * (BezPatch[12].pos * BasisU.x + BezPatch[13].pos * BasisU.y + BezPatch[14].pos * BasisU.z + BezPatch[15].pos * BasisU.w);
    return value;
}

[domain("quad")]
DS_OUTPUT BezierDS(HS_CONSTANT_DATA_OUTPUT input,
                   float2 UV : SV_DomainLocation,
                   const OutputPatch<HS_OUTPUT, OUTPUT_PATCH_SIZE> BezPatch)
{
    float4 BasisU  = BernsteinBasis(UV.x);
    float4 BasisV  = BernsteinBasis(UV.y);
    float4 dBasisU = dBernsteinBasis(UV.x);
    float4 dBasisV = dBernsteinBasis(UV.y);

    float3 worldPos  = EvaluateBezier(BezPatch, BasisU, BasisV);
    float3 tangent   = EvaluateBezier(BezPatch, dBasisU, BasisV);
    float3 biTangent = EvaluateBezier(BezPatch, BasisU, dBasisV);
    float3 normal    = normalize(cross(tangent, biTangent));

    DS_OUTPUT output;
    output.pos = mul(float4(worldPos, 1), g_mViewProjection);
    output.worldPos = worldPos;
    output.normal = normal;
    return output;
}
https://github.com/microsoft/Xbox-ATG-Samples/tree/master/PCSamples/IntroGraphics/SimpleBezierPC
https://github.com/microsoft/Xbox-ATG-Samples/tree/master/PCSamples/IntroGraphics/SimpleBezierPC12
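Note that the sample above evaluates a 16-control-point bicubic patch. For a single curve of 4 control points, as in the question, a sketch of the corresponding isoline-domain shader might look like this (my adaptation, not part of the SimpleBezier sample; it reuses BernsteinBasis, DS_OUTPUT, and g_mViewProjection from the code above):

[domain("isoline")]
DS_OUTPUT LineBezierDS(HS_CONSTANT_DATA_OUTPUT input,
                       float2 UV : SV_DomainLocation,
                       const OutputPatch<HS_OUTPUT, 4> patch)
{
    // UV.x runs along the curve; weight the 4 control points by the cubic
    // Bernstein basis to get a genuinely bent line.
    float4 basis = BernsteinBasis(UV.x);
    float3 worldPos = basis.x * patch[0].pos + basis.y * patch[1].pos
                    + basis.z * patch[2].pos + basis.w * patch[3].pos;

    DS_OUTPUT output;
    output.pos = mul(float4(worldPos, 1), g_mViewProjection);
    output.worldPos = worldPos;
    output.normal = float3(0, 1, 0); // a line has no surface normal; placeholder
    return output;
}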

Metal equivalent to OpenGL mix

I'm trying to understand what the equivalent of the OpenGL mix function is in Metal. This is the OpenGL code I'm trying to convert:
float udRoundBox( vec2 p, vec2 b, float r )
{
    return length(max(abs(p)-b+r,0.0))-r;
}

void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    // setup
    float t = 0.2 + 0.2 * sin(mod(iTime, 2.0 * PI) - 0.5 * PI);
    float iRadius = min(iResolution.x, iResolution.y) * (0.05 + t);
    vec2 halfRes = 0.5 * iResolution.xy;

    // compute box
    float b = udRoundBox( fragCoord.xy - halfRes, halfRes, iRadius );

    // colorize (red / black)
    vec3 c = mix( vec3(1.0,0.0,0.0), vec3(0.0,0.0,0.0), smoothstep(0.0,1.0,b) );
    fragColor = vec4( c, 1.0 );
}
I was able to convert part of it so far:
float udRoundBox( float2 p, float2 b, float r )
{
    return length(max(abs(p)-b+r,0.0))-r;
}

float4 cornerRadius(sampler_h src) {
    float2 greenCoord = src.coord(); // already in relative coords; no need to divide by image size
    float t = 0.5;
    float iRadius = min(greenCoord.x, greenCoord.y) * (t);
    float2 halfRes = float2(greenCoord.x * 0.5, greenCoord.y * 0.5);
    float b = udRoundBox( float2(greenCoord.x - halfRes.x, greenCoord.y - halfRes.y), halfRes, iRadius );
    float3 c = mix(float3(1.0,0.0,0.0), float3(0.0,0.0,0.0), smoothstep(0.0,1.0,b) );
    return float4(c, 1.0);
}
But it's producing a green screen. I'm trying to achieve a corner radius on a video, like so:
The mix function is an implementation of linear interpolation, more frequently referred to as a lerp function.
You can use linear interpolation when you have a value, let's say t, and you want to know how that value maps within a certain range.
For example, if I have three values:
a = 0
b = 1
and
t = 0.5
I could call mix(a, b, t) and my result would be 0.5. That is because the mix function expects a start range value, an end range value, and a factor by which to interpolate, so I get 0.5, which is halfway between 0 and 1.
Looking at the documentation, Metal has an implementation of mix that does a linear interpolation.
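For instance, a minimal sketch (my illustration, not from the question's code) showing that Metal's mix behaves exactly like the GLSL one:

#include <metal_stdlib>
using namespace metal;

// mix(a, b, t) == a + (b - a) * t, for scalars and vectors alike
float3 redToBlack(float b) {
    // 0.0 maps to red, 1.0 maps to black, smoothed by smoothstep
    return mix(float3(1.0, 0.0, 0.0), float3(0.0, 0.0, 0.0),
               smoothstep(0.0, 1.0, b));
}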
The problem is that greenCoord (which was only a good variable name in the other question you asked, by the way) is the relative coordinate of the current pixel and has nothing to do with the absolute input resolution.
If you want a replacement for your iResolution, use src.size() instead.
It also seems you need your input coordinates in absolute (pixel) units. You can achieve that by adding a destination parameter to the inputs of your kernel, like so:
float4 cornerRadius(sampler src, destination dest) {
    const float2 destCoord = dest.coord(); // pixel position in the output buffer, in absolute coordinates
    const float2 srcSize = src.size();

    const float t = 0.5;
    const float radius = min(srcSize.x, srcSize.y) * t;
    const float2 halfRes = 0.5 * srcSize;

    const float b = udRoundBox(destCoord - halfRes, halfRes, radius);
    const float3 c = mix(float3(1.0,0.0,0.0), float3(0.0,0.0,0.0), smoothstep(0.0,1.0,b));
    return float4(c, 1.0);
}

iOS 12 CIKernel Filters CRASH

I am using the following CIColorKernel code to generate a custom filter:
kernel vec4 customFilter(__sample image, __sample noise, float time, float inputNoise) {
    vec2 uv = destCoord() / 1280.0;
    float d = length(uv - vec2(0.5,0.5));
    float blur = inputNoise;
    float myTime = time * 1.0;
    vec2 myuv = vec2(uv.x + sin( (uv.y + sin(myTime)) * abs(sin(myTime) + sin(2.0 * myTime) + sin(0.3 * myTime) + sin(1.4 * myTime) + cos(0.7 * myTime) + cos(1.3 * myTime)) * 4.0 ) * 0.02, uv.y);
    vec2 finalUV = myuv * 1280.0;

    vec3 col;
    col.r = sample(image, samplerTransform(image, finalUV)).r;
    col.g = sample(image, samplerTransform(image, finalUV)).g;
    col.b = sample(image, samplerTransform(image, finalUV)).b;

    float scanline = sin(uv.y * 1280.0 * 400.0) * 0.08;
    col -= scanline;

    // vignette
    col *= 1.0 - d * 0.5;
    return vec4(col, 1.0);
}
This piece of code works fine on iOS 10 / iOS 11 devices; however, it generates a weird crash on iOS 12 devices:
[CIKernelPool] 16:40: ERROR: parameter has unexpected type 'vec4' (should be a sampler type)
col.r = sample(image, samplerTransform(image, finalUV)).r;
[CIKernelPool] 17:40: ERROR: parameter has unexpected type 'vec4' (should be a sampler type)
col.g = sample(image, samplerTransform(image, finalUV)).g;
[CIKernelPool] 18:40: ERROR: parameter has unexpected type 'vec4' (should be a sampler type)
col.b = sample(image, samplerTransform(image, finalUV)).b;
This seems to happen in every CIColorKernel that uses __sample. Using sampler in place of __sample and converting the CIColorKernel to a CIKernel avoids the crash, but it doesn't generate the expected result.
As the error states, you are supplying the wrong object to
sample(image, samplerTransform(image, finalUV)).r
Here image is of type __sample, but sample() actually requires a sampler type.
CIColorKernel expects __sample types in its parameters, so what you need is to use a CIKernel instead of a CIColorKernel. Then you can supply a sampler to your kernel:
kernel vec4 customFilter(sampler image, sampler noise, float time, float inputNoise) {
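    // Hypothetical completion (a sketch, not from the original answer): the
    // body mirrors the __sample version above and is now legal because
    // `image` is a sampler.
    vec2 uv = destCoord() / 1280.0;
    float d = length(uv - vec2(0.5, 0.5));
    float myTime = time * 1.0;
    vec2 myuv = vec2(uv.x + sin((uv.y + sin(myTime)) * abs(sin(myTime) + sin(2.0 * myTime) + sin(0.3 * myTime) + sin(1.4 * myTime) + cos(0.7 * myTime) + cos(1.3 * myTime)) * 4.0) * 0.02, uv.y);
    vec2 finalUV = myuv * 1280.0;
    vec3 col = sample(image, samplerTransform(image, finalUV)).rgb;
    float scanline = sin(uv.y * 1280.0 * 400.0) * 0.08;
    col -= scanline;
    col *= 1.0 - d * 0.5; // vignette
    return vec4(col, 1.0);
}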

Fragment shader and Vertex Shader in Metal

Vertex Shader
vertex VertexOutBezier bezier_vertex(constant BezierParameters *allParams [[buffer(0)]],
                                     // constant GlobalParameters& globalParams [[buffer(1)]],
                                     uint vertexId [[vertex_id]],
                                     uint instanceId [[instance_id]])
{
    float t = (float) vertexId / 300;
    rint(t);
    BezierParameters params = allParams[instanceId];
    float lineWidth = (1 - (((float) (vertexId % 2)) * 2.0)) * params.lineThickness;
    float2 a = params.a;
    float2 b = params.b;

    float nt = 1.0f - t;
    float nt_2 = nt * nt;
    float nt_3 = nt_2 * nt;
    float t_2 = t * t;
    float t_3 = t_2 * t;

    float2 point = a * nt_3 + params.p1 * nt_2 * t + params.p2 * nt * t_2 + b * t_3;
    float2 tangent = -3.0 * a * nt_2 + params.p1 * (1.0 - 4.0 * t + 3.0 * t_2) + params.p2 * (2.0 * t - 3.0 * t_2) + 3 * b * t_2;
    tangent = normalize(float2(-tangent.y, tangent.x));

    VertexOutBezier vo;
    vo.pos.xy = point + (tangent * (lineWidth / 3.0f));
    vo.pos.zw = float2(0, 1);
    vo.color = params.color;
    return vo;
}
My fragment shader is:
fragment float4 bezier_fragment(VertexOutBezier params [[stage_in]],
                                texture2d<float> texture [[texture(0)]])
{
    constexpr sampler defaultSampler;
    float4 canvasColor = texture.sample(defaultSampler, params.pos.xy);
    return canvasColor;
}
Here I expect to get the pixel color of the texture, but I am only getting a single color; it does not vary with the position in the texture.
Even when I do the following in the fragment shader, I get a single color that does not vary with the coordinates:
fragment float4 bezier_fragment(VertexOutBezier params [[stage_in]],
                                texture2d<float> texture [[texture(0)]])
{
    constexpr sampler defaultSampler;
    float4 canvasColor = params.color * params.pos.x;
    return canvasColor;
}
If I do this in the vertex shader, I do get a color that varies with the x position:
vo.pos.xy = point + (tangent * (lineWidth / 3.0f));
vo.pos.zw = float2(0, 1);
vo.color = params.color * vo.pos.x;
What is the issue in the fragment shader? I cannot get the coordinates from the vertex shader.
Please make sure the VertexOutBezier.pos.xy value is normalized (0.0 to 1.0): the default sampler only accepts normalized texture coordinates, so if you always get a single color, the position is probably out of bounds.
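A minimal sketch of one way to do that (my illustration; the viewportSize buffer is a hypothetical addition, since a [[position]] value arrives in the fragment stage in window coordinates, not 0..1):

fragment float4 bezier_fragment(VertexOutBezier params [[stage_in]],
                                texture2d<float> texture [[texture(0)]],
                                constant float2 &viewportSize [[buffer(0)]])
{
    constexpr sampler defaultSampler;
    // divide the window-space position by the viewport size to get 0..1 UVs
    float2 uv = params.pos.xy / viewportSize;
    return texture.sample(defaultSampler, uv);
}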

How to perform color material track in WebGL

I know how to perform this in OpenGL with the code below:
glDisable(GL_LIGHTING);
glEnable(GL_COLOR_MATERIAL);
How do I simulate that in WebGL?
Here is a Blinn-Phong shader which emulates the OpenGL fixed-function pipeline for per-vertex lighting. By default it is equivalent to having glEnable(GL_COLOR_MATERIAL) and glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE) enabled. You can emulate glColorMaterial by setting the uniforms to 1.0, which makes the material parameters track the current color instead of the values set by glMaterial.
#version 120

////////////////////////////////////////////////////////////////////////////////
// http://www.glprogramming.com/red/chapter05.html                             //
//                                                                              //
// color = (matEmission + globalAmbient * matAmbient) +                         //
//         AttenuationFactor( 1.0 / ( Kc + Kl*d + Kq*d^2 ) ) *                  //
//         [ (lightAmbient * matAmbient) +                                      //
//           (max(N.L,0) * lightDiffuse * matDiffuse) +                         //
//           (max(N.H,0)^matShininess * lightSpecular * matSpecular) ]          //
////////////////////////////////////////////////////////////////////////////////

////////////////////////////////////////////////////////////////////////////////
// Uniforms                                                                     //
////////////////////////////////////////////////////////////////////////////////
uniform float uColorMaterialAmbient = 1.0;
uniform float uColorMaterialDiffuse = 1.0;
uniform float uColorMaterialEmission = 0.0;
uniform float uColorMaterialSpecular = 0.0;

////////////////////////////////////////////////////////////////////////////////
// Main                                                                         //
////////////////////////////////////////////////////////////////////////////////
void main(void)
{
    vec4 matAmbient  = mix(gl_FrontMaterial.ambient,  gl_Color, uColorMaterialAmbient);
    vec4 matDiffuse  = mix(gl_FrontMaterial.diffuse,  gl_Color, uColorMaterialDiffuse);
    vec4 matEmission = mix(gl_FrontMaterial.emission, gl_Color, uColorMaterialEmission);
    vec4 matSpecular = mix(gl_FrontMaterial.specular, gl_Color, uColorMaterialSpecular);

    // Transform normal into eye space. gl_NormalMatrix is the transpose of the
    // inverse of the upper leftmost 3x3 of gl_ModelViewMatrix.
    vec3 eyeNormal = normalize(gl_NormalMatrix * gl_Normal);

    // Calculate emission and global ambient light
    vec4 emissionAmbient = matEmission + (gl_LightModel.ambient * matAmbient);

    // Calculate ambient
    vec4 lightAmbient = gl_LightSource[0].ambient * matAmbient;

    // Transform the vertex into eye space
    vec4 eyeVertex = gl_ModelViewMatrix * gl_Vertex;
    vec3 eyeLightDir = gl_LightSource[0].position.xyz - eyeVertex.xyz;
    float dist = length(eyeLightDir);
    eyeLightDir = normalize(eyeLightDir);

    // No attenuation for a directional light
    float attenuationFactor = 1.0 / (gl_LightSource[0].constantAttenuation
                                     + gl_LightSource[0].linearAttenuation * dist
                                     + gl_LightSource[0].quadraticAttenuation * dist * dist);

    // Calculate lambert term
    float NdotL = max(dot(eyeNormal, eyeLightDir), 0.0);

    // Calculate diffuse
    vec4 lightDiffuse = NdotL * (gl_LightSource[0].diffuse * matDiffuse);

    // Calculate specular
    vec4 lightSpecular = vec4(0.0);
    if (NdotL > 0.0)
    {
        float NdotHV = max(dot(eyeNormal, gl_LightSource[0].halfVector.xyz), 0.0);
        lightSpecular = pow(NdotHV, gl_FrontMaterial.shininess) * (gl_LightSource[0].specular * matSpecular);
    }

    gl_FrontColor = emissionAmbient + attenuationFactor * (lightAmbient + lightDiffuse + lightSpecular);
    gl_Position = ftransform();
}
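WebGL's GLSL does not allow default values on uniforms, so if you port the shader above to WebGL-compatible GLSL (replacing the gl_* built-ins with your own attributes and uniforms), you would drive the color-material switches from JavaScript. A minimal sketch, assuming gl is an initialized WebGL context and program is your linked port of the shader:

gl.useProgram(program);
// 1.0 = track the current vertex color, 0.0 = use the material value
gl.uniform1f(gl.getUniformLocation(program, 'uColorMaterialAmbient'), 1.0);
gl.uniform1f(gl.getUniformLocation(program, 'uColorMaterialDiffuse'), 1.0);
gl.uniform1f(gl.getUniformLocation(program, 'uColorMaterialEmission'), 0.0);
gl.uniform1f(gl.getUniformLocation(program, 'uColorMaterialSpecular'), 0.0);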
