Using normalized sampler coordinates in a CIFilter kernel - Metal

I was working through this tutorial on custom CIFilters:
https://medium.com/@m_tuzer/using-metal-shading-language-for-custom-cikernels-metal-swift-7bc8e7e913e6
Everything works except that the sampler coordinates are not normalized. So a condition like pos.y < 0.33, for example, doesn't work as intended, because the kernel receives actual image coordinates.
Since the tutorial is old, there have probably been changes in CIFilter that “broke” this code. I looked through the documentation for CI kernels but could not find a way to get the normalized coordinates of a sampler inside the kernel.
Here is the code of the kernel:
#include <metal_stdlib>
using namespace metal;
#include <CoreImage/CoreImage.h> // (1)

float3 multiplyColors(float3, float3);
float3 multiplyColors(float3 mainColor, float3 colorMultiplier) { // (2)
    float3 color = float3(0, 0, 0);
    color.r = mainColor.r * colorMultiplier.r;
    color.g = mainColor.g * colorMultiplier.g;
    color.b = mainColor.b * colorMultiplier.b;
    return color;
}
extern "C" { namespace coreimage { // (3)
float4 dyeInThree(sampler src, float3 redVector, float3 greenVector, float3 blueVector) {
float2 pos = src.coord();
float4 pixelColor = src.sample(pos); // (4)
float3 pixelRGB = pixelColor.rgb;
float3 color = float3(0,0,0);
if (pos.y < 0.33) { // (5)
color = multiplyColor(pixelRGB, redVector);
} else if (pos.y >= 0.33 && pos.y < 0.66) {
color = multiplyColor(pixelRGB, greenVector);
} else {
color = multiplyColor(pixelRGB, blueVector);
}
return float4(color, 1.0);
}
}}

You can translate the source coordinates into relative values using the extent of the source like this:
#include <metal_stdlib>
using namespace metal;
#include <CoreImage/CoreImage.h>

extern "C" { namespace coreimage {
    float4 dyeInThree(sampler src, float3 redVector, float3 greenVector, float3 blueVector) {
        float2 pos = src.coord();
        float4 pixelColor = src.sample(pos);

        // transform to relative coordinates
        pos -= src.origin();
        pos /= src.size();

        float3 color = float3(0, 0, 0);
        if (pos.y < 0.33) {
            color = pixelColor.rgb * redVector;
        } else if (pos.y < 0.66) {
            color = pixelColor.rgb * greenVector;
        } else {
            color = pixelColor.rgb * blueVector;
        }
        return float4(color, pixelColor.a);
    }
}}
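For completeness, here is roughly how such a kernel can be loaded and applied from the Swift side. This is only a sketch of the usual CIKernel(functionName:fromMetalLibraryData:) setup, not the tutorial's exact code; the metallib name and the CIVector values are placeholders you would replace with your own.

import CoreImage

class DyeInThreeFilter: CIFilter {
    var inputImage: CIImage?

    // Load the compiled Core Image Metal library once. "default.metallib" is a
    // placeholder for whatever your build step actually produces.
    private static let kernel: CIKernel = {
        let url = Bundle.main.url(forResource: "default", withExtension: "metallib")!
        let data = try! Data(contentsOf: url)
        return try! CIKernel(functionName: "dyeInThree", fromMetalLibraryData: data)
    }()

    override var outputImage: CIImage? {
        guard let input = inputImage else { return nil }
        return DyeInThreeFilter.kernel.apply(
            extent: input.extent,
            // Each output pixel only reads the corresponding source pixel.
            roiCallback: { _, rect in rect },
            arguments: [input,
                        CIVector(x: 1, y: 0, z: 0),   // redVector
                        CIVector(x: 0, y: 1, z: 0),   // greenVector
                        CIVector(x: 0, y: 0, z: 1)])  // blueVector
    }
}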

Related

Adding Light Falloff for multiple Point Lights

I'm currently trying to add multiple point lights to my game. What I have done appears to be mostly working, except for a small problem with blending the light falloff. Here are two images to show you what's happening. In the first one, light falloff is commented out, and both point lights appear correctly.
And here's the second image, where I have light falloff enabled. You will see that only light #2 is "mostly" visible. There are traces of light #1, but for the most part, light #1 appears to be overridden by light #2's falloff. In other words, each consecutive light's falloff overrides the light from previous lights.
Does anyone know how to add falloff for multiple point lights? I'm sure I'm doing something slightly wrong, and that's why the lights are not properly accumulated.
Here's my shader:
struct Vertex
{
float4 pos : POSITION;
float2 tex : TEXTURE;
float3 norm : NORMAL;
};
struct PixelShaderArgs
{
float4 pos : SV_POSITION;
float2 col : TEXTURE;
float3 norm : NORMAL;
float3 worldPos : POSITION;
};
struct PointLightShaderArgs
{
float3 pos;
float radius;
float intensity;
float3 padding;
float4 ambient;
float4 diffuse;
};
Texture2D ShaderTexture : register(t0);
SamplerState Sampler : register(s0);
float4x4 localMatrix : register(b0);
cbuffer ShaderDataBuffer : register(b1)
{
float2 TextureResolution;
};
cbuffer cbPerFrame : register(b3)
{
PointLightShaderArgs light[8];
};
cbuffer WorldPositionBuffer : register(b4)
{
float4x4 World;
};
PixelShaderArgs VertexShaderMain(Vertex vertex)
{
PixelShaderArgs output;
output.pos = mul(vertex.pos, localMatrix);
output.col = vertex.tex;
output.norm = mul(vertex.norm, World);
output.worldPos = mul(vertex.pos, World);
return output;
}
int2 convertUVToPixel(float u, float v)
{
int width = TextureResolution.x;
int height = TextureResolution.y;
int xCoordinate = floor(u * width);
int yCoordinate = floor(v * height);
return int2(xCoordinate % width, yCoordinate % height);
}
float Falloff(float distance, float radius)
{
return clamp(1.0f - (distance / radius), 0.0, 1.0);
}
#define ATTENUATION_CONSTANT 1.0f // constant term (enabled)
#define ATTENUATION_LINEAR 0.0f // linear term (disabled)
#define ATTENUATION_QUADRATIC 0.0f // quadratic term (disabled)
float4 PixelShaderMain(PixelShaderArgs pixelShaderArgs) : SV_Target
{
float u = pixelShaderArgs.col.x;
float v = pixelShaderArgs.col.y;
// Lighting
float3 fragColor = float3(0.0f, 0.0f, 0.0f);
float4 diffuse = ShaderTexture.Load(int3(convertUVToPixel(u, v), 0));
for (int i = 0; i < 2; i++)
{
float3 ambient = diffuse * light[i].ambient;
pixelShaderArgs.norm = normalize(pixelShaderArgs.norm);
float3 lightToPixelVec = light[i].pos - pixelShaderArgs.worldPos;
float distance = length(lightToPixelVec);
float luminosity = dot(lightToPixelVec / distance, pixelShaderArgs.norm);
float intensity = 1.00f;
if (luminosity > 0.0f)
{
// Do lighting attenuation
fragColor += luminosity * diffuse * light[i].diffuse;
fragColor /= ATTENUATION_CONSTANT + (ATTENUATION_LINEAR * distance) + (ATTENUATION_QUADRATIC * (distance * distance));
fragColor *= light[i].intensity; // multiply the final result by the intensity.
fragColor *= Falloff(distance, light[i].radius); // This is what's causing the problem!!
//fragColor = saturate(fragColor + ambient);
}
}
return float4(fragColor, diffuse.a);
}
I figured this out. The solution was to move the falloff calculation up and fold it into the accumulation line:
fragColor += luminosity * diffuse * light[i].diffuse * Falloff(distance, light[i].radius);
This results in the correct falloff blending, shown in this picture:
and another picture showing three overlapped point lights:
Here's the updated shader (a lot has changed from the first one because I'm posting this answer late):
struct Vertex
{
float4 pos : POSITION;
float2 tex : TEXTURE;
float3 norm : NORMAL;
};
struct PixelShaderArgs
{
float4 pos : SV_POSITION;
float2 col : TEXTURE;
float3 norm : NORMAL;
float3 worldPos : POSITION;
};
struct PointLightShaderArgs
{
float3 pos;
float radius;
float intensity;
float3 padding;
float4 ambient;
float4 diffuse;
};
Texture2D ShaderTexture : register(t0);
SamplerState Sampler : register(s0);
float4x4 localMatrix : register(b0);
cbuffer ShaderDataBuffer : register(b1)
{
float2 TextureResolution;
};
cbuffer cbPerFrame : register(b3)
{
PointLightShaderArgs light[32];
};
cbuffer WorldPositionBuffer : register(b4)
{
float4x4 World;
};
PixelShaderArgs VertexShaderMain(Vertex vertex)
{
PixelShaderArgs output;
output.pos = mul(vertex.pos, localMatrix);
output.col = vertex.tex;
output.norm = mul(vertex.norm, World);
output.worldPos = mul(vertex.pos, World);
return output;
}
int2 convertUVToPixel(float u, float v)
{
int width = TextureResolution.x;
int height = TextureResolution.y;
int xCoordinate = floor(u * width);
int yCoordinate = floor(v * height);
return int2(xCoordinate % width, yCoordinate % height);
}
float Falloff(float distance, float radius)
{
return clamp(1.0f - (distance / radius), 0.0, 1.0);
}
#define ATTENUATION_CONSTANT 1.0f // constant term (enabled)
#define ATTENUATION_LINEAR 0.0f // linear term (disabled)
#define ATTENUATION_QUADRATIC 0.0f // quadratic term (disabled)
float4 PixelShaderMain(PixelShaderArgs pixelShaderArgs) : SV_Target
{
float u = pixelShaderArgs.col.x;
float v = pixelShaderArgs.col.y;
// Lighting
float3 fragColor = float3(0.0f, 0.0f, 0.0f);
float4 diffuse = ShaderTexture.Load(int3(convertUVToPixel(u, v), 0));
for (int i = 0; i < 32; i++)
{
float3 ambient = diffuse * light[i].ambient;
pixelShaderArgs.norm = normalize(pixelShaderArgs.norm);
float3 lightToPixelVec = light[i].pos - pixelShaderArgs.worldPos;
float distance = length(lightToPixelVec);
float luminosity = dot(lightToPixelVec / distance, pixelShaderArgs.norm);
float intensity = 1.00f;
if (luminosity > 0.0f)
{
// Do lighting attenuation
fragColor += luminosity * diffuse * light[i].diffuse * Falloff(distance,light[i].radius);
fragColor /= ATTENUATION_CONSTANT + (ATTENUATION_LINEAR * distance) + (ATTENUATION_QUADRATIC * (distance * distance));
fragColor *= light[i].intensity; // multiply the final result by the intensity.
}
fragColor = saturate(fragColor + ambient);
}
return float4(fragColor, diffuse.a);
}
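The same idea can be taken one step further. If each light's contribution is computed into a local variable and only then added to the accumulator, the attenuation and intensity also scale just that light instead of the running sum. This is a sketch of the loop body rearranged that way, not the answer's exact code; it reuses the names (light, diffuse, fragColor, Falloff, the ATTENUATION defines) from the shader above.

// Hypothetical rearrangement of the loop body: every per-light factor is applied
// to 'contribution' before it is accumulated, so no light scales the others.
for (int i = 0; i < 32; i++)
{
    float3 lightToPixelVec = light[i].pos - pixelShaderArgs.worldPos;
    float distance = length(lightToPixelVec);
    float luminosity = dot(lightToPixelVec / distance, normalize(pixelShaderArgs.norm));
    if (luminosity > 0.0f)
    {
        float attenuation = ATTENUATION_CONSTANT
                          + ATTENUATION_LINEAR * distance
                          + ATTENUATION_QUADRATIC * distance * distance;
        float3 contribution = luminosity * diffuse.rgb * light[i].diffuse.rgb;
        contribution /= attenuation;                                   // distance attenuation
        contribution *= light[i].intensity * Falloff(distance, light[i].radius);
        fragColor += contribution;                                     // accumulate this light only
    }
}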

METAL - Fragment shader is producing noise

I am trying to use basic lighting techniques described in Metal By Tutorials to produce a diffuse, ambient, and specular light on my models.
I have used this lighting algorithm plenty of times and it works great, but the past 2 times the specular color seems to produce a characteristic green and yellow noise. Even without the specular color, the diffuse and ambient colors seem to produce this awful noise.
Any ideas as to why this would be happening?
I wonder if the fact that I do not use a renderer class is causing this problem.
Here is fragment shader code:
fragment float4 fragmentmain(const VOUT in [[stage_in]],
texture2d<float> texture1 [[texture(0)]],
constant FRAGMENTUNIFORMS &data [[buffer(2)]],
constant LIGHT *lights [[buffer(3)]])
{
constexpr sampler texturesampler;
float3 basecolor = texture1.sample(texturesampler, in.coords).rgb;
float3 diffusecolor;
float3 ambientcolor;
float3 specularcolor;
float3 materialspecularcolor = float3(1,1,1);
float shine = 32;
float3 normaldirection = normalize(in.normal);
for (uint i = 0 ; i < data.lightcount ; i++) {
LIGHT light = lights[i];
if (light.type == sun) {
float3 lightdirection = normalize(-light.position);
float diffuseintensity = saturate(-dot(lightdirection, normaldirection));
diffusecolor = light.color * basecolor * diffuseintensity;
if (diffuseintensity > 0) {
float3 reflection = reflect(lightdirection, normaldirection);
float3 cameradirection = normalize(in.position.xyz - data.cameraposition);
float specularintensity = pow(saturate(-dot(reflection, cameradirection)), shine);
specularcolor = light.color * materialspecularcolor * specularintensity;
}
} else if (light.type == ambient) {
ambientcolor = light.color * light.intensity;
}
}
float3 color = diffusecolor + ambientcolor + specularcolor;
return float4(color, 1);
}
Interesting solution to the problem.
I changed:
float3 diffusecolor;
float3 ambientcolor;
float3 specularcolor;
to
float3 diffusecolor = float3(0,0,0);
float3 ambientcolor = float3(0,0,0);
float3 specularcolor = float3(0,0,0);
and the image turned entirely black, with no more noise; reading the uninitialized locals is what had produced the garbage pattern in the first place. However, it also seemed that my lights were not being handled in the for loop.
It turned out that my light.type enum was set to
enum LIGHTTYPE {
sun = 1,
ambient = 2
};
but when I changed it to:
enum LIGHTTYPE {
sun = 0,
ambient = 1
};
the issue was totally resolved. Sorry!
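Presumably the CPU side was filling light.type with values starting at 0 while the shader compared against 1 and 2, so neither branch was ever taken and the uninitialized colors were returned. One way to avoid that class of mismatch (a sketch with illustrative names, not the book's exact setup) is to define the enum and the light struct in a single C header that both the Swift bridging header and the .metal file include:

// Common.h - shared between Swift (via the bridging header) and the .metal sources.
// Field names here are illustrative; match them to your own LIGHT layout.
#ifndef Common_h
#define Common_h

#include <simd/simd.h>

typedef enum {
    sun = 0,
    ambient = 1
} LIGHTTYPE;

typedef struct {
    vector_float3 position;
    vector_float3 color;
    float intensity;
    LIGHTTYPE type;
} LIGHT;

#endif

With a shared header, the numeric values behind the light types can never drift apart between the host code and the shader.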

Fog shader camera

I have some difficulties with my vertex-fragment fog shader in Unity. I get a good visual result, but the problem is that the gradient is based on the camera's position: it moves as the camera moves. I don't know how to fix it.
Here is the shader code.
struct v2f {
float4 pos : SV_POSITION;
float4 grabUV : TEXCOORD0;
float2 uv_depth : TEXCOORD1;
float4 interpolatedRay : TEXCOORD2;
float4 screenPos : TEXCOORD3;
};
v2f vert(appdata_base v) {
v2f o;
o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
o.uv_depth = v.texcoord.xy;
o.grabUV = ComputeGrabScreenPos(o.pos);
half index = v.vertex.z;
o.screenPos = ComputeScreenPos(o.pos);
o.interpolatedRay = mul(UNITY_MATRIX_MV, v.vertex);
return o;
}
sampler2D _GrabTexture;
float4 frag(v2f IN) : COLOR {
float3 uv = UNITY_PROJ_COORD(IN.grabUV);
float dpth = UNITY_SAMPLE_DEPTH(tex2Dproj(_CameraDepthTexture, uv));
dpth = LinearEyeDepth(dpth);
float4 wsPos = (IN.screenPos + dpth * IN.interpolatedRay); // Here is the problem but how to fix it
float fogVert = max(0.0, (wsPos.y - _Depth) * (_DepthScale * 0.1f));
fogVert *= fogVert;
fogVert = (exp (-fogVert));
return fogVert;
}
It seems to be a matrix problem with this line:
o.interpolatedRay = mul(UNITY_MATRIX_MV, v.vertex);
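The symptom (fog that slides with the camera) is consistent with wsPos not actually being a world-space position: screenPos plus a view-space ray is neither world nor view space. Below is a sketch of one way to reconstruct the world position of the scene behind the fog surface, assuming a Unity version where unity_ObjectToWorld and _WorldSpaceCameraPos are available (older versions use _Object2World); it is not the original answer's code.

// Vertex: build a world-space ray from the camera to this vertex of the fog surface.
float4 worldVertex = mul(unity_ObjectToWorld, v.vertex);
o.interpolatedRay = float4(worldVertex.xyz - _WorldSpaceCameraPos, 0);

// Fragment: LinearEyeDepth gives distance along the camera's forward axis, so
// rescale the ray until its forward component is 1 before walking along it.
float3 camForward = -UNITY_MATRIX_V[2].xyz;        // camera forward in world space
float3 ray = IN.interpolatedRay.xyz;
ray /= dot(ray, camForward);                       // forward component becomes 1
float3 wsPos = _WorldSpaceCameraPos + ray * dpth;  // dpth = LinearEyeDepth(depth sample)
float fogVert = max(0.0, (wsPos.y - _Depth) * (_DepthScale * 0.1f));

Because wsPos is now a true world-space position, the height-based gradient stays fixed in the scene instead of following the camera.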

Unity3D - A shader that will clip a texture on iOS device

I'm working with Unity shaders for the first time, and I have a problem making a shader that will draw only part of a texture. The code below works in a Mac standalone build, but not on an iOS device. How can I fix that?
Shader "Sprite/ClipArea"
{
Properties
{
_MainTex ("Base (RGB), Alpha (A)", 2D) = "white" {}
_Length ("Length", Range(0.0, 1.0)) = 1.0
_Width ("Width", Range(0.0, 1.0)) = 1.0
}
SubShader
{
LOD 200
Tags
{
"Queue" = "Transparent"
"IgnoreProjector" = "True"
"RenderType" = "Transparent"
}
Pass
{
Cull Off
Lighting Off
ZWrite Off
Offset -1, -1
Fog { Mode Off }
ColorMask RGB
Blend SrcAlpha OneMinusSrcAlpha
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
sampler2D _MainTex;
float4 _MainTex_ST;
float _Length;
float _Width;
struct appdata_t
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
struct v2f
{
float4 vertex : POSITION;
float2 texcoord : TEXCOORD0;
};
v2f vert (appdata_t v)
{
v2f o;
o.vertex = mul(UNITY_MATRIX_MVP, v.vertex);
o.texcoord = v.texcoord;
return o;
}
half4 frag (v2f IN) : COLOR
{
if ((IN.texcoord.x<0) || (IN.texcoord.x>_Width) || (IN.texcoord.y<0) || (IN.texcoord.y>_Length))
{
clip(-1);
}
return tex2D(_MainTex, IN.texcoord);
}
ENDCG
}
}
}
Thanks!
On mobile devices it is advised not to use clip/discard, as it is a very expensive operation.
I have updated the fragment shader part without the clip/discard method. Instead of clipping, return a transparent color for the portions you don't want to display, and render the rest as is.
if ((IN.texcoord.x < 0) || (IN.texcoord.x > _Width) || (IN.texcoord.y < 0) || (IN.texcoord.y > _Length))
{
    half4 colorTransparent = half4(0, 0, 0, 0);
    return colorTransparent;
}
else
    return tex2D(_MainTex, IN.texcoord);
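If you also want to avoid the branch (on some mobile GPUs branches carry their own cost), the same mask can be built arithmetically with step(). This is a sketch of an alternative fragment function, not part of the original answer:

half4 frag (v2f IN) : COLOR
{
    half4 col = tex2D(_MainTex, IN.texcoord);
    // 1 when the texcoord lies inside [0,_Width] x [0,_Length], 0 otherwise
    half inside = step(0.0, IN.texcoord.x) * step(IN.texcoord.x, _Width)
                * step(0.0, IN.texcoord.y) * step(IN.texcoord.y, _Length);
    col.a *= inside;   // pixels outside the region become fully transparent
    return col;
}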

DirectX 10 Light bright at origin?

I'm using Frank Luna's book to learn DirectX 10, but I'm a little confused by some of the lighting I'm getting. I've got a couple of objects, a directional light, and a point light that I can move around the scene. My problem is that when I move the point light around, the light moves but gets darker the further it gets from the origin; at the origin it gives an intense white light. Why is this, and how can I get it working properly? Thanks.
Here's the code for the point light:
float3 PointLight(SurfaceInfo v, Light L, float3 eyePos)
{
float3 litColor = float3(0.0f, 0.0f, 0.0f);
// The vector from the surface to the light.
float3 lightVec = L.pos - v.pos;
// The distance from surface to light.
float d = length(lightVec);
if( d > L.range )
return float3(0.0f, 0.0f, 0.0f);
// Normalize the light vector.
lightVec /= d;
// Add the ambient light term.
litColor += v.diffuse * L.ambient;
// Add diffuse and specular term, provided the surface is in
// the line of sight of the light.
float diffuseFactor = dot(lightVec, v.normal);
[branch]
if( diffuseFactor > 0.0f )
{
float specPower = max(v.spec.a, 1.0f);
float3 toEye = normalize(eyePos - v.pos);
float3 R = reflect(-lightVec, v.normal);
float specFactor = pow(max(dot(R, toEye), 0.0f), specPower);
// diffuse and specular terms
litColor += diffuseFactor * v.diffuse * L.diffuse;
litColor += specFactor * v.spec * L.spec;
}
// attenuate
return litColor / dot(L.att, float3(1.0f, d, d*d));
}
The Effect file:
#include "lighthelper.fx"
#define MaxLights 2
cbuffer cbPerFrame
{
uniform extern Light gLight[MaxLights];
int gLightType;
float3 gEyePosW;
};
bool gSpecularEnabled;
cbuffer cbPerObject
{
float4x4 gWorld;
float4x4 gWVP;
float4x4 gTexMtx;
};
// Nonnumeric values cannot be added to a cbuffer.
Texture2D gDiffuseMap;
Texture2D gSpecMap;
SamplerState gTriLinearSam
{
Filter = MIN_MAG_MIP_LINEAR;
AddressU=Mirror;
AddressV=Mirror;
};
struct VS_IN
{
float3 posL : POSITION;
float3 normalL : NORMAL;
float2 texC : TEXCOORD;
float4 diffuse : DIFFUSE;
float4 spec : SPECULAR;
};
struct VS_OUT
{
float4 posH : SV_POSITION;
float3 posW : POSITION;
float3 normalW : NORMAL;
float2 texC : TEXCOORD;
float4 diffuse : DIFFUSE;
float4 spec : SPECULAR;
};
VS_OUT VS(VS_IN vIn)
{
VS_OUT vOut;
// Transform to world space.
vOut.posW = mul(float4(vIn.posL, 1.0f), gWorld);
vOut.normalW = mul(float4(vIn.normalL, 0.0f), gWorld);
// Transform to homogeneous clip space.
vOut.posH = mul(float4(vIn.posL, 1.0f), gWVP);
// Output vertex attributes for interpolation across triangle.
vOut.texC = mul(float4(vIn.texC, 0.0f, 1.0f), gTexMtx);
vOut.diffuse = vIn.diffuse;
vOut.spec = vIn.spec;
return vOut;
}
float4 PS(VS_OUT pIn) : SV_Target
{
// Get materials from texture maps.
float4 diffuse = gDiffuseMap.Sample( gTriLinearSam, pIn.texC );
float4 spec = gSpecMap.Sample( gTriLinearSam, pIn.texC );
// Map [0,1] --> [0,256]
spec.a *= 256.0f;
// Interpolating normal can make it not be of unit length so normalize it.
float3 normalW = normalize(pIn.normalW);
// Compute the lit color for this pixel.
SurfaceInfo v = {pIn.posW, normalW, diffuse, spec};
float3 litColor;
for(int i = 0; i < MaxLights; ++i)
{
if( i==0) // Parallel
{
//litColor += ParallelLight(v, gLight[i], gEyePosW);
}
else // Point
{
litColor += PointLight(v, gLight[i], gEyePosW);
}
}
return float4(litColor, diffuse.a);
}
technique10 TexTech
{
pass P0
{
SetVertexShader( CompileShader( vs_4_0, VS() ) );
SetGeometryShader( NULL );
SetPixelShader( CompileShader( ps_4_0, PS() ) );
}
}
And the Light defines:
myLight[1].ambient = D3DXCOLOR(0.4f, 0.8f, 0.4f, 1.0f);
myLight[1].diffuse = D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f);
myLight[1].specular = D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f);
myLight[1].att.x = 0.0f;
myLight[1].att.y = 0.1f;
myLight[1].att.z = 0.0f;
myLight[1].range = 50.0f;
