Adding Light Falloff for multiple Point Lights - directx

I'm currently trying to add multiple point lights to my game. What I have done appears to be mostly working, except for a small problem with blending the light falloff. Here are two images to show what's happening. In the first one, light falloff is commented out, and both point lights appear correctly.
And here's the second image, where I have light falloff enabled. You will see that only light #2 is "mostly" visible. There are traces of light #1, but for the most part, light #1 appears to be overridden by light #2's falloff. In other words, each consecutive light's falloff overrides the light from previous lights.
Does anyone know how to add falloff for multiple point lights? I'm sure I'm doing something slightly wrong, and that's why the lights are not properly accumulated.
Here's my shader:
struct Vertex
{
float4 pos : POSITION;
float2 tex : TEXTURE;
float3 norm : NORMAL;
};
struct PixelShaderArgs
{
float4 pos : SV_POSITION;
float2 col : TEXTURE;
float3 norm : NORMAL;
float3 worldPos : POSITION;
};
struct PointLightShaderArgs
{
float3 pos;
float radius;
float intensity;
float3 padding;
float4 ambient;
float4 diffuse;
};
Texture2D ShaderTexture : register(t0);
SamplerState Sampler : register(s0);
float4x4 localMatrix : register(b0);
cbuffer ShaderDataBuffer : register(b1)
{
float2 TextureResolution;
};
cbuffer cbPerFrame : register(b3)
{
PointLightShaderArgs light[8];
};
cbuffer WorldPositionBuffer : register(b4)
{
float4x4 World;
};
PixelShaderArgs VertexShaderMain(Vertex vertex)
{
PixelShaderArgs output;
output.pos = mul(vertex.pos, localMatrix);
output.col = vertex.tex;
output.norm = mul(vertex.norm, World);
output.worldPos = mul(vertex.pos, World);
return output;
}
int2 convertUVToPixel(float u, float v)
{
int width = TextureResolution.x;
int height = TextureResolution.y;
int xCoordinate = floor(u * width);
int yCoordinate = floor(v * height);
return int2(xCoordinate % width, yCoordinate % height);
}
float Falloff(float distance, float radius)
{
return clamp(1.0f - (distance / radius), 0.0, 1.0);
}
#define ATTENUATION_CONSTANT 1.0f // constant attenuation term
#define ATTENUATION_LINEAR 0.0f // linear attenuation term (disabled)
#define ATTENUATION_QUADRATIC 0.0f // quadratic attenuation term (disabled)
float4 PixelShaderMain(PixelShaderArgs pixelShaderArgs) : SV_Target
{
float u = pixelShaderArgs.col.x;
float v = pixelShaderArgs.col.y;
// Lighting
float3 fragColor = float3(0.0f, 0.0f, 0.0f);
float4 diffuse = ShaderTexture.Load(int3(convertUVToPixel(u, v), 0));
for (int i = 0; i < 2; i++)
{
float3 ambient = diffuse * light[i].ambient;
pixelShaderArgs.norm = normalize(pixelShaderArgs.norm);
float3 lightToPixelVec = light[i].pos - pixelShaderArgs.worldPos;
float distance = length(lightToPixelVec);
float luminosity = dot(lightToPixelVec / distance, pixelShaderArgs.norm);
float intensity = 1.00f;
if (luminosity > 0.0f)
{
// Do lighting attenuation
fragColor += luminosity * diffuse * light[i].diffuse;
fragColor /= ATTENUATION_CONSTANT + (ATTENUATION_LINEAR * distance) + (ATTENUATION_QUADRATIC * (distance * distance));
fragColor *= light[i].intensity; // multiply the final result by the intensity.
fragColor *= Falloff(distance, light[i].radius); // This is what's causing the problem!!
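// Note: the /= and *= above rescale the entire accumulated fragColor, so light i's
// attenuation, intensity and falloff are also applied to every earlier light's
// contribution instead of only to light i's own term.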
//fragColor = saturate(fragColor + ambient);
}
}
return float4(fragColor, diffuse.a);
}

I figured this out. The solution was to move the falloff calculation up and inline it into the diffuse accumulation, like this: fragColor += luminosity * diffuse * light[i].diffuse * Falloff(distance, light[i].radius);
This results in the correct falloff blending, shown in this picture:
and another picture showing three overlapped point lights:
Here's the updated shader (a lot changed from the first version because I'm posting this answer quite a bit later):
struct Vertex
{
float4 pos : POSITION;
float2 tex : TEXTURE;
float3 norm : NORMAL;
};
struct PixelShaderArgs
{
float4 pos : SV_POSITION;
float2 col : TEXTURE;
float3 norm : NORMAL;
float3 worldPos : POSITION;
};
struct PointLightShaderArgs
{
float3 pos;
float radius;
float intensity;
float3 padding;
float4 ambient;
float4 diffuse;
};
Texture2D ShaderTexture : register(t0);
SamplerState Sampler : register(s0);
float4x4 localMatrix : register(b0);
cbuffer ShaderDataBuffer : register(b1)
{
float2 TextureResolution;
};
cbuffer cbPerFrame : register(b3)
{
PointLightShaderArgs light[32];
};
cbuffer WorldPositionBuffer : register(b4)
{
float4x4 World;
};
PixelShaderArgs VertexShaderMain(Vertex vertex)
{
PixelShaderArgs output;
output.pos = mul(vertex.pos, localMatrix);
output.col = vertex.tex;
output.norm = mul(vertex.norm, World);
output.worldPos = mul(vertex.pos, World);
return output;
}
int2 convertUVToPixel(float u, float v)
{
int width = TextureResolution.x;
int height = TextureResolution.y;
int xCoordinate = floor(u * width);
int yCoordinate = floor(v * height);
return int2(xCoordinate % width, yCoordinate % height);
}
float Falloff(float distance, float radius)
{
return clamp(1.0f - (distance / radius), 0.0, 1.0);
}
#define ATTENUATION_CONSTANT 1.0f // constant attenuation term
#define ATTENUATION_LINEAR 0.0f // linear attenuation term (disabled)
#define ATTENUATION_QUADRATIC 0.0f // quadratic attenuation term (disabled)
float4 PixelShaderMain(PixelShaderArgs pixelShaderArgs) : SV_Target
{
float u = pixelShaderArgs.col.x;
float v = pixelShaderArgs.col.y;
// Lighting
float3 fragColor = float3(0.0f, 0.0f, 0.0f);
float4 diffuse = ShaderTexture.Load(int3(convertUVToPixel(u, v), 0));
for (int i = 0; i < 32; i++)
{
float3 ambient = diffuse * light[i].ambient;
pixelShaderArgs.norm = normalize(pixelShaderArgs.norm);
float3 lightToPixelVec = light[i].pos - pixelShaderArgs.worldPos;
float distance = length(lightToPixelVec);
float luminosity = dot(lightToPixelVec / distance, pixelShaderArgs.norm);
float intensity = 1.00f;
if (luminosity > 0.0f)
{
// Do lighting attenuation
fragColor += luminosity * diffuse * light[i].diffuse * Falloff(distance,light[i].radius);
fragColor /= ATTENUATION_CONSTANT + (ATTENUATION_LINEAR * distance) + (ATTENUATION_QUADRATIC * (distance * distance));
fragColor *= light[i].intensity; // multiply the final result by the intensity.
}
fragColor = saturate(fragColor + ambient);
}
return float4(fragColor, diffuse.a);
}
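As a side note, here is a minimal sketch of an alternative accumulation pattern (assuming the same PointLightShaderArgs layout, Falloff helper and ATTENUATION_* defines as above; this is an illustration only, not the exact shader I use). Computing each light's contribution in a local variable keeps the attenuation, intensity and falloff scoped to that one light, so they can never rescale what earlier lights have already added to fragColor:
float4 PixelShaderMain(PixelShaderArgs pixelShaderArgs) : SV_Target
{
    float4 diffuse = ShaderTexture.Load(int3(convertUVToPixel(pixelShaderArgs.col.x, pixelShaderArgs.col.y), 0));
    float3 normal = normalize(pixelShaderArgs.norm);
    float3 fragColor = float3(0.0f, 0.0f, 0.0f);
    for (int i = 0; i < 32; i++)
    {
        float3 lightToPixelVec = light[i].pos - pixelShaderArgs.worldPos;
        float distance = length(lightToPixelVec);
        float luminosity = dot(lightToPixelVec / distance, normal);
        if (luminosity > 0.0f)
        {
            // Everything belonging to light i stays in 'contribution';
            // fragColor only ever receives finished per-light results.
            float attenuation = ATTENUATION_CONSTANT
                              + ATTENUATION_LINEAR * distance
                              + ATTENUATION_QUADRATIC * distance * distance;
            float3 contribution = luminosity * diffuse.rgb * light[i].diffuse.rgb;
            contribution *= light[i].intensity * Falloff(distance, light[i].radius) / attenuation;
            fragColor += contribution;
        }
        // Ambient is also accumulated per light and faded with the same falloff.
        fragColor += diffuse.rgb * light[i].ambient.rgb * Falloff(distance, light[i].radius);
    }
    return float4(saturate(fragColor), diffuse.a);
}
Scoping the per-light factors to 'contribution' is what prevents one light's falloff from darkening another light's result, which was the original symptom.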

Related

Centered fill/fit 2D texture in a Metal shader

I need to center the image on a 2D texture when adjusting fit/fill of the texture in the view, but I can't get the UV coordinates right.
Original image
When adjusting fill, it shows the first part of the image rather than the center:
Fill image
When adjusting fit, I don't get the correct center:
Fit image
float2 adjustPos(float2 size,
float2 uv) {
uv.x /= size.x;
uv.y /= size.y;
uv.y = 1.0f - uv.y;
return uv;
}
float2 scaleTexture(texture2d<float, access::sample> tex2d,
float2 size,
float2 uv,
int mode) {
int width = tex2d.get_width();
int height = tex2d.get_height();
float widthRatio = size.x/width;
float heightRatio = size.y/height;
float2 pos;
if (mode == 0) { // Aspect Fit
int2 newSize = int2(width*widthRatio, height*widthRatio);
pos = adjustPos(float2(newSize), uv);
float y = (uv.y/size.y) / 2.0;
y = y-pos.y;
y = 1.0f-y;
pos.y = y;
} else if (mode == 1) { // Aspect Fill
int2 newSize = int2(width*heightRatio, height*heightRatio);
pos = adjustPos(float2(newSize), uv);
if (newSize.x != size.x) {
pos.x = 0.5f + ((pos.x - 0.5f) * (1.0f - (heightRatio/100)));
}
} else {
float scale = min(widthRatio, heightRatio);
int2 newSize = int2(width*scale, height*scale);
pos = adjustPos(float2(newSize), uv);
}
return pos;
}
You can use this shader and customize it to your requirements. It lets you fit or fill a texture within a given size.
The following Metal shader takes a texture, an output size, and the expected content mode, and returns a fit/fill image within the given size.
fragment float4 fragment_aspect_fitfill(
VertexOut vertexIn [[stage_in]],
texture2d<float, access::sample> sourceTexture [[texture(0)]],
sampler sourceSampler [[sampler(0)]],
constant float2 &size [[ buffer(0) ]],
constant float &contentMode [[ buffer(1) ]])
{
float2 uv = vertexIn.textureCoordinate;
// Calculate the aspect ratio of both the texture and the expected output
float textureAspect = (float)sourceTexture.get_width() / (float)sourceTexture.get_height();
float frameAspect = (float)size.x / (float)size.y;
float scaleX = 1, scaleY = 1;
float textureFrameRatio = textureAspect / frameAspect;
bool portraitTexture = textureAspect < 1;
bool portraitFrame = frameAspect < 1;
// Content mode 0 is for aspect Fill, 1 is for Aspect Fit
if(contentMode == 0.0) {
if(portraitFrame)
scaleX = 1.f / textureFrameRatio;
else
scaleY = textureFrameRatio;
} else if(contentMode == 1.0) {
if(portraitFrame)
scaleY = textureFrameRatio;
else
scaleX = 1.f / textureFrameRatio;
}
float2 textureScale = float2(scaleX, scaleY);
float2 vTexCoordinate = textureScale * (uv - 0.5) + 0.5;
return sourceTexture.sample(sourceSampler, vTexCoordinate);
}
Tip: this MSL uses some structs from MetalPetal.

DirectXTK and 3D Object

I have an application that displays a 3D object using D3D11 and DirectXMath. I also want to display a HUD in the top-left corner, so I thought I would use DirectXTK SpriteBatch/SpriteFont to do this. My 3D rendering is fine until I add the following code.
Is DirectXTK using its own shaders and changing some states?
The question is: how do I fix this?
m_spriteBatch->Begin();
const wchar_t* output = L"Hello World";
m_font->DrawString(m_spriteBatch.get(), output, DirectX::XMFLOAT2{ 10, 10 }, Colors::Yellow);
m_spriteBatch->End();
Without spritefont->DrawString
With spritefont->DrawString
Here are my shaders.
Texture2D txDiffuse : register( t0 );
SamplerState samLinear : register(s0);
cbuffer WorldViewProjectionType : register(b0)
{
matrix World;
matrix View;
matrix Projection;
};
cbuffer TransparentBuffer
{
float4 blendAmount;
};
struct VS_INPUT
{
float4 Pos : POSITION;
float2 Tex : TEXCOORD0;
};
struct PS_INPUT
{
float4 Pos : SV_POSITION;
float2 Tex : TEXCOORD0;
};
PS_INPUT VS(VS_INPUT input)
{
PS_INPUT output = (PS_INPUT)0;
output.Pos.w = 1.0f;
output.Pos = mul(input.Pos, World);
output.Pos = mul(output.Pos, View);
output.Pos = mul(output.Pos, Projection);
output.Tex = input.Tex;
return output;
}
float4 PS(PS_INPUT input) : SV_Target
{
float4 color = txDiffuse.Sample(samLinear, input.Tex);
color.a = blendAmount.a;
return color;
}
float4 PSGray(PS_INPUT input) : SV_Target
{
float4 color = txDiffuse.Sample(samLinear, input.Tex);
float fLuminance = 0.299f * color.r + 0.587f * color.g + 0.114f * color.b;
return float4(fLuminance, fLuminance, fLuminance, blendAmount.a);
}
Sweet, I got it working. Yippy. After the swapchain->Present I reset all of these:
//*************************************************************************
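// SpriteBatch sets its own input layout, shaders, blend, depth-stencil,
// rasterizer and sampler states inside Begin()/End(), so the 3D pipeline
// state has to be restored before the next 3D draw.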
m_pImmediateContext->IASetInputLayout(m_pVertexLayout);
m_pImmediateContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
TurnOffAlphaBlending(m_pImmediateContext);
// Set the depth stencil state.
m_pImmediateContext->OMSetDepthStencilState(m_pdepthStencilState, 1);
m_pImmediateContext->OMSetRenderTargets(1, &m_pdesignerRenderTargetView, m_pdesignerDepthStencilView);
m_pImmediateContext->RSSetState(m_prasterState);
//*************************************************************************

Fog shader camera

I'm having some difficulty with my vertex-fragment fog shader in Unity. I get a good visual result, but the problem is that the gradient is based on the camera's position: it moves as the camera moves. I don't know how to fix it.
Here is the shader code.
struct v2f {
float4 pos : SV_POSITION;
float4 grabUV : TEXCOORD0;
float2 uv_depth : TEXCOORD1;
float4 interpolatedRay : TEXCOORD2;
float4 screenPos : TEXCOORD3;
};
v2f vert(appdata_base v) {
v2f o;
o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
o.uv_depth = v.texcoord.xy;
o.grabUV = ComputeGrabScreenPos(o.pos);
half index = v.vertex.z;
o.screenPos = ComputeScreenPos(o.pos);
o.interpolatedRay = mul(UNITY_MATRIX_MV, v.vertex);
return o;
}
sampler2D _GrabTexture;
float4 frag(v2f IN) : COLOR {
float3 uv = UNITY_PROJ_COORD(IN.grabUV);
float dpth = UNITY_SAMPLE_DEPTH(tex2Dproj(_CameraDepthTexture, uv));
dpth = LinearEyeDepth(dpth);
float4 wsPos = (IN.screenPos + dpth * IN.interpolatedRay); // Here is the problem but how to fix it
float fogVert = max(0.0, (wsPos.y - _Depth) * (_DepthScale * 0.1f));
fogVert *= fogVert;
fogVert = (exp (-fogVert));
return fogVert;
}
It seems to be a matrix problem, specifically this line:
o.interpolatedRay = mul(UNITY_MATRIX_MV, v.vertex);
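A minimal sketch of one possible fix, assuming the built-in pipeline of that Unity era (_Object2World is the object-to-world matrix on older versions, unity_ObjectToWorld on newer ones): build the ray in world space relative to the camera, store its view-space depth, and reconstruct the world position from _WorldSpaceCameraPos so the fog gradient no longer follows the camera.
// vertex: world-space ray from the camera to this vertex, plus its view depth in w
float4 worldVertex = mul(_Object2World, v.vertex);           // unity_ObjectToWorld on newer Unity
o.interpolatedRay.xyz = worldVertex.xyz - _WorldSpaceCameraPos;
o.interpolatedRay.w   = -mul(UNITY_MATRIX_MV, v.vertex).z;   // view-space depth of this vertex

// fragment: after dpth = LinearEyeDepth(...) as above
float3 wsDir = IN.interpolatedRay.xyz / IN.interpolatedRay.w; // ray scaled so its view depth == 1
float3 wsPos = _WorldSpaceCameraPos + dpth * wsDir;           // world position, independent of the camera
float fogVert = max(0.0, (wsPos.y - _Depth) * (_DepthScale * 0.1f));
With this reconstruction the screenPos term is no longer needed.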

DirectX 10 Light bright at origin?

I'm using Frank Luna's book to learn DirectX 10, but I'm a little confused by some of the lighting I'm getting. I've got a couple of objects, a directional light, and a point light that I can move around the scene. My problem is that when I move the point light around, the light moves but gets darker the further it gets from the origin. At the origin it is an intense white light. Why is this, and how can I get it working properly? Thanks.
Here's the code for the point light:
float3 PointLight(SurfaceInfo v, Light L, float3 eyePos)
{
float3 litColor = float3(0.0f, 0.0f, 0.0f);
// The vector from the surface to the light.
float3 lightVec = L.pos - v.pos;
// The distance from surface to light.
float d = length(lightVec);
if( d > L.range )
return float3(0.0f, 0.0f, 0.0f);
// Normalize the light vector.
lightVec /= d;
// Add the ambient light term.
litColor += v.diffuse * L.ambient;
// Add diffuse and specular term, provided the surface is in
// the line of sight of the light.
float diffuseFactor = dot(lightVec, v.normal);
[branch]
if( diffuseFactor > 0.0f )
{
float specPower = max(v.spec.a, 1.0f);
float3 toEye = normalize(eyePos - v.pos);
float3 R = reflect(-lightVec, v.normal);
float specFactor = pow(max(dot(R, toEye), 0.0f), specPower);
// diffuse and specular terms
litColor += diffuseFactor * v.diffuse * L.diffuse;
litColor += specFactor * v.spec * L.spec;
}
// attenuate
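// dot(L.att, float3(1.0f, d, d*d)) expands to att.x + att.y*d + att.z*d*d,
// i.e. the usual constant + linear + quadratic attenuation denominator.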
return litColor / dot(L.att, float3(1.0f, d, d*d));
}
The Effect file:
#include "lighthelper.fx"
#define MaxLights 2
cbuffer cbPerFrame
{
uniform extern Light gLight[MaxLights];
int gLightType;
float3 gEyePosW;
};
bool gSpecularEnabled;
cbuffer cbPerObject
{
float4x4 gWorld;
float4x4 gWVP;
float4x4 gTexMtx;
};
// Nonnumeric values cannot be added to a cbuffer.
Texture2D gDiffuseMap;
Texture2D gSpecMap;
SamplerState gTriLinearSam
{
Filter = MIN_MAG_MIP_LINEAR;
AddressU=Mirror;
AddressV=Mirror;
};
struct VS_IN
{
float3 posL : POSITION;
float3 normalL : NORMAL;
float2 texC : TEXCOORD;
float4 diffuse : DIFFUSE;
float4 spec : SPECULAR;
};
struct VS_OUT
{
float4 posH : SV_POSITION;
float3 posW : POSITION;
float3 normalW : NORMAL;
float2 texC : TEXCOORD;
float4 diffuse : DIFFUSE;
float4 spec : SPECULAR;
};
VS_OUT VS(VS_IN vIn)
{
VS_OUT vOut;
// Transform to world space.
vOut.posW = mul(float4(vIn.posL, 1.0f), gWorld);
vOut.normalW = mul(float4(vIn.normalL, 0.0f), gWorld);
// Transform to homogeneous clip space.
vOut.posH = mul(float4(vIn.posL, 1.0f), gWVP);
// Output vertex attributes for interpolation across triangle.
vOut.texC = mul(float4(vIn.texC, 0.0f, 1.0f), gTexMtx);
vOut.diffuse = vIn.diffuse;
vOut.spec = vIn.spec;
return vOut;
}
float4 PS(VS_OUT pIn) : SV_Target
{
// Get materials from texture maps.
float4 diffuse = gDiffuseMap.Sample( gTriLinearSam, pIn.texC );
float4 spec = gSpecMap.Sample( gTriLinearSam, pIn.texC );
// Map [0,1] --> [0,256]
spec.a *= 256.0f;
// Interpolating normal can make it not be of unit length so normalize it.
float3 normalW = normalize(pIn.normalW);
// Compute the lit color for this pixel.
SurfaceInfo v = {pIn.posW, normalW, diffuse, spec};
float3 litColor;
for(int i = 0; i < MaxLights; ++i)
{
if( i==0) // Parallel
{
//litColor += ParallelLight(v, gLight[i], gEyePosW);
}
else // Point
{
litColor += PointLight(v, gLight[i], gEyePosW);
}
}
return float4(litColor, diffuse.a);
}
technique10 TexTech
{
pass P0
{
SetVertexShader( CompileShader( vs_4_0, VS() ) );
SetGeometryShader( NULL );
SetPixelShader( CompileShader( ps_4_0, PS() ) );
}
}
And the Light defines:
myLight[1].ambient = D3DXCOLOR(0.4f, 0.8f, 0.4f, 1.0f);
myLight[1].diffuse = D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f);
myLight[1].specular = D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f);
myLight[1].att.x = 0.0f;
myLight[1].att.y = 0.1f;
myLight[1].att.z = 0.0f;
myLight[1].range = 50.0f;

SSAOeffect.fx from the Screen Space Ambient Occlusion XNA 3.1 example doesn't work in XNA 4.0

I'm trying to convert the Screen Space Ambient Occlusion example from XNA 3.1 to XNA 4.0. I've fixed all the problems in the source except for a strange problem in a shader file. I've gone through and fixed all the obvious issues with the shader as guided by Shawn Hargreaves' blog, but when it compiles it uses 620 instruction slots, well over the 512 instruction slot limit. How could this have worked in XNA 3.1 but not in XNA 4.0?
The changes from the 3.1 copy of the file are very minimal and only consist of renaming a few functions. Below is the full shader source in its current form. I'll be very grateful for any help in reducing the number of instruction slots this compiles to.
float sampleRadius;
float distanceScale;
float4x4 Projection;
float3 cornerFustrum;
struct VS_OUTPUT
{
float4 pos : POSITION;
float2 TexCoord : TEXCOORD0;
float3 viewDirection : TEXCOORD1;
};
VS_OUTPUT VertexShaderFunction(
float4 Position : POSITION, float2 TexCoord : TEXCOORD0)
{
VS_OUTPUT Out = (VS_OUTPUT)0;
Out.pos = Position;
Position.xy = sign(Position.xy);
Out.TexCoord = (float2(Position.x, -Position.y) + float2( 1.0f, 1.0f ) ) * 0.5f;
float3 corner = float3(-cornerFustrum.x * Position.x,
cornerFustrum.y * Position.y, cornerFustrum.z);
Out.viewDirection = corner;
return Out;
}
texture depthTexture;
texture randomTexture;
sampler2D depthSampler = sampler_state
{
Texture = <depthTexture>;
ADDRESSU = CLAMP;
ADDRESSV = CLAMP;
MAGFILTER = LINEAR;
MINFILTER = LINEAR;
};
sampler2D RandNormal = sampler_state
{
Texture = <randomTexture>;
ADDRESSU = WRAP;
ADDRESSV = WRAP;
MAGFILTER = LINEAR;
MINFILTER = LINEAR;
};
float4 PixelShaderFunction(VS_OUTPUT IN) : COLOR0
{
float4 samples[16] =
{
float4(0.355512, -0.709318, -0.102371, 0.0 ),
float4(0.534186, 0.71511, -0.115167, 0.0 ),
float4(-0.87866, 0.157139, -0.115167, 0.0 ),
float4(0.140679, -0.475516, -0.0639818, 0.0 ),
float4(-0.0796121, 0.158842, -0.677075, 0.0 ),
float4(-0.0759516, -0.101676, -0.483625, 0.0 ),
float4(0.12493, -0.0223423, -0.483625, 0.0 ),
float4(-0.0720074, 0.243395, -0.967251, 0.0 ),
float4(-0.207641, 0.414286, 0.187755, 0.0 ),
float4(-0.277332, -0.371262, 0.187755, 0.0 ),
float4(0.63864, -0.114214, 0.262857, 0.0 ),
float4(-0.184051, 0.622119, 0.262857, 0.0 ),
float4(0.110007, -0.219486, 0.435574, 0.0 ),
float4(0.235085, 0.314707, 0.696918, 0.0 ),
float4(-0.290012, 0.0518654, 0.522688, 0.0 ),
float4(0.0975089, -0.329594, 0.609803, 0.0 )
};
IN.TexCoord.x += 1.0/1600.0;
IN.TexCoord.y += 1.0/1200.0;
normalize (IN.viewDirection);
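// (note: normalize() returns its result, so as written this call does not modify IN.viewDirection)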
float depth = tex2D(depthSampler, IN.TexCoord).a;
float3 se = depth * IN.viewDirection;
float3 randNormal = tex2D( RandNormal, IN.TexCoord * 200.0 ).rgb;
float3 normal = tex2D(depthSampler, IN.TexCoord).rgb;
float finalColor = 0.0f;
for (int i = 0; i < 16; i++)
{
float3 ray = reflect(samples[i].xyz,randNormal) * sampleRadius;
//if (dot(ray, normal) < 0)
// ray += normal * sampleRadius;
float4 sample = float4(se + ray, 1.0f);
float4 ss = mul(sample, Projection);
float2 sampleTexCoord = 0.5f * ss.xy/ss.w + float2(0.5f, 0.5f);
sampleTexCoord.x += 1.0/1600.0;
sampleTexCoord.y += 1.0/1200.0;
float sampleDepth = tex2D(depthSampler, sampleTexCoord).a;
if (sampleDepth == 1.0)
{
finalColor ++;
}
else
{
float occlusion = distanceScale* max(sampleDepth - depth, 0.0f);
finalColor += 1.0f / (1.0f + occlusion * occlusion * 0.1);
}
}
return float4(finalColor/16, finalColor/16, finalColor/16, 1.0f);
}
technique SSAO
{
pass P0
{
VertexShader = compile vs_3_0 VertexShaderFunction();
PixelShader = compile ps_3_0 PixelShaderFunction();
}
}
XNA 4.0 enforces the 512 instruction limit (which the Xbox 360 has and the HiDef profile enforces as a minimum), whereas XNA 3.1 didn't.
On the plus side, any graphics card that can run the XNA HiDef profile shouldn't fall over, whereas if XNA had allowed any number of instructions, it might have.
Since you have a loop in your code, you could try forcing the compiler to use loop instructions if it's currently unrolling it (I'm not familiar with this myself); a sketch of the idea is below.
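For illustration, a hedged sketch of that suggestion applied to the sample loop above (I haven't measured the resulting instruction count, and the ps_3_0 backend may still force an unroll if it cannot dynamically index the local samples array): the [loop] attribute asks the compiler to emit a real loop instead of unrolling all sixteen iterations, and tex2Dlod sidesteps the restrictions on implicit-gradient sampling inside dynamic flow control.
// Force a real loop instead of unrolling (ps_3_0 supports dynamic loops).
[loop]
for (int i = 0; i < 16; i++)
{
    float3 ray = reflect(samples[i].xyz, randNormal) * sampleRadius;
    float4 ss = mul(float4(se + ray, 1.0f), Projection);
    float2 sampleTexCoord = 0.5f * ss.xy / ss.w + float2(0.5f, 0.5f)
                          + float2(1.0 / 1600.0, 1.0 / 1200.0);
    // tex2Dlod (explicit mip level) is safe inside a dynamic loop.
    float sampleDepth = tex2Dlod(depthSampler, float4(sampleTexCoord, 0.0f, 0.0f)).a;
    if (sampleDepth == 1.0)
    {
        finalColor++;
    }
    else
    {
        float occlusion = distanceScale * max(sampleDepth - depth, 0.0f);
        finalColor += 1.0f / (1.0f + occlusion * occlusion * 0.1);
    }
}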
If you are looking for an open-source XNA 4 SSAO, check out this link: Deferred Rendering with SSAO Normals
Reduce the number of samples in the shader from 16 to 8.
