First of all, I'm new to XNA and HLSL, so my knowledge is very limited.
I'm writing a small application to display a digital elevation model, consisting of 16-bit values, in 2D by using different colors for different heights.
The color mapping is done by a pixel shader via a lookup texture.
At the moment I'm putting the values into the red and green components of a Texture2D and mapping them to colors in a 256x256 lookup texture.
As the coloring is discrete (not continuous), I set MinFilter/MagFilter to Point, which leads to a blocky look when zooming in.
Is there a way to get the linear filtering back after the lookup? Or does anybody know a better way to do the mapping?
Shader:
sampler2D tex1 : register(s0) = sampler_state
{
MinFilter = Point;
MagFilter = Point;
MipFilter = linear;
};
texture2D lookupTex;
sampler2D lookup = sampler_state
{
Texture = <lookupTex>;
MinFilter = Point;
MagFilter = Point;
MipFilter = Point;
};
float4 PixelShaderLookup(float4 incol : COLOR, float2 UV : TEXCOORD0) : COLOR0
{
// Fetch the packed height value (one byte each in the red and green channels).
float4 inCol = tex2D(tex1, UV);
// Scale and offset so every byte value lands on a texel center of the
// 256x256 lookup texture (half-texel correction).
float scale = (256.0 - 1.0) / 256.0;
float offset = 1.0 / (2.0 * 256.0);
float4 outCol = tex2D(lookup, scale * inCol.gr + offset);
return outCol;
}
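For reference, a minimal sketch of how the raw 16-bit height could be reconstructed in the shader from the packing described above; it assumes red holds the high byte and green the low byte (swap the channels if the packing is the other way around):
float4 heightSample = tex2D(tex1, UV);
float height16 = heightSample.r * 255.0 * 256.0 + heightSample.g * 255.0; // 0 .. 65535
float heightNorm = height16 / 65535.0; // normalized to 0 .. 1
With the height available as a single scalar like this, one option for smoother zooming is to point-sample the four neighbouring texels, decode each, blend the heights manually with bilinear weights, and only then do the color lookup, so the filtering happens on heights rather than on the already-mapped colors.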
Thanks for your help and a happy new year :)
I have a question about normal mapping in a DirectX 9 shader.
Currently, my terrain shader output for normal map + diffuse color only results in this image, which looks good to me.
If I use an empty normal map image like this one, my shader output for normal, diffuse, and color map looks like this.
But if I use one that includes a color map, I get a really strange result.
Does anyone have an idea what could cause this issue?
Here are some snippets.
float4 PS_TERRAIN(VSTERRAIN_OUTPUT In) : COLOR0
{
float4 fDiffuseColor;
float lightIntensity;
// Expand the normal map sample from [0,1] to [-1,1].
float3 bumpMap = 2.0f * tex2D( Samp_Bump, In.Tex.xy ).xyz - 1.0f;
// Build the per-pixel normal from the tangent frame.
float3 bumpNormal = (bumpMap.x * In.Tangent) + (bumpMap.y * In.Bitangent) + (bumpMap.z * In.Normal);
bumpNormal = normalize(bumpNormal);
// Directional light test (hardcoded)
float3 lightDirection = float3(0.0f, -0.5f, -0.2f);
float3 lightDir = -lightDirection;
// Bump
lightIntensity = saturate(dot( bumpNormal, lightDir));
// We are using a lightmap to do our alpha calculation for given pixel
float4 LightMaptest = tex2D( Samp_Lightmap, In.Tex.zw ) * 2.0f;
fDiffuseColor.a = LightMaptest.a;
if( !bAlpha )
fDiffuseColor.a = 1.0;
// Sample the pixel color from the texture using the sampler at this texture coordinate location.
float4 textureColor = tex2D( Samp_Diffuse, In.Tex.xy );
// Combine the color map value into the texture color.
textureColor = saturate(textureColor * LightMaptest);
textureColor.a = LightMaptest.a;
fDiffuseColor.rgb = saturate(lightIntensity * I_d).rgb;
fDiffuseColor = fDiffuseColor * textureColor; // If I enable this line, it goes crazy
return fDiffuseColor;
}
Why is sPos.z used here to get the texcoord?
Out.shadowCrd.x = 0.5 * (sPos.z + sPos.x);
Out.shadowCrd.y = 0.5 * (sPos.z - sPos.y);
Out.shadowCrd.z = 0;
Out.shadowCrd.w = sPos.z;
It is a shader that implements shadow mapping, from "Shaders for Game Programming and Artists".
The first pass renders the depth texture in light space (the light acts as the camera, looking toward the origin).
The second pass reads that depth and calculates the shadow.
Before this code, the model has already been transformed into light space.
Then the texcoord should be calculated to read the depth texture.
But I can't understand the algorithm for calculating the texcoord. Why is sPos.z used here?
Here is the whole vertex shader of the second pass:
float distanceScale;
float4 lightPos;
float4 view_position;
float4x4 view_proj_matrix;
float4x4 proj_matrix;
float time_0_X;
struct VS_OUTPUT
{
float4 Pos: POSITION;
float3 normal: TEXCOORD0;
float3 lightVec : TEXCOORD1;
float3 viewVec: TEXCOORD2;
float4 shadowCrd: TEXCOORD3;
};
VS_OUTPUT vs_main(float4 inPos: POSITION, float3 inNormal: NORMAL)
{
VS_OUTPUT Out;
// Animate the light position.
float3 lightPos;
lightPos.x = cos(1.321 * time_0_X);
lightPos.z = sin(0.923 * time_0_X);
lightPos.xz = 100 * normalize(lightPos.xz);
lightPos.y = 100;
// Project the object's position
Out.Pos = mul(view_proj_matrix, inPos);
// World-space lighting
Out.normal = inNormal;
Out.lightVec = distanceScale * (lightPos - inPos.xyz);
Out.viewVec = view_position - inPos.xyz;
// Create view vectors for the light, looking at (0,0,0)
float3 dirZ = -normalize(lightPos);
float3 up = float3(0,0,1);
float3 dirX = cross(up, dirZ);
float3 dirY = cross(dirZ, dirX);
// Transform into light's view space.
float4 pos;
inPos.xyz -= lightPos;
pos.x = dot(dirX, inPos);
pos.y = dot(dirY, inPos);
pos.z = dot(dirZ, inPos);
pos.w = 1;
// Project it into light space to determine the shadow
// map position
float4 sPos = mul(proj_matrix, pos);
// Use projective texturing to map the position of each fragment
// to its corresponding texel in the shadow map.
sPos.z += 10;
Out.shadowCrd.x = 0.5 * (sPos.z + sPos.x);
Out.shadowCrd.y = 0.5 * (sPos.z - sPos.y);
Out.shadowCrd.z = 0;
Out.shadowCrd.w = sPos.z;
return Out;
}
Pixel Shader:
float shadowBias;
float backProjectionCut;
float Ka;
float Kd;
float Ks;
float4 modelColor;
sampler ShadowMap;
sampler SpotLight;
float4 ps_main(
float3 inNormal: TEXCOORD0,
float3 lightVec: TEXCOORD1,
float3 viewVec: TEXCOORD2,
float4 shadowCrd: TEXCOORD3) : COLOR
{
// Normalize the normal
inNormal = normalize(inNormal);
// Radial distance and normalize light vector
float depth = length(lightVec);
lightVec /= depth;
// Standard lighting
float diffuse = saturate(dot(lightVec, inNormal));
float specular = pow(saturate(
dot(reflect(-normalize(viewVec), inNormal), lightVec)),
16);
// The depth of the fragment closest to the light
float shadowMap = tex2Dproj(ShadowMap, shadowCrd);
// A spot image of the spotlight
float spotLight = tex2Dproj(SpotLight, shadowCrd);
// If the depth is larger than the stored depth, this fragment
// is not the closest to the light, that is we are in shadow.
// Otherwise, we're lit. Add a bias to avoid precision issues.
float shadow = (depth < shadowMap + shadowBias);
// Cut back-projection, that is, make sure we don't light
// anything behind the light.
shadow *= (shadowCrd.w > backProjectionCut);
// Modulate with spotlight image
shadow *= spotLight;
// Shadow any light contribution except ambient
return Ka * modelColor +
(Kd * diffuse * modelColor + Ks * specular) * shadow;
}
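For what it's worth, here is one way to read the shadowCrd setup in the vertex shader above (my annotation, not from the book), given that tex2Dproj in the pixel shader divides the coordinate by its w component, which here is sPos.z:
u = shadowCrd.x / shadowCrd.w = 0.5 * (sPos.z + sPos.x) / sPos.z = 0.5 + 0.5 * (sPos.x / sPos.z)
v = shadowCrd.y / shadowCrd.w = 0.5 * (sPos.z - sPos.y) / sPos.z = 0.5 - 0.5 * (sPos.y / sPos.z)
So sPos.z plays the role of the perspective-divide denominator, and the 0.5 * (z + x) / 0.5 * (z - y) form folds that divide and the [-1, 1] to [0, 1] texture remap (with a flipped v axis) into a single expression, which is why sPos.z appears both in the x/y terms and in w.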
Really hoping that someone can help me here - I can usually resolve bugs in C# on my own, since I have a fair amount of experience with it, but I don't have a lot to go on with HLSL.
The picture linked below shows the same model (programmatically generated at runtime) twice: the first (white) one using BasicEffect and the second using my custom shader, listed below. The fact that it works with BasicEffect makes me think it's not an issue with generating the normals for the model or anything like that.
I've included different levels of subdivision to better illustrate the issue. It's worth mentioning that both effects use the same lighting direction.
https://imagizer.imageshack.us/v2/801x721q90/673/qvXyBk.png
Here's my shader code (feel free to pick it apart, any tips are very welcome):
float4x4 WorldViewProj;
float4x4 NormalRotation = float4x4(1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1);
float4 ModelColor = float4(1, 1, 1, 1);
bool TextureEnabled = false;
Texture ModelTexture;
sampler ColoredTextureSampler = sampler_state
{
texture = <ModelTexture>;
magfilter = LINEAR; minfilter = LINEAR; mipfilter = LINEAR;
AddressU = mirror; AddressV = mirror;
};
float4 AmbientColor = float4(1, 1, 1, 1);
float AmbientIntensity = 0.1;
float3 DiffuseLightDirection = float3(1, 0, 0);
float4 DiffuseColor = float4(1, 1, 1, 1);
float DiffuseIntensity = 1.0;
struct VertexShaderInput
{
float4 Position : POSITION0;
float4 Normal : NORMAL0;
float2 TextureCoordinates : TEXCOORD0;
};
struct VertexShaderOutput
{
float4 Position : POSITION0;
float4 Color : COLOR0;
float2 TextureCoordinates : TEXCOORD0;
};
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
VertexShaderOutput output = (VertexShaderOutput)0;
output.Position = mul(input.Position, WorldViewProj);
float4 normal = mul(input.Normal, NormalRotation);
float lightIntensity = dot(normal, DiffuseLightDirection);
output.Color = saturate(DiffuseColor * DiffuseIntensity * lightIntensity);
output.TextureCoordinates = input.TextureCoordinates;
return output;
}
float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0
{
float4 pixBaseColor = ModelColor;
if (TextureEnabled == true)
{
pixBaseColor = tex2D(ColoredTextureSampler, input.TextureCoordinates);
}
float4 lighting = saturate((input.Color + AmbientColor * AmbientIntensity) * pixBaseColor);
return lighting;
}
technique BestCurrent
{
pass Pass1
{
VertexShader = compile vs_2_0 VertexShaderFunction();
PixelShader = compile ps_2_0 PixelShaderFunction();
}
}
In general, when implementing a lighting equation, there are a few things to ensure:
Normals, light directions, and other directional vectors should be normalized before using them in a dot product. In your case you could add something like:
normal = normalize(normal);
The same should be done for DiffuseLightDirection if it is not already normalized. It already is with your default value, but if your app changes it, it might not be normalized anymore. For that reason, it would be better to normalize it in the application code, since that only needs to be done once when it changes, not per vertex.
Also remember that if you are multiplying the vector by a matrix that contains a scale, the vector will no longer be normalized, so it will need to be re-normalized.
The light direction and the normal must point in the same direction which is out from the surface. Your default light direction is (1,0,0). If you want light to point in the +x direction, then you must actually negate the vector before performing the dot product with the normal so that it is pointing out from the surface just like the normal. If you already take this into account, then it's not a problem.
Vectors can't be translated since they are just a direction not a position. So it is important to ensure when you transform them with a matrix that either the fourth component (w) of the vector is 0 or the matrix you are transforming it with has no translation. Setting w to 0 will zero out any translation from the matrix during the multiply. Since your matrix is called NormalRotation, I'm assuming it only contains a rotation, so this probably isn't an issue.
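Putting those points together, here is a rough sketch of how the vertex shader from the question could look with the normalization, the negated light direction, and w = 0 applied; it keeps the question's row-vector mul convention and is illustrative rather than a verified drop-in fix:
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
    VertexShaderOutput output = (VertexShaderOutput)0;
    output.Position = mul(input.Position, WorldViewProj);

    // w = 0 drops any translation in NormalRotation; re-normalize in case the matrix contains a scale.
    float3 normal = normalize(mul(float4(input.Normal.xyz, 0), NormalRotation).xyz);

    // Assumption: DiffuseLightDirection points from the light toward the scene,
    // so flip it (and normalize defensively) to get a surface-to-light vector.
    float3 toLight = -normalize(DiffuseLightDirection);

    float lightIntensity = saturate(dot(normal, toLight));
    output.Color = saturate(DiffuseColor * DiffuseIntensity * lightIntensity);
    output.TextureCoordinates = input.TextureCoordinates;
    return output;
}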
I just started messing around with shadow mapping. I understand the algorithm used. The thing is I cannot for the life of me figure out where I am messing up in the HLSL code. Here it is:
//These change
float4x4 worldViewProj;
float4x4 world;
texture tex;
//These remain constant
float4x4 lightSpace;
float4x4 lightViewProj;
float4x4 textureBias;
texture shadowMap;
sampler TexS = sampler_state
{
Texture = <tex>;
MinFilter = LINEAR;
MagFilter = LINEAR;
MipFilter = LINEAR;
AddressU = WRAP;
AddressV = WRAP;
};
sampler TexShadow = sampler_state
{
Texture = <shadowMap>;
MinFilter = LINEAR;
MagFilter = LINEAR;
MipFilter = LINEAR;
};
struct A2V
{
float3 posL : POSITION0;
float2 texCo : TEXCOORD0;
};
struct OutputVS
{
float4 posH : POSITION0;
float2 texCo : TEXCOORD0;
float4 posW : TEXCOORD2;
};
//Vertex Shader Depth Pass
OutputVS DepthVS(A2V IN)
{
OutputVS OUT = (OutputVS)0;
//Get screen coordinates in light space for texture map
OUT.posH = mul(float4(IN.posL, 1.f), lightViewProj);
//Get the depth by performing a perspective divide on the projected coordinates
OUT.posW.x = OUT.posH.z/OUT.posH.w;
return OUT;
}
//Pixel shader depth Pass
float4 DepthPS(OutputVS IN) : COLOR
{
//Texture only uses red channel, just store it there
return float4(IN.posW.x, 0, 0, 1);
}
//VertexShader Draw Pass
OutputVS DrawVS(A2V IN)
{
OutputVS OUT = (OutputVS)0;
//Get the screen coordinates for this pixel
OUT.posH = mul(float4(IN.posL, 1.f), worldViewProj);
//Send texture coordinates through
OUT.texCo = IN.texCo;
//Pass its world coordinates through
OUT.posW = mul(float4(IN.posL, 1.f), world);
return OUT;
}
//PixelShader Draw Pass
float4 DrawPS(OutputVS IN) : COLOR
{
//Get the pixel's screen position in light space
float4 texCoord = mul(IN.posW, lightViewProj);
//Perform perspective divide to normalize coordinates [-1,1]
//texCoord.x = texCoord.x/texCoord.w;
//texCoord.y = texCoord.y/texCoord.w;
//Multiply by texture bias to bring in range 0-1
texCoord = mul(texCoord, textureBias);
//Get corresponding depth value
float prevDepth = tex2D(TexShadow, texCoord.xy);
//Check if it is in shadow
float4 posLight = mul(IN.posW, lightViewProj);
float currDepth = posLight.z/posLight.w;
if (currDepth >= prevDepth)
return float4(0.f, 0.f, 0.f, 1.f);
else
return tex2D(TexS, IN.texCo);
}
//Effect info
technique ShadowMap
{
pass p0
{
vertexShader = compile vs_2_0 DepthVS();
pixelShader = compile ps_2_0 DepthPS();
}
pass p1
{
vertexShader = compile vs_2_0 DrawVS();
pixelShader = compile ps_2_0 DrawPS();
}
}
I have verified that all my matrices are correct and that the depth map is being drawn correctly. I rewrote all of the C++ that handles this code and made it neater, and I am still getting the same problem. I am not currently blending the shadows, just drawing them flat black until I can get them to draw correctly. The light uses an orthographic projection because it is a directional light. I don't have enough reputation points to embed images, but here are the URLs: Depth map - http://i.imgur.com/T2nITid.png
Program output - http://i.imgur.com/ae3U3N0.png
Any help or insight would be greatly appreciated, as it's for a school project. Thanks!
The value you get from the depth buffer is a float value from 0 to 1. As you probably already know, floating-point values have limited precision: the finer the differences you need to represent, the less accurately they are stored, and that is where you end up with artifacts.
There are some things you can do. The easiest is to bring the far and near Z values of the projection matrix closer to each other, so that the depth buffer does not need as much precision to represent how far away an object is. I usually find that a range of about 1-200 gives me a fairly accurate result.
Another easy thing you can do is increase the size of the texture you are drawing to, as that gives you more pixels and therefore represents the scene more accurately.
There are also plenty of more complex things that game engines do to reduce shadow-mapping artifacts, but you could write a book about that; if you really do want to get into it, then I would recommend you start with the blog.
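One more small thing that can help, which this answer does not cover but the book shader earlier in this thread also uses: add a small bias to the depth comparison in DrawPS so borderline values do not self-shadow. A minimal sketch, with the bias value as a guess to be tuned:
// Hypothetical tweak to the comparison in DrawPS: tolerate small depth errors.
float depthBias = 0.0015f; // assumption: tune for your scene and depth range
if (currDepth > prevDepth + depthBias)
    return float4(0.f, 0.f, 0.f, 1.f); // in shadow
else
    return tex2D(TexS, IN.texCo); // lit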
I am trying to build an infinite fog shader. This fog is applied to a 3D plane.
For the moment I have a Z-depth fog, and I am encountering some issues.
As you can see in the screenshot, there are two views.
The green color is my 3D plane. The problem is the red line. It seems that this line depends on my camera, which is not good, because when I rotate my camera the line is affected by the camera's position and rotation.
I don't know where it comes from or how to make my fog limit independent of the camera position.
Shader
Pass {
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#include "UnityCG.cginc"
uniform float4 _FogColor;
uniform sampler2D _CameraDepthTexture;
float _Depth;
float _DepthScale;
struct v2f {
float4 pos : SV_POSITION;
float4 projection : TEXCOORD0;
float4 screenPosition : TEXCOORD1;
};
v2f vert(appdata_base v) {
v2f o;
o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
// o.projection = ComputeGrabScreenPos(o.pos);
float4 position = o.pos;
#if UNITY_UV_STARTS_AT_TOP
float scale = -1.0;
#else
float scale = 1.0;
#endif
float4 p = position * 0.5f;
p.xy = float2(p.x, p.y * scale) + p.w;
p.zw = position.zw;
o.projection = p;
// o.screenPosition = ComputeScreenPos(o.pos);
position = o.pos;
float4 q = position * 0.5f;
#if defined(UNITY_HALF_TEXEL_OFFSET)
q.xy = float2(q.x, q.y * _ProjectionParams.x) + q.w * _ScreenParams.zw;
#else
q.xy = float2(q.x, q.y * _ProjectionParams.x) + q.w;
#endif
#if defined(SHADER_API_FLASH)
q.xy *= unity_NPOTScale.xy;
#endif
q.zw = position.zw;
q.zw = 1.0f;
o.screenPosition = q;
return o;
}
sampler2D _GrabTexture;
float4 frag(v2f IN) : COLOR {
float3 uv = UNITY_PROJ_COORD(IN.projection);
float depth = UNITY_SAMPLE_DEPTH(tex2Dproj(_CameraDepthTexture, uv));
depth = LinearEyeDepth(depth);
return saturate((depth - IN.screenPosition.w + _Depth) * _DepthScale);
}
ENDCG
}
Next, I want to rotate my fog to get a Y-depth fog, but I don't know how to achieve this effect.
I see two ways to achieve what you want:
One is to render the depth of your plane to a texture and calculate the fog based on the difference between the object's depth and the plane's depth: 0 if the object's depth is less, and (objDepth - planeDepth) * scale if it is greater.
The other is, instead of rendering to a texture, to calculate the distance to the plane in the shader and use it directly (a rough sketch of this is at the end of this exchange).
I am not sure exactly what you are doing, since I am not very familiar with Unity surface shaders, but judging from the code and the result, it is something different.
It seems that this is caused by _CameraDepthTexture; that's why the depth is calculated relative to the camera position.
But I don't know how to correct it... It seems that there is no way to get the depth from another point. Any ideas?
Here is another example. In green you can "see" the object, and the blue line is, for me, the fog as it should be.
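Along the lines of the second suggestion (computing the distance to the plane directly in the shader), here is a minimal sketch of a camera-independent, height-based fog factor; the fog-plane height and the world-position plumbing are assumptions, not part of the original shader:
// Hypothetical Y-depth fog factor (Unity CG / HLSL).
// worldPos would come from the vertex shader, e.g.
//   o.worldPos = mul(unity_ObjectToWorld, v.vertex).xyz; // _Object2World in older Unity versions
float YDepthFogFactor(float3 worldPos, float fogPlaneY, float depthScale)
{
    // Distance below the fog plane: zero above it, growing with depth below it,
    // independent of camera position and rotation.
    float distBelow = max(fogPlaneY - worldPos.y, 0.0);
    return saturate(distBelow * depthScale);
}
The returned factor can then drive the same kind of blend the current shader returns, for example by lerping the scene color toward _FogColor.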