I have the following fragment and vertex shaders.
HLSL code
// Vertex shader
//-----------------------------------------------------------------------------------
void mainVP(
float4 position : POSITION,
out float4 outPos : POSITION,
out float2 outDepth : TEXCOORD0,
uniform float4x4 worldViewProj,
uniform float4 texelOffsets,
uniform float4 depthRange) //Passed as float4(minDepth, maxDepth,depthRange,1 / depthRange)
{
outPos = mul(worldViewProj, position);
outPos.xy += texelOffsets.zw * outPos.w;
outDepth.x = (outPos.z - depthRange.x)*depthRange.w;//value [0..1]
outDepth.y = outPos.w;
}
// Fragment shader
void mainFP( float2 depth: TEXCOORD0, out float4 result : COLOR) {
float finalDepth = depth.x;
result = float4(finalDepth, finalDepth, finalDepth, 1);
}
This shader produces a depth map.
This depth map must then be used to reconstruct the world positions for the depth values. I have searched other posts but none of them seem to store the depth using the same formula I am using. The only similar post is the following
Reconstructing world position from linear depth
Therefore, I am having a hard time reconstructing the point using the x and y coordinates from the depth map and the corresponding depth.
I need some help in constructing the shader to get the world view position for a depth at particular texture coordinates.
It doesn't look like you're normalizing your depth. Try this instead. In your VS, do:
outDepth.xy = outPos.zw;
And in your PS to render the depth, you can do:
float finalDepth = depth.x / depth.y;
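Putting those two changes together, the modified shaders would look something like this (just a sketch based on your original code, keeping the same parameter names; depthRange is no longer needed here):
// Vertex shader: write clip-space z and w instead of the pre-scaled depth
void mainVP(
    float4 position : POSITION,
    out float4 outPos : POSITION,
    out float2 outDepth : TEXCOORD0,
    uniform float4x4 worldViewProj,
    uniform float4 texelOffsets,
    uniform float4 depthRange)
{
    outPos = mul(worldViewProj, position);
    outPos.xy += texelOffsets.zw * outPos.w;
    outDepth.xy = outPos.zw; // store z and w; the divide happens per pixel
}
// Fragment shader: divide to get normalized depth in [0..1]
void mainFP(float2 depth : TEXCOORD0, out float4 result : COLOR)
{
    float finalDepth = depth.x / depth.y;
    result = float4(finalDepth, finalDepth, finalDepth, 1);
}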
Here is a function to then extract the view-space position of a particular pixel from your depth texture. I'm assuming you're rendering a screen-aligned quad and performing your position extraction in the pixel shader.
// Function for converting depth to view-space position
// in deferred pixel shader pass. vTexCoord is a texture
// coordinate for a full-screen quad, such that x=0 is the
// left of the screen, and y=0 is the top of the screen.
float3 VSPositionFromDepth(float2 vTexCoord)
{
// Get the depth value for this pixel
float z = tex2D(DepthSampler, vTexCoord).r;
// Get x/w and y/w from the viewport position
float x = vTexCoord.x * 2 - 1;
float y = (1 - vTexCoord.y) * 2 - 1;
float4 vProjectedPos = float4(x, y, z, 1.0f);
// Transform by the inverse projection matrix
float4 vPositionVS = mul(vProjectedPos, g_matInvProjection);
// Divide by w to get the view-space position
return vPositionVS.xyz / vPositionVS.w;
}
For a more advanced approach that reduces the number of calculations involved, but requires using the view frustum and a special way of rendering the screen-aligned quad, see here.
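As a rough sketch of that frustum-based idea (my own assumptions about the technique, not code from the linked post): each vertex of the full-screen quad carries the view-space position of the matching far-plane corner, and the depth texture stores linear view-space depth divided by the far clip distance, which is not the z/w value discussed above. The pixel shader then only needs a single multiply:
// Assumes vFrustumRayVS is the view-space far-plane corner passed per quad
// vertex and interpolated by the rasterizer, and that the stored depth is
// linear: viewSpaceZ / farClip.
float3 VSPositionFromLinearDepth(float2 vTexCoord, float3 vFrustumRayVS)
{
    float linearDepth = tex2D(DepthSampler, vTexCoord).r;
    return vFrustumRayVS * linearDepth;
}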
I'm trying to port my engine to DirectX, and I'm currently having issues with depth reconstruction. It works perfectly in OpenGL (even though I use a bit of an expensive method). Every part besides the depth reconstruction works so far. I use GLM because it's a good math library that doesn't require the user to install any dependencies.
So basically I get my GLM matrices:
struct DefferedUBO {
glm::mat4 view;
glm::mat4 invProj;
glm::vec4 eyePos;
glm::vec4 resolution;
};
DefferedUBO deffUBOBuffer;
// ...
glm::mat4 projection = glm::perspective(engine.settings.fov, aspectRatio, 0.1f, 100.0f);
// Get My Camera
CTransform *transform = &engine.transformSystem.components[engine.entities[entityID].components[COMPONENT_TRANSFORM]];
// Get the View Matrix
glm::mat4 view = glm::lookAt(
transform->GetPosition(),
transform->GetPosition() + transform->GetForward(),
transform->GetUp()
);
deffUBOBuffer.invProj = glm::inverse(projection);
deffUBOBuffer.view = glm::inverse(view);
if (engine.settings.graphicsLanguage == GRAPHICS_DIRECTX) {
deffUBOBuffer.invProj = glm::transpose(deffUBOBuffer.invProj);
deffUBOBuffer.view = glm::transpose(deffUBOBuffer.view);
}
// Abstracted so I can use OGL, DX, VK, or even Metal when I get around to it.
deffUBO->UpdateUniformBuffer(&deffUBOBuffer);
deffUBO->Bind();
Then in HLSL, I simply use the following:
cbuffer MatrixInfoType {
matrix invView;
matrix invProj;
float4 eyePos;
float4 resolution;
};
float4 ViewPosFromDepth(float depth, float2 TexCoord) {
float z = depth; // * 2.0 - 1.0;
float4 clipSpacePosition = float4(TexCoord * 2.0 - 1.0, z, 1.0);
float4 viewSpacePosition = mul(invProj, clipSpacePosition);
viewSpacePosition /= viewSpacePosition.w;
return viewSpacePosition;
}
float3 WorldPosFromViewPos(float4 view) {
float4 worldSpacePosition = mul(invView, view);
return worldSpacePosition.xyz;
}
float3 WorldPosFromDepth(float depth, float2 TexCoord) {
return WorldPosFromViewPos(ViewPosFromDepth(depth, TexCoord));
}
// ...
// Sample the hardware depth buffer.
float depth = shaderTexture[3].Sample(SampleType[0], input.texCoord).r;
float3 position = WorldPosFromDepth(depth, input.texCoord).rgb;
Here's the result:
This just looks like random colors multiplied with the depth.
Ironically, when I remove the transposing, I get something closer to the truth, but not quite:
You're looking at Crytek Sponza. As you can see, the green area moves and rotates with the bottom of the camera. I have no idea at all why.
The correct version, along with Albedo, Specular, and Normals.
I fixed my problem over at gamedev.net; there was a matrix majorness issue as well as a depth-handling issue.
https://www.gamedev.net/forums/topic/692095-d3d-glm-depth-reconstruction-issues
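To give a rough idea of what those two classes of fix tend to look like (this is only an illustration under common D3D conventions, not necessarily the exact change from the linked thread): GLM matrices are column-major, which matches HLSL's default cbuffer packing, so the extra transpose for the Direct3D path is usually unnecessary; and the hardware depth value is already in [0, 1] while texture coordinates have a top-left origin, so only the y component needs flipping when rebuilding the clip-space position:
// Hypothetical reconstruction under D3D conventions:
//  - depth sampled from the hardware depth buffer is already in [0, 1]
//  - texture coordinates start at the top-left, so flip y for clip space
float4 ViewPosFromDepth(float depth, float2 TexCoord) {
    float2 ndcXY = float2(TexCoord.x * 2.0 - 1.0, (1.0 - TexCoord.y) * 2.0 - 1.0);
    float4 clipSpacePosition = float4(ndcXY, depth, 1.0);
    float4 viewSpacePosition = mul(invProj, clipSpacePosition);
    return viewSpacePosition / viewSpacePosition.w;
}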
I just started messing around with shadow mapping. I understand the algorithm used. The thing is I cannot for the life of me figure out where I am messing up in the HLSL code. Here it is:
//These change
float4x4 worldViewProj;
float4x4 world;
texture tex;
//These remain constant
float4x4 lightSpace;
float4x4 lightViewProj;
float4x4 textureBias;
texture shadowMap;
sampler TexS = sampler_state
{
Texture = <tex>;
MinFilter = LINEAR;
MagFilter = LINEAR;
MipFilter = LINEAR;
AddressU = WRAP;
AddressV = WRAP;
};
sampler TexShadow = sampler_state
{
Texture = <shadowMap>;
MinFilter = LINEAR;
MagFilter = LINEAR;
MipFilter = LINEAR;
};
struct A2V
{
float3 posL : POSITION0;
float2 texCo : TEXCOORD0;
};
struct OutputVS
{
float4 posH : POSITION0;
float2 texCo : TEXCOORD0;
float4 posW : TEXCOORD2;
};
//Vertex Shader Depth Pass
OutputVS DepthVS(A2V IN)
{
OutputVS OUT = (OutputVS)0;
//Get screen coordinates in light space for texture map
OUT.posH = mul(float4(IN.posL, 1.f), lightViewProj);
//Get the depth by performing a perspective divide on the projected coordinates
OUT.posW.x = OUT.posH.z/OUT.posH.w;
return OUT;
}
//Pixel shader depth Pass
float4 DepthPS(OutputVS IN) : COLOR
{
//Texture only uses red channel, just store it there
return float4(IN.posW.x, 0, 0, 1);
}
//VertexShader Draw Pass
OutputVS DrawVS(A2V IN)
{
OutputVS OUT = (OutputVS)0;
//Get the screen coordinates for this pixel
OUT.posH = mul(float4(IN.posL, 1.f), worldViewProj);
//Send texture coordinates through
OUT.texCo = IN.texCo;
//Pass its world coordinates through
OUT.posW = mul(float4(IN.posL, 1.f), world);
return OUT;
}
//PixelShader Draw Pass
float4 DrawPS(OutputVS IN) : COLOR
{
//Get the pixels screen position in light space
float4 texCoord = mul(IN.posW, lightViewProj);
//Perform perspective divide to normalize coordinates [-1,1]
//texCoord.x = texCoord.x/texCoord.w;
//texCoord.y = texCoord.y/texCoord.w;
//Multiply by texture bias to bring in range 0-1
texCoord = mul(texCoord, textureBias);
//Get corresponding depth value
float prevDepth = tex2D(TexShadow, texCoord.xy);
//Check if it is in shadow
float4 posLight = mul(IN.posW, lightViewProj);
float currDepth = posLight.z/posLight.w;
if (currDepth >= prevDepth)
return float4(0.f, 0.f, 0.f, 1.f);
else
return tex2D(TexS, IN.texCo);
}
//Effect info
technique ShadowMap
{
pass p0
{
vertexShader = compile vs_2_0 DepthVS();
pixelShader = compile ps_2_0 DepthPS();
}
pass p1
{
vertexShader = compile vs_2_0 DrawVS();
pixelShader = compile ps_2_0 DrawPS();
}
}
I have verified that all my matrices are correct and the depth map is being drawn correctly. I rewrote all of the C++ that handles this code and made it neater, and I am still getting the same problem. I am not currently blending the shadows, just drawing them flat black until I can get them to draw correctly. The light uses an orthographic projection because it is a directional light. I don't have enough reputation points to embed images, but here are the URLs: Depth Map - http://i.imgur.com/T2nITid.png
Program output - http://i.imgur.com/ae3U3N0.png
Any help or insight would be greatly appreciated as it's for a school project. Thanks
The value you get from the depth buffer is a float value in the range 0 to 1. As you probably already know, floating-point values have limited precision, and the more precision the depth comparison needs, the more likely you are to end up with artifacts.
There are some things you can do. The easiest is to bring the near and far Z values of the projection matrix closer together, so the depth buffer doesn't have to spread its precision over such a large range to represent how far away an object is. I usually find that a range of about 1 to 200 gives a fairly accurate result.
Another easy thing you can do is increase the size of the texture you are drawing to; more pixels means the scene is represented more accurately.
There are also a lot of more complex things game engines do to reduce shadow-mapping artifacts, but you could write a book about that; if you really want to get into it, I would recommend you start with the blog.
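As a small illustration of absorbing that precision error in the comparison itself, a depth bias can be added on top of the existing DrawPS (this is my own sketch, not something spelled out above, and the bias value is an assumption that needs tuning per scene):
// Inside DrawPS: compare with a small bias so tiny precision differences
// between the stored depth and the recomputed depth don't self-shadow.
float bias = 0.0005f;
float4 posLight = mul(IN.posW, lightViewProj);
float currDepth = posLight.z / posLight.w;
if (currDepth - bias >= prevDepth)
    return float4(0.f, 0.f, 0.f, 1.f);
else
    return tex2D(TexS, IN.texCo);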
I generate a simple 2D grid with a triangle strip representing a water surface. The first generated vertex has position [0,0] and the last one has [1,1]. For my water simulation I need to store the current positions of the vertices to a texture and then sample these values from the texture in the next frame to get the previous state of the water surface.
So I created a texture with one pixel per vertex. For example, if I have a 10x10 vertex grid, I use a texture with 10x10 pixels (one pixel for one vertex's data), and I set this texture as a render target to render all the vertex data into it.
According to this: MSDN Coordinate Systems, if I use the current positions of the vertices in the grid (bottom-left at [0;0], top-right at [1;1]), the rendered texture looks like this:
So I need to do some conversion to NDC. I convert it in a vertex shader like this:
[vertex.x * 2 - 1; vertex.y * 2 - 1]
Consider this 3x3 grid:
Now the grid is stretched over the whole texture. Texture coordinates are different from NDC, and apparently I can use the original coordinates of the grid (before the conversion) to sample values from the texture and get the previous values (positions) of the vertices.
Here is a sample of my vertex/pixel shader code:
This vertex shader converts the coordinates and sends them to the pixel shader with the SV_POSITION semantic (which describes the pixel location).
struct VertexInput
{
float4 pos : POSITION;
float2 tex : TEXCOORD;
};
struct VertexOutput
{
float4 pos : SV_POSITION;
float2 tex : TEXCOORD;
};
// converts coordinates from the [0,1] range to NDC [-1,1]
float2 toNDC(float2 px)
{
return float2(px.x * 2 - 1, px.y * 2 - 1);
}
VertexOutput main( VertexInput input )
{
VertexOutput output;
float2 ndc = toNDC(float2(input.pos.x, input.pos.z));
output.pos = float4(ndc, 1, 1);
output.tex = float2(input.pos.x, input.pos.z);
return output;
}
And here's the pixel shader, which writes the values from the vertex shader at the pixel location defined by SV_POSITION.
struct PixelInput
{
float4 pos : SV_POSITION;
float2 tex : TEXCOORD;
};
float4 main(PixelInput input) : SV_TARGET
{
return float4(input.tex.x, input.tex.y, 0, 1);
}
And we're finally getting to my problem! I use the graphics debugger in Visual Studio 2012, which allows me to look at the rendered texture and its values. I would expect the value at pixel location [0,1] (in the texel coordinate system) to be [0,0] (or [0,0,0,1] to be precise, for the RGBA format), but it seems that the final pixel's value is interpolated between 3 vertices, so I get a wrong value for that vertex.
Screenshot from VS graphics debugger:
Rendered 3x3 texture ([0;1] location in texel coordinate system):
Values from vertex and pixel shader:
How to render the exact value from vertex shader to texture for a given pixel?
I am pretty new to computer graphics and Direct3D 11, so please excuse my deficiencies.
I struggled for some time to add a fog effect in my XNA games.
I work with a custom shader effect in a file (.fx).
The "PixelShaderFunction" works without error. But the problem is that all my terrain is colored the same way.
I think the problem comes from the calculation of the distance between the camera and the model.
float distance = length(input.TextureCoordinate - cameraPos);
Here is my complete code with "PixelShaderFunction"
// Both techniques share this same pixel shader.
float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0
{
float distance = length(input.TextureCoordinate - cameraPos);
float l = saturate((distance-fogNear)/(fogFar-fogNear));
return tex2D(Sampler, input.TextureCoordinate) * lerp(input.Color, fogColor, l);
}
If your input.TextureCoordinate really represents texture coordinates for the sampler, then the way you are trying to calculate the distance is wrong.
You can change the body of your PixelShaderFunction as follows:
float dist = distance(cameraPos, input.Position3D);
float l = saturate((dist - fogNear) / (fogFar - fogNear));
return lerp(tex2D(Sampler, input.TextureCoordinate), fogColor, l);
Add the following to your VertexShaderOutput declaration:
float4 Position3D : TEXCOORD1;
In your vertex shader, populate Position3D with the vertex position multiplied by the world matrix:
output.Position3D = mul(input.pos, matWorld);
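Pulled together, the changes would look roughly like this (a sketch only; matWorld, cameraPos, fogNear, fogFar, fogColor, and Sampler are assumed to already exist in your .fx file, and your VertexShaderOutput may have different fields):
struct VertexShaderOutput
{
    float4 Position          : POSITION0;
    float2 TextureCoordinate : TEXCOORD0;
    float4 Color             : COLOR0;
    float4 Position3D        : TEXCOORD1; // world-space position for the fog distance
};
// In the vertex shader, alongside the usual transforms:
//   output.Position3D = mul(input.pos, matWorld);
float4 PixelShaderFunction(VertexShaderOutput input) : COLOR0
{
    float dist = distance(cameraPos, input.Position3D);
    float l = saturate((dist - fogNear) / (fogFar - fogNear));
    return lerp(tex2D(Sampler, input.TextureCoordinate), fogColor, l);
}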
I have mapped some values into my texture's alpha channel. Actually, I use my texture as a 2D array. What I need is a way to read the alpha value of the map at a position, e.g. [4][5] (representing x and y).
I need the returned value to be available in my pixel shader. Is there any way to do this?
I use DX9.
Thx in advance!
Do you want to use the texel at [4][5] (x,y) for your entire pixel shader?
If that is your question, you could just precalculate that coordinate in the vertex shader and pass it along with every vertex, and then sample with those UV coords. This way it won't get interpolated (or it will, but it will only have one value to interpolate with).
Other than that, you probably have to specify a bit more about what you are trying to achieve.
What are you using it for? When will it occur? What sort of mesh are you using it for?
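A minimal sketch of the precalculate-in-the-vertex-shader suggestion above (the texSize, texelIndex, and matWorldViewProj names are my own assumptions, not from the original code; the +0.5 nudges the lookup to the texel center, which matters on D3D9):
// Assumed: texSize = float2(width, height) of the lookup texture and
// texelIndex = the texel you want (e.g. float2(4, 5)), both set from the app.
float2 texSize;
float2 texelIndex;
float4x4 matWorldViewProj;
sampler mySampler;

struct VSOut
{
    float4 pos     : POSITION;
    float2 fixedUV : TEXCOORD0; // identical on every vertex, so interpolation changes nothing
};

VSOut vsFixedLookup(float4 pos : POSITION)
{
    VSOut o;
    o.pos = mul(pos, matWorldViewProj);
    // +0.5 moves the sample to the texel center (D3D9 convention)
    o.fixedUV = (texelIndex + 0.5) / texSize;
    return o;
}

float4 psFixedLookup(float2 fixedUV : TEXCOORD0) : COLOR
{
    // The alpha value the question asks about:
    float a = tex2D(mySampler, fixedUV).a;
    return float4(a, a, a, 1);
}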
Texture2DArray is a shader model 4 thing. I don't believe you're using it on DX9.
If you are using shader model 4, then just use Texture2D's Load function, e.g. Load(int3(4, 5, 0)), where the last component is the mip level.
Otherwise, for SM 1/2/3, you can put the numbers you want, e.g. 4.0 and 5.0, into your vertex as normal texcoord data, and then have the pixel shader scale them by the size of the texture.
struct VertexInput {
float4 pos : POSITION;
float2 uv : TEXCOORD0; //0.0, 1.0, 2.0, 3.0, 4.0 etc
};
struct PixelInput {
float4 position : POSITION;
float2 uv : TEXCOORD0;
};
PixelInput vsTex(VertexInput vtx)
{
PixelInput output;
float4 pos = vtx.pos;
output.position = mul(pos, MatWorld);
output.position = mul(output.position, MatView);
output.position = mul(output.position, MatProj);
output.uv = vtx.uv;
return output;
}
float4 psTex(PixelInput input) : COLOR
{
//Scale the integer texel indices down to [0,1] texture coordinates
float2 coords = input.uv / float2(TEX_WIDTH, TEX_HEIGHT);
return tex2D(mySampler, coords);
}
Here TEX_WIDTH and TEX_HEIGHT are passed in via the 'defines' parameter of D3DXCompileShader.
Or: just compute 4.0f/tex_width and 5.0f/tex_height in software and pass those numbers (which will be between 0.0f and 1.0f) through to the pixel shader.