HLSL isn't interpolating values? (DirectX)

I have a very simple VS and PS that are supposed to draw a screen-sized quad. However, my test that colorizes based on the texture coordinates shows a solid color across the entire rect (screen) instead of colors fading along the x and y directions. Here is my shader code:
struct FRAGMENT
{
    float4 position : SV_POSITION;
    float3 tex_coord : TEXCOORD0;
};

FRAGMENT vs_main(uint vertex_id : SV_VertexID)
{
    FRAGMENT OUT;
    OUT.tex_coord = float3(0.0f, 0.0f, 0.0f);
    if (vertex_id == 1) OUT.tex_coord.x = 1.0f;
    else if (vertex_id == 2) OUT.tex_coord.y = 1.0f;
    else if (vertex_id == 3) OUT.tex_coord.xy = float2(1.0f, 1.0f);
    OUT.position = float4(OUT.tex_coord.x * 2.0f - 1.0f, OUT.tex_coord.y * 2.0f - 1.0f, 0.0f, 0.0f);
    return OUT;
}

float4 ps_main(FRAGMENT IN) : SV_TARGET
{
    return float4(IN.tex_coord, 1.0f);
}
The quad draws just fine, and I can color it manually, but the value for IN.tex_coord is solid across the entire screen. Does anyone know what is wrong?
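A likely culprit: the w component of the output position is 0.0f, and the rasterizer performs perspective-correct interpolation by dividing every attribute by w, so a zero w degenerates the interpolated values. A minimal sketch of the suspected fix, leaving the rest of the shader unchanged:

// Suspected fix: w must be non-zero for the perspective divide and
// perspective-correct attribute interpolation to be well defined.
OUT.position = float4(OUT.tex_coord.x * 2.0f - 1.0f,
                      OUT.tex_coord.y * 2.0f - 1.0f,
                      0.0f,   // z: on the near plane
                      1.0f);  // w: was 0.0f in the code above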

Related

Shadow Mapping (DirectX 12): Shadow map does not render properly

Sorry, this post is auto-translated.
The part I'm stuck on right now is shadow mapping.
The player's position is exactly (2000, 0, 2000), and directly overhead is the light source that serves as the origin of the shadow-mapping camera (it is a directional light).
Question 1
You need a shadow map to apply shadow mapping, don't you?
The shadow map isn't working right now, but let's set that aside.
Even within the space covered by the shadow-mapping camera's viewport, rendering to the shadow map does not work properly.
Since the depth buffer is cleared to 1.0f, everything should be judged to be outside the shadows.
The current shadow-map camera position is (2000, 100, 2000) and its focus position is (0, 0, 0),
yet the red square area, which should be judged to be outside the shadow, is not; only the green square area is judged to be outside the shadow.
For reference, there is nothing blocking the light inside the viewport. The shadow you see in the screenshot lies just outside the viewport, so it comes from a 0.0f result when sampling the shadow map.
Question 2
This is the fundamental problem: nothing renders to the shadow map during the shadow pass.
Once this is solved I'll work out the first question somehow, but the render itself doesn't happen, so there is no shadow of any object, and the cause is unknown.
The source shows that the ShadowShader class renders the shadow map (ShadowPassRender).
What affects this is the view-projection matrix created from the light source (the UpdateShaderVariables portion of the ShadowShader class).
This is what I'm most suspicious of, but the way I build it is no different from the example, so I don't know where it's wrong.
The lighting itself is Blinn-Phong; I took the book's example and adapted it for my project.
Code that generates the view-projection matrix for shadow mapping:
void CShadowShader::UpdateShaderVariables(ID3D12GraphicsCommandList* pd3dCommandList, XMFLOAT3 xmf3TargetPos)
{
    XMFLOAT3 TargetPos = {950, 0, 950};
    XMMATRIX lightView = XMMatrixLookAtLH(XMLoadFloat3(&m_pLight->GetPosition()), XMLoadFloat3(&TargetPos), XMLoadFloat3(&m_pLight->GetUp()));

    // Transform bounding sphere to light space.
    XMFLOAT3 xmf3CenterLS;
    XMStoreFloat3(&xmf3CenterLS, XMVector3TransformCoord(XMLoadFloat3(&TargetPos), lightView));

    // Ortho frustum in light space encloses scene.
    float l = xmf3CenterLS.x - 3000;
    float b = xmf3CenterLS.y - 3000;
    float n = xmf3CenterLS.z - 3000;
    float r = xmf3CenterLS.x + 3000;
    float t = xmf3CenterLS.y + 3000;
    float f = xmf3CenterLS.z + 3000;
    XMMATRIX lightProj = XMMatrixOrthographicOffCenterLH(l, r, b, t, n, f);

    // Transform NDC space [-1,+1]^2 to texture space [0,1]^2.
    XMMATRIX T(
        0.5f, 0.0f, 0.0f, 0.0f,
        0.0f, -0.5f, 0.0f, 0.0f,
        0.0f, 0.0f, 1.0f, 0.0f,
        0.5f, 0.5f, 0.0f, 1.0f);

    XMMATRIX S = lightView * lightProj * T;
    XMFLOAT4X4 m_xmf4x4ShadowTransform;
    XMStoreFloat4x4(&m_xmf4x4ShadowTransform, S);

    CB_SHADOW cbShadow{ m_xmf4x4ShadowTransform, m_pLight->GetPosition() };
    m_ubShadowCB->CopyData(0, cbShadow);
    pd3dCommandList->SetGraphicsRootConstantBufferView(3, m_ubShadowCB->Resource()->GetGPUVirtualAddress());
}
Code that creates the PSO for the shadow pass:
void CShadowShader::CreateShader(ID3D12Device* pd3dDevice, ID3D12RootSignature* pd3dGraphicsRootSignature)
{
    m_ubShadowCB = new UploadBuffer<CB_SHADOW>(pd3dDevice, 1, true);

    ID3DBlob* pd3dVertexShaderBlob = NULL, * pd3dPixelShaderBlob = NULL;

    D3D12_GRAPHICS_PIPELINE_STATE_DESC d3dPipelineStateDesc;
    ::ZeroMemory(&d3dPipelineStateDesc, sizeof(D3D12_GRAPHICS_PIPELINE_STATE_DESC));
    d3dPipelineStateDesc.pRootSignature = pd3dGraphicsRootSignature;
    d3dPipelineStateDesc.VS = CreateVertexShader(&pd3dVertexShaderBlob);
    d3dPipelineStateDesc.PS = CreatePixelShader(&pd3dPixelShaderBlob);
    d3dPipelineStateDesc.RasterizerState = CreateRasterizerState();
    d3dPipelineStateDesc.RasterizerState.DepthBias = 10000.0f;
    d3dPipelineStateDesc.RasterizerState.DepthBiasClamp = 0.0f;
    d3dPipelineStateDesc.RasterizerState.SlopeScaledDepthBias = 1.0f;
    d3dPipelineStateDesc.BlendState = CreateBlendState();
    d3dPipelineStateDesc.DepthStencilState = CreateDepthStencilState();
    d3dPipelineStateDesc.InputLayout = CreateInputLayout();
    d3dPipelineStateDesc.SampleMask = UINT_MAX;
    d3dPipelineStateDesc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
    d3dPipelineStateDesc.NumRenderTargets = 0;
    d3dPipelineStateDesc.RTVFormats[0] = DXGI_FORMAT_UNKNOWN;
    d3dPipelineStateDesc.DSVFormat = DXGI_FORMAT_D24_UNORM_S8_UINT;
    d3dPipelineStateDesc.SampleDesc.Count = 1;
    d3dPipelineStateDesc.Flags = D3D12_PIPELINE_STATE_FLAG_NONE;

    auto tmp = pd3dDevice->CreateGraphicsPipelineState(&d3dPipelineStateDesc, __uuidof(ID3D12PipelineState), (void**)&m_pd3dPipelineState);

    if (pd3dVertexShaderBlob)
        pd3dVertexShaderBlob->Release();
    if (pd3dPixelShaderBlob)
        pd3dPixelShaderBlob->Release();
    if (d3dPipelineStateDesc.InputLayout.pInputElementDescs)
        delete[] d3dPipelineStateDesc.InputLayout.pInputElementDescs;
}
Shader for the shadow pass:
#include "Common.hlsli"

struct VertexIn
{
    float3 PosL : POSITION;
};

struct VertexOut
{
    float4 PosH : SV_POSITION;
};

VertexOut VS(VertexIn vin)
{
    VertexOut vout = (VertexOut) 0.0f;
    MATERIAL matData = material;

    // Transform to world space.
    float4 posW = mul(float4(vin.PosL, 1.0f), gmtxWorld);

    // Transform to homogeneous clip space.
    vout.PosH = mul(posW, gmtxShadowTransform);
    return vout;
}

// This is only used for alpha cut out geometry, so that shadows
// show up correctly. Geometry that does not need to sample a
// texture can use a NULL pixel shader for depth pass.
void PS(VertexOut pin)
{
    // Fetch the material data.
    MATERIAL matData = material;
    float4 diffuseAlbedo = matData.DiffuseAlbedo;
}
Default.hlsl for the render pass:
#include "Common.hlsli"

// Declare the structure for the vertex shader input.
struct VS_DEFAULT_INPUT
{
    float3 position : POSITION;
    float3 normal : NORMAL;
};

// Declare the structure for the vertex shader output (the pixel shader input).
struct VS_DEFAULT_OUTPUT
{
    float4 position : SV_POSITION;
    float4 position_shadow : POSITION0;
    float3 position_w : POSITION1;
    float3 normal : NORMAL;
};

VS_DEFAULT_OUTPUT VS_Default(VS_DEFAULT_INPUT input)
{
    VS_DEFAULT_OUTPUT output;
    output.position = mul(mul(float4(input.position, 1.0f), gmtxWorld), gmtxViewProj);
    output.position_w = mul(float4(input.position, 1.0f), gmtxWorld).xyz;
    output.normal = normalize(mul(float4(input.normal, 0.0f), gmtxWorld).xyz);
    output.position_shadow = mul(float4(output.position_w, 1.0f), gmtxShadowTransform);
    return (output);
}

float4 PS_Default(VS_DEFAULT_OUTPUT input) : SV_TARGET
{
    float4 cColor = float4(0.0f, 0.0f, 0.0f, 0.0f);
    cColor += material.AmbientLight * material.DiffuseAlbedo;

    float3 toEyeW = normalize(cameraPos - input.position_w);

    float3 shadowFactor = float3(1.0f, 1.0f, 1.0f);
    shadowFactor[0] = CalcShadowFactor(input.position_shadow);

    for (int i = 0; i < nLights; i++)
    {
        cColor += ComputeLighting(light[i], input.position_w, input.normal, toEyeW, shadowFactor[0]);
    }

    // Add in specular reflections.
    float3 r = reflect(-toEyeW, input.normal);
    float4 reflectionColor = { 1.0f, 1.0f, 1.0f, 0.0f };
    float3 fresnelFactor = SchlickFresnel(material.FresnelR0, input.normal, r);
    cColor.rgb += material.Shininess * fresnelFactor * reflectionColor.rgb;

    // Common convention to take alpha from diffuse albedo.
    cColor.a = material.DiffuseAlbedo.a;
    return (cColor);
}
GitHub Link: https://github.com/kcjsend2/3DGP-BulletPhysics
The Bullet physics engine is included, so you will need to obtain Bullet and hook it up to the project in order to build.
See Chapter 20, Shadow Mapping, in Frank Luna's Introduction to 3D Game Programming with DirectX 12 for the example I followed.
My framework is independent of the book's, so it differs quite a bit from the example.
I fixed it. It was simply that HLSL shaders and DirectX use different matrix conventions:
HLSL defaults to column-major matrices, while DirectXMath uses row-major matrices.
I had also gotten the matrix multiplication order wrong.
The wrong code is the first listing in the question; this is the fixed code:
XMVECTOR lightPos = XMLoadFloat3(&m_pLight->GetPosition());
XMVECTOR TargetPos = XMLoadFloat3(&xmf3TargetPos);
XMVECTOR lightUp = XMLoadFloat3(&m_pLight->GetUp());
XMMATRIX lightView = XMMatrixLookAtLH(lightPos, TargetPos, lightUp);
/*XMVECTOR lightLook = Vector3::Normalize(lightPos - TargetPos);*/

// Transform bounding sphere to light space.
XMFLOAT3 xmf3CenterLS;
XMStoreFloat3(&xmf3CenterLS, XMVector3TransformCoord(XMLoadFloat3(&xmf3TargetPos), lightView));

// Ortho frustum in light space encloses scene.
float l = xmf3CenterLS.x - 800;
float b = xmf3CenterLS.y - 800;
float n = xmf3CenterLS.z - 800;
float r = xmf3CenterLS.x + 800;
float t = xmf3CenterLS.y + 800;
float f = xmf3CenterLS.z + 800;
XMMATRIX lightProj = XMMatrixOrthographicOffCenterLH(l, r, b, t, n, f);

// Transform NDC space [-1,+1]^2 to texture space [0,1]^2.
XMMATRIX T(
    0.5f, 0.0f, 0.0f, 0.0f,
    0.0f, -0.5f, 0.0f, 0.0f,
    0.0f, 0.0f, 1.0f, 0.0f,
    0.5f, 0.5f, 0.0f, 1.0f);

XMMATRIX S = lightView * lightProj;
XMFLOAT4X4 xmf4x4LightViewProj;
XMStoreFloat4x4(&xmf4x4LightViewProj, XMMatrixTranspose(S));

S = S * T;
XMFLOAT4X4 xmf4x4ShadowTransform;
XMStoreFloat4x4(&xmf4x4ShadowTransform, XMMatrixTranspose(S));

CB_SHADOW cbShadow{ xmf4x4ShadowTransform, xmf4x4LightViewProj, m_pLight->GetPosition() };
m_ubShadowCB->CopyData(0, cbShadow);
pd3dCommandList->SetGraphicsRootConstantBufferView(3, m_ubShadowCB->Resource()->GetGPUVirtualAddress());
Exactly the same issue I was facing when I wasn't transposing the matrices I sent to the shader; once I transposed them, it worked. :)
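To make the convention mismatch concrete, here is a minimal sketch (reusing the names from the code above) of the two usual fixes, assuming the shader multiplies with the row-vector convention mul(v, M):

// DirectXMath composes matrices for row vectors (v * M), but HLSL
// constant buffers default to column-major packing, so transpose
// each matrix on the CPU before uploading it:
XMMATRIX S = lightView * lightProj * T;
XMFLOAT4X4 xmf4x4ShadowTransform;
XMStoreFloat4x4(&xmf4x4ShadowTransform, XMMatrixTranspose(S));

// Alternatively, skip the transpose and declare the matrix in HLSL as
//   row_major float4x4 gmtxShadowTransform;
// or compile the shaders with /Zpr to make row-major packing the default.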

DirectX 9 Normal Mapping Pixel Shader

I have a question about normal mapping in a DirectX 9 shader.
Currently, my terrain shader's output for normal map + diffuse color only results in this image, which looks good to me.
If I use an empty normal map image like this one, my shader output for normal, diffuse, and color map looks like this.
But if I use one that includes a color map, I get a really strange result.
Does anyone have an idea what could cause this issue?
Here are some snippets.
float4 PS_TERRAIN(VSTERRAIN_OUTPUT In) : COLOR0
{
    float4 fDiffuseColor;
    float lightIntensity;

    float3 bumpMap = 2.0f * tex2D(Samp_Bump, In.Tex.xy).xyz - 1.0f;
    float3 bumpNormal = (bumpMap.x * In.Tangent) + (bumpMap.y * In.Bitangent) + (bumpMap.z * In.Normal);
    bumpNormal = normalize(bumpNormal);

    // Directional light test (hardcoded).
    float3 lightDirection = float3(0.0f, -0.5f, -0.2f);
    float3 lightDir = -lightDirection;

    // Bump
    lightIntensity = saturate(dot(bumpNormal, lightDir));

    // We are using a lightmap to do our alpha calculation for the given pixel.
    float4 LightMaptest = tex2D(Samp_Lightmap, In.Tex.zw) * 2.0f;
    fDiffuseColor.a = LightMaptest.a;
    if (!bAlpha)
        fDiffuseColor.a = 1.0;

    // Sample the pixel color from the texture using the sampler at this texture coordinate location.
    float4 textureColor = tex2D(Samp_Diffuse, In.Tex.xy);

    // Combine the color map value into the texture color.
    textureColor = saturate(textureColor * LightMaptest);
    textureColor.a = LightMaptest.a;

    fDiffuseColor.rgb = saturate(lightIntensity * I_d).rgb;
    fDiffuseColor = fDiffuseColor * textureColor; // If I enable this line it goes crazy
    return fDiffuseColor;
}

Fullscreen quad in pixel shader has screen coordinates?

I have a 640x480 render target (it's the main backbuffer).
I'm passing a fullscreen quad to the vertex shader; the quad has coordinates between [-1, 1] for both X and Y, which means I only pass the coordinates through to the pixel shader with no calculation:
struct VSInput
{
    float4 Position : SV_POSITION0;
};

struct VSOutput
{
    float4 Position : SV_POSITION0;
};

VSOutput VS(VSInput input)
{
    VSOutput output = (VSOutput)0;
    output.Position = input.Position;
    return output;
}
But in the pixel shader, the x and y coordinates of each fragment are in screen space (0 < x < 640 and 0 < y < 480).
Why is that? I always thought the coordinates would get interpolated on their way to the pixel shader and lie between -1 and 1, all the more so because I'm passing coordinates between -1 and 1 hardcoded in the vertex shader!
But the truth is, this pixel shader works:
float x = input.Position.x;
if (x < 200)
    output.Diffuse = float4(1.0, 0.0, 0.0, 1.0);
else if (x > 400)
    output.Diffuse = float4(0.0, 0.0, 1.0, 1.0);
else
    output.Diffuse = float4(0.0, 1.0, 0.0, 1.0);
return output;
It outputs three color stripes in my rendering window, but if I change the values from screen space (the 200 and 400 in the code above) to [-1, 1] space and use something like if (x < 0.5), it won't work.
I already tried
float x = input.Position.x / input.Position.w;
because I read somewhere that this would give me values between -1 and 1, but it doesn't work either.
Thanks in advance.
From MSDN, on the semantics page about SV_Position:
When used in a pixel shader, SV_Position describes the pixel location.
So you are seeing expected behavior.
The best solution is to pass screen-space coordinates as an additional parameter. I like to use this "full-screen triangle" vertex shader:
struct VSQuadOut
{
    float4 position : SV_Position;
    float2 uv : TexCoord;
};

// Outputs a full-screen triangle with screen-space coordinates.
// Input: three empty vertices.
VSQuadOut VSQuad(uint vertexID : SV_VertexID)
{
    VSQuadOut result;
    result.uv = float2((vertexID << 1) & 2, vertexID & 2);
    result.position = float4(result.uv * float2(2.0f, -2.0f) + float2(-1.0f, 1.0f), 0.0f, 1.0f);
    return result;
}
(Original source: Full screen quad without vertex buffer?)
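Alternatively, if you would rather not add an extra interpolant, you can map SV_Position back into [0, 1] yourself by dividing by the render-target size. A minimal sketch, hard-coding the question's 640x480 target for illustration:

float4 PS(VSOutput input) : SV_Target
{
    // SV_Position arrives in pixel units at pixel centers (0.5, 1.5, ...),
    // so dividing by the render-target size maps it into [0, 1].
    float2 uv = input.Position.xy / float2(640.0f, 480.0f);
    if (uv.x < 0.3125f)        // 200 / 640
        return float4(1.0, 0.0, 0.0, 1.0);
    else if (uv.x > 0.625f)    // 400 / 640
        return float4(0.0, 0.0, 1.0, 1.0);
    return float4(0.0, 1.0, 0.0, 1.0);
}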

XNA 4.0 sprite + MRT + pixel shader

I'm trying to combine two render targets, color and normal, for diffuse lighting, and to render the result on screen. The idea is to use a sprite with an effect containing only a pixel shader to combine the render targets from textures.
XNA code:
GraphicsDevice.SetRenderTarget(null);
GraphicsDevice.Clear(ClearOptions.Target | ClearOptions.DepthBuffer, Color.Black, 1.0f, 0);
effect.CurrentTechnique = effect.Techniques["show_buffer"];
effect.Parameters["normalTex"].SetValue(normalRendertarget);
effect.Parameters["colorTex"].SetValue(colorRendertarget);
effect.Parameters["AmbientIntensity"].SetValue(ambientIntesity);
effect.Parameters["LightDirection"].SetValue(lightDirection);
effect.Parameters["DiffuseIntensity"].SetValue(diffuseIntensity);
spriteBatch.Begin(0, BlendState.Opaque, null, null, null,effect);
spriteBatch.Draw(normalRT, Vector2.Zero, Color.White);
spriteBatch.End();
For some reason the render target used in spriteBatch.Draw() influences the result.
Pixel shader:
void Tex_PixelShader(float2 texCoord : TEXCOORD0, out float4 color : COLOR0)
{
    float4 normal = tex2D(normalTexSampler, texCoord);

    // Transform normal back into [-1,1] range.
    normal.rgb = (normal.rgb * 2) - 1;

    float4 baseColor = tex2D(colorTexSampler, texCoord);
    float3 lightDirectionNorm = normalize(LightDirection);
    float diffuse = saturate(dot(-lightDirectionNorm, normal.rgb));

    // Only works with normalRT in spriteBatch.Draw();
    // colorRT in spriteBatch.Draw() gives colorRT but darker as result.
    color = float4(baseColor.rgb * (AmbientIntensity + diffuse * DiffuseIntensity), 1.0f);

    // Only works with colorRT in spriteBatch.Draw();
    // normalRT in spriteBatch.Draw() gives normalRT as result.
    //color = tex2D(colorTexSampler, texCoord);

    // Only works with normalRT;
    // colorRT in spriteBatch.Draw() gives colorRT as result.
    //color = tex2D(normalTexSampler, texCoord);

    // Works with any render target in spriteBatch.Draw().
    //color = float4(0.0f, 1.0f, 0.0f, 1.0f);
}
The alpha value in both render targets is always 1. Adding a vertex shader to the effect results in black. Drawing one render target without any effect via spriteBatch.Draw() shows that the content of each render target is fine. I can't make sense of this. Any ideas?
Setting the textures with GraphicsDevice.Textures[1] = tex; instead of effect.Parameters["tex"] = tex; worked; presumably SpriteBatch binds the texture passed to Draw() to sampler 0 and was clobbering whatever the effect had set there. Thanks Andrew.
I changed the XNA code to:
GraphicsDevice.SetRenderTarget(null);
GraphicsDevice.Clear(ClearOptions.Target | ClearOptions.DepthBuffer, Color.Black, 1.0f, 0);
effect.CurrentTechnique = effect.Techniques["show_buffer"];
GraphicsDevice.Textures[1] = normalRT; //changed
GraphicsDevice.Textures[2] = colorRT; //changed
effect.Parameters["AmbientIntensity"].SetValue(ambientIntesity);
effect.Parameters["LightDirection"].SetValue(lightDirection);
effect.Parameters["DiffuseIntensity"].SetValue(diffuseIntensity);
spriteBatch.Begin(0, BlendState.Opaque, null, null, null,effect);
spriteBatch.Draw((Texture2D)colorRT, Vector2.Zero, Color.White);
spriteBatch.End();
And the shader code to:
sampler normalSampler : register(s1); // added
sampler colorSampler : register(s2);  // added

void Tex_PixelShader(float2 texCoord : TEXCOORD0, out float4 color : COLOR0)
{
    float4 normal = tex2D(normalSampler, texCoord); // changed
    normal.rgb = (normal.rgb * 2) - 1;
    float4 baseColor = tex2D(colorSampler, texCoord); // changed
    float3 lightDirectionNorm = normalize(LightDirection);
    float diffuse = saturate(dot(-lightDirectionNorm, normal.rgb));
    color = float4(baseColor.rgb * (AmbientIntensity + diffuse * DiffuseIntensity), 1.0f);
}

Applying Perspective Transformation to Geometry and Passing Constant Buffers to Shader

I am attempting to apply a perspective transformation in DirectX 11 to a rendered cube centered at the origin (0, 0, 0), with edges that span 1.0 unit (-0.5 to 0.5). However, nothing is rendering. I have tried the following:
shaders.hlsl
cbuffer VSHADER_CB
{
    matrix mWorld;
    matrix mView;
    matrix mProj;
};

struct VOut
{
    float4 position : SV_POSITION;
    float4 color : COLOR;
};

VOut VShader(float4 position : POSITION, float4 color : COLOR)
{
    VOut output;
    output.position = mul(position, mWorld);
    output.position = mul(output.position, mView);
    output.position = mul(output.position, mProj);
    output.color = color;
    return output;
}
...
void InitConstantBuffer()
...
D3DXVECTOR3 position(0.0f, 0.0f, -5.0f);
D3DXVECTOR3 lookAt(0.0f, 0.0f, 0.0f);
D3DXVECTOR3 up(0.0f, 1.0f, 0.0f);
D3DXMatrixIdentity(&(cbMatrix.mWorld));
D3DXMatrixLookAtLH(&(cbMatrix.mView), &position, &lookAt, &up);
D3DXMatrixPerspectiveFovLH(&(cbMatrix.mProj), 70.0f, (FLOAT)(width / height), 1.0f, 100.0f);
D3D11_BUFFER_DESC cbd;
ZeroMemory(&cbd, sizeof(D3D11_BUFFER_DESC));
cbd.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
cbd.ByteWidth = sizeof(cbMatrix);
cbd.Usage = D3D11_USAGE_DEFAULT;
D3D11_SUBRESOURCE_DATA cbdInitData;
ZeroMemory(&cbdInitData, sizeof(D3D11_SUBRESOURCE_DATA));
cbdInitData.pSysMem = &cbMatrix;
mD3DDevice->CreateBuffer(&cbd, &cbdInitData, &mD3DCBuffer);
mD3DImmediateContext->VSSetConstantBuffers(0, 1, &mD3DCBuffer);
When I simply do not include any transformations (output.position = position) in the shader file, everything renders correctly and I see the front face of the cube. Is this all I need to pass constant buffers to my shader and use them fully? What am I missing here?
I figured out the answer to my own question: I needed to transpose the matrices by calling D3DXMatrixTranspose() before sending them to the shader.
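For reference, a minimal sketch of that fix applied to the code above; D3DX builds row-major matrices while HLSL constant buffers default to column-major packing, so each matrix gets transposed before the buffer is filled:

// Transpose each matrix so the HLSL side (column-major packing by
// default) interprets the data correctly.
D3DXMATRIX mWorldT, mViewT, mProjT;
D3DXMatrixTranspose(&mWorldT, &cbMatrix.mWorld);
D3DXMatrixTranspose(&mViewT, &cbMatrix.mView);
D3DXMatrixTranspose(&mProjT, &cbMatrix.mProj);
cbMatrix.mWorld = mWorldT;
cbMatrix.mView = mViewT;
cbMatrix.mProj = mProjT;
// ...then CreateBuffer() and VSSetConstantBuffers() exactly as before.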
