DX11 tessellation LOD by diameter gives incorrect tessellation values

I implemented LOD by diameter following the NVIDIA TerrainTessellation whitepaper. In the chapter "Hull Shader: Tessellation LOD" on page 7 there is a very good explanation of LOD by diameter. Here is a good quote:
For each patch edge, the shader computes the edge length and then conceptually fits a sphere around it. The sphere is projected into screen space and its screen space diameter is used to compute the tessellation factor for the edge.
Here is my hull shader:
// Globals
cbuffer TessellationBuffer // buffer needs to be 16-byte aligned!
{
    float4 cameraPosition;
    float  tessellatedTriSize;
    float3 padding;
    matrix worldMatrix;
    matrix projectionMatrix;
};
// Typedefs
struct HullInputType
{
    float4 position : SV_POSITION;
    float2 tex      : TEXCOORD0;
    float3 normal   : NORMAL;
};
struct ConstantOutputType
{
    float edges[3] : SV_TessFactor;
    float inside   : SV_InsideTessFactor;
};
struct HullOutputType
{
    float4 position : SV_POSITION;
    float2 tex      : TEXCOORD0;
    float3 normal   : NORMAL;
};
// Rounding function
float roundTo2Decimals(float value)
{
    value *= 100;
    value = round(value);
    value *= 0.01;
    return value;
}
float calculateLOD(float4 patch_zero_pos, float4 patch_one_pos) // e.g. (1,3,1,1); (3,3,0,1)
{
    float diameter = 0.0f;
    float4 radiusPos;
    float4 patchDirection;
    // Calculate the distance between the patch corners and fit a sphere around the edge.
    diameter = distance(patch_zero_pos, patch_one_pos); // 2.23607
    float radius = diameter / 2; // 1.118035
    patchDirection = normalize(patch_one_pos - patch_zero_pos); // (0.894,0,-0.447,0) direction from base edge_zero
    // Calculate the position of radiusPos (center of the sphere) in the world.
    radiusPos = patch_zero_pos + (patchDirection * radius); // (2,3,0.5,1)
    radiusPos = mul(radiusPos, worldMatrix);
    // Get the perpendicular points of the sphere relative to the camera.
    float4 camDirection;
    // Direction from the camera to the sphere center.
    camDirection = normalize(radiusPos - cameraPosition); // (0.128,0,0.99,0)
    // Calculate an orthonormal basis (sUp, sDown) for the vector camDirection.
    // Find the smallest component of camDirection and set it to 0, swap the two remaining
    // components, and negate one of them to find sUp_, which can then be used to find sDown.
    float4 sUp_;
    float4 sUp;
    float4 sDown;
    float4 sDownAbs;
    sDownAbs = abs(camDirection); // (0.128, 0, 0.99, 0)
    if (sDownAbs.y < sDownAbs.x && sDownAbs.y < sDownAbs.z) { // 0.99, 0, 0.128
        sUp_.x = -camDirection.z;
        sUp_.y = 0.0f;
        sUp_.z = camDirection.x;
        sUp_.w = camDirection.w;
    } else if (sDownAbs.z < sDownAbs.x && sDownAbs.z < sDownAbs.y) {
        sUp_.x = -camDirection.y;
        sUp_.y = camDirection.x;
        sUp_.z = 0.0f;
        sUp_.w = camDirection.w;
    } else {
        sUp_.x = 0.0f;
        sUp_.y = -camDirection.z;
        sUp_.z = camDirection.y;
        sUp_.w = camDirection.w;
    }
    // simple version
    // sUp_.x = -camDirection.y;
    // sUp_.y = camDirection.x;
    // sUp_.z = camDirection.z;
    // sUp_.w = camDirection.w;
    sUp = sUp_ / length(sUp_); // = (0.99, 0, 0.128, 0) / 0.99824 = (0.991748, 0, 0.128226, 0)
    sDown = radiusPos - (sUp * radius); // (0.891191,3,0.356639,1) = (2,3,0.5,1) - (0.991748,0,0.128226,0)*1.118035
    sUp = radiusPos + (sUp * radius); // = (3.10881,3,0.643361,1)
    // Project the sphere into projection space (2D).
    float4 projectionUp = mul(sUp, projectionMatrix);
    float4 projectionDown = mul(sDown, projectionMatrix);
    // Calculate the tessellation factor for this edge according to its diameter on the screen.
    float2 sUp_2;
    sUp_2.x = projectionUp.x;
    sUp_2.y = projectionUp.y;
    float2 sDown_2;
    sDown_2.x = projectionDown.x;
    sDown_2.y = projectionDown.y;
    // Distance between the two points in 2D.
    float projSphereDiam = distance(sUp_2, sDown_2);
    // Debug
    //return tessellatedTriSize;
    //if (projSphereDiam < 2.0f)
    //    return 1.0f;
    //else if (projSphereDiam < 10.0f)
    //    return 2.0f;
    //else
    //    return 10.0f;
    return projSphereDiam * tessellatedTriSize;
}
// Patch Constant Function
// Sets/calculates any data that is constant for the entire patch.
// Invoked once per patch.
// Direction vectors have w = 0; position vectors have w = 1.
// Receives as input a patch with 3 control points; each control point is represented by the HullInputType structure.
// Patch control points may be displaced vertically, which can significantly affect the distance to the camera.
// patchId is an identifier number for the patch, generated by the Input Assembler.
ConstantOutputType ColorPatchConstantFunction(InputPatch<HullInputType, 3> inputPatch, uint patchId : SV_PrimitiveID)
{
    ConstantOutputType output;
    // ret distance(x, y) returns a distance scalar between two vectors.
    float ret, retinside;
    retinside = 0.0f;
    float4 patch_zero_pos; // (1,3,1,1)
    patch_zero_pos = float4(inputPatch[0].position.xyz, 1.0f);
    float4 patch_one_pos; // (3,3,0,1)
    patch_one_pos = float4(inputPatch[1].position.xyz, 1.0f);
    float4 patch_two_pos;
    patch_two_pos = float4(inputPatch[2].position.xyz, 1.0f);
    // Calculate the LOD by the diameter of each edge.
    ret = calculateLOD(patch_zero_pos, patch_one_pos);
    ret = roundTo2Decimals(ret); // rounding
    output.edges[0] = ret;
    retinside += ret;
    ret = calculateLOD(patch_one_pos, patch_two_pos);
    ret = roundTo2Decimals(ret); // rounding
    output.edges[1] = ret;
    retinside += ret;
    ret = calculateLOD(patch_two_pos, patch_zero_pos);
    ret = roundTo2Decimals(ret); // rounding
    output.edges[2] = ret;
    retinside += ret;
    // Set the tessellation factor for tessellating inside the triangle
    // (average of the three edge factors); see image tessellationOuterInner.
    retinside *= 0.333;
    // rounding
    retinside = roundTo2Decimals(retinside);
    output.inside = retinside;
    return output;
}
// Hull Shader
// The hull shader is called once for each output control point.
// Trivial pass-through.
[domain("tri")]
[partitioning("fractional_odd")]
[outputtopology("triangle_cw")]
[outputcontrolpoints(3)]
[patchconstantfunc("ColorPatchConstantFunction")]
HullOutputType ColorHullShader(InputPatch<HullInputType, 3> patch, uint pointId : SV_OutputControlPointID, uint patchId : SV_PrimitiveID)
{
    HullOutputType output;
    // Set the position for this control point as the output position.
    output.position = patch[pointId].position;
    // Pass the texture coordinates and normal through unchanged.
    output.tex = patch[pointId].tex;
    output.normal = patch[pointId].normal;
    return output;
}
Some graphical explanation of the code:
First, find the center between the two vertices.
Find an orthonormal basis (perpendicular to the camera direction) from the camera on the "circle".
Project sUp and sDown into projection space and measure their distance to calculate the tessellation factor.
The Problem
The tessellation worked fine. But for testing purposes I let the object rotate, so I could see whether the tessellation follows the rotation as well. Somehow I think it is not 100% correct. Look at the plane: it is rotated by (1.0f, 2.0f, 0.0f), and the lighter red shows higher tessellation factors compared to the darker red; the green areas are factors of 1.0. It should be more detailed at the top of the plane than at the bottom.
What am I missing?
Some test cases
If I remove the rotation, it looks like this:
If I remove the rotation and include this simple version of the orthonormal-basis calculation:
// simple version
sUp_.x = -camDirection.y;
sUp_.y = camDirection.x;
sUp_.z = camDirection.z;
sUp_.w = camDirection.w;
it looks like this:
Could it be a problem that I'm not using a look-up vector?
How are you doing LOD? I'm open to trying something else...

What IDE are you using? If you're using Visual Studio, you should try the Visual Studio Graphics Debugger or PIX, depending on the version of VS you have.
http://msdn.microsoft.com/en-us/library/windows/desktop/bb943994(v=vs.85).aspx

I used the world matrix instead of the view matrix. Always use the matrix that carries the camera's rotation and other transformations.
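In code terms, the sphere endpoints have to go through the full world → view → projection chain before their screen-space distance is measured. A minimal sketch of the corrected end of calculateLOD, assuming a viewMatrix is added to the constant buffer (the perspective divide and the max() clamp are extra safeguards I added, not confirmed parts of the original fix):
// Sphere center, sUp, and sDown are built in world space as before.
// The missing step: transform through the view matrix before projecting.
float4 projectionUp   = mul(mul(sUp,   viewMatrix), projectionMatrix);
float4 projectionDown = mul(mul(sDown, viewMatrix), projectionMatrix);
// Perspective divide to get normalized device coordinates.
float2 sUp_2   = projectionUp.xy   / projectionUp.w;
float2 sDown_2 = projectionDown.xy / projectionDown.w;
float projSphereDiam = distance(sUp_2, sDown_2);
// Clamp upward: an edge factor of 0 would cull the whole patch.
return max(projSphereDiam * tessellatedTriSize, 1.0f);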

Related

Directx 9 Normal Mapping Pixelshader

I have a question about normal mapping in a DirectX 9 shader.
Currently my terrain shader output for normal map + diffuse color only results in this image.
Which looks good to me.
If I use an empty normal map image like this one,
my shader output for normal, diffuse, and color map looks like this.
But if I use one including a color map, I get a really strange result.
Does anyone have an idea what could cause this issue?
Here are some snippets.
float4 PS_TERRAIN(VSTERRAIN_OUTPUT In) : COLOR0
{
    float4 fDiffuseColor;
    float lightIntensity;
    float3 bumpMap = 2.0f * tex2D( Samp_Bump, In.Tex.xy ).xyz - 1.0f;
    float3 bumpNormal = (bumpMap.x * In.Tangent) + (bumpMap.y * In.Bitangent) + (bumpMap.z * In.Normal);
    bumpNormal = normalize(bumpNormal);
    // Directional light test (hardcoded)
    float3 lightDirection = float3(0.0f, -0.5f, -0.2f);
    float3 lightDir = -lightDirection;
    // Bump
    lightIntensity = saturate(dot( bumpNormal, lightDir ));
    // We are using a lightmap to do our alpha calculation for the given pixel
    float4 LightMaptest = tex2D( Samp_Lightmap, In.Tex.zw ) * 2.0f;
    fDiffuseColor.a = LightMaptest.a;
    if( !bAlpha )
        fDiffuseColor.a = 1.0;
    // Sample the pixel color from the texture using the sampler at this texture coordinate location.
    float4 textureColor = tex2D( Samp_Diffuse, In.Tex.xy );
    // Combine the color map value into the texture color.
    textureColor = saturate(textureColor * LightMaptest);
    textureColor.a = LightMaptest.a;
    fDiffuseColor.rgb = saturate(lightIntensity * I_d).rgb;
    fDiffuseColor = fDiffuseColor * textureColor; // If I enable this line it goes crazy
    return fDiffuseColor;
}

Why sPos.z is used to get the texcoord in shadow mapping

Why is sPos.z used here to get the texcoord?
Out.shadowCrd.x = 0.5 * (sPos.z + sPos.x);
Out.shadowCrd.y = 0.5 * (sPos.z - sPos.y);
Out.shadowCrd.z = 0;
Out.shadowCrd.w = sPos.z;
It is a shader which achieves shadow mapping in "Shaders for Game Programming and Artists".
The first pass renders the depth texture in light space (the light acts as the camera, looking towards the origin).
The second pass reads the depth and calculates the shadow.
Before this code, the model has already been transformed into light space.
Then the texcoord needs to be calculated to read the depth texture.
But I can't understand the algorithm for calculating the texcoord. Why does sPos.z appear here?
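One step the snippet leaves implicit: tex2Dproj later divides the coordinates by shadowCrd.w = sPos.z, so the stored values expand to
u = 0.5 * (sPos.z + sPos.x) / sPos.z = 0.5 + 0.5 * (sPos.x / sPos.z)
v = 0.5 * (sPos.z - sPos.y) / sPos.z = 0.5 - 0.5 * (sPos.y / sPos.z)
That is, sPos.z plays the role of the perspective divisor w, and the [-1,1] to [0,1] remap into texture space (with v flipped) is folded into the same expression.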
Here is the whole vertex shader of the second pass:
float distanceScale;
float4 lightPos;
float4 view_position;
float4x4 view_proj_matrix;
float4x4 proj_matrix;
float time_0_X;
struct VS_OUTPUT
{
    float4 Pos: POSITION;
    float3 normal: TEXCOORD0;
    float3 lightVec : TEXCOORD1;
    float3 viewVec: TEXCOORD2;
    float4 shadowCrd: TEXCOORD3;
};
VS_OUTPUT vs_main(float4 inPos: POSITION, float3 inNormal: NORMAL)
{
    VS_OUTPUT Out;
    // Animate the light position.
    float3 lightPos;
    lightPos.x = cos(1.321 * time_0_X);
    lightPos.z = sin(0.923 * time_0_X);
    lightPos.xz = 100 * normalize(lightPos.xz);
    lightPos.y = 100;
    // Project the object's position
    Out.Pos = mul(view_proj_matrix, inPos);
    // World-space lighting
    Out.normal = inNormal;
    Out.lightVec = distanceScale * (lightPos - inPos.xyz);
    Out.viewVec = view_position - inPos.xyz;
    // Create view vectors for the light, looking at (0,0,0)
    float3 dirZ = -normalize(lightPos);
    float3 up = float3(0,0,1);
    float3 dirX = cross(up, dirZ);
    float3 dirY = cross(dirZ, dirX);
    // Transform into the light's view space.
    float4 pos;
    inPos.xyz -= lightPos;
    pos.x = dot(dirX, inPos);
    pos.y = dot(dirY, inPos);
    pos.z = dot(dirZ, inPos);
    pos.w = 1;
    // Project it into light space to determine the shadow
    // map position
    float4 sPos = mul(proj_matrix, pos);
    // Use projective texturing to map the position of each fragment
    // to its corresponding texel in the shadow map.
    sPos.z += 10;
    Out.shadowCrd.x = 0.5 * (sPos.z + sPos.x);
    Out.shadowCrd.y = 0.5 * (sPos.z - sPos.y);
    Out.shadowCrd.z = 0;
    Out.shadowCrd.w = sPos.z;
    return Out;
}
Pixel Shader:
float shadowBias;
float backProjectionCut;
float Ka;
float Kd;
float Ks;
float4 modelColor;
sampler ShadowMap;
sampler SpotLight;
float4 ps_main(
    float3 inNormal: TEXCOORD0,
    float3 lightVec: TEXCOORD1,
    float3 viewVec: TEXCOORD2,
    float4 shadowCrd: TEXCOORD3) : COLOR
{
    // Normalize the normal
    inNormal = normalize(inNormal);
    // Radial distance and normalized light vector
    float depth = length(lightVec);
    lightVec /= depth;
    // Standard lighting
    float diffuse = saturate(dot(lightVec, inNormal));
    float specular = pow(saturate(
        dot(reflect(-normalize(viewVec), inNormal), lightVec)),
        16);
    // The depth of the fragment closest to the light
    float shadowMap = tex2Dproj(ShadowMap, shadowCrd);
    // A spot image of the spotlight
    float spotLight = tex2Dproj(SpotLight, shadowCrd);
    // If the depth is larger than the stored depth, this fragment
    // is not the closest to the light, that is, we are in shadow.
    // Otherwise, we're lit. Add a bias to avoid precision issues.
    float shadow = (depth < shadowMap + shadowBias);
    // Cut back-projection, that is, make sure we don't light
    // anything behind the light.
    shadow *= (shadowCrd.w > backProjectionCut);
    // Modulate with the spotlight image
    shadow *= spotLight;
    // Shadow any light contribution except ambient
    return Ka * modelColor +
           (Kd * diffuse * modelColor + Ks * specular) * shadow;
}

DirectX + GLM Depth Reconstruction issues

I'm trying to port my engine to DirectX and I'm currently having issues with depth reconstruction. It works perfectly in OpenGL (even though I use a bit of an expensive method). Every part besides the depth reconstruction works so far. I use GLM because it's a good math library that doesn't require the user to install any dependencies.
So basically I get my GLM matrices:
struct DefferedUBO {
    glm::mat4 view;
    glm::mat4 invProj;
    glm::vec4 eyePos;
    glm::vec4 resolution;
};

DefferedUBO deffUBOBuffer;
// ...
glm::mat4 projection = glm::perspective(engine.settings.fov, aspectRatio, 0.1f, 100.0f);
// Get my camera
CTransform *transform = &engine.transformSystem.components[engine.entities[entityID].components[COMPONENT_TRANSFORM]];
// Get the view matrix
glm::mat4 view = glm::lookAt(
    transform->GetPosition(),
    transform->GetPosition() + transform->GetForward(),
    transform->GetUp()
);
deffUBOBuffer.invProj = glm::inverse(projection);
deffUBOBuffer.view = glm::inverse(view);
if (engine.settings.graphicsLanguage == GRAPHICS_DIRECTX) {
    deffUBOBuffer.invProj = glm::transpose(deffUBOBuffer.invProj);
    deffUBOBuffer.view = glm::transpose(deffUBOBuffer.view);
}
// Abstracted so I can use OGL, DX, VK, or even Metal when I get around to it.
deffUBO->UpdateUniformBuffer(&deffUBOBuffer);
deffUBO->Bind();
Then in HLSL, I simply use the following:
cbuffer MatrixInfoType {
    matrix invView;
    matrix invProj;
    float4 eyePos;
    float4 resolution;
};

float4 ViewPosFromDepth(float depth, float2 TexCoord) {
    float z = depth; // * 2.0 - 1.0;
    float4 clipSpacePosition = float4(TexCoord * 2.0 - 1.0, z, 1.0);
    float4 viewSpacePosition = mul(invProj, clipSpacePosition);
    viewSpacePosition /= viewSpacePosition.w;
    return viewSpacePosition;
}

float3 WorldPosFromViewPos(float4 view) {
    float4 worldSpacePosition = mul(invView, view);
    return worldSpacePosition.xyz;
}

float3 WorldPosFromDepth(float depth, float2 TexCoord) {
    return WorldPosFromViewPos(ViewPosFromDepth(depth, TexCoord));
}

// ...
// Sample the hardware depth buffer.
float depth = shaderTexture[3].Sample(SampleType[0], input.texCoord).r;
float3 position = WorldPosFromDepth(depth, input.texCoord).rgb;
Here's the result:
This just looks like random colors multiplied with the depth.
Ironically, when I remove the transposing, I get something closer to the truth, but not quite:
You're looking at Crytek Sponza. As you can see, the green area moves and rotates with the bottom of the camera. I have no idea why.
The correct version, along with Albedo, Specular, and Normals.
I fixed my problem at gamedev.net. There was a matrix-majorness (row- versus column-major) issue as well as a depth-handling issue.
https://www.gamedev.net/forums/topic/692095-d3d-glm-depth-reconstruction-issues
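For reference, a sketch of what those two fixes tend to look like; this is my reconstruction under stated assumptions, not the verbatim solution from the thread. GLM stores matrices column-major, which already matches HLSL's default column_major packing when the matrix is the first argument of mul(), so the DirectX-path transpose would be dropped; and with GLM_FORCE_DEPTH_ZERO_TO_ONE defined on the C++ side, glm::perspective targets D3D's [0,1] NDC depth, so the hardware depth can be used directly:
// Assumptions: matrices uploaded untransposed; projection built with
// GLM_FORCE_DEPTH_ZERO_TO_ONE so NDC depth is already in [0,1].
float4 ViewPosFromDepth(float depth, float2 TexCoord) {
    // Hardware depth is NDC z under this convention; no *2-1 remap.
    float z = depth;
    // D3D texture coordinates grow downward while NDC y grows upward,
    // so flip y when building the clip-space position.
    float2 ndcXY = float2(TexCoord.x * 2.0 - 1.0, 1.0 - TexCoord.y * 2.0);
    float4 clipSpacePosition = float4(ndcXY, z, 1.0);
    float4 viewSpacePosition = mul(invProj, clipSpacePosition);
    return viewSpacePosition / viewSpacePosition.w;
}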

Volumetric Fog Shader - Camera Issue

I am trying to build an infinite fog shader. This fog is applied to a 3D plane.
For the moment I have a Z-depth fog, and I am encountering some issues.
As you can see in the screenshot, there are two views.
The green color is my 3D plane. The problem is the red line. It seems that this line depends on my camera, which is not good, because when I rotate the camera the line is affected by the camera's position and rotation.
I don't know where it comes from or how to make my fog limit independent of the camera position.
Shader
Pass {
    CGPROGRAM
    #pragma vertex vert
    #pragma fragment frag
    #include "UnityCG.cginc"

    uniform float4 _FogColor;
    uniform sampler2D _CameraDepthTexture;
    float _Depth;
    float _DepthScale;

    struct v2f {
        float4 pos : SV_POSITION;
        float4 projection : TEXCOORD0;
        float4 screenPosition : TEXCOORD1;
    };

    v2f vert(appdata_base v) {
        v2f o;
        o.pos = mul(UNITY_MATRIX_MVP, v.vertex);
        // o.projection = ComputeGrabScreenPos(o.pos);
        float4 position = o.pos;
        #if UNITY_UV_STARTS_AT_TOP
        float scale = -1.0;
        #else
        float scale = 1.0;
        #endif
        float4 p = position * 0.5f;
        p.xy = float2(p.x, p.y * scale) + p.w;
        p.zw = position.zw;
        o.projection = p;
        // o.screenPosition = ComputeScreenPos(o.pos);
        position = o.pos;
        float4 q = position * 0.5f;
        #if defined(UNITY_HALF_TEXEL_OFFSET)
        q.xy = float2(q.x, q.y * _ProjectionParams.x) + q.w * _ScreenParams.zw;
        #else
        q.xy = float2(q.x, q.y * _ProjectionParams.x) + q.w;
        #endif
        #if defined(SHADER_API_FLASH)
        q.xy *= unity_NPOTScale.xy;
        #endif
        q.zw = position.zw;
        q.zw = 1.0f;
        o.screenPosition = q;
        return o;
    }

    sampler2D _GrabTexture;

    float4 frag(v2f IN) : COLOR {
        float3 uv = UNITY_PROJ_COORD(IN.projection);
        float depth = UNITY_SAMPLE_DEPTH(tex2Dproj(_CameraDepthTexture, uv));
        depth = LinearEyeDepth(depth);
        return saturate((depth - IN.screenPosition.w + _Depth) * _DepthScale);
    }
    ENDCG
}
Next, I want to rotate my fog to have a Y-depth fog, but I don't know how to achieve this effect.
I see two ways to achieve what you want:
1. Render the depth of your plane to a texture and calculate the fog based on the difference between the plane's depth and the object's depth: 0 if the object's depth is less, and (objDepth - planeDepth) * scale if it is bigger.
2. Instead of rendering to a texture, calculate the distance to the plane in the shader and use it directly (see the sketch below).
I am not sure what you are doing, since I am not very familiar with Unity surface shaders, but judging from the code and the result, it is something different.
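A minimal sketch of option 2 for the Y-depth case: reconstruct the scene pixel's world position and fog by how far it lies below a world-space height instead of comparing camera depths. _FogHeight, _HeightScale, and the worldPos interpolant are names I introduce here; they are not part of the original shader:
// In v2f, add:  float4 worldPos : TEXCOORD2;
// In vert, add: o.worldPos = mul(_Object2World, v.vertex);
//               (unity_ObjectToWorld in newer Unity versions)
uniform float _FogHeight;   // world-space height of the fog surface
uniform float _HeightScale; // how quickly fog thickens below it

float4 frag(v2f IN) : COLOR {
    float3 uv = UNITY_PROJ_COORD(IN.projection);
    float sceneDepth = LinearEyeDepth(UNITY_SAMPLE_DEPTH(tex2Dproj(_CameraDepthTexture, uv)));
    // Ray from the camera through this fragment of the fog plane.
    float3 viewRay = IN.worldPos.xyz - _WorldSpaceCameraPos.xyz;
    // Eye depth is measured along the camera's forward axis (the
    // negated third row of the view matrix), so rescale the ray to
    // land on the scene surface behind the plane.
    float3 forward = -UNITY_MATRIX_V[2].xyz;
    float3 scenePos = _WorldSpaceCameraPos.xyz + viewRay * (sceneDepth / dot(viewRay, forward));
    // Height test in world space: independent of camera position and rotation.
    return saturate((_FogHeight - scenePos.y) * _HeightScale);
}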
It seems that this is caused by _CameraDepthTexture; that's why the depth is calculated relative to the camera position.
But I don't know how to correct it... It seems that there is no way to get the depth from another point. Any ideas?
Here is another example. In green you can "see" the object, and the blue line is the fog as it should be.

Applying a DirectX shader to a rotated texture in XNA

I had a dynamic light shader for which the shaded sprite was fine in my own test program, but it started resembling an eclipse once I imported it into my friend's physics-based game. I narrowed it down by simplifying the gradient to be purely based on the X value within the shape and making the outside of the circle in the sprite red, but as you can see, the rotation continues to cause problems (can't post images, so here are links to the album).
Circle at different rotations (not in order, but labelled by radian values): http://imgur.com/a/Preth
Everything I researched about matrix math says I am using the correct formula for rotation, but I figure maybe I'm doing something wrong. Here is my .fx shader code:
float rotationrads; /* assumed rotation is in radians */
sampler TextureSampler: register(s0);

float4 staticlight(float2 Tex: TEXCOORD0) : COLOR0
{
    float4 Color = tex2D(TextureSampler, Tex);
    float2 NewTex;
    /* Get the new X and Y values by applying the UV formula with the rotation */
    NewTex.x = (Tex.x * cos(rotationrads)) - (Tex.y * sin(rotationrads));
    NewTex.y = (Tex.y * sin(rotationrads)) + (Tex.y * cos(rotationrads));
    if (Color.a > 0.0)
    {
        Color.r = (Color.r * NewTex.x);
        Color.g = (Color.g * NewTex.x);
        Color.b = (Color.b * NewTex.x);
    }
    else
    {
        Color.r = 100;
        Color.g = 0;
        Color.b = 0;
        Color.a = 100;
    }
    return Color;
}

technique StaticLightOnly
{
    pass Pass1
    {
        PixelShader = compile ps_2_0 staticlight();
    }
}
If anyone has experience with sprite-based rotation in 2D shaders, I'd appreciate any help with this! Thanks in advance!
Because rotations are performed about the origin, you have to move the rotation center (0.5, 0.5) to the origin, execute the rotation, and then undo the translation.
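A short sketch of that order of operations inside the pixel shader (note also that the standard 2D rotation uses Tex.x, not Tex.y, in the sine term of the second row):
/* Shift the rotation center (0.5, 0.5) to the origin, rotate, shift back. */
float2 centered = Tex - float2(0.5, 0.5);
float s = sin(rotationrads);
float c = cos(rotationrads);
float2 rotated;
rotated.x = centered.x * c - centered.y * s;
rotated.y = centered.x * s + centered.y * c;
float2 NewTex = rotated + float2(0.5, 0.5);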
