Pass an array to HLSL vertex shader as an argument? - directx

I need to pass an array to my vertex shader as an argument in Direct3D. The signature of the shader function looks like the following:
ReturnDataType main(float3 QuadPos : POSITION0, float4 OffsetArray[4] : COLOR0)
Is this signature OK? And how do I define the input layout description?
Thanks in advance.

According to the HLSL reference pages, function arguments only support intrinsic types and user-defined types, and user-defined types are themselves built from intrinsic types; a native array type is not supported as an argument. A vector type or a struct type might be your choice.
Here is an example using a struct: you can simply build up a struct like VS_INPUT below and pass it in.
//--------------------------------------------------------------------------------------
// Input / Output structures
//--------------------------------------------------------------------------------------
struct VS_INPUT
{
    float4 vPosition : POSITION;
    float3 vNormal   : NORMAL;
    float2 vTexcoord : TEXCOORD0;
};
struct VS_OUTPUT
{
    float3 vNormal   : NORMAL;
    float2 vTexcoord : TEXCOORD0;
    float4 vPosition : SV_POSITION;
};
//--------------------------------------------------------------------------------------
// Constant buffer with the transforms used below (assumed to be defined elsewhere in
// the original effect file)
//--------------------------------------------------------------------------------------
cbuffer cbPerObject
{
    float4x4 g_mWorldViewProjection;
    float4x4 g_mWorld;
};
//--------------------------------------------------------------------------------------
// Vertex Shader
//--------------------------------------------------------------------------------------
VS_OUTPUT VSMain( VS_INPUT Input )
{
    VS_OUTPUT Output;
    Output.vPosition = mul( Input.vPosition, g_mWorldViewProjection );
    Output.vNormal   = mul( Input.vNormal, (float3x3)g_mWorld );
    Output.vTexcoord = Input.vTexcoord;
    return Output;
}
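
Applying the same struct approach to your signature, here is a minimal sketch (the struct names, field names, and the COLOR0-COLOR3 semantic assignments are assumptions for illustration, not taken from your code): rather than a native array argument, declare the four offsets as four separately named fields with consecutive semantic indices.

struct VS_QUAD_INPUT
{
    float3 QuadPos : POSITION0;
    float4 Offset0 : COLOR0;
    float4 Offset1 : COLOR1;
    float4 Offset2 : COLOR2;
    float4 Offset3 : COLOR3;
};
struct VS_QUAD_OUTPUT
{
    float4 Pos : SV_POSITION;
};
VS_QUAD_OUTPUT VSQuad( VS_QUAD_INPUT Input )
{
    // Rebuild a local array if indexing the offsets is convenient.
    float4 Offsets[4] = { Input.Offset0, Input.Offset1, Input.Offset2, Input.Offset3 };
    VS_QUAD_OUTPUT Output;
    Output.Pos = float4( Input.QuadPos, 1.0f ) + Offsets[0]; // placeholder use of the offsets
    return Output;
}

The input layout description on the C++ side then needs one element per field, with matching semantic names and indices: POSITION/0 for the position and COLOR/0 through COLOR/3 for the four offsets (e.g. one D3D11_INPUT_ELEMENT_DESC entry each in Direct3D 11).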

Related

Something like vertex_id for Metal fragment shader, to identify fragment?

Vertex shaders in Metal can use a [[vertex_id]] attribute on an integer argument, and that argument will receive values between 0 and the number of vertices. Is there a similar thing for fragment shaders?
I want to write some debug info to a buffer, within the fragment shader, as shown below. Then in the CPU code, I would print the contents of the buffer, as a way to debug what is going on in the fragment shader.
struct VertexIn {
    float4 position [[attribute(0)]];
};
struct VertexOut {
    float4 position [[position]];
};
vertex VertexOut vertex_main(const VertexIn vertex_in [[stage_in]]) {
    VertexOut out;
    out.position = vertex_in.position;
    return out;
}
fragment float4
fragment_main(VertexOut vo [[stage_in]],
              uint fragment_id [[fragment_id]], // <-- Hypothetical, doesn't actually work.
              device float4 *debug_out [[buffer(0)]],
              device uint *debug_out2 [[buffer(1)]])
{
    debug_out[fragment_id] = vo.position;
    debug_out2[fragment_id] = ...
    return float4(0, 1, clamp(vo.position.x/1000, 0.0, 1.0), 1);
}

metal shader: pass color encoded as 4 bytes INTEGER instead of 8 Bytes FLOAT

I need to send color encoded as a 4-byte RGBA integer (rather than floats) to my Metal shader, but I don't know whether a Metal shader can handle color stored as an integer. Currently I translate it in the shader to a float4 color (I don't even know whether it would be better to use half4 instead), but I don't know if this is a good way:
struct VertexIn {
    packed_float3 pos;
    packed_uchar4 color;
};
struct VertexOut {
    float4 pos [[position]];
    float4 color;
};
vertex VertexOut vertexShader(const device VertexIn *vertexArray [[buffer(0)]],
                              const unsigned int vid [[vertex_id]]) {
    VertexIn in = vertexArray[vid];
    VertexOut out;
    out.color = float4(float(in.color[2]) / 255,
                       float(in.color[1]) / 255,
                       float(in.color[0]) / 255,
                       float(in.color[3]) / 255);
    out.pos = float4(in.pos.x, in.pos.y, in.pos.z, 1);
    return out;
}
fragment float4 fragmentShader(VertexOut interpolated [[stage_in]]) {
    return interpolated.color;
}

D3DXCompileShader cannot compile shader with NORMAL in it

I noticed that the D3DXCompileShader call made by the Ogre library fails in the case of DirectX 9 for the following reason:
error X4502: invalid vs_2_0 output semantic 'NORMAL'
This seems pretty strange to me, because MSDN states that it should be supported starting from DirectX 9:
The following types of semantics are supported in both Direct3D 9 and Direct3D 10 and later
Here's my hlsl file:
float4x4 worldViewProj;
float4 lightPosition;
float3 eyePosition;
struct VS_OUTPUT
{
    float4 pos           : POSITION;
    float4 normal        : NORMAL;
    float4 color         : COLOR;
    float4 lightPosition : TEXCOORD0;
    float3 eyePosition   : TEXCOORD1;
    float3 pos3d         : TEXCOORD2;
};
VS_OUTPUT main(
    float4 pos    : POSITION,
    float4 normal : NORMAL,
    float4 color  : COLOR
)
{
    VS_OUTPUT Out;
    Out.pos = mul(worldViewProj, pos);
    Out.pos3d = pos.xyz;
    Out.normal = normal;
    Out.color = color;
    Out.lightPosition = lightPosition;
    Out.eyePosition = eyePosition;
    return Out;
}
The link you provided clearly states that the NORMAL semantic is only supported as a vertex shader input.
You may try passing the normal(s) to the pixel shader using a TEXCOORD semantic (TEXCOORD3, for instance) instead.
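For example, a minimal sketch of that change applied to the output struct above (only the semantic on the normal field changes; the shader body can stay the same):

struct VS_OUTPUT
{
    float4 pos           : POSITION;
    float4 normal        : TEXCOORD3; // was NORMAL, which vs_2_0 rejects as an output semantic
    float4 color         : COLOR;
    float4 lightPosition : TEXCOORD0;
    float3 eyePosition   : TEXCOORD1;
    float3 pos3d         : TEXCOORD2;
};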

HLSL invalid ps_2_0 input semantic POSITION0

I'm attempting to write a phong shader effect for Dx9 in RenderMonkey.
I'm getting a compile error in the pixel shader
*"invalid ps_2_0 input semantic 'POSITION0'"*
and I'm not sure how to fix it, although I know it's got to be something to do with the POSITION0 semantic in VS_OUTPUT.
I tried changing VS_OUTPUT's Pos semantic to TEXCOORD0, but then the system reports that
vertex shader must minimally write all four components of POSITION
Shaders are supplied below. Any suggestions?
Here's my vertex shader:
struct VS_INPUT
{
    float4 Pos    : POSITION0;
    float3 Normal : NORMAL0;
};
struct VS_OUTPUT
{
    float4 Pos    : POSITION0;
    float3 Normal : TEXCOORD0;
};
VS_OUTPUT vs_main( VS_INPUT Input )
{
    VS_OUTPUT Output;
    Output.Pos = Input.Pos;
    Output.Normal = Input.Normal;
    return Output;
}
and my pixel shader:
float4x4 matViewProjection;
// light source
float4 lightPos;
float4 Ambient;
float4 Diffuse;
float4 Specular;
// material reflection properties
float4 Ke;
float4 Ka;
float4 Kd;
float4 Ks;
float nSpecular;
// eye
float4 eyePosition;
struct VS_OUTPUT
{
    float4 Pos    : POSITION0;
    float3 Normal : TEXCOORD0;
};
float4 ps_main( VS_OUTPUT vsOutput ) : COLOR0
{
    vsOutput.Pos = mul( vsOutput.Pos, matViewProjection );
    float3 ViewDirection  = normalize( eyePosition.xyz - vsOutput.Pos.xyz );
    float3 LightDirection = normalize( lightPos.xyz - vsOutput.Pos.xyz );
    float3 N = normalize( vsOutput.Normal );
    float3 R = reflect( -LightDirection, N );
    float LdotN = max( 0.0, dot( LightDirection, N ) );
    float VdotR = max( 0.0, dot( ViewDirection, R ) );
    // find colour components
    float4 a = Ka * Ambient;
    float4 d = Kd * Diffuse * LdotN;
    float4 s = Ks * Specular * pow( VdotR, nSpecular );
    float4 FragColour = Ke + a + d + s;
    return FragColour;
}
Okay, I found a solution for those interested.
The Vertex Shader should have the following structs defined:
struct VS_INPUT
{
    float4 Pos    : POSITION0;
    float3 Normal : NORMAL0;
};
struct VS_OUTPUT
{
    float4 Pos    : POSITION0;
    float4 PosOut : TEXCOORD0;
    float3 Normal : TEXCOORD1;
};
The VS_OUTPUT struct should be different in the pixel shader:
struct VS_OUTPUT
{
    float4 PosOut : TEXCOORD0;
    float3 Normal : TEXCOORD1;
};
My problem stemmed from the fact that you can't have a POSITION semantic as input to the pixel shader, at least for ps_2_0.
As a user, you are not allowed to read the POSITION0 value of the vertex shader output in the pixel shader function; this attribute seems to be cut off at some point between the vertex and pixel stages.
Instead you need to declare another attribute in the vertex shader output, e.g.
float4 newPosition : TEXCOORD1;
and assign it the same value as POSITION0. This new attribute can then be used in the pixel shader function.
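A minimal sketch of a matching vertex shader under that fix, assuming matViewProjection is also declared in the vertex shader file so the transform happens in the vertex stage rather than in ps_main:

float4x4 matViewProjection;

VS_OUTPUT vs_main( VS_INPUT Input )
{
    VS_OUTPUT Output;
    Output.Pos    = mul( Input.Pos, matViewProjection ); // POSITION0: consumed by the rasterizer only
    Output.PosOut = Output.Pos;                          // copy that the pixel shader is allowed to read
    Output.Normal = Input.Normal;
    return Output;
}

In ps_main, the reads of vsOutput.Pos would then use vsOutput.PosOut instead.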

DX10 Skybox Shader

I'm trying to write a skybox shader in DX10 using the following HLSL code:
//////////////////////////////////////////////////////////////////////////
// world matrix for each rendered object
float4x4 g_mWorld;
// single cubemap texture
Texture2D g_tCubeMap;
// basic mirror texture sampler
SamplerState g_sSamplerMirror
{
    Filter = MIN_MAG_MIP_POINT;
    AddressU = MIRROR;
    AddressV = MIRROR;
};
//////////////////////////////////////////////////////////////////////////
// pre-defined vertex formats for vertex input layouts
struct VS_INPUT
{
    float3 Position : POSITION;
};
struct PS_INPUT
{
    float4 SPosition : SV_POSITION;
    float3 UV        : TEXCOORD;
};
//////////////////////////////////////////////////////////////////////////
PS_INPUT VS_Default( VS_INPUT Input )
{
    PS_INPUT Output = (PS_INPUT)0;
    Output.SPosition = float4( Input.Position, 1.0f );
    Output.UV = normalize( mul( Output.SPosition, g_mWorld ) ).xyz;
    return Output;
}
//////////////////////////////////////////////////////////////////////////
float4 PS_Default( PS_INPUT Input ) : SV_TARGET0
{
    return float4( texCUBE( g_sSamplerMirror, Input.UV ) );
}
//////////////////////////////////////////////////////////////////////////
technique10 TECH_Default
{
    pass
    {
        SetVertexShader( CompileShader( vs_4_0, VS_Default() ) );
        SetPixelShader( CompileShader( ps_4_0, PS_Default() ) );
        SetGeometryShader( 0 );
    }
}
Which gives the error "DX-9 style intrinsics are disabled when not in dx9 compatibility mode." on line 46:
return float4( texCUBE( g_sSamplerMirror, Input.UV ) );
Is there an alternative to texCUBE? How can I fix this without enabling dx9 compatibility mode?
Since you are using Shader Model 4, you should be able to declare a TextureCube object and then call its Sample method instead of texCUBE.
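For example, a minimal sketch of that change applied to the shader above (keeping the existing g_sSamplerMirror; only the texture declaration and the pixel shader body differ):

TextureCube g_tCubeMap; // was Texture2D

float4 PS_Default( PS_INPUT Input ) : SV_TARGET0
{
    return g_tCubeMap.Sample( g_sSamplerMirror, Input.UV );
}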
