Metal shader: pass color encoded as 4 bytes INTEGER instead of 8 bytes FLOAT - iOS

I need to send color to my Metal shader encoded as a 4-byte RGBA value, one INTEGER byte per channel (not floats), but I don't know whether a Metal shader can handle color stored as integers. Currently I convert it in the shader to a float4 (I don't even know whether it would be better to use half4 instead), but I'm not sure this is a good approach:
struct VertexIn {
    packed_float3 pos;
    packed_uchar4 color;
};

struct VertexOut {
    float4 pos [[position]];
    float4 color;
};

vertex VertexOut vertexShader(const device VertexIn *vertexArray [[buffer(0)]],
                              const unsigned int vid [[vertex_id]]) {
    VertexIn in = vertexArray[vid];
    VertexOut out;
    out.color = float4(float(in.color[2])/255, float(in.color[1])/255,
                       float(in.color[0])/255, float(in.color[3])/255);
    out.pos = float4(in.pos.x, in.pos.y, in.pos.z, 1);
    return out;
}

fragment float4 fragmentShader(VertexOut interpolated [[stage_in]]) {
    return interpolated.color;
}
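A uchar4 attribute like this works fine in Metal, and converting it to float4 in the vertex function is a reasonable approach (half4 would typically be enough for color). The per-component conversion above can also be written as a single vector cast. A minimal sketch, assuming the same buffer layout and keeping the B/R swap that the original indexing performs:
vertex VertexOut vertexShader(const device VertexIn *vertexArray [[buffer(0)]],
                              const unsigned int vid [[vertex_id]]) {
    VertexIn in = vertexArray[vid];
    VertexOut out;
    // Widen the packed bytes to uchar4, convert to float4, and normalize 0-255 to 0-1;
    // .zyxw reproduces the B/R swap done component-by-component above.
    out.color = float4(uchar4(in.color)).zyxw / 255.0;
    out.pos = float4(in.pos.x, in.pos.y, in.pos.z, 1.0);
    return out;
}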

Related

Something like vertex_id for Metal fragment shader, to identify fragment?

Vertex shaders in Metal can use a [[vertex_id]] attribute on an integer argument, and that argument will get values between 0 and the number of vertexes. Is there a similar thing for fragment shaders?
I want to write some debug info to a buffer, within the fragment shader, as shown below. Then in the CPU code, I would print the contents of the buffer, as a way to debug what is going on in the fragment shader.
struct VertexIn {
    float4 position [[attribute(0)]];
};

struct VertexOut {
    float4 position [[position]];
};

vertex VertexOut vertex_main(const VertexIn vertex_in [[stage_in]]) {
    VertexOut out;
    out.position = vertex_in.position;
    return out;
}

fragment float4
fragment_main(VertexOut vo [[stage_in]],
              uint fragment_id [[fragment_id]], // <-- Hypothetical, doesn't actually work.
              device float4 *debug_out [[buffer(0)]],
              device uint *debug_out2 [[buffer(1)]])
{
    debug_out[fragment_id] = vo.position;
    debug_out2[fragment_id] = ...
    return float4(0, 1, clamp(vo.position.x/1000, 0.0, 1.0), 1);
}
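Metal has no built-in fragment ID attribute, but the [[position]] input carries window coordinates in pixels, so a common workaround is to derive a buffer index from them. A sketch, where fb_width is a hypothetical framebuffer-width constant you would pass in yourself:
fragment float4
fragment_main(VertexOut vo [[stage_in]],
              constant uint &fb_width [[buffer(2)]], // hypothetical: framebuffer width in pixels
              device float4 *debug_out [[buffer(0)]])
{
    // vo.position holds window-space pixel coordinates, so each fragment
    // can compute its own slot in the debug buffer.
    uint index = uint(vo.position.y) * fb_width + uint(vo.position.x);
    debug_out[index] = vo.position;
    return float4(0, 1, clamp(vo.position.x / 1000, 0.0, 1.0), 1);
}
Note that overlapping geometry will make several fragments write to the same slot.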

Why does sending RGBA data to a BGRA pipelineDescriptor work?

My MTKView is in BGRA and I set up my pipelineDescriptor in BGRA as below:
pipelineDescriptor.colorAttachments.objectAtIndexedSubscript(0).setPixelFormat(MTLPixelFormatBGRA8Unorm);
Now the problem is that if I send data encoded in RGBA (I mean a color is 16 bytes, encoded as R (4 bytes) + G (4 bytes) + B (4 bytes) + A (4 bytes)) to the shader below, then it works fine!
RenderCommandEncoder.setVertexBuffer(LvertexBuffer{buffer}, 0{offset}, 0{atIndex})
and
#include <metal_stdlib>
using namespace metal;
#include <simd/simd.h>

struct VertexIn {
    vector_float4 color;
    vector_float2 pos;
};

struct VertexOut {
    float4 color;
    float4 pos [[position]];
};

vertex VertexOut vertexShader(
    const device VertexIn *vertexArray [[buffer(0)]],
    unsigned int vid [[vertex_id]],
    constant vector_uint2 *viewportSizePointer [[buffer(1)]]
)
{
    // Get the data for the current vertex.
    VertexIn in = vertexArray[vid];
    VertexOut out;
    ...
    out.color = in.color;
    ....
    return out;
}

fragment float4 fragmentShader(
    VertexOut interpolated [[stage_in]],
    texture2d<half> colorTexture [[ texture(0) ]]
)
{
    return interpolated.color;
}
How is this possible? Is it about little- and big-endian byte order?
The color(s) you return from a fragment function are assumed to be in RGBA order, regardless of the pixel format of your render target. They get swizzled and/or converted as necessary to match the format of the destination. The same thing happens when sampling/writing other textures: colors always arrive in RGBA order.
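As a quick illustration (a sketch, not taken from the code above): a fragment function that returns pure red comes out red whether the attachment is MTLPixelFormatBGRA8Unorm or MTLPixelFormatRGBA8Unorm, because the swizzle to the stored byte order happens after the shader runs:
fragment float4 solidRed()
{
    // Written in RGBA order; Metal reorders the channels to match the attachment format.
    return float4(1.0, 0.0, 0.0, 1.0);
}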

D3DXCompileShader cannot compile shader with NORMAL in it

I noticed that the D3DXCompileShader call made by the Ogre library fails in the DirectX 9 case with the following error:
error X4502: invalid vs_2_0 output semantic 'NORMAL'
This seems pretty strange to me, because MSDN states that it should be supported starting from Direct3D 9:
The following types of semantics are supported in both Direct3D 9 and Direct3D 10 and later
Here's my HLSL file:
float4x4 worldViewProj;
float4 lightPosition;
float3 eyePosition;

struct VS_OUTPUT
{
    float4 pos: POSITION;
    float4 normal: NORMAL;
    float4 color: COLOR;
    float4 lightPosition: TEXCOORD0;
    float3 eyePosition: TEXCOORD1;
    float3 pos3d: TEXCOORD2;
};

VS_OUTPUT main(
    float4 pos: POSITION,
    float4 normal: NORMAL,
    float4 color: COLOR
)
{
    VS_OUTPUT Out;
    Out.pos = mul(worldViewProj, pos);
    Out.pos3d = pos.xyz;
    Out.normal = normal;
    Out.color = color;
    Out.lightPosition = lightPosition;
    Out.eyePosition = eyePosition;
    return Out;
}
The link you provided clearly states that the NORMAL semantic is only supported as a VS input.
You may try to pass the normal(s) to the pixel shader using a TEXCOORD semantic (TEXCOORD3, for instance) instead.
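For instance, the vertex shader's output struct could be reshuffled like this (a sketch of the suggestion above; the choice of TEXCOORD3 is arbitrary as long as it does not collide with the other registers):
struct VS_OUTPUT
{
    float4 pos: POSITION;
    float4 color: COLOR;
    float4 lightPosition: TEXCOORD0;
    float3 eyePosition: TEXCOORD1;
    float3 pos3d: TEXCOORD2;
    float4 normal: TEXCOORD3; // moved off NORMAL, which vs_2_0 rejects as an output semantic
};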

How to pass textures to DirectX 9 pixel shader?

I have a pixel shader:
// fxc.exe tiles.fs /T ps_3_0 /Fotiles.fsc /Fctiles.fsl
struct PSInput
{
    float4 Pos : TEXCOORD0;
    float3 Normal : TEXCOORD1;
    float2 TexcoordUV : TEXCOORD2;
    float2 TexcoordST : TEXCOORD3;
};

sampler2D sampler0; //uniform
sampler2D sampler1; //uniform
sampler2D sampler2; //uniform
sampler2D sampler3; //uniform
sampler2D alphamap1;//uniform
sampler2D alphamap2;//uniform
sampler2D alphamap3;//uniform

uniform int tex_count = 0;
uniform float4 color_ambient = float4(0.75, 0.75, 0.75, 1.0);
uniform float4 color_diffuse = float4(0.25, 0.25, 0.25, 1.0);
uniform float4 color_specular = float4(1.0, 1.0, 1.0, 1.0);
uniform float shininess = 77.0f;
uniform float3 light_position = float3(12.0f, 32.0f, 560.0f);

float4 main(PSInput In) : COLOR
{
    float3 light_direction = normalize(light_position - (float3)In.Pos);
    float3 normal = normalize(In.Normal);
    float3 half_vector = normalize(light_direction + normalize((float3)In.Pos));
    float diffuse = max(0.0, dot(normal, light_direction));
    float specular = pow(max(0.0, dot(In.Normal, half_vector)), shininess);
    float4 color = tex2D(sampler0, In.TexcoordUV);
    if (tex_count > 0){
        float4 temp = tex2D(sampler1, In.TexcoordUV);
        float4 amap = tex2D(alphamap1, In.TexcoordST);
        color = lerp(color, temp, amap.a);
    }
    if (tex_count > 1){
        float4 temp = tex2D(sampler2, In.TexcoordUV);
        float4 amap = tex2D(alphamap2, In.TexcoordST);
        color = lerp(color, temp, amap.a);
    }
    if (tex_count > 2){
        float4 temp = tex2D(sampler3, In.TexcoordUV);
        float4 amap = tex2D(alphamap3, In.TexcoordST);
        color = lerp(color, temp, amap.a);
    }
    color = color * color_ambient + diffuse * color_diffuse + specular * color_specular;
    return color;
}
and a vertex shader:
// fxc.exe tiles.vs /T vs_3_0 /Fotiles.vsc /Fctiles.vsl
struct VSInput
{
    float3 Pos : POSITION;
    float3 Normal : NORMAL;
    float2 TexcoordUV : TEXCOORD0;
    float2 TexcoordST : TEXCOORD1;
};

struct PSInput
{
    float4 Pos : POSITION;
    float3 Normal : TEXCOORD0;
    float2 TexcoordUV : TEXCOORD1;
    float2 TexcoordST : TEXCOORD2;
};

uniform matrix modelMatrix;
uniform matrix projectionMatrix;
uniform matrix lookAtMatrix;

PSInput main(VSInput In)
{
    PSInput Out = (PSInput) 0;
    //projectionMatrix * lookAtMatrix * modelMatrix;
    matrix MVP = mul(modelMatrix, lookAtMatrix);
    MVP = mul(MVP, projectionMatrix);
    Out.Normal = mul(In.Normal, (float3x3)modelMatrix);
    Out.Pos = mul(float4(In.Pos, 1.0), MVP);
    Out.TexcoordUV = In.TexcoordUV;
    Out.TexcoordST = In.TexcoordST;
    return Out;
}
The same works under OpenGL + GLSL, except that mix is replaced by lerp (I hope that's correct).
Following the example from http://www.two-kings.de/tutorials/dxgraphics/dxgraphics18.html, I pass the textures with:
ps_pConstantTable.SetInt(m_pD3DDevice, texCountHandle, 0);
for i := 0 to texCount - 1 do begin
    tBlp := texture_buf[cx, cy][i];
    if tBlp = nil then
        break;
    m_pD3DDevice.SetTexture(i, tBlp.itex);
    ps_pConstantTable.SetInt(m_pD3DDevice, texCountHandle, i);
    if i > 0 then begin
        // this time, use blending:
        m_pD3DDevice.SetTexture(i + 3, AlphaMaps[cx, cy][i]);
    end;
end;
So the ordinary textures get indices 0-3 and the alpha maps 4-6 (max texCount is 4).
The problem is: I can see the mesh (terrain), but it is solid black. Do I need to set something else (without shaders it was also black until I assigned materials and a light)? Can I pass textures like that? Can I do this with sampler2D as a uniform (and how)?
Edit: an example with sources, shaders, several of the textures and alpha maps used, and vertex data with normals has been added at filebeam, http://fbe.am/nm4. It is as small as possible, and also contains DXErr9ab.dll to log errors.
To use a texture in a pixel shader, you can follow the steps below.
Create the texture in your C/C++ code with D3DXCreateTextureFromFile or another function.
if( FAILED( D3DXCreateTextureFromFile( g_pd3dDevice, "FaceTexture.jpg",
                                       &g_pTexture ) ) )
    return E_FAIL;
Declare a D3DXHANDLE and associate it with the texture in your shader file (you should compile your effect file before this step; effects_ here is a pointer to ID3DXEffect).
texture_handle = effects_->GetParameterByName(0, "FaceTexture");
Set the texture in your render function
effects_->SetTexture(texture_handle, g_pTexture);
Declare a texture in your shader file
texture FaceTexture;
Declare a sampler in your shader file
// Face texture sampler
sampler FaceTextureSampler = sampler_state
{
    Texture = <FaceTexture>;
    MipFilter = LINEAR;
    MinFilter = LINEAR;
    MagFilter = LINEAR;
};
Do sampling in your pixel shader function
float4 BasicPS(OutputVS outputVS) : COLOR
{
    float4 Output;
    // In D3D9-level HLSL, sampling goes through tex2D with the sampler,
    // not the D3D10-style Texture.Sample call.
    Output = tex2D(FaceTextureSampler, outputVS.texUV);
    return Output;
}
If you have the DirectX SDK installed, I recommend taking a look at the "BasicHLSL" sample, which gives a very basic introduction to vertex and pixel shaders (including textures).
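As for binding the sampler2D uniforms from the question directly (without the effect framework), a minimal C/C++ sketch: ask the pixel shader's ID3DXConstantTable which sampler register each sampler was assigned to, and bind the texture to that stage. ps_pConstantTable, m_pD3DDevice and the sampler name are taken from the question; pTexture is a placeholder for your IDirect3DTexture9 pointer:
// Look up the register the compiler assigned to "sampler1", then bind a texture there.
D3DXHANDLE hSampler1 = ps_pConstantTable->GetConstantByName(NULL, "sampler1");
UINT sampler1Register = ps_pConstantTable->GetSamplerIndex(hSampler1);
m_pD3DDevice->SetTexture(sampler1Register, pTexture); // pTexture: placeholder texture pointer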

HLSL invalid ps_2_0 input semantic POSITION0

I'm attempting to write a Phong shader effect for DX9 in RenderMonkey.
I'm getting a compile error in the pixel shader:
"invalid ps_2_0 input semantic 'POSITION0'"
and I'm not sure how to fix it, although I know it must have something to do with the POSITION0 semantic in VS_OUTPUT.
I tried changing VS_OUTPUT's Pos semantic to TEXCOORD0, but then the system reports that
vertex shader must minimally write all four components of POSITION
Shaders are supplied below. Any suggestions?
Here's my vertex shader:
struct VS_INPUT
{
    float4 Pos : POSITION0;
    float3 Normal : NORMAL0;
};

struct VS_OUTPUT
{
    float4 Pos : POSITION0;
    float3 Normal : TEXCOORD0;
};

VS_OUTPUT vs_main( VS_INPUT Input )
{
    VS_OUTPUT Output;
    Output.Pos = Input.Pos;
    Output.Normal = Input.Normal;
    return Output;
}
and my pixel shader:
float4x4 matViewProjection;

// light source
float4 lightPos;
float4 Ambient;
float4 Diffuse;
float4 Specular;

// material reflection properties
float4 Ke;
float4 Ka;
float4 Kd;
float4 Ks;
float nSpecular;

// eye
float4 eyePosition;

struct VS_OUTPUT
{
    float4 Pos : POSITION0;
    float3 Normal : TEXCOORD0;
};

float4 ps_main( VS_OUTPUT vsOutput ) : COLOR0
{
    vsOutput.Pos = mul( vsOutput.Pos, matViewProjection );
    float3 ViewDirection = normalize( eyePosition.xyz - vsOutput.Pos.xyz );
    float3 LightDirection = normalize( lightPos.xyz - vsOutput.Pos.xyz );
    float3 N = normalize( vsOutput.Normal );
    float3 R = reflect( -LightDirection, N );
    float LdotN = max( 0.0, dot( LightDirection, N ) );
    float VdotR = max( 0.0, dot( ViewDirection, R ) );

    // find colour components
    float4 a = Ka * Ambient;
    float4 d = Kd * Diffuse * LdotN;
    float4 s = Ks * Specular * pow( VdotR, nSpecular );
    float4 FragColour = Ke + a + d + s;
    return FragColour;
}
Okay, I found a solution for those interested.
The Vertex Shader should have the following structs defined:
struct VS_INPUT
{
    float4 Pos : POSITION0;
    float3 Normal : NORMAL0;
};

struct VS_OUTPUT
{
    float4 Pos : POSITION0;
    float4 PosOut : TEXCOORD0;
    float3 Normal : TEXCOORD1;
};
The VS_OUTPUT struct should be different in the pixel shader:
struct VS_OUTPUT
{
    float4 PosOut : TEXCOORD0;
    float3 Normal : TEXCOORD1;
};
My problem stemmed from the fact that you can't have a POSITION semantic as input to the pixel shader. At least for ps_2_0.
As a user, you are not allowed to read the value of POSITION0 from the vertex shader output in the pixel shader function. This attribute seems to be cut off at some point between the vertex and pixel shader.
Instead you need to declare another attribute in the vertex shader output, e.g.,
float4 newPosition : TEXCOORD1;
to which you assign the same value as POSITION0. You can then use this new attribute in the pixel shader function.
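A minimal sketch of a matching vs_main for the structs above, keeping the original pass-through behaviour (in ps_main, the reads of vsOutput.Pos then become vsOutput.PosOut):
VS_OUTPUT vs_main( VS_INPUT Input )
{
    VS_OUTPUT Output;
    Output.Pos    = Input.Pos;    // consumed by the rasterizer only
    Output.PosOut = Input.Pos;    // duplicated so the pixel shader can read the position
    Output.Normal = Input.Normal;
    return Output;
}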
