How to use DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL correctly? - directx

I am having an issue while drawing my cube on the window: no graphics appear at all. Visual Studio shows the following warning messages:
D3D11 WARNING: ID3D11DeviceContext::DrawIndexed: The Pixel Shader expects a Render Target View bound to slot 0, but the Render Target View was unbound during a call to Present. A successful Present call for DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL SwapChains unbinds backbuffer 0 from all GPU writeable bind points. [ EXECUTION WARNING #3146082:
D3D11 WARNING: ID3D11DeviceContext::DrawIndexed: The Pixel Shader expects a Render Target View bound to slot 0, but none is bound. This is OK, as writes of an unbound Render Target View are discarded. It is also possible the developer knows the data will not be used anyway. This is only a problem if the developer actually intended to bind a Render Target View here. [ EXECUTION WARNING #3146081: DEVICE_DRAW_RENDERTARGETVIEW_NOT_SET]
I created the swap chain using the following function:
virtual IDXGISwapChain* SwapChain(HWND wnd)
{
    HRESULT hr = S_OK;
    IDXGISwapChain* swapchain = nullptr;

    DXGI_SWAP_CHAIN_DESC desc;
    ZeroMemory(&desc, sizeof(DXGI_SWAP_CHAIN_DESC));
    desc.Windowed = TRUE; // Sets the initial state of full-screen mode.
    desc.BufferCount = 2;
    desc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    desc.SampleDesc.Count = 1;   // multisampling setting
    desc.SampleDesc.Quality = 0; // vendor-specific flag
    desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
    desc.OutputWindow = wnd;

    // Create the DXGI device object to use in other factories, such as Direct2D.
    IDXGIDevice3* dxgiDevice;
    hr = device_->QueryInterface(__uuidof(IDXGIDevice3), reinterpret_cast<void**>(&dxgiDevice));
    if (FAILED(hr))
        return nullptr;

    // Create swap chain.
    IDXGIAdapter* adapter;
    IDXGIFactory* factory;
    hr = dxgiDevice->GetAdapter(&adapter);
    dxgiDevice->Release();
    if (FAILED(hr))
        return nullptr;

    adapter->GetParent(IID_PPV_ARGS(&factory));
    hr = factory->CreateSwapChain(device_, &desc, &swapchain);
    adapter->Release();
    factory->Release();
    return swapchain;
}
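For reference (not shown in the question), m_pRenderTarget would be created from the swap chain's back buffer roughly like this; a minimal sketch using the member names from the snippets below:

    // Sketch: create the render target view from back buffer 0.
    ID3D11Texture2D* backBuffer = nullptr;
    HRESULT hr = swapchain->GetBuffer(0, __uuidof(ID3D11Texture2D),
                                      reinterpret_cast<void**>(&backBuffer));
    if (SUCCEEDED(hr))
    {
        device_->CreateRenderTargetView(backBuffer, nullptr, &m_pRenderTarget);
        backBuffer->Release();
    }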
The render target is bound with the call:
m_d3dDevice.Context()->OMSetRenderTargets(1, &m_pRenderTarget, _pDepthStencilView);
The Present is implemented as:
swap_chain->Present(0, 0);
The shader code is:
cbuffer ConstantBuffer : register(b0)
{
    matrix World;
    matrix View;
    matrix Projection;
    float4 vLightDir[2];
    float4 vLightColor[2];
    float4 vOutputColor;
}

struct VS_INPUT
{
    float4 Pos : POSITION;
    float3 Norm : NORMAL;
};

struct PS_INPUT
{
    float4 Pos : SV_POSITION;
    float3 Norm : TEXCOORD0;
};

PS_INPUT VS(VS_INPUT input)
{
    PS_INPUT output = (PS_INPUT)0;
    output.Pos = mul(input.Pos, World);
    output.Pos = mul(output.Pos, View);
    output.Pos = mul(output.Pos, Projection);
    output.Norm = mul(float4(input.Norm, 1), World).xyz;
    return output;
}

float4 PS(PS_INPUT input) : SV_Target
{
    float4 finalColor = 0;
    // do NdotL lighting for 2 lights
    for (int i = 0; i < 2; i++)
    {
        finalColor += saturate(dot((float3)vLightDir[i], input.Norm) * vLightColor[i]);
    }
    finalColor.a = 1;
    return finalColor;
}

float4 PSSolid(PS_INPUT input) : SV_Target
{
    return vOutputColor;
}
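As an aside, separate from the Present issue: multiplying the normal with w = 1 also applies World's translation to it. A direction is normally transformed with w = 0, something like:

    // Normals are directions, so ignore translation (w = 0). If World contains
    // non-uniform scale, use the inverse-transpose of World instead.
    output.Norm = normalize(mul(float4(input.Norm, 0), World).xyz);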

Invoke ID3D11DeviceContext::OMSetRenderTargets(...) before rendering each frame. A successful Present on a flip-model swap chain unbinds the back buffer from all GPU-writable bind points (exactly what the first warning says), so the render target view has to be re-bound at the start of every frame:
// Add this before each rendering
spImCtx->OMSetRenderTargets(1, spRTV.GetAddressOf(), spZView.Get());
// clear
spImCtx->ClearRenderTargetView(spRTV.Get(), Colors::Black);
spImCtx->ClearDepthStencilView(spZView.Get(), D3D11_CLEAR_DEPTH, 1.0f, 0);
// drawing...
// swap
spSwapChain->Present(1, 0);
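The same applies to DXGI_SWAP_EFFECT_FLIP_DISCARD. Only the legacy blit-model effects such as DXGI_SWAP_EFFECT_DISCARD keep the render target bound across Present, which is why binding it once at initialization used to be enough.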

Related

Update texture from vector directx9

I'm trying to render two textures, one for RGB and another one for the alpha channel, and I blend them together with a shader.
The alpha channel texture doesn't overlap the RGB one properly; it seems to be stretched.
The alpha channel texture changes every frame, and I fill it from an array of uint8_t with the following function:
D3DLOCKED_RECT locked_rect;
HRESULT hr = alpha_tex->LockRect(0, &locked_rect, nullptr, 0);
if (!FAILED(hr)) {
    ret_code = 0;
    BYTE *p_dst = (BYTE *)locked_rect.pBits;
    for (uint y = 0; y < height; y++) {
        memcpy(p_dst, alpha_array, width);
        alpha_array += width;
        p_dst += locked_rect.Pitch;
    }
    alpha_tex->UnlockRect(0);
}
where the alpha_array is a uint8_t array containing the alpha values.
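For the row-by-row copy above to line up, the alpha texture needs a one-byte-per-texel format. A minimal creation sketch, assuming D3DFMT_A8 and a dynamic (lockable) default-pool texture:

    // Hypothetical creation of alpha_tex: one byte per texel, so each row is
    // 'width' bytes and tex2D(...).a in the shader returns the copied value.
    IDirect3DTexture9* alpha_tex = nullptr;
    HRESULT hr = hwctx->d3d9device->CreateTexture(
        width, height, 1,           // single mip level
        D3DUSAGE_DYNAMIC,           // lockable in D3DPOOL_DEFAULT
        D3DFMT_A8, D3DPOOL_DEFAULT,
        &alpha_tex, nullptr);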
To render the texture I use the following function:
hwctx->d3d9device->Clear(0, 0, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER, 0xffeeeeee, 1.0f, 0);
hwctx->d3d9device->BeginScene();
ctx->mFX->SetTechnique(ctx->mhTech);
ctx->texRGB->GetSurfaceLevel(0, &ctx->surfRGB);
hwctx->d3d9device->StretchRect((IDirect3DSurface9*)s->vdrFrame->data[3], NULL, ctx->surfRGB, NULL, D3DTEXF_LINEAR);
ctx->mFX->SetTexture(ctx->mhTexRGB, ctx->texRGB);
ctx->mFX->SetTexture(ctx->mhTexAlpha, ctx->texAlpha);

// Enable alpha blending.
hwctx->d3d9device->SetRenderState(D3DRS_ALPHABLENDENABLE, true);
hwctx->d3d9device->SetRenderState(D3DRS_SRCBLEND, D3DBLEND_SRCALPHA);
hwctx->d3d9device->SetRenderState(D3DRS_DESTBLEND, D3DBLEND_INVSRCALPHA);

UINT numPasses = 0;
ctx->mFX->Begin(&numPasses, 0);
for (UINT i = 0; i < numPasses; ++i) {
    ctx->mFX->BeginPass(i);
    hwctx->d3d9device->DrawPrimitive(D3DPT_TRIANGLEFAN, 0, 2);
    ctx->mFX->EndPass();
}
ctx->mFX->End();

hwctx->d3d9device->EndScene();
hwctx->d3d9device->Present(0, 0, 0, 0);

// Disable alpha blending.
hwctx->d3d9device->SetRenderState(D3DRS_ALPHABLENDENABLE, false);
I combine the textures with the following vertex/pixel shaders:
uniform extern texture gTexRGB;
uniform extern texture gTexAlpha;

sampler TexRGB = sampler_state {
    Texture = <gTexRGB>;
    AddressU = WRAP;
    AddressV = WRAP;
};

sampler TexAlpha = sampler_state {
    Texture = <gTexAlpha>;
    AddressU = WRAP;
    AddressV = WRAP;
};

struct OutputVS {
    float4 posH : POSITION0;
    float2 tex0 : TEXCOORD0;
};

OutputVS TextureBlendingVS(float2 tex0 : TEXCOORD0) {
    // Zero out our output.
    OutputVS outVS = (OutputVS)0;
    // Pass on texture coordinates to be interpolated in rasterization.
    outVS.tex0 = tex0;
    // Done--return the output.
    return outVS;
}

float4 TextureBlendingPS(float2 tex0 : TEXCOORD0) : COLOR {
    float3 rgb = tex2D(TexRGB, tex0).rgb;
    float alpha = tex2D(TexAlpha, tex0).a;
    return float4(rgb, alpha);
}

technique DirLightTexTech {
    pass P0 {
        // Specify the vertex and pixel shader associated with this pass.
        vertexShader = compile vs_2_0 TextureBlendingVS();
        pixelShader = compile ps_2_0 TextureBlendingPS();
    }
}
The size of the textures is the same, but during rendering something goes wrong.
Please help me. :)
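One thing worth checking, purely as an assumption since the texture creation code isn't shown: if the textures are created through D3DX helpers, their dimensions can be rounded up (for example to powers of two on hardware without non-pow2 support), in which case 0..1 UVs cover the padding and the image looks stretched. The allocated size can be compared against the requested one:

    // Diagnostic sketch: compare the actual level size with width/height.
    // If they differ, the UVs for the alpha texture must be scaled accordingly.
    D3DSURFACE_DESC desc;
    ctx->texAlpha->GetLevelDesc(0, &desc);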

DX11 HLSL Secondary Texture Coordinates Lost

Been banging my head up against the wall with this for a while. Despite the fact that I THINK I have a proper vertex format defined with D3D11_INPUT_ELEMENT_DESC, no matter what I do, I can't seem to read my TEXCOORD1 values from this shader. To test this shader, I put random values into my second set of UV coordinates just to see if they were reaching the shader, but to my dismay, I haven't been able to find these random values anywhere. I have also watched the data go into the mapped memory directly, and I am pretty sure the random values were there when they were mapped.
Here is the Shader code:
sampler ImageSampler: register(s0);
Texture2D <float4> ImageTexture: register(t0);
Texture2D <float4> ReflectionTexture: register(t1);
//Texture2D <float4> ReflectionMap: register(t0);

struct PS_IN
{
    float4 InPos: SV_POSITION;
    float2 InTex: TEXCOORD;
    float2 InRef: TEXCOORD1;
    float4 InCol: COLOR0;
};

float4 main(PS_IN input): SV_TARGET
{
    float4 res;
    float4 mul;
    float2 tcRef;
    float4 res1 = ImageTexture.Sample(ImageSampler, input.InTex) * input.InCol;
    float4 res2 = ReflectionTexture.Sample(ImageSampler, input.InRef + input.InTex);
    mul.r = 0.5;
    mul.g = 0.5;
    mul.b = 0.5;
    mul.a = 0.5;
    res = res1 + res2;
    res = res * mul;
    res.a = res1.a;
    res.r = input.InRef.x; // <----- should be filled with random stuff... not working
    res.b = input.InRef.y; // <----- should be filled with random stuff... not working
    return res;
}
Here is my D3D11_INPUT_ELEMENT_DESC... (sorry, it is in Pascal, but I like Pascal)
const
  CanvasVertexLayout: array[0..3] of D3D11_INPUT_ELEMENT_DESC =
    ((SemanticName: 'POSITION';
      SemanticIndex: 0;
      Format: DXGI_FORMAT_R32G32_FLOAT;
      InputSlot: 0;
      AlignedByteOffset: 0;
      InputSlotClass: D3D11_INPUT_PER_VERTEX_DATA;
      InstanceDataStepRate: 0),
     (SemanticName: 'TEXCOORD';
      SemanticIndex: 0;
      Format: DXGI_FORMAT_R32G32_FLOAT;
      InputSlot: 0;
      AlignedByteOffset: 8;
      InputSlotClass: D3D11_INPUT_PER_VERTEX_DATA;
      InstanceDataStepRate: 0),
     (SemanticName: 'TEXCOORD';
      SemanticIndex: 1;
      Format: DXGI_FORMAT_R32G32_FLOAT;
      InputSlot: 0;
      AlignedByteOffset: 16;
      InputSlotClass: D3D11_INPUT_PER_VERTEX_DATA;
      InstanceDataStepRate: 0),
     (SemanticName: 'COLOR';
      SemanticIndex: 0;
      Format: DXGI_FORMAT_R8G8B8A8_UNORM;
      InputSlot: 0;
      AlignedByteOffset: 24;
      InputSlotClass: D3D11_INPUT_PER_VERTEX_DATA;
      InstanceDataStepRate: 0)
    );
And here's the vertex struct:
TVertexEntry = packed record
  X, Y: Single;
  U, V: Single;
  U2, V2: Single;
  Color: LongWord;
end;
Since the COLOR semantic follows the TEXCOORD semantics, my best guess is that the problem is with the shader and not the Pascal code... but since I'm new to this kind of stuff, I'm obviously lost.
Any insight is appreciated.
Answering my own question. Since I'm new to Shaders in general, maybe this will help some other newbs.
I was assuming that all I needed to do was add a second set of UV coordinates to the vertex format and add a D3D11_INPUT_ELEMENT_DESC for it. However, there is also a vertex shader involved (more or less a passthrough), and that vertex shader needs to be aware of the new UV coordinates and pass them through. I was just making a 2D engine, so I didn't think I'd even have to mess with vertex shaders... go figure. So I modified the vertex shader, and this was the result:
void main(
    float2 InPos: POSITION0,
    float2 InTex: TEXCOORD0,
    float2 InTex2: TEXCOORD1, // <-- added
    float4 InCol: COLOR0,
    out float4 OutPos: SV_POSITION,
    out float2 OutTex: TEXCOORD2,
    out float2 OutTex2: TEXCOORD3, // <-- added
    out float4 OutCol: COLOR0)
{
    OutPos = float4(InPos, 0.0, 1.0);
    OutTex = InTex;
    OutCol = InCol;
    OutTex2 = InTex2; // <-- added
}
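One detail to watch (not spelled out above): Direct3D 11 links shader stages by semantic name and index, so the pixel shader's input struct must match the vertex shader's new output semantics (TEXCOORD2/TEXCOORD3 here). Something like:

    // PS input adjusted to the semantics the vertex shader now outputs.
    struct PS_IN
    {
        float4 InPos: SV_POSITION;
        float2 InTex: TEXCOORD2;
        float2 InRef: TEXCOORD3;
        float4 InCol: COLOR0;
    };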

Writing on a UAV in pixel shader

I was doing some experiments with a texture as a UAV in the pixel shader, writing some values to it, but I'm not seeing the effect in the next draw call when I bind the same texture again as an SRV.
Example shader:
RWTexture2D<unsigned int> uav;
Texture2D tex : register(t0);

// Vertex shader
float4 VS( float4 Pos : POSITION ) : SV_POSITION
{
    return Pos;
}

// Pixel shader, draw1: warm up
float4 PS( float4 Pos : SV_POSITION ) : SV_Target
{
    return float4( 1.0f, 1.0f, 0.0f, 1.0f ); // yellow, with alpha = 1
}

// Pixel shader, draw2: write to the texture bound as a UAV
float4 PS1( float4 Pos : SV_POSITION ) : SV_Target
{
    if ((Pos.x % 2) && (Pos.y % 2))
    {
        uav[Pos.xy] = 0xFF000000; // some color
    }
    else
    {
        uav[Pos.xy] = 0x00FF0000; // some color
    }
    return float4( 1.0f, 0.0f, 0.0f, 1.0f );
}

// Pixel shader, draw3: access the texture as an SRV
float4 PS2( float4 Pos : SV_POSITION ) : SV_Target
{
    float4 x = tex[Pos.xy];
    return x;
}
I can provide the app source code if required.
I enabled the debug layer. It was a UAV format mismatch error: in the UAV description I declared DXGI_FORMAT_R8G8B8A8_UNORM as the format, while the shader accesses the element as UINT.
description: D3D11 ERROR: ID3D11DeviceContext::Draw: The resource return type for component 0 declared in the shader code (UINT) is not compatible with the resource type bound to Unordered Access View slot 1 of the Pixel Shader unit (UNORM). This mismatch is invalid if the shader actually uses the view [ EXECUTION ERROR #2097372: DEVICE_UNORDEREDACCESSVIEW_RETURN_TYPE_MISMATCH]
Source code:
D3D11_UNORDERED_ACCESS_VIEW_DESC UAVdesc;
ZeroMemory( &UAVdesc, sizeof(UAVdesc) );
UAVdesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
UAVdesc.ViewDimension = D3D11_UAV_DIMENSION_TEXTURE2D;
UAVdesc.Texture2D.MipSlice = 0;
g_pd3dDevice->CreateUnorderedAccessView( g_pTexture, &UAVdesc, &g_pUAV );
The texture is created as:
D3D11_TEXTURE2D_DESC TextureData;
ZeroMemory( &TextureData, sizeof(TextureData) );
TextureData.ArraySize = 1;
TextureData.Height = height;
TextureData.Width = width;
TextureData.Format = DXGI_FORMAT_R8G8B8A8_TYPELESS;
TextureData.CPUAccessFlags = 0;
TextureData.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET | D3D11_BIND_UNORDERED_ACCESS;
TextureData.MipLevels = 1;
TextureData.MiscFlags = 0;
TextureData.SampleDesc.Count = 1;
TextureData.SampleDesc.Quality = 0;
TextureData.Usage = D3D11_USAGE_DEFAULT;

D3D11_SUBRESOURCE_DATA InitialData;
ZeroMemory( &InitialData, sizeof(InitialData) );
InitialData.pSysMem = pData;
InitialData.SysMemPitch = width * sizeof(UINT);
InitialData.SysMemSlicePitch = width * sizeof(UINT) * height;
g_pd3dDevice->CreateTexture2D( &TextureData, &InitialData, &g_pTexture );
Shader code is already given above.
Fix:
D3D11_UNORDERED_ACCESS_VIEW_DESC UAVdesc;
ZeroMemory( &UAVdesc, sizeof(UAVdesc) );
UAVdesc.Format = DXGI_FORMAT_R32_UINT; // <-- the fix: matches the shader's uint access
UAVdesc.ViewDimension = D3D11_UAV_DIMENSION_TEXTURE2D;
UAVdesc.Texture2D.MipSlice = 0;
g_pd3dDevice->CreateUnorderedAccessView( g_pTexture, &UAVdesc, &g_pUAV );
Confirmed by dumping the texture to a staging resource. Thanks guys.
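An alternative sketch, in case keeping the R8G8B8A8_UNORM view is preferable: declare the UAV with a float4 element type and store normalized colors instead of packed integers (plain typed UAV stores of this format are fine; it is typed UAV loads that are restricted to R32 formats on older hardware):

    RWTexture2D<float4> uav : register(u1); // matches UAV slot 1 from the error

    float4 PS1( float4 Pos : SV_POSITION ) : SV_Target
    {
        // Store normalized colors; the view packs them into R8G8B8A8_UNORM.
        if ((Pos.x % 2) && (Pos.y % 2))
            uav[uint2(Pos.xy)] = float4(0, 0, 0, 1); // some color
        else
            uav[uint2(Pos.xy)] = float4(0, 0, 1, 0); // some color
        return float4( 1.0f, 0.0f, 0.0f, 1.0f );
    }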

Vertex color is not interpolated in the context of ID3DXLine

I've created a standard Win32 DirectX9 window and I'm rendering to it using a custom effect; however, I have a problem where the colours of vertices are not interpolated in the result.
void CRender::Begin()
{
    perf.begin();
    // Capture device state so it can be restored later.
    // We use ID3DXLine::Begin() to fix some bugs that I don't know how to fix.
    mpLine->Begin();
    // Set up the shader.
    shader.Begin( static_cast<float>(FloatTime()) );
}

void CRender::End()
{
    // Reverse order of Begin().
    shader.End();
    mpLine->End();
}
The problem here lies with mpLine->Begin(): without calling it I get a perfectly nice interpolated triangle; with it, the whole triangle has the same colour as the first vertex.
Image for clarification: http://i.imgur.com/vKN4SnE.png
I am using ID3DXLine::Begin() just to set up the device state for me. I use it because I'm rendering in the context of another program (a game) by hooking its EndScene(). The game may leave the device in an unusable state, causing rendering glitches in my overlay; all these problems go away when using ID3DXLine::Begin(), except that vertex colours aren't interpolated any more.
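A hedged guess at the culprit: ID3DXLine::Begin changes a number of render states, and flat shading would produce exactly this symptom (the whole triangle takes the first vertex's colour). It may be worth forcing Gouraud shading back on after the call:

    mpLine->Begin();
    // If ID3DXLine::Begin left D3DRS_SHADEMODE at D3DSHADE_FLAT,
    // switch back to Gouraud so vertex colours interpolate again.
    dev->SetRenderState(D3DRS_SHADEMODE, D3DSHADE_GOURAUD);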
Vertex declaration:
// Create the vertex declaration for use with the shaders.
static const D3DVERTEXELEMENT9 vformat[] =
{
{ 0, 0, D3DDECLTYPE_FLOAT2, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_POSITION, 0 },
{ 0, 8, D3DDECLTYPE_D3DCOLOR, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_COLOR, 0 },
{ 0, 12, D3DDECLTYPE_FLOAT2, D3DDECLMETHOD_DEFAULT, D3DDECLUSAGE_TEXCOORD, 0 },
D3DDECL_END()
};
HRESULT hr = dev->CreateVertexDeclaration( vformat, &decl );
Effect source:
// Vertex shader input
struct VSIN
{
    float2 coord : POSITION;
    float4 color : COLOR0;
    float2 tex : TEXCOORD0;
};

// Vertex shader output / pixel shader input
struct VSOUT
{
    float4 coord : POSITION;
    float4 color : COLOR0;
    float2 tex : TEXCOORD0;
    float2 pos : TEXCOORD1;
};

uniform float2 screen;
uniform float2x4 project;

float4 vstransform( float2 coord, const float2 shift )
{
    float2 final = ( mul( project, float4(coord.x, coord.y, 1, 1) ) + shift ) * 2 / screen;
    return float4( final.x - 1, 1 - final.y, 0, 1 );
}

VSOUT vsfix( VSIN data )
{
    VSOUT vert;
    const float2 shift = { -0.5f, -0.5f };
    vert.coord = vstransform( data.coord, shift );
    vert.color = data.color;
    vert.tex = data.tex;
    vert.pos = vert.coord.xy;
    return vert;
}

float4 diffuse( VSOUT vert ) : COLOR
{
    float4 px = vert.color;
    return px;
}

technique Diffuse
{
    pass p0
    {
        PixelShader = compile ps_2_0 diffuse();
        VertexShader = compile vs_2_0 vsfix();
    }
}

DX10 Skybox Shader

I'm trying to write a skybox shader in DX10 using the following HLSL code:
//////////////////////////////////////////////////////////////////////////
// world matrix for each rendered object
float4x4 g_mWorld;

// single cubemap texture
Texture2D g_tCubeMap;

// basic mirror texture sampler
SamplerState g_sSamplerMirror
{
    Filter = MIN_MAG_MIP_POINT;
    AddressU = MIRROR;
    AddressV = MIRROR;
};

//////////////////////////////////////////////////////////////////////////
// pre-defined vertex formats for vertex input layouts
struct VS_INPUT
{
    float3 Position : POSITION;
};

struct PS_INPUT
{
    float4 SPosition : SV_POSITION;
    float3 UV : TEXCOORD;
};

//////////////////////////////////////////////////////////////////////////
PS_INPUT VS_Default( VS_INPUT Input )
{
    PS_INPUT Output = (PS_INPUT)0;
    Output.SPosition = float4(Input.Position, 1.0f);
    Output.UV = normalize( mul( Output.SPosition, g_mWorld ) ).xyz;
    return Output;
}

//////////////////////////////////////////////////////////////////////////
float4 PS_Default( PS_INPUT Input ) : SV_TARGET0
{
    return float4( texCUBE( g_sSamplerMirror, Input.UV ) );
}

//////////////////////////////////////////////////////////////////////////
technique10 TECH_Default
{
    pass
    {
        SetVertexShader( CompileShader( vs_4_0, VS_Default() ) );
        SetPixelShader( CompileShader( ps_4_0, PS_Default() ) );
        SetGeometryShader( 0 );
    }
}
This gives the error "DX-9 style intrinsics are disabled when not in dx9 compatibility mode." on line 46:
return float4( texCUBE( g_sSamplerMirror, Input.UV ) );
Is there an alternative to texCUBE? How can I fix this without enabling dx9 compatibility mode?
Since you are using Shader Model 4, you should be able to declare a TextureCube object and call its Sample method.
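A minimal sketch of that change, replacing the Texture2D declaration and the texCUBE call above:

    // Declare the cubemap with the Shader Model 4 resource type...
    TextureCube g_tCubeMap;

    // ...and sample it through the existing sampler state.
    float4 PS_Default( PS_INPUT Input ) : SV_TARGET0
    {
        return g_tCubeMap.Sample( g_sSamplerMirror, Input.UV );
    }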
