DX11 triangle list is not rendering at all

I have a list of 4 vertices loaded into a vertex buffer and their indices loaded into an index buffer.
The issue I have is that while the LineList render mode shows the quad just fine (see below), the TriangleList shows nothing (see below).
void BLX::Model::load(std::filesystem::path path, Model* model, ID3D11Device* d3dDevice, ID3D11DeviceContext* d3dContext)
{
// tmp: just making a quad
float num = 0.5f;
std::vector<BLX::Vertex> vertices = {
BLX::Vertex { DirectX::XMFLOAT3(-num, -num, 0.0f), DirectX::XMFLOAT3(0.0f, 0.0f, 0.5f), }, // 0 = TL
BLX::Vertex { DirectX::XMFLOAT3(num, -num, 0.0f), DirectX::XMFLOAT3(0.0f, 0.5f, 0.0f), }, // 1 = TR
BLX::Vertex { DirectX::XMFLOAT3(num, num, 0.0f), DirectX::XMFLOAT3(0.5f, 0.0f, 0.0f), }, // 2 = BR
BLX::Vertex { DirectX::XMFLOAT3(-num, num, 0.0f), DirectX::XMFLOAT3(0.5f, 0.5f, 0.0f), }, // 3 = BL
};
// line list
//std::vector<unsigned int> indices = { 0, 1, 1, 2, 2, 3, 3, 0 };
// triangle list
std::vector<unsigned int> indices = { 0, 1, 3, 3, 1, 2 };
model->vertexCount = vertices.size();
model->indexCount = indices.size();
// Vertex Buffer
D3D11_BUFFER_DESC vbd = {};
vbd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vbd.Usage = D3D11_USAGE_DEFAULT;
vbd.CPUAccessFlags = 0u;
vbd.MiscFlags = 0u;
vbd.ByteWidth = sizeof(BLX::Vertex) * model->vertexCount;
vbd.StructureByteStride = sizeof(BLX::Vertex);
D3D11_SUBRESOURCE_DATA vsd = {};
vsd.pSysMem = &vertices[0];
vsd.SysMemPitch = 0;
vsd.SysMemSlicePitch = 0;
d3dDevice->CreateBuffer(&vbd, &vsd, &model->vertexBuffer);
/// Index Buffer
D3D11_BUFFER_DESC ibd = {};
ibd.Usage = D3D11_USAGE_DEFAULT;
ibd.ByteWidth = sizeof(unsigned int) * model->indexCount;
ibd.BindFlags = D3D11_BIND_INDEX_BUFFER;
ibd.CPUAccessFlags = 0;
ibd.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA isd = {};
isd.pSysMem = &indices[0];
isd.SysMemPitch = 0;
isd.SysMemSlicePitch = 0;
d3dDevice->CreateBuffer(&ibd, &isd, &model->indexBuffer);
// IA = Input Assembly
// pixel shader
D3DReadFileToBlob(L"PixelShader2.cso", &model->pBlob);
d3dDevice->CreatePixelShader(model->pBlob->GetBufferPointer(), model->pBlob->GetBufferSize(), nullptr, &model->pPixelShader);
// Vertex Shader
D3DReadFileToBlob(L"VertexShader2.cso", &model->pBlob);
d3dDevice->CreateVertexShader(model->pBlob->GetBufferPointer(), model->pBlob->GetBufferSize(), nullptr, &model->pVertexShader);
const D3D11_INPUT_ELEMENT_DESC ied[] =
{
// "Position" correcponds to Vertex Shader Semantic Name
// semantic index
// data type format
// Input slot
// Aligned byte offset
// Input slot class
// Instance data step rate
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
{ "COLOR", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
// needs vertex shader blob
d3dDevice->CreateInputLayout(ied, ARRAYSIZE(ied), model->pBlob->GetBufferPointer(), model->pBlob->GetBufferSize(), &model->pInputLayout);
}
void BLX::Model::render(ID3D11Device* d3dDevice, ID3D11DeviceContext* d3dContext, D3D11_VIEWPORT * vp)
{
const UINT stride = sizeof(Vertex);
const UINT offset[] = { 0u, 0u };
d3dContext->IASetVertexBuffers(0u, 1u, vertexBuffer.GetAddressOf(), &stride, &offset[0]);
d3dContext->IASetIndexBuffer(*indexBuffer.GetAddressOf(), DXGI_FORMAT_R32_UINT, offset[1]);
d3dContext->PSSetShader(pPixelShader.Get(), nullptr, 0u);
d3dContext->VSSetShader(pVertexShader.Get(), nullptr, 0u);
d3dContext->IASetInputLayout(pInputLayout.Get());
d3dContext->RSSetViewports(1u, vp);
//d3dContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY::D3D11_PRIMITIVE_TOPOLOGY_LINELIST);
d3dContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY::D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
d3dContext->DrawIndexed(indexCount, 0, 0);
}
When using the LineList index and topology:
When using the TriangleList index and topology:
But when I was doing this:
// tmp: just making a quad
float num = 0.5f;
std::vector<BLX::Vertex> vertices = {
BLX::Vertex { DirectX::XMFLOAT3(0.0f, num, 0.0f), DirectX::XMFLOAT3(0.0f, 0.0f, 0.5f), },
BLX::Vertex { DirectX::XMFLOAT3(num, -num, 0.0f), DirectX::XMFLOAT3(0.0f, 0.5f, 0.0f), },
BLX::Vertex { DirectX::XMFLOAT3(-num, -num, 0.0f), DirectX::XMFLOAT3(0.5f, 0.0f, 0.0f), },
};
// triangle list
std::vector<unsigned int> indices = { 0, 1, 2 };
(with everything else exactly the same) I got this:
I'm just really curious what I'm not seeing or understanding when trying to render two triangles to make up a quad.

Your rectangle's triangles are wound counter-clockwise, so they are culled by the default rasterizer state (since you do not specify one, the rasterizer culls back faces, and by default clockwise triangles count as the front faces).
Your triangle's vertex order was clockwise, so that primitive was not culled.
To solve it, there are two solutions:
Change your index order:
std::vector<unsigned int> indices = { 0, 3, 1, 3, 2, 1 };
Disable culling in the rasterizer state:
First, create a rasterizer description:
D3D11_RASTERIZER_DESC raster_desc;
raster_desc.FillMode = D3D11_FILL_SOLID;
raster_desc.CullMode = D3D11_CULL_NONE;
raster_desc.FrontCounterClockwise = false;
raster_desc.DepthBias = 0;
raster_desc.DepthBiasClamp = 0.0f;
raster_desc.SlopeScaledDepthBias = 0.0f;
raster_desc.DepthClipEnable = true;
raster_desc.ScissorEnable = false;
raster_desc.MultisampleEnable = false;
raster_desc.AntialiasedLineEnable = false;
Then create a rasterizer state using your device:
ID3D11RasterizerState* raster_state;
HRESULT hr = d3dDevice->CreateRasterizerState(&raster_desc, &raster_state);
Before the draw, assign your rasterizer state to your context:
d3dContext->RSSetState(raster_state);
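Since the question's Model class already keeps its D3D objects in ComPtr (it calls GetAddressOf() in render()), the rasterizer state could be stored the same way so its Release() happens automatically. A minimal sketch of that variant; the CreateNoCullState helper name is my own, not part of the answer above:

#include <d3d11.h>
#include <wrl/client.h>

using Microsoft::WRL::ComPtr;

// Hypothetical helper: creates a no-cull rasterizer state and returns it in a
// ComPtr, so the reference is released automatically when the ComPtr goes away.
ComPtr<ID3D11RasterizerState> CreateNoCullState(ID3D11Device* device)
{
    D3D11_RASTERIZER_DESC desc = {};     // zero everything first
    desc.FillMode = D3D11_FILL_SOLID;
    desc.CullMode = D3D11_CULL_NONE;     // draw triangles of either winding
    desc.DepthClipEnable = TRUE;

    ComPtr<ID3D11RasterizerState> state;
    if (FAILED(device->CreateRasterizerState(&desc, &state)))
        return nullptr;                  // caller should check for null
    return state;
}

// Usage (for example in BLX::Model::render, before DrawIndexed):
//   d3dContext->RSSetState(rasterState.Get());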

Your two meshes, the triangle and the quad, have opposite triangle winding order. Here's why that matters.
By default, D3D11 uses CullMode = Back and FrontCounterClockwise = FALSE.
This means it only renders front faces, and a front face is defined as one whose vertices are wound clockwise.
As you can see from the above illustration, the windings are opposite: your single triangle is wound clockwise, while both triangles of your quad are counter-clockwise, so the GPU considers them back faces and skips both.
You have many ways to fix this; any of the following will do.
Reorder vertices in vertex buffer.
Flip triangles in index buffer to { 0, 3, 1, 1, 3, 2 }
Change rasterizer state to disable back face culling, CullMode=D3D11_CULL_NONE
Change rasterizer state to switch front face winding direction, FrontCounterClockwise=TRUE
Change the matrix passed to the vertex shader to include a mirroring component, e.g. a scale by the vector [ -1, 1, 1 ] is a mirror transform that flips X; this flips the winding order of the whole mesh (see the sketch below).
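For that last option, here is a small DirectXMath sketch of such a mirroring matrix. It assumes your vertex shader already multiplies positions by a world matrix supplied in a constant buffer, which the posted shader code does not show, so treat it purely as an illustration:

#include <DirectXMath.h>

using namespace DirectX;

// A scale of -1 on X mirrors the mesh, which also flips the winding order
// of every triangle in it.
XMMATRIX BuildMirroredWorld()
{
    XMMATRIX mirrorX = XMMatrixScaling(-1.0f, 1.0f, 1.0f); // flips X, flips winding
    XMMATRIX world   = XMMatrixIdentity();                 // whatever your model transform is
    return mirrorX * world;
}

// The result would be written into the constant buffer the vertex shader reads,
// usually transposed first with XMMatrixTranspose for HLSL's default matrix layout.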

Related

Color conversion from DXGI_FORMAT_B8G8R8A8_UNORM to NV12 in GPU using DirectX11 pixel shaders

I'm working on code to capture the desktop using Desktop Duplication and encode it to H264 using the Intel hardware MFT. The encoder only accepts NV12 format as input. I have a DXGI_FORMAT_B8G8R8A8_UNORM to NV12 converter (https://github.com/NVIDIA/video-sdk-samples/blob/master/nvEncDXGIOutputDuplicationSample/Preproc.cpp) that works fine and is based on the DirectX VideoProcessor.
The problem is that the VideoProcessor on certain Intel graphics hardware supports conversions only from DXGI_FORMAT_B8G8R8A8_UNORM to YUY2, not to NV12; I have confirmed this by enumerating the supported formats through GetVideoProcessorOutputFormats. The VideoProcessor Blt still succeeds without any errors, but the frames in the output video are slightly pixelated; I can notice it if I look closely.
I guess the VideoProcessor has simply fallen back to the next supported output format (YUY2), and I'm unknowingly feeding it to the encoder, which thinks the input is NV12 as configured. There is no failure or major corruption of frames because there is only a small difference, such as byte order and subsampling, between NV12 and YUY2. Also, I don't have pixelation problems on hardware that supports NV12 conversion.
So I decided to do the color conversion using pixel shaders, based on this code (https://github.com/bavulapati/DXGICaptureDXColorSpaceConversionIntelEncode/blob/master/DXGICaptureDXColorSpaceConversionIntelEncode/DuplicationManager.cpp). I'm able to make the pixel shaders work, and I have also uploaded my code here (https://codeshare.io/5PJjxP) for reference (simplified as much as possible).
Now I'm left with two channels, chroma and luma, as separate ID3D11Texture2D textures, and I'm really confused about how to efficiently pack the two separate channels into one ID3D11Texture2D texture so that I can feed it to the encoder. Is there a way to efficiently pack the Y and UV channels into a single ID3D11Texture2D on the GPU? I'm really tired of CPU-based approaches, because they are costly and don't offer the best possible frame rates; in fact, I'm reluctant to even copy the textures to the CPU. I'm thinking of a way to do it on the GPU without any back-and-forth copies between CPU and GPU.
I have been researching this for quite some time without any progress; any help would be appreciated.
/**
* This method is incomplete. It's just a template of what I want to achieve.
*/
HRESULT CreateNV12TextureFromLumaAndChromaSurface(ID3D11Texture2D** pOutputTexture)
{
HRESULT hr = S_OK;
try
{
//Copying from GPU to CPU. Bad :(
m_pD3D11DeviceContext->CopyResource(m_CPUAccessibleLuminanceSurf, m_LuminanceSurf);
D3D11_MAPPED_SUBRESOURCE resource;
UINT subresource = D3D11CalcSubresource(0, 0, 0);
HRESULT hr = m_pD3D11DeviceContext->Map(m_CPUAccessibleLuminanceSurf, subresource, D3D11_MAP_READ, 0, &resource);
BYTE* sptr = reinterpret_cast<BYTE*>(resource.pData);
BYTE* dptrY = nullptr; // point to the address of Y channel in output surface
//Store Image Pitch
int m_ImagePitch = resource.RowPitch;
int height = GetImageHeight();
int width = GetImageWidth();
for (int i = 0; i < height; i++)
{
memcpy_s(dptrY, m_ImagePitch, sptr, m_ImagePitch);
sptr += m_ImagePitch;
dptrY += m_ImagePitch;
}
m_pD3D11DeviceContext->Unmap(m_CPUAccessibleLuminanceSurf, subresource);
//Copying from GPU to CPU. Bad :(
m_pD3D11DeviceContext->CopyResource(m_CPUAccessibleChrominanceSurf, m_ChrominanceSurf);
hr = m_pD3D11DeviceContext->Map(m_CPUAccessibleChrominanceSurf, subresource, D3D11_MAP_READ, 0, &resource);
sptr = reinterpret_cast<BYTE*>(resource.pData);
BYTE* dptrUV = nullptr; // point to the address of UV channel in output surface
m_ImagePitch = resource.RowPitch;
height /= 2;
width /= 2;
for (int i = 0; i < height; i++)
{
memcpy_s(dptrUV, m_ImagePitch, sptr, m_ImagePitch);
sptr += m_ImagePitch;
dptrUV += m_ImagePitch;
}
m_pD3D11DeviceContext->Unmap(m_CPUAccessibleChrominanceSurf, subresource);
}
catch(HRESULT){}
return hr;
}
Draw NV12:
//
// Draw frame for NV12 texture
//
HRESULT DrawNV12Frame(ID3D11Texture2D* inputTexture)
{
HRESULT hr;
// If window was resized, resize swapchain
if (!m_bIntialized)
{
HRESULT Ret = InitializeNV12Surfaces(inputTexture);
if (!SUCCEEDED(Ret))
{
return Ret;
}
m_bIntialized = true;
}
m_pD3D11DeviceContext->CopyResource(m_ShaderResourceSurf, inputTexture);
D3D11_TEXTURE2D_DESC FrameDesc;
m_ShaderResourceSurf->GetDesc(&FrameDesc);
D3D11_SHADER_RESOURCE_VIEW_DESC ShaderDesc;
ShaderDesc.Format = FrameDesc.Format;
ShaderDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
ShaderDesc.Texture2D.MostDetailedMip = FrameDesc.MipLevels - 1;
ShaderDesc.Texture2D.MipLevels = FrameDesc.MipLevels;
// Create new shader resource view
ID3D11ShaderResourceView* ShaderResource = nullptr;
hr = m_pD3D11Device->CreateShaderResourceView(m_ShaderResourceSurf, &ShaderDesc, &ShaderResource);
IF_FAILED_THROW(hr);
m_pD3D11DeviceContext->PSSetShaderResources(0, 1, &ShaderResource);
// Set resources
m_pD3D11DeviceContext->OMSetRenderTargets(1, &m_pLumaRT, nullptr);
m_pD3D11DeviceContext->PSSetShader(m_pPixelShaderLuma, nullptr, 0);
m_pD3D11DeviceContext->RSSetViewports(1, &m_VPLuminance);
// Draw textured quad onto render target
m_pD3D11DeviceContext->Draw(NUMVERTICES, 0);
m_pD3D11DeviceContext->OMSetRenderTargets(1, &m_pChromaRT, nullptr);
m_pD3D11DeviceContext->PSSetShader(m_pPixelShaderChroma, nullptr, 0);
m_pD3D11DeviceContext->RSSetViewports(1, &m_VPChrominance);
// Draw textured quad onto render target
m_pD3D11DeviceContext->Draw(NUMVERTICES, 0);
// Release shader resource
ShaderResource->Release();
ShaderResource = nullptr;
return S_OK;
}
Init shaders:
void SetViewPort(D3D11_VIEWPORT* VP, UINT Width, UINT Height)
{
VP->Width = static_cast<FLOAT>(Width);
VP->Height = static_cast<FLOAT>(Height);
VP->MinDepth = 0.0f;
VP->MaxDepth = 1.0f;
VP->TopLeftX = 0;
VP->TopLeftY = 0;
}
HRESULT MakeRTV(ID3D11RenderTargetView** pRTV, ID3D11Texture2D* pSurf)
{
if (*pRTV)
{
(*pRTV)->Release();
*pRTV = nullptr;
}
// Create a render target view
HRESULT hr = m_pD3D11Device->CreateRenderTargetView(pSurf, nullptr, pRTV);
IF_FAILED_THROW(hr);
return S_OK;
}
HRESULT InitializeNV12Surfaces(ID3D11Texture2D* inputTexture)
{
ReleaseSurfaces();
D3D11_TEXTURE2D_DESC lOutputDuplDesc;
inputTexture->GetDesc(&lOutputDuplDesc);
// Create shared texture for all duplication threads to draw into
D3D11_TEXTURE2D_DESC DeskTexD;
RtlZeroMemory(&DeskTexD, sizeof(D3D11_TEXTURE2D_DESC));
DeskTexD.Width = lOutputDuplDesc.Width;
DeskTexD.Height = lOutputDuplDesc.Height;
DeskTexD.MipLevels = 1;
DeskTexD.ArraySize = 1;
DeskTexD.Format = lOutputDuplDesc.Format;
DeskTexD.SampleDesc.Count = 1;
DeskTexD.Usage = D3D11_USAGE_DEFAULT;
DeskTexD.BindFlags = D3D11_BIND_SHADER_RESOURCE;
HRESULT hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, nullptr, &m_ShaderResourceSurf);
IF_FAILED_THROW(hr);
DeskTexD.Format = DXGI_FORMAT_R8_UNORM;
DeskTexD.BindFlags = D3D11_BIND_RENDER_TARGET;
hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, nullptr, &m_LuminanceSurf);
IF_FAILED_THROW(hr);
DeskTexD.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
DeskTexD.Usage = D3D11_USAGE_STAGING;
DeskTexD.BindFlags = 0;
hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, NULL, &m_CPUAccessibleLuminanceSurf);
IF_FAILED_THROW(hr);
SetViewPort(&m_VPLuminance, DeskTexD.Width, DeskTexD.Height);
HRESULT Ret = MakeRTV(&m_pLumaRT, m_LuminanceSurf);
if (!SUCCEEDED(Ret))
return Ret;
DeskTexD.Width = lOutputDuplDesc.Width / 2;
DeskTexD.Height = lOutputDuplDesc.Height / 2;
DeskTexD.Format = DXGI_FORMAT_R8G8_UNORM;
DeskTexD.Usage = D3D11_USAGE_DEFAULT;
DeskTexD.CPUAccessFlags = 0;
DeskTexD.BindFlags = D3D11_BIND_RENDER_TARGET;
hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, nullptr, &m_ChrominanceSurf);
IF_FAILED_THROW(hr);
DeskTexD.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
DeskTexD.Usage = D3D11_USAGE_STAGING;
DeskTexD.BindFlags = 0;
hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, NULL, &m_CPUAccessibleChrominanceSurf);
IF_FAILED_THROW(hr);
SetViewPort(&m_VPChrominance, DeskTexD.Width, DeskTexD.Height);
return MakeRTV(&m_pChromaRT, m_ChrominanceSurf);
}
HRESULT InitVertexShader(ID3D11VertexShader** ppID3D11VertexShader)
{
HRESULT hr = S_OK;
UINT Size = ARRAYSIZE(g_VS);
try
{
IF_FAILED_THROW(m_pD3D11Device->CreateVertexShader(g_VS, Size, NULL, ppID3D11VertexShader));;
m_pD3D11DeviceContext->VSSetShader(m_pVertexShader, nullptr, 0);
// Vertices for drawing whole texture
VERTEX Vertices[NUMVERTICES] =
{
{ XMFLOAT3(-1.0f, -1.0f, 0), XMFLOAT2(0.0f, 1.0f) },
{ XMFLOAT3(-1.0f, 1.0f, 0), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(1.0f, -1.0f, 0), XMFLOAT2(1.0f, 1.0f) },
{ XMFLOAT3(1.0f, -1.0f, 0), XMFLOAT2(1.0f, 1.0f) },
{ XMFLOAT3(-1.0f, 1.0f, 0), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(1.0f, 1.0f, 0), XMFLOAT2(1.0f, 0.0f) },
};
UINT Stride = sizeof(VERTEX);
UINT Offset = 0;
D3D11_BUFFER_DESC BufferDesc;
RtlZeroMemory(&BufferDesc, sizeof(BufferDesc));
BufferDesc.Usage = D3D11_USAGE_DEFAULT;
BufferDesc.ByteWidth = sizeof(VERTEX) * NUMVERTICES;
BufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
BufferDesc.CPUAccessFlags = 0;
D3D11_SUBRESOURCE_DATA InitData;
RtlZeroMemory(&InitData, sizeof(InitData));
InitData.pSysMem = Vertices;
// Create vertex buffer
IF_FAILED_THROW(m_pD3D11Device->CreateBuffer(&BufferDesc, &InitData, &m_VertexBuffer));
m_pD3D11DeviceContext->IASetVertexBuffers(0, 1, &m_VertexBuffer, &Stride, &Offset);
m_pD3D11DeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
D3D11_INPUT_ELEMENT_DESC Layout[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 }
};
UINT NumElements = ARRAYSIZE(Layout);
hr = m_pD3D11Device->CreateInputLayout(Layout, NumElements, g_VS, Size, &m_pVertexLayout);
m_pD3D11DeviceContext->IASetInputLayout(m_pVertexLayout);
}
catch (HRESULT) {}
return hr;
}
HRESULT InitPixelShaders()
{
HRESULT hr = S_OK;
// Refer https://codeshare.io/5PJjxP for g_PS_Y & g_PS_UV blobs
try
{
UINT Size = ARRAYSIZE(g_PS_Y);
hr = m_pD3D11Device->CreatePixelShader(g_PS_Y, Size, nullptr, &m_pPixelShaderChroma);
IF_FAILED_THROW(hr);
Size = ARRAYSIZE(g_PS_UV);
hr = m_pD3D11Device->CreatePixelShader(g_PS_UV, Size, nullptr, &m_pPixelShaderLuma);
IF_FAILED_THROW(hr);
}
catch (HRESULT) {}
return hr;
}
I am experimenting with this RGBA to NV12 conversion on the GPU only, using DirectX11.
This is a good challenge. I'm not familiar with DirectX11, so this is my first experiment.
Check this project for updates: D3D11ShaderNV12
In my current implementation (which may not be the last), here is what I do:
Step 1: use a DXGI_FORMAT_B8G8R8A8_UNORM texture as input
Step 2: make a 1st pass shader to get 3 textures (Y: Luma, U: ChromaCb and V: ChromaCr): see YCbCrPS2.hlsl
Step 3: Y is DXGI_FORMAT_R8_UNORM, and is ready for the final NV12 texture
Step 4: UV needs to be downsampled in a 2nd pass shader: see ScreenPS2.hlsl (using linear filtering)
Step 5: a third pass shader to sample the Y texture
Step 6: a fourth pass shader to sample the UV texture using a shift texture (I think another technique could be used)
My final texture is not DXGI_FORMAT_NV12, but a similar DXGI_FORMAT_R8_UNORM texture. My computer runs Windows 7, so DXGI_FORMAT_NV12 is not handled. I will try later on another computer.
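On newer systems where DXGI_FORMAT_NV12 is available and usable as a render target, one possible way to avoid the extra copies is to render both passes directly into a single NV12 texture through one render-target view per plane. This is only a sketch under that assumption, not part of the project above; the CheckFormatSupport call decides whether the path is usable at all:

#include <d3d11.h>

// Sketch: create an NV12 texture plus per-plane render target views, assuming the
// device/driver reports render-target support for NV12 (not the case on Windows 7).
HRESULT CreateNV12RenderTarget(ID3D11Device* device, UINT width, UINT height,
                               ID3D11Texture2D** outTex,
                               ID3D11RenderTargetView** outLumaRTV,
                               ID3D11RenderTargetView** outChromaRTV)
{
    UINT support = 0;
    HRESULT hr = device->CheckFormatSupport(DXGI_FORMAT_NV12, &support);
    if (FAILED(hr) || !(support & D3D11_FORMAT_SUPPORT_RENDER_TARGET))
        return E_NOTIMPL; // fall back to the two-texture path

    D3D11_TEXTURE2D_DESC desc = {};
    desc.Width = width;            // NV12 requires even width and height
    desc.Height = height;
    desc.MipLevels = 1;
    desc.ArraySize = 1;
    desc.Format = DXGI_FORMAT_NV12;
    desc.SampleDesc.Count = 1;
    desc.Usage = D3D11_USAGE_DEFAULT;
    desc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
    hr = device->CreateTexture2D(&desc, nullptr, outTex);
    if (FAILED(hr)) return hr;

    // An R8 view addresses the Y plane, an R8G8 view the interleaved UV plane.
    D3D11_RENDER_TARGET_VIEW_DESC rtv = {};
    rtv.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
    rtv.Texture2D.MipSlice = 0;

    rtv.Format = DXGI_FORMAT_R8_UNORM;
    hr = device->CreateRenderTargetView(*outTex, &rtv, outLumaRTV);
    if (FAILED(hr)) return hr;

    rtv.Format = DXGI_FORMAT_R8G8_UNORM;
    return device->CreateRenderTargetView(*outTex, &rtv, outChromaRTV);
}

The luma pass would then target the R8 view with a full-size viewport and the chroma pass the R8G8 view with a half-size viewport, so the single NV12 texture could be handed to the encoder without a CPU round trip.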
The process with pictures:

Strange errors during drawing hollow circles in the 3D space

I am trying to draw two hollow circles that surround a cube located at the (0, 0, 0) position.
So far I've implemented the cube and the two circles; here is what I get.
There are two strange things happening here.
One is that I only want to draw the circles, but I can see lines radiating from the origin.
The other is the interpolated colors, even though I set just one color to be passed to the fragment shader.
Here you can clearly see those lines with the interpolated color...
Here are my vertex shader and fragment shader code:
"use strict";
const loc_aPosition = 1;
const loc_aColor = 2;
const loc_UVCoord = 3;
const VSHADER_SOURCE =
`#version 300 es
layout(location=${loc_aPosition}) in vec4 aPosition;
layout(location=${loc_aColor}) in vec4 aColor;
layout(location=${loc_UVCoord}) in vec2 UVCoord;
out vec4 vColor;
out vec2 vUVCoord;
uniform mat4 uMVP;
void main()
{
gl_Position = uMVP * aPosition;
vColor = aColor;
vUVCoord = UVCoord;
}`;
const FSHADER_SOURCE =
`#version 300 es
precision mediump float;
in vec4 vColor;
out vec4 fColor;
void main()
{
fColor = vColor;
}`;
And here are the initialize functions for the two circles; the only difference between them is the target plane.
function init_equator(gl)
{
let vertices = []; // for the vertices
let color = [1, 0, 0]; // red color
for(var i = 0; i <= 360; i+=10)
{
let j = i * Math.PI/180;
let vert = [R * Math.cos(j), 0, R * Math.sin(j)]; // drawing a circle at the XZ plane since it has to be an equator for the cube...
vertices.push( vert[0], vert[1], vert[2] ); // push the vertices
vertices.push( color[0], color[1], color[2]); // set the color
}
const SZ = vertices.BYTES_PER_ELEMENT;
let vao = gl.createVertexArray();
gl.bindVertexArray(vao);
let vbo = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vbo);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(vertices), gl.STATIC_DRAW);
gl.vertexAttribPointer(loc_aPosition, 3, gl.FLOAT, false, SZ * 6, 0); // stride is 6, 3 for positions and 3 for the color
gl.enableVertexAttribArray(loc_aPosition);
gl.vertexAttribPointer(loc_aColor, 3, gl.FLOAT, false, SZ * 6, SZ * 3); // stride is 6, offset is this is because 3 color elements are located after 3 position elements..
gl.enableVertexAttribArray(loc_aColor);
gl.bindVertexArray(null);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
return { vao, n : vertices.length / 3 }; // since it has three coordinates so devide by 3
}
function init_latitude(gl)
{
let vertices = []; // for the vertices
let color = [1, 0, 0]; // supposed to be the red
for(var i = 0; i <= 360; i+=10)
{
let j = i * Math.PI/180;
let vert = [0, R * Math.cos(j), R * Math.sin(j)]; // drawing a circle on the YZ plane
vertices.push( vert[0], vert[1], vert[2] );
vertices.push( color[0], color[1], color[2]);
}
const SZ = vertices.BYTES_PER_ELEMENT;
let vao = gl.createVertexArray();
gl.bindVertexArray(vao);
let vbo = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, vbo);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(vertices), gl.STATIC_DRAW);
gl.vertexAttribPointer(loc_aPosition, 3, gl.FLOAT, false, SZ * 6, 0); // stride is 6, 3 for positions and 3 for the color
gl.enableVertexAttribArray(loc_aPosition);
gl.vertexAttribPointer(loc_aColor, 3, gl.FLOAT, false, SZ * 6, SZ * 3); // stride is 6, offset is this is because 3 color elements are located after 3 position elements..
gl.enableVertexAttribArray(loc_aColor);
gl.bindVertexArray(null);
gl.bindBuffer(gl.ARRAY_BUFFER, null);
return { vao, n : vertices.length / 3 }; // since it has three coordinates so devide by 3
}
I referred to these drawing functions from here: drawing circle
In the main function I call the draw functions like this:
........
MVP.setOrtho(LEFT, RIGHT, BOTTOM, TOP, NEAR, FAR); // setting MVP matrix to orthographic mode
MVP.lookAt(FIXED_X, FIXED_Y, FIXED_Z, 0,0,0, 0,1,0); // Eye position x, y, z Look at position 0, 0, 0 Up vector 0, 1, 0
gl.uniformMatrix4fv(loc_MVP, false, MVP.elements);
gl.bindVertexArray(cube.vao);
gl.drawElements(gl.TRIANGLES, cube.n, gl.UNSIGNED_BYTE, 0)
gl.bindVertexArray(null);
gl.bindVertexArray(equator.vao);
gl.drawArrays(gl.LINE_LOOP, 0, equator.n);
gl.bindVertexArray(null);
gl.bindVertexArray(latitudeCircle.vao);
gl.drawArrays(gl.LINE_LOOP, 0, latitudeCircle.n);
gl.bindVertexArray(null);
I have no idea why the lines are radiating from the origin, or why the colors are mixed...
Could somebody help me?
This line, which appears twice in the code you posted,
const SZ = vertices.BYTES_PER_ELEMENT;
is the problem: SZ will be undefined, because vertices is a plain JavaScript array, not a typed array like Float32Array, so it has no BYTES_PER_ELEMENT property. After that, every calculation with SZ will be 0 or NaN.
In other words these lines
gl.vertexAttribPointer(loc_aPosition, 3, gl.FLOAT, false, SZ * 6, 0);
gl.vertexAttribPointer(loc_aColor, 3, gl.FLOAT, false, SZ * 6, SZ * 3);
will effectively be
gl.vertexAttribPointer(loc_aPosition, 3, gl.FLOAT, false, 0, 0);
gl.vertexAttribPointer(loc_aColor, 3, gl.FLOAT, false, 0, 0);
Which means every other position is a color and every other color is a position, which explains why the lines go to the center and why the colors are interpolated.
Note that if you had stepped through the code in the debugger you'd probably have seen this issue, so it would be good to learn how to use the debugger.
Also, FYI, unrelated to your issue: you don't need to call gl.bindVertexArray twice in a row, once with null and once with the next thing you want to draw with.
This
gl.bindVertexArray(cube.vao);
gl.drawElements(gl.TRIANGLES, cube.n, gl.UNSIGNED_BYTE, 0)
gl.bindVertexArray(null);
gl.bindVertexArray(equator.vao);
gl.drawArrays(gl.LINE_LOOP, 0, equator.n);
gl.bindVertexArray(null);
gl.bindVertexArray(latitudeCircle.vao);
gl.drawArrays(gl.LINE_LOOP, 0, latitudeCircle.n);
gl.bindVertexArray(null);
can just be this
gl.bindVertexArray(cube.vao);
gl.drawElements(gl.TRIANGLES, cube.n, gl.UNSIGNED_BYTE, 0)
gl.bindVertexArray(equator.vao);
gl.drawArrays(gl.LINE_LOOP, 0, equator.n);
gl.bindVertexArray(latitudeCircle.vao);
gl.drawArrays(gl.LINE_LOOP, 0, latitudeCircle.n);
gl.bindVertexArray(null); // this is also not technically needed
Also also, you can use the spread operator.
This
vertices.push( vert[0], vert[1], vert[2] ); // push the vertices
vertices.push( color[0], color[1], color[2]); // set the color
can be this
vertices.push( ...vert ); // push the vertices
vertices.push( ...color ); // set the color
Also you might find these tutorials useful.

DirectX 11 Render to Texture Issue

In my code, I am making 2 rectangles:
Rectangle1: rendered to a texture.
Rectangle2: rendered to the back buffer.
I am trying to do programmable blending, so I need to access the destination pixel in the pixel shader.
In my code,
I am creating a texture like below:
d3d11Device->CreateTexture2D(&textureDesc, NULL, &renderTargetTextureMap);
After this I create a render target view of the texture.
d3d11Device->CreateRenderTargetView(renderTargetTextureMap, &renderTargetViewDesc, &renderTargetViewMap);
After this I declare the vertex and pixel shaders.
Then in my draw call,
I do the following:
float bgColor[4] = {0.0f, 0.0f,0.0f, 1.0f };
d3d11DevCon->ClearRenderTargetView(renderTargetViewMap, bgColor);
float bgColor2[4] = { 0.0f, 1.0f, 0.0f, 1.0f };
////////////////////////////////////////////////Buffer 1///////////////////////////////////////////////////////////////
//Set the vertex buffer
UINT stride = sizeof(Vertex);
UINT offset = 0;
///////////////////////////////////////////////////////////Buffer 2//////////////////////////////////////////////////////////////////
d3d11DevCon->IASetIndexBuffer(d2dIndexBuffer, DXGI_FORMAT_R32_UINT, 0);
d3d11DevCon->IASetVertexBuffers(0, 1, &triangleVertBuffer, &stride, &offset);
////Draw the triangle
d3d11DevCon->DrawIndexed(6, 0, 0);
I assume that since I have set my render target view to renderTargetViewMap, my draw call will render to the texture only.
Now I render to my back buffer:
////////////////////////////////////////////
d3d11DevCon->OMSetRenderTargets(1, &renderTargetView, NULL);
d3d11DevCon->PSSetShaderResources(0, 1, &shaderResourceViewMap);
//d3d11DevCon->ClearRenderTargetView(renderTargetView, bgColor2);
d3d11DevCon->IASetIndexBuffer(d2dIndexBuffer2, DXGI_FORMAT_R32_UINT, 0);
d3d11DevCon->IASetVertexBuffers(0, 1, &triangleVertBuffer2, &stride, &offset);
////Draw the triangle
d3d11DevCon->DrawIndexed(6, 0, 0);
//Present the backbuffer to the screen
SwapChain->Present(0, 0);
So this is how my rendering happens.
Issue faced:
In my shader file,
VS_OUTPUT VS(float4 inPos : POSITION, float4 inColor : COLOR)
{
VS_OUTPUT output;
output.Pos = inPos;
output.Color = inColor;
return output;
}
float4 PS(VS_OUTPUT input) : SV_TARGET
{
float2 temp;
temp = input.Pos;
float4 diffuse = ObjTexture.Sample(ObjSamplerState,0.5+0.5*temp);
return input.Color + diffuse;
}
Here diffuse comes out equal to the bgColor that I set when rendering to the texture:
float bgColor[4] = {0.0f, 0.0f,0.0f, 1.0f };
d3d11DevCon->ClearRenderTargetView(renderTargetViewMap, bgColor);
I have also drawn a rectangle on it, but I am not able to access those pixels.
How can I access the pixels of the rectangle that I drew when rendering to the texture?
This is Issue Image
Desired Result
Shader File: Effect.fx
struct VS_OUTPUT
{
float4 Pos : SV_POSITION;
float4 Color : COLOR;
};
Texture2D ObjTexture;
SamplerState ObjSamplerState;
VS_OUTPUT VS(float4 inPos : POSITION, float4 inColor : COLOR)
{
VS_OUTPUT output;
output.Pos = inPos;
output.Color = inColor;
return output;
}
float4 PS(VS_OUTPUT input) : SV_TARGET
{
float2 temp;
temp = input.Pos;
float4 diffuse = ObjTexture.Sample(ObjSamplerState,0.5+0.5*temp);
return input.Color + diffuse;
}
Edit - 1:
With my latest change in the code, I am able to blend rectangle 2 with rectangle 1, but the issue I am facing is that when I blend, the major part of my rectangle 2 changes to yellow (red + green); only on the edges can I see the actual green color.
Modified code:
main.cpp
//Include and link appropriate libraries and headers//
#pragma comment(lib, "d3d11.lib")
#pragma comment(lib, "d3dx11.lib")
#pragma comment(lib, "d3dx10.lib")
#include <windows.h>
#include <d3d11.h>
#include <d3dx11.h>
#include <d3dx10.h>
#include <xnamath.h>
//Global Declarations - Interfaces//
IDXGISwapChain* SwapChain;
ID3D11Device* d3d11Device;
ID3D11DeviceContext* d3d11DevCon;
ID3D11RenderTargetView* renderTargetView;
ID3D11Buffer* triangleVertBuffer;
ID3D11Buffer* triangleVertBuffer2;
ID3D11VertexShader* VS;
ID3D11PixelShader* PS;
ID3D10Blob* VS_Buffer;
ID3D10Blob* PS_Buffer;
ID3D11InputLayout* vertLayout;
XMMATRIX mapView;
XMMATRIX mapProjection;
XMVECTOR DefaultForward = XMVectorSet(0.0f, 0.0f, 1.0f, 0.0f);
//Global Declarations - Others//
LPCTSTR WndClassName = L"firstwindow";
HWND hwnd = NULL;
HRESULT hr;
const int Width = 800;
const int Height = 600;
bool InitializeDirect3d11App(HINSTANCE hInstance)
{
//Describe our Buffer
DXGI_MODE_DESC bufferDesc;
ZeroMemory(&bufferDesc, sizeof(DXGI_MODE_DESC));
bufferDesc.Width = Width;
bufferDesc.Height = Height;
bufferDesc.RefreshRate.Numerator = 60;
bufferDesc.RefreshRate.Denominator = 1;
bufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
bufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
bufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
//Describe our SwapChain
DXGI_SWAP_CHAIN_DESC swapChainDesc;
ZeroMemory(&swapChainDesc, sizeof(DXGI_SWAP_CHAIN_DESC));
swapChainDesc.BufferDesc = bufferDesc;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.BufferCount = 1;
swapChainDesc.OutputWindow = hwnd;
swapChainDesc.Windowed = TRUE;
swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
//Create our SwapChain
hr = D3D11CreateDeviceAndSwapChain(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, NULL, NULL, NULL,
D3D11_SDK_VERSION, &swapChainDesc, &SwapChain, &d3d11Device, NULL, &d3d11DevCon);
//Create our BackBuffer
ID3D11Texture2D* BackBuffer;
hr = SwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&BackBuffer);
//Create our Render Target
hr = d3d11Device->CreateRenderTargetView(BackBuffer, NULL, &renderTargetView);
BackBuffer->Release();
////////////////////////////////////////////////////////////////////////EXPERIMENT AREA//////////////////////////////////////////////////////////////////////////////////////
ZeroMemory(&textureDesc, sizeof(textureDesc));
// Setup the texture description.
// We will have our map be a square
// We will need to have this texture bound as a render target AND a shader resource
textureDesc.Width = Width/ 3.9729999999999999999999999999999;
textureDesc.Height = Height/3.9729999999999999999999999999999;
textureDesc.MipLevels = 1;
textureDesc.ArraySize = 1;
textureDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
textureDesc.SampleDesc.Count = 1;
textureDesc.SampleDesc.Quality = 0;
textureDesc.Usage = D3D11_USAGE_DEFAULT;
textureDesc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
textureDesc.CPUAccessFlags = 0;
textureDesc.MiscFlags = 0;
d3d11Device->CreateTexture2D(&textureDesc, NULL, &renderTargetTextureMap);
// Setup the description of the render target view.
renderTargetViewDesc.Format = textureDesc.Format;
renderTargetViewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
renderTargetViewDesc.Texture2D.MipSlice = 0;
// Create the render target view.
d3d11Device->CreateRenderTargetView(renderTargetTextureMap, &renderTargetViewDesc, &renderTargetViewMap);
/////////////////////// Map's Shader Resource View
// Setup the description of the shader resource view.
shaderResourceViewDesc.Format = textureDesc.Format;
shaderResourceViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
shaderResourceViewDesc.Texture2D.MostDetailedMip = 0;
shaderResourceViewDesc.Texture2D.MipLevels = 1;
// Create the shader resource view.
d3d11Device->CreateShaderResourceView(renderTargetTextureMap, &shaderResourceViewDesc, &shaderResourceViewMap);
d3d11DevCon->OMSetRenderTargets(1, &renderTargetViewMap, NULL);
d3d11DevCon->PSSetShaderResources(0, 1, &shaderResourceViewMap);
return true;
}
void CleanUp()
{
//Release the COM Objects we created
SwapChain->Release();
d3d11Device->Release();
d3d11DevCon->Release();
renderTargetView->Release();
triangleVertBuffer->Release();
VS->Release();
PS->Release();
VS_Buffer->Release();
PS_Buffer->Release();
vertLayout->Release();
}
bool InitScene()
{
//Compile Shaders from shader file
hr = D3DX11CompileFromFile(L"Effect.fx", 0, 0, "VS", "vs_5_0", 0, 0, 0, &VS_Buffer, 0, 0);
hr = D3DX11CompileFromFile(L"Effect.fx", 0, 0, "PS", "ps_5_0", 0, 0, 0, &PS_Buffer, 0, 0);
//Create the Shader Objects
hr = d3d11Device->CreateVertexShader(VS_Buffer->GetBufferPointer(), VS_Buffer->GetBufferSize(), NULL, &VS);
hr = d3d11Device->CreatePixelShader(PS_Buffer->GetBufferPointer(), PS_Buffer->GetBufferSize(), NULL, &PS);
//Set Vertex and Pixel Shaders
d3d11DevCon->VSSetShader(VS, 0, 0);
d3d11DevCon->PSSetShader(PS, 0, 0);
//Create the Input Layout
hr = d3d11Device->CreateInputLayout(layout, numElements, VS_Buffer->GetBufferPointer(),
VS_Buffer->GetBufferSize(), &vertLayout);
//Set the Input Layout
d3d11DevCon->IASetInputLayout(vertLayout);
//Set Primitive Topology
d3d11DevCon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
//Create the Viewport
D3D11_VIEWPORT viewport;
ZeroMemory(&viewport, sizeof(D3D11_VIEWPORT));
viewport.TopLeftX = 0;
viewport.TopLeftY = 0;
viewport.Width = 800;
viewport.Height = 600;
//Set the Viewport
d3d11DevCon->RSSetViewports(1, &viewport);
////////////////***********************First Texture Vertex Buffer *******************************/////////////////////////////
//Create the vertex buffer
Vertex v[] =
{
Vertex(-0.5f, -0.5f, 0.0f, 1.0f,0.0f,0.0f, 1.0f),
Vertex(-0.5f, 0.5f, 0.0f, 1.0f,0.0f,0.0f, 1.0f),
Vertex(0.5f, 0.5f, 0.0f, 1.0f, 0.0f,0.0f, 1.0f),
Vertex(0.5f, -0.5f, 0.0f, 1.0f,0.0f, 0.0f, 1.0f),
};
DWORD indices[] = {
// Front Face
0, 1, 3,
1, 2, 3,
};
D3D11_BUFFER_DESC indexBufferDesc;
ZeroMemory(&indexBufferDesc, sizeof(indexBufferDesc));
indexBufferDesc.Usage = D3D11_USAGE_DEFAULT;
indexBufferDesc.ByteWidth = sizeof(DWORD) * 2 * 3;
indexBufferDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
indexBufferDesc.CPUAccessFlags = 0;
indexBufferDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA iinitData;
iinitData.pSysMem = indices;
d3d11Device->CreateBuffer(&indexBufferDesc, &iinitData, &d2dIndexBuffer);
D3D11_BUFFER_DESC vertexBufferDesc;
ZeroMemory(&vertexBufferDesc, sizeof(vertexBufferDesc));
vertexBufferDesc.Usage = D3D11_USAGE_DEFAULT;
vertexBufferDesc.ByteWidth = sizeof(Vertex) * 4;
vertexBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertexBufferDesc.CPUAccessFlags = 0;
vertexBufferDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA vertexBufferData;
ZeroMemory(&vertexBufferData, sizeof(vertexBufferData));
vertexBufferData.pSysMem = v;
hr = d3d11Device->CreateBuffer(&vertexBufferDesc, &vertexBufferData, &triangleVertBuffer);
////////////////////////////////////////////////////// Second Vertex.
Vertex v2[] = {
// positions // colors // texture coords
Vertex(1.0f, 1.0f, 0.0f,0.0f,1.0f,0.0f,1.0f), // top right
Vertex(1.0f,0.0f, 0.0f,0.0f,1.0f,0.0f,1.0f), // bottom right
Vertex(0.0f,0.0f, 0.0f,0.0f,1.0f,0.0f,1.0f), // bottom left
Vertex(0.0f, 1.0, 0.0f,0.0f, 1.0f,0.0f,1.0f) // top left
};
DWORD indices2[] = {
// Front Face
0, 1, 2,
0, 2, 3,
};
D3D11_BUFFER_DESC indexBufferDesc2;
ZeroMemory(&indexBufferDesc2, sizeof(indexBufferDesc2));
indexBufferDesc2.Usage = D3D11_USAGE_DEFAULT;
indexBufferDesc2.ByteWidth = sizeof(DWORD) * 2 * 3;
indexBufferDesc2.BindFlags = D3D11_BIND_INDEX_BUFFER;
indexBufferDesc2.CPUAccessFlags = 0;
indexBufferDesc2.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA iinitData2;
iinitData2.pSysMem = indices2;
d3d11Device->CreateBuffer(&indexBufferDesc2, &iinitData2, &d2dIndexBuffer2);
D3D11_BUFFER_DESC vertexBufferDesc2;
ZeroMemory(&vertexBufferDesc2, sizeof(vertexBufferDesc2));
vertexBufferDesc2.Usage = D3D11_USAGE_DEFAULT;
vertexBufferDesc2.ByteWidth = sizeof(Vertex) * 4;
vertexBufferDesc2.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertexBufferDesc2.CPUAccessFlags = 0;
vertexBufferDesc2.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA vertexBufferData2;
ZeroMemory(&vertexBufferData2, sizeof(vertexBufferData2));
vertexBufferData2.pSysMem = v2;
hr = d3d11Device->CreateBuffer(&vertexBufferDesc2, &vertexBufferData2, &triangleVertBuffer2);
UINT stride = sizeof(Vertex);
UINT offset = 0;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
return true;
}
Shader File:
struct VS_OUTPUT
{
float4 Pos : SV_POSITION;
float4 Color : COLOR;
};
Texture2D ObjTexture;
SamplerState ObjSamplerState;
VS_OUTPUT VS(float4 inPos : POSITION, float4 inColor : COLOR)
{
VS_OUTPUT output;
output.Pos = inPos;
output.Color = inColor;
return output;
}
float4 PS(VS_OUTPUT input) : SV_TARGET
{
float2 temp;
temp = input.Pos;
float4 diffuse = ObjTexture.Sample(ObjSamplerState,0.5*temp);
return input.Color + diffuse ;
}
return DefWindowProc(hwnd,
msg,
wParam,
lParam);
}
Edit 3:
My vertex Structure:
struct Vertex //Overloaded Vertex Structure
{
Vertex() {}
Vertex(float x, float y, float z,
float cr, float cg, float cb, float ca)
: pos(x, y, z), color(cr, cg, cb, ca) {}
XMFLOAT3 pos;
XMFLOAT4 color;
};
Input description:
D3D11_INPUT_ELEMENT_DESC layout[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
I think the issue is that when I pass my texture and read it as diffuse in the pixel shader, the mapping of my texture onto the back buffer is not the same, and hence I am only finding the red color from the 2nd scan line, and hence a resulting zero is produced..?
Edit 3:
My initialization code:
struct Vertex //Overloaded Vertex Structure
{
Vertex() {}
Vertex(float x, float y, float z,
float cr, float cg, float cb, float ca)
: pos(x, y, z), color(cr, cg, cb, ca) {}
XMFLOAT3 pos;
XMFLOAT4 color;
};
ID3D11Texture2D* renderTargetTextureMap;
ID3D11RenderTargetView* renderTargetViewMap;
ID3D11ShaderResourceView* shaderResourceViewMap;
ID3D11SamplerState* CubesTexSamplerState;
ID3D11Buffer *d2dIndexBuffer;
ID3D11Buffer *d2dIndexBuffer2;
D3D11_TEXTURE2D_DESC textureDesc;
D3D11_RENDER_TARGET_VIEW_DESC renderTargetViewDesc;
D3D11_SHADER_RESOURCE_VIEW_DESC shaderResourceViewDesc;
D3D11_INPUT_ELEMENT_DESC layout[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
UINT numElements = ARRAYSIZE(layout);
bool InitializeDirect3d11App(HINSTANCE hInstance)
{
//Describe our Buffer
DXGI_MODE_DESC bufferDesc;
ZeroMemory(&bufferDesc, sizeof(DXGI_MODE_DESC));
bufferDesc.Width = Width;
bufferDesc.Height = Height;
bufferDesc.RefreshRate.Numerator = 60;
bufferDesc.RefreshRate.Denominator = 1;
bufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
bufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
bufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
//Describe our SwapChain
DXGI_SWAP_CHAIN_DESC swapChainDesc;
ZeroMemory(&swapChainDesc, sizeof(DXGI_SWAP_CHAIN_DESC));
swapChainDesc.BufferDesc = bufferDesc;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.BufferCount = 1;
swapChainDesc.OutputWindow = hwnd;
swapChainDesc.Windowed = TRUE;
swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
//Create our SwapChain
hr = D3D11CreateDeviceAndSwapChain(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, NULL, NULL, NULL,
D3D11_SDK_VERSION, &swapChainDesc, &SwapChain, &d3d11Device, NULL, &d3d11DevCon);
//Create our BackBuffer
ID3D11Texture2D* BackBuffer;
hr = SwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&BackBuffer);
//Create our Render Target
hr = d3d11Device->CreateRenderTargetView(BackBuffer, NULL, &renderTargetView);
BackBuffer->Release();
////////////////////////////////////////////////////////////////////////EXPERIMENT AREA//////////////////////////////////////////////////////////////////////////////////////
ZeroMemory(&textureDesc, sizeof(textureDesc));
// Setup the texture description.
// We will have our map be a square
// We will need to have this texture bound as a render target AND a shader resource
textureDesc.Width = Width/2;
textureDesc.Height = Height/2;
textureDesc.MipLevels = 1;
textureDesc.ArraySize = 1;
textureDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
textureDesc.SampleDesc.Count = 1;
textureDesc.SampleDesc.Quality = 0;
textureDesc.Usage = D3D11_USAGE_DEFAULT;
textureDesc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
textureDesc.CPUAccessFlags = 0;
textureDesc.MiscFlags = 0;
d3d11Device->CreateTexture2D(&textureDesc, NULL, &renderTargetTextureMap);
// Setup the description of the render target view.
renderTargetViewDesc.Format = textureDesc.Format;
renderTargetViewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
renderTargetViewDesc.Texture2D.MipSlice = 0;
// Create the render target view.
d3d11Device->CreateRenderTargetView(renderTargetTextureMap, &renderTargetViewDesc, &renderTargetView);
/////////////////////// Map's Shader Resource View
// Setup the description of the shader resource view.
shaderResourceViewDesc.Format = textureDesc.Format;
shaderResourceViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
shaderResourceViewDesc.Texture2D.MostDetailedMip = 0;
shaderResourceViewDesc.Texture2D.MipLevels = 1;
// Create the shader resource view.
d3d11Device->CreateShaderResourceView(renderTargetTextureMap, &shaderResourceViewDesc, &shaderResourceViewMap);
//d3d11DevCon->OMSetRenderTargets(1, &renderTargetViewMap, NULL);
//d3d11DevCon->PSSetShaderResources(0, 1, &shaderResourceViewMap);
return true;
}
void CleanUp()
{
//Release the COM Objects we created
SwapChain->Release();
d3d11Device->Release();
d3d11DevCon->Release();
renderTargetView->Release();
triangleVertBuffer->Release();
VS->Release();
PS->Release();
VS_Buffer->Release();
PS_Buffer->Release();
vertLayout->Release();
}
bool InitScene()
{
//Compile Shaders from shader file
hr = D3DX11CompileFromFile(L"Effect.fx", 0, 0, "VS", "vs_5_0", 0, 0, 0, &VS_Buffer, 0, 0);
hr = D3DX11CompileFromFile(L"Effect.fx", 0, 0, "PS", "ps_5_0", 0, 0, 0, &PS_Buffer, 0, 0);
//Create the Shader Objects
hr = d3d11Device->CreateVertexShader(VS_Buffer->GetBufferPointer(), VS_Buffer->GetBufferSize(), NULL, &VS);
hr = d3d11Device->CreatePixelShader(PS_Buffer->GetBufferPointer(), PS_Buffer->GetBufferSize(), NULL, &PS);
//Set Vertex and Pixel Shaders
d3d11DevCon->VSSetShader(VS, 0, 0);
d3d11DevCon->PSSetShader(PS, 0, 0);
//Create the Input Layout
hr = d3d11Device->CreateInputLayout(layout, numElements, VS_Buffer->GetBufferPointer(),
VS_Buffer->GetBufferSize(), &vertLayout);
//Set the Input Layout
d3d11DevCon->IASetInputLayout(vertLayout);
//Set Primitive Topology
d3d11DevCon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
//Create the Viewport
D3D11_VIEWPORT viewport;
ZeroMemory(&viewport, sizeof(D3D11_VIEWPORT));
viewport.TopLeftX = 0;
viewport.TopLeftY = 0;
viewport.Width = 800;
viewport.Height = 600;
//Set the Viewport
d3d11DevCon->RSSetViewports(1, &viewport);
////////////////***********************First Texture Vertex Buffer *******************************/////////////////////////////
//Create the vertex buffer
Vertex v[] =
{
Vertex(-0.35f, -0.35f, 0.0f, 1.0f,0.0f,0.0f, 1.0f),
Vertex(-0.35f, 0.35f, 0.0f, 1.0f,0.0f,0.0f, 1.0f),
Vertex(0.35f, 0.35f, 0.0f, 1.0f, 0.0f,0.0f, 1.0f),
Vertex(0.35f, -0.35f, 0.0f, 1.0f,0.0f, 0.0f, 1.0f),
};
DWORD indices[] = {
// Front Face
0, 1, 3,
1, 2, 3,
};
D3D11_BUFFER_DESC indexBufferDesc;
ZeroMemory(&indexBufferDesc, sizeof(indexBufferDesc));
indexBufferDesc.Usage = D3D11_USAGE_DEFAULT;
indexBufferDesc.ByteWidth = sizeof(DWORD) * 2 * 3;
indexBufferDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
indexBufferDesc.CPUAccessFlags = 0;
indexBufferDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA iinitData;
iinitData.pSysMem = indices;
d3d11Device->CreateBuffer(&indexBufferDesc, &iinitData, &d2dIndexBuffer);
D3D11_BUFFER_DESC vertexBufferDesc;
ZeroMemory(&vertexBufferDesc, sizeof(vertexBufferDesc));
vertexBufferDesc.Usage = D3D11_USAGE_DEFAULT;
vertexBufferDesc.ByteWidth = sizeof(Vertex) * 4;
vertexBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertexBufferDesc.CPUAccessFlags = 0;
vertexBufferDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA vertexBufferData;
ZeroMemory(&vertexBufferData, sizeof(vertexBufferData));
vertexBufferData.pSysMem = v;
hr = d3d11Device->CreateBuffer(&vertexBufferDesc, &vertexBufferData, &triangleVertBuffer);
////////////////////////////////////////////////////// Second Vertex.
Vertex v2[] = {
// positions // colors // texture coords
Vertex(1.0f, 1.0f, 0.0f,0.0f,1.0f,0.0f,1.0f), // top right
Vertex(1.0f,0.0f, 0.0f,0.0f,1.0f,0.0f,1.0f), // bottom right
Vertex(0.0f,0.0f, 0.0f,0.0f,1.0f,0.0f,1.0f), // bottom left
Vertex(0.0f, 1.0, 0.0f,0.0f, 1.0f,0.0f,1.0f) // top left
};
DWORD indices2[] = {
// Front Face
0, 1, 2,
0, 2, 3,
};
D3D11_BUFFER_DESC indexBufferDesc2;
ZeroMemory(&indexBufferDesc2, sizeof(indexBufferDesc2));
indexBufferDesc2.Usage = D3D11_USAGE_DEFAULT;
indexBufferDesc2.ByteWidth = sizeof(DWORD) * 2 * 3;
indexBufferDesc2.BindFlags = D3D11_BIND_INDEX_BUFFER;
indexBufferDesc2.CPUAccessFlags = 0;
indexBufferDesc2.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA iinitData2;
iinitData2.pSysMem = indices2;
d3d11Device->CreateBuffer(&indexBufferDesc2, &iinitData2, &d2dIndexBuffer2);
D3D11_BUFFER_DESC vertexBufferDesc2;
ZeroMemory(&vertexBufferDesc2, sizeof(vertexBufferDesc2));
vertexBufferDesc2.Usage = D3D11_USAGE_DEFAULT;
vertexBufferDesc2.ByteWidth = sizeof(Vertex) * 4;
vertexBufferDesc2.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertexBufferDesc2.CPUAccessFlags = 0;
vertexBufferDesc2.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA vertexBufferData2;
ZeroMemory(&vertexBufferData2, sizeof(vertexBufferData2));
vertexBufferData2.pSysMem = v2;
hr = d3d11Device->CreateBuffer(&vertexBufferDesc2, &vertexBufferData2, &triangleVertBuffer2);
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
return true;
}
Edit 4:
Added draw scene code:
void DrawScene()
{
//Set the vertex buffer
UINT stride = sizeof(Vertex);
UINT offset = 0;
///////////////////////////////////////////////////////////Buffer 2//////////////////////////////////////////////////////////////////
d3d11DevCon->IASetIndexBuffer(d2dIndexBuffer, DXGI_FORMAT_R32_UINT, 0);
d3d11DevCon->IASetVertexBuffers(0, 1, &triangleVertBuffer, &stride, &offset);
////Draw the triangle
d3d11DevCon->DrawIndexed(6, 0, 0);
d3d11DevCon->OMSetRenderTargets(1, &renderTargetView, NULL);
////////////////////////////////////////////
d3d11DevCon->PSSetShaderResources(0, 1, &shaderResourceViewMap);
d3d11DevCon->IASetIndexBuffer(d2dIndexBuffer2, DXGI_FORMAT_R32_UINT, 0);
d3d11DevCon->IASetVertexBuffers(0, 1, &triangleVertBuffer2, &stride, &offset);
////Draw the triangle
d3d11DevCon->DrawIndexed(6, 0, 0);
//Present the backbuffer to the screen
SwapChain->Present(0, 0);
}
In order to calculate correct texture coordinates from the vertex position, you need to:
1. Create the render target texture with the full Width and Height (not the halves):
textureDesc.Width = Width; // /2; // Do not use half width
textureDesc.Height = Height; // /2; // Do not use half height
2. Scale the position by float2(1 / Width, 1 / Height) in your pixel shader, like this:
float2 tex = input.Pos.xy * float2(1.0f / 800.0f, 1.0f / 600.0f);
float4 diffuse = ObjTexture.Sample(ObjSamplerState, tex);
return input.Color + diffuse;
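As a side note, the 800 and 600 literals tie the shader to one window size. The inverse size could instead be supplied from the C++ side through a constant buffer; a hedged sketch of that idea (the ScreenSizeCB struct and the register assignment are my own assumptions, not part of the answer):

#include <d3d11.h>
#include <DirectXMath.h>

// Hypothetical constant buffer layout matching a cbuffer in the pixel shader:
//   cbuffer ScreenSize : register(b0) { float2 invScreenSize; float2 pad; };
struct ScreenSizeCB
{
    DirectX::XMFLOAT2 invScreenSize; // (1/width, 1/height)
    DirectX::XMFLOAT2 pad;           // keeps the buffer a multiple of 16 bytes
};

ID3D11Buffer* CreateScreenSizeCB(ID3D11Device* device, float width, float height)
{
    ScreenSizeCB data = { { 1.0f / width, 1.0f / height }, { 0.0f, 0.0f } };

    D3D11_BUFFER_DESC desc = {};
    desc.ByteWidth = sizeof(ScreenSizeCB);
    desc.Usage = D3D11_USAGE_DEFAULT;
    desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;

    D3D11_SUBRESOURCE_DATA init = {};
    init.pSysMem = &data;

    ID3D11Buffer* cb = nullptr;
    device->CreateBuffer(&desc, &init, &cb);
    return cb; // bind with d3d11DevCon->PSSetConstantBuffers(0, 1, &cb);
}

The pixel shader would then compute input.Pos.xy * invScreenSize instead of using the hard-coded constants, so the sampling keeps working after a resize.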

Update texture from vector directx9

I'm trying to render two textures, one for RGB and another for the alpha channel, and I blend them together with a shader.
The alpha channel texture doesn't overlap properly with the RGB one; it seems to be stretched.
The alpha channel texture changes every frame, and I need to fill it from an array of uint8_t with the following function:
D3DLOCKED_RECT locked_rect;
HRESULT hr = alpha_tex->LockRect(0, &locked_rect, nullptr, 0);
if (!FAILED(hr)) {
ret_code = 0;
BYTE *p_dst = (BYTE *)locked_rect.pBits;
for (uint y = 0; y < height; y++) {
memcpy(p_dst, alpha_array, width);
alpha_array += width;
p_dst += locked_rect.Pitch;
}
alpha_tex->UnlockRect(0);
}
where alpha_array is a uint8_t array containing the alpha values.
To render the texture I use the following function:
hwctx->d3d9device->Clear(0, 0, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER, 0xffeeeeee, 1.0f, 0);
hwctx->d3d9device->BeginScene();
ctx->mFX->SetTechnique(ctx->mhTech);
ctx->texRGB->GetSurfaceLevel(0, &ctx->surfRGB);
hwctx->d3d9device->StretchRect((IDirect3DSurface9*)s->vdrFrame->data[3], NULL, ctx->surfRGB, NULL, D3DTEXF_LINEAR);
ctx->mFX->SetTexture(ctx->mhTexRGB, ctx->texRGB);
ctx->mFX->SetTexture(ctx->mhTexAlpha, ctx->texAlpha);
// Enable alpha blending.
hwctx->d3d9device->SetRenderState(D3DRS_ALPHABLENDENABLE, true);
hwctx->d3d9device->SetRenderState(D3DRS_SRCBLEND, D3DBLEND_SRCALPHA);
hwctx->d3d9device->SetRenderState(D3DRS_DESTBLEND, D3DBLEND_INVSRCALPHA);
UINT numPasses = 0;
ctx->mFX->Begin(&numPasses, 0);
for (UINT i = 0; i < numPasses; ++i){
ctx->mFX->BeginPass(i);
hwctx->d3d9device->DrawPrimitive(D3DPT_TRIANGLEFAN, 0, 2);
ctx->mFX->EndPass();
}
ctx->mFX->End();
hwctx->d3d9device->EndScene();
hwctx->d3d9device->Present(0, 0, 0, 0);
// Disable alpha blending.
hwctx->d3d9device->SetRenderState(D3DRS_ALPHABLENDENABLE, false);
I combine the textures with a vertex/pixel shader:
uniform extern texture gTexRGB;
uniform extern texture gTexAlpha;
sampler TexRGB = sampler_state{
Texture = <gTexRGB>;
AddressU = WRAP;
AddressV = WRAP;
};
sampler TexAlpha = sampler_state{
Texture = <gTexAlpha>;
AddressU = WRAP;
AddressV = WRAP;
};
struct OutputVS{
float4 posH : POSITION0;
float2 tex0 : TEXCOORD0;
};
OutputVS TextureBlendingVS(float2 tex0: TEXCOORD0){
// Zero out our output.
OutputVS outVS = (OutputVS)0;
// Pass on texture coordinates to be interpolated in rasterization.
outVS.tex0 = tex0;
// Done--return the output.
return outVS;
}
float4 TextureBlendingPS(float2 tex0 : TEXCOORD0) : COLOR{
float3 rgb = tex2D(TexRGB, tex0).rgb;
float alpha = tex2D(TexAlpha, tex0).a;
return float4(rgb, alpha);
}
technique DirLightTexTech{
pass P0 {
// Specify the vertex and pixel shader associated with this pass.
vertexShader = compile vs_2_0 TextureBlendingVS();
pixelShader = compile ps_2_0 TextureBlendingPS();
}
}
The size of the textures is the same, but during rendering something goes wrong.
Please help me. :)

HLSL Geometry Shader empty output

I am trying to build textured quads out of single vertices (as a POINT list) inside the geometry shader. The problem I am unable to solve right now is that nothing gets rendered. I have already tried to debug it with Visual Studio's Graphics Debugger and found out that the output of my geometry shader is seemingly empty.
This is my Geometry Shader:
struct PS_INPUT
{
float4 position : SV_POSITION;
float2 texCoord : TEXCOORD;
};
struct VS_OUTPUT
{
float4 position : SV_POSITION;
float3 normal : NORMAL;
};
[maxvertexcount(4)]
void GS(
point VS_OUTPUT input[1],
inout TriangleStream< PS_INPUT > output
)
{
float3 decalNormal = input[0].normal;
float3 upVector = float3(0.0, 1.0f, 0.0f);
float3 frontVector = 0.2f * decalNormal;
// 2.0f = half-decal width
float3 rightVector = normalize(cross(decalNormal, upVector)) * 4.0f;
upVector = float3(0, 4.0f, 0);
float3 vertices[4] = { { 1, 0, 0 }, { 0, 0, 1 }, { -1, 0, 0 }, { 0, 0, -1 } };
vertices[0] = input[0].position.xyz - rightVector - upVector + frontVector; // Bottom Left
vertices[1] = input[0].position.xyz + rightVector - upVector + frontVector; // Bottom Right
vertices[2] = input[0].position.xyz - rightVector + upVector + frontVector; // Top Left
vertices[3] = input[0].position.xyz + rightVector + upVector + frontVector; // Top Right
float2 texCoord[4] = { { 0, 0 }, { 0, 0 }, { 0, 0 }, { 0, 0 } };
texCoord[0] = float2(0, 1);
texCoord[1] = float2(1, 1);
texCoord[2] = float2(0, 0);
texCoord[3] = float2(1, 0);
PS_INPUT outputVert;
for (uint i = 0; i < 4; i++)
{
outputVert.position = float4(vertices[i], 1.0f);
outputVert.texCoord = texCoord[i];
output.Append(outputVert);
}
}
This is a screenshot of the pipeline stages for the draw call. As you can see, there are 3 vertices going into the vertex shader, but nothing after the geometry shader.
I have already disabled all culling:
D3D11_RASTERIZER_DESC rDesc;
ZeroMemory(&rDesc, sizeof(D3D11_RASTERIZER_DESC));
rDesc.FillMode = D3D11_FILL_SOLID;
rDesc.CullMode = D3D11_CULL_NONE; // no culling for now...
rDesc.FrontCounterClockwise = false;
rDesc.DepthClipEnable = false;
And depth testing:
dsDesc.DepthEnable = false;
While setting everything like this:
context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_POINTLIST);
context->RSSetState(m_pRasterizer);
context->IASetInputLayout(m_pInputLayout);
context->VSSetShader(m_pVS, 0, 0);
context->GSSetShader(m_pGS, 0, 0);
context->PSSetShader(m_pPS, 0, 0);
What could be the problem here? I assume that no pixel shader work happens because the geometry shader isn't outputting anything; is that right?
Thank you!
I fixed it on my own. I forgot to multiply the vertex positions output by the geometry shader by my view-projection matrix!
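For reference, the C++ side of that fix would be a constant buffer holding the view-projection matrix, bound to the geometry shader stage. A minimal sketch under the assumption that the GS declares a matching cbuffer in register b0 (the names here are mine, not from the question):

#include <d3d11.h>
#include <DirectXMath.h>

using namespace DirectX;

// Hypothetical per-frame constant buffer the geometry shader would read:
//   cbuffer Camera : register(b0) { float4x4 viewProj; };
struct CameraCB
{
    XMFLOAT4X4 viewProj;
};

void UpdateAndBindCameraCB(ID3D11DeviceContext* context, ID3D11Buffer* cameraCB,
                           FXMMATRIX view, CXMMATRIX proj)
{
    CameraCB data;
    // HLSL packs matrices column-major by default, so transpose before uploading.
    XMStoreFloat4x4(&data.viewProj, XMMatrixTranspose(view * proj));

    context->UpdateSubresource(cameraCB, 0, nullptr, &data, 0, 0);
    context->GSSetConstantBuffers(0, 1, &cameraCB); // bind to the GS stage, slot b0
}

// Inside GS(), the appended position would then be something like:
//   outputVert.position = mul(float4(vertices[i], 1.0f), viewProj);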
