Invisible geometry in DX11 - xna

After struggling for many hours to understand and compensate for Microsoft's deprecation of D3DX in Windows 8, I've run into a problem that I can't shake off.
The first time I started my program, it hung on an access violation and the process refused to die. After restarting my computer, recompiling, and running the same code, it doesn't crash, but the cube I should be seeing just isn't there.
I'm also getting a warning that "object declared on the heap may not be aligned 16". AFAIK from my research, this usually occurs because of XNA Math. So, after finding this discussion I literally tried everything that was being suggested. Everything except the XMFLOAT4X4 solution worked, but my cube is still invisible. Here's the part of the code I think is relevant:
class IEGame : public DX11InfernalEngineBase
{
public:
//...
bool LoadContent()
{
//...
VertexPos vertices[] =
{
{ XMFLOAT3(-1.0f, 1.0f, -1.0f), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(1.0f, 1.0f, -1.0f), XMFLOAT2(1.0f, 0.0f) },
{ XMFLOAT3(1.0f, 1.0f, 1.0f), XMFLOAT2(1.0f, 1.0f) },
{ XMFLOAT3(-1.0f, 1.0f, 1.0f), XMFLOAT2(0.0f, 1.0f) },
{ XMFLOAT3(-1.0f, -1.0f, -1.0f), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(1.0f, -1.0f, -1.0f), XMFLOAT2(1.0f, 0.0f) },
{ XMFLOAT3(1.0f, -1.0f, 1.0f), XMFLOAT2(1.0f, 1.0f) },
{ XMFLOAT3(-1.0f, -1.0f, 1.0f), XMFLOAT2(0.0f, 1.0f) },
{ XMFLOAT3(-1.0f, -1.0f, 1.0f), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(-1.0f, -1.0f, -1.0f), XMFLOAT2(1.0f, 0.0f) },
{ XMFLOAT3(-1.0f, 1.0f, -1.0f), XMFLOAT2(1.0f, 1.0f) },
{ XMFLOAT3(-1.0f, 1.0f, 1.0f), XMFLOAT2(0.0f, 1.0f) },
{ XMFLOAT3(1.0f, -1.0f, 1.0f), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(1.0f, -1.0f, -1.0f), XMFLOAT2(1.0f, 0.0f) },
{ XMFLOAT3(1.0f, 1.0f, -1.0f), XMFLOAT2(1.0f, 1.0f) },
{ XMFLOAT3(1.0f, 1.0f, 1.0f), XMFLOAT2(0.0f, 1.0f) },
{ XMFLOAT3(-1.0f, -1.0f, -1.0f), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(1.0f, -1.0f, -1.0f), XMFLOAT2(1.0f, 0.0f) },
{ XMFLOAT3(1.0f, 1.0f, -1.0f), XMFLOAT2(1.0f, 1.0f) },
{ XMFLOAT3(-1.0f, 1.0f, -1.0f), XMFLOAT2(0.0f, 1.0f) },
{ XMFLOAT3(-1.0f, -1.0f, 1.0f), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(1.0f, -1.0f, 1.0f), XMFLOAT2(1.0f, 0.0f) },
{ XMFLOAT3(1.0f, 1.0f, 1.0f), XMFLOAT2(1.0f, 1.0f) },
{ XMFLOAT3(-1.0f, 1.0f, 1.0f), XMFLOAT2(0.0f, 1.0f) }
};
D3D11_BUFFER_DESC vertexDesc;
ZeroMemory(&vertexDesc, sizeof(vertexDesc));
vertexDesc.Usage = D3D11_USAGE_DEFAULT;
vertexDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertexDesc.ByteWidth = sizeof(VertexPos)*24;
D3D11_SUBRESOURCE_DATA resourceData;
ZeroMemory(&resourceData, sizeof(resourceData));
resourceData.pSysMem = vertices;
try{
d3dresult = d3dDevice_->CreateBuffer(&vertexDesc, &resourceData, &vertexBuffer_);
if (FAILED(d3dresult))
{
throw _com_error(d3dresult);
}
}
catch (_com_error & comEx){
MessageBox(0, comEx.ErrorMessage(), "Error Creating Vertex Buffer", MB_OK);
return false;
}
WORD indices[] = {
3, 1, 0, 2, 1, 3,
6, 4, 5, 7, 4, 6,
11, 9, 8, 10, 9, 11,
14, 12, 13, 15, 12, 14,
19, 17, 16, 18, 17, 19,
22, 20, 21, 23, 20, 22
};
D3D11_BUFFER_DESC indexDesc;
ZeroMemory(&indexDesc, sizeof(indexDesc));
indexDesc.Usage = D3D11_USAGE_DEFAULT;
indexDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
indexDesc.ByteWidth = sizeof(WORD)* 36;
indexDesc.CPUAccessFlags = 0;
resourceData.pSysMem = indices;
try{
d3dresult = d3dDevice_->CreateBuffer(&indexDesc, &resourceData, &indexBuffer_);
if (FAILED(d3dresult))
{
throw _com_error(d3dresult);
}
}
catch (_com_error & comEx){
MessageBox(0, comEx.ErrorMessage(), "Error Creating Index Buffer", MB_OK);
return false;
}
try{
std::vector<byte> textureFile = LoadFile("C:\\Users\\Marcus\\documents\\visual studio 2013\\Projects\\infernalEngine\\Debug\\guide.png");
d3dresult = CreateWICTextureFromMemory(d3dDevice_, d3dContext_, textureFile.data(), textureFile.size(), nullptr, &colorMapView_, 0);
if (FAILED(d3dresult))
{
throw _com_error(d3dresult);
}
}
catch (_com_error & comEx){
MessageBox(0, comEx.ErrorMessage(), "Error Loading Texture", MB_OK);
return false;
}
D3D11_SAMPLER_DESC colorMapDesc;
ZeroMemory(&colorMapDesc, sizeof(colorMapDesc));
colorMapDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
colorMapDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
colorMapDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
colorMapDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
colorMapDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
colorMapDesc.MaxLOD = D3D11_FLOAT32_MAX;
try{
d3dresult = d3dDevice_->CreateSamplerState(&colorMapDesc, &colorMapSampler_);
if (FAILED(d3dresult))
{
throw _com_error(d3dresult);
}
}
catch (_com_error & comEx){
MessageBox(0, comEx.ErrorMessage(), "Error Creating Sampler State", MB_OK);
return false;
}
D3D11_BUFFER_DESC constDesc;
ZeroMemory(&constDesc, sizeof(constDesc));
constDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
constDesc.ByteWidth = sizeof(XMMATRIX);
constDesc.Usage = D3D11_USAGE_DEFAULT;
try{
d3dresult = d3dDevice_->CreateBuffer(&constDesc, nullptr, &viewCB_);
if (FAILED(d3dresult))
{
throw _com_error(d3dresult);
}
}
catch (_com_error & comEx){
MessageBox(0, comEx.ErrorMessage(), "Error Creating View Matrix", MB_OK);
return false;
}
try{
d3dresult = d3dDevice_->CreateBuffer(&constDesc, nullptr, &projCB_);
if (FAILED(d3dresult))
{
throw _com_error(d3dresult);
}
}
catch (_com_error & comEx){
MessageBox(0, comEx.ErrorMessage(), "Error Creating Projection Matrix", MB_OK);
return false;
}
try{
d3dresult = d3dDevice_->CreateBuffer(&constDesc, nullptr, &worldCB_);
if (FAILED(d3dresult))
{
throw _com_error(d3dresult);
}
}
catch (_com_error & comEx){
MessageBox(0, comEx.ErrorMessage(), "Error Creating World Matrix", MB_OK);
return false;
}
viewMatrix_ = XMMatrixIdentity();
projMatrix_ = XMMatrixPerspectiveFovLH(XM_PIDIV4, 800.0f / 600.0f, 0.01f, 100.0f);
viewMatrix_ = XMMatrixTranspose(viewMatrix_);
projMatrix_ = XMMatrixTranspose(projMatrix_);
return true;
}
void UnloadContent()
{
if (colorMapSampler_) colorMapSampler_->Release();
if (colorMapView_) colorMapView_->Release();
if (solidColorVS_) solidColorVS_->Release();
if (solidColorPS_) solidColorPS_->Release();
if (inputLayout_) inputLayout_->Release();
if (vertexBuffer_) vertexBuffer_->Release();
if (viewCB_) viewCB_->Release();
if (projCB_) projCB_->Release();
if (worldCB_) worldCB_->Release();
colorMapSampler_ = 0;
colorMapView_ = 0;
solidColorVS_ = 0;
solidColorPS_ = 0;
inputLayout_ = 0;
vertexBuffer_ = 0;
viewCB_ = 0;
projCB_ = 0;
worldCB_ = 0;
}
void Render()
{
if (d3dContext_ == 0)
return;
float clearColor[4] = { 0.0f, 0.0f, 0.25f, 1.0f };
d3dContext_->ClearRenderTargetView(backBufferTarget_, clearColor);
d3dContext_->ClearDepthStencilView(depthStencilView_, D3D11_CLEAR_DEPTH, 1.0f, 0);
unsigned int nStride = sizeof(VertexPos);
unsigned int nOffset = 0;
d3dContext_->IASetInputLayout(inputLayout_);
d3dContext_->IASetVertexBuffers(0, 1, &vertexBuffer_, &nStride, &nOffset);
d3dContext_->IASetIndexBuffer(indexBuffer_, DXGI_FORMAT_R16_UINT, 0);
d3dContext_->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
d3dContext_->VSSetShader(solidColorVS_, 0, 0);
d3dContext_->PSSetShader(solidColorPS_, 0, 0);
d3dContext_->PSSetShaderResources(0, 1, &colorMapView_);
d3dContext_->PSSetSamplers(0, 1, &colorMapSampler_);
XMMATRIX rotationMatrix;
rotationMatrix = XMMatrixRotationRollPitchYaw(0.0f, 0.7f, 0.7f);
XMMATRIX translationMatrix;
translationMatrix = XMMatrixTranslation(0.0f, 0.0f, 0.6f);
XMMATRIX worldMatrix;
worldMatrix = rotationMatrix * translationMatrix;
worldMatrix = XMMatrixTranspose(worldMatrix);
d3dContext_->UpdateSubresource(worldCB_, 0, nullptr, &worldMatrix, 0, 0);
d3dContext_->UpdateSubresource(viewCB_, 0, nullptr, &viewMatrix_, 0, 0);
d3dContext_->UpdateSubresource(projCB_, 0, nullptr, &projMatrix_, 0, 0);
d3dContext_->VSSetConstantBuffers(0, 1, &worldCB_);
d3dContext_->VSSetConstantBuffers(1, 1, &viewCB_);
d3dContext_->VSSetConstantBuffers(2, 1, &projCB_);
d3dContext_->DrawIndexed(36, 0, 0);
swapChain_->Present(0, 0);
}
private:
ID3D11VertexShader * solidColorVS_;
ID3D11PixelShader * solidColorPS_;
ID3D11InputLayout * inputLayout_;
ID3D11Buffer * vertexBuffer_;
ID3D11Buffer * indexBuffer_;
ID3D11ShaderResourceView * colorMapView_;
ID3D11SamplerState * colorMapSampler_;
ID3D11Buffer * viewCB_;
ID3D11Buffer * projCB_;
ID3D11Buffer * worldCB_;
XMMATRIX viewMatrix_;
XMMATRIX projMatrix_;
};
UPDATE : So, after enabling the D3D Debug Device, I get this in the output. I really can't make sense of it.
D3D11 WARNING: Process is terminating. Using simple reporting. Please call ReportLiveObjects() at runtime for standard reporting. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Producer at 0x00F7F3F4, Refcount: 3. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x00F80218, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x0403A110, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x0403801C, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x04040304, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x0404054C, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x0404089C, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x04040AAC, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x04040CE0, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x04041394, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x040428E4, Refcount: 1. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x04043534, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x04043894, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x0404490C, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x0404F5CC, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x0405C7EC, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x0405B65C, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x04069D0C, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x0406701C, Refcount: 1. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x040653AC, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x0408243C, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x04036FFC, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x0403719C, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x04085C04, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x0408656C, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object at 0x040618CC, Refcount: 0. [ STATE_CREATION WARNING #0: UNKNOWN]
D3D11 WARNING: Live Object : 25 [ STATE_CREATION WARNING #0: UNKNOWN]
DXGI WARNING: Live Producer at 0x00F4AE50, Refcount: 4. [ STATE_CREATION WARNING #0: ]
DXGI WARNING: Live Object at 0x00F4D480, Refcount: 2. [ STATE_CREATION WARNING #0: ]
DXGI WARNING: Live Object : 1 [ STATE_CREATION WARNING #0: ]
UPDATE : My XMMATRIXs are now properly aligned. The cube is still invisible, though. I also can't find the Visual Studio Graphics Debugger; I know where it should be according to my research, it just isn't there.

The key issue here is that you are using XMMATRIX, which requires 16-byte alignment, as a member of a heap-allocated class, but new on x86 (32-bit) only guarantees 8-byte alignment by default.
This is exactly what XMFLOAT4X4, XMLoadFloat4x4, and XMStoreFloat4x4 exist to resolve.
XMMATRIX vm = XMMatrixIdentity();
XMMATRIX pm = XMMatrixPerspectiveFovLH(XM_PIDIV4, 800.0f / 600.0f, 0.01f, 100.0f);
XMStoreFloat4x4( &viewMatrix_, XMMatrixTranspose(vm) );
XMStoreFloat4x4( &projMatrix_, XMMatrixTranspose(pm) );
XMFLOAT4X4 viewMatrix_;
XMFLOAT4X4 projMatrix_;
Alternatively, you can switch to building for x64 (64-bit) native, where the heap is 16-byte aligned by default, and then you can use XMMATRIX or XMVECTOR as a class member without worrying about alignment in most cases.
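Not one of the suggestions above, but another way some codebases keep XMMATRIX members on x86 is to give the owning class 16-byte-aligned heap allocation. A minimal sketch, assuming MSVC's _aligned_malloc:
#include <malloc.h>   // _aligned_malloc / _aligned_free
#include <new>        // std::bad_alloc

class IEGame : public DX11InfernalEngineBase
{
public:
    // Ensure heap instances of this class are 16-byte aligned so the
    // XMMATRIX members are safe to use directly.
    void* operator new(size_t size)
    {
        if (void* p = _aligned_malloc(size, 16))
            return p;
        throw std::bad_alloc();
    }
    void operator delete(void* p)
    {
        _aligned_free(p);
    }
    // ...
};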
This is addressed on MSDN in the DirectXMath Programming Guide. See Getting Started, Type Usage Guidelines
You should also take a look at the SimpleMath wrapper in the DirectX Tool Kit. Those types handle this load/store behavior with C++ "magic":
#include <SimpleMath.h>
XMMATRIX vm = XMMatrixIdentity();
XMMATRIX pm = XMMatrixPerspectiveFovLH(XM_PIDIV4, 800.0f / 600.0f, 0.01f, 100.0f);
viewMatrix_ = XMMatrixTranspose(vm);
projMatrix_ = XMMatrixTranspose(pm);
DirectX::SimpleMath::Matrix viewMatrix_;
DirectX::SimpleMath::Matrix projMatrix_;
If you haven't seen these yet, be sure to read:
Where is the DirectX SDK?
Living without D3DX
DirectX SDK Tools Catalog
DirectX SDK Samples Catalog
DirectX SDKs of a certain age
Direct3D Win32 Game Visual Studio template
EDIT: For Windows desktop DirectX development, you should use Visual Studio 2013 Community Edition if you aren't already using VS 2013 Professional or higher. It includes the VS Graphics Debugger, which VS 2013 Express for Windows Desktop does not.

Well, I finally found the solution. Since the VS Graphics Debugger gave no errors, I had to try things more or less at random until something worked.
I HAD to create a rasterizer state before anything became visible. It also turned out that my cube was inside out, so I set the FrontCounterClockwise field of D3D11_RASTERIZER_DESC to false. I also zoomed the camera out by changing the following line in Render():
translationMatrix = XMMatrixTranslation(0.0f, 0.0f, 4.0f);
I also created a scissor rectangle; I don't know whether that had anything to do with the solution.
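For reference, a minimal sketch of that rasterizer setup, reusing the d3dDevice_/d3dContext_ members from the class above (error handling omitted; fields not shown keep their zeroed defaults):
D3D11_RASTERIZER_DESC rasterDesc;
ZeroMemory(&rasterDesc, sizeof(rasterDesc));
rasterDesc.FillMode = D3D11_FILL_SOLID;
rasterDesc.CullMode = D3D11_CULL_BACK;
rasterDesc.FrontCounterClockwise = FALSE; // clockwise triangles are front faces
rasterDesc.DepthClipEnable = TRUE;

ID3D11RasterizerState* rasterState = nullptr;
if (SUCCEEDED(d3dDevice_->CreateRasterizerState(&rasterDesc, &rasterState)))
{
    d3dContext_->RSSetState(rasterState);
    // Remember to Release() rasterState in UnloadContent().
}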
Here's where I found all the relevant stuff:
This MSDN page.
Thanks for everybody's help. I also cleared out the try/catch noise by wrapping the HRESULT checks in an inline function.
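For example, a small helper along these lines (hypothetical name) replaces the repeated try/catch blocks around every Create* call:
#include <comdef.h>   // _com_error

// Throws on a failed HRESULT so each Create* call becomes a one-liner.
inline void ThrowIfFailed(HRESULT hr)
{
    if (FAILED(hr))
        throw _com_error(hr);
}

// Usage:
// ThrowIfFailed(d3dDevice_->CreateBuffer(&vertexDesc, &resourceData, &vertexBuffer_));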

Related

DX11 triangle list is not rendering at all

I have a list of 4 vertices loaded into a vertex buffer, and an index list loaded into an index buffer.
The issue I have is that while the LineList topology shows the quad just fine (see below), the TriangleList topology shows nothing (see below).
void BLX::Model::load(std::filesystem::path path, Model* model, ID3D11Device* d3dDevice, ID3D11DeviceContext* d3dContext)
{
// tmp: just making a quad
float num = 0.5f;
std::vector<BLX::Vertex> vertices = {
BLX::Vertex { DirectX::XMFLOAT3(-num, -num, 0.0f), DirectX::XMFLOAT3(0.0f, 0.0f, 0.5f), }, // 0 = TL
BLX::Vertex { DirectX::XMFLOAT3(num, -num, 0.0f), DirectX::XMFLOAT3(0.0f, 0.5f, 0.0f), }, // 1 = TR
BLX::Vertex { DirectX::XMFLOAT3(num, num, 0.0f), DirectX::XMFLOAT3(0.5f, 0.0f, 0.0f), }, // 2 = BR
BLX::Vertex { DirectX::XMFLOAT3(-num, num, 0.0f), DirectX::XMFLOAT3(0.5f, 0.5f, 0.0f), }, // 3 = BL
};
// line list
//std::vector<unsigned int> indices = { 0, 1, 1, 2, 2, 3, 3, 0 };
// triangle list
std::vector<unsigned int> indices = { 0, 1, 3, 3, 1, 2 };
model->vertexCount = vertices.size();
model->indexCount = indices.size();
// Vertex Buffer
D3D11_BUFFER_DESC vbd = {};
vbd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vbd.Usage = D3D11_USAGE_DEFAULT;
vbd.CPUAccessFlags = 0u;
vbd.MiscFlags = 0u;
vbd.ByteWidth = sizeof(BLX::Vertex) * model->vertexCount;
vbd.StructureByteStride = sizeof(BLX::Vertex);
D3D11_SUBRESOURCE_DATA vsd = {};
vsd.pSysMem = &vertices[0];
vsd.SysMemPitch = 0;
vsd.SysMemSlicePitch = 0;
d3dDevice->CreateBuffer(&vbd, &vsd, &model->vertexBuffer);
/// Index Buffer
D3D11_BUFFER_DESC ibd = {};
ibd.Usage = D3D11_USAGE_DEFAULT;
ibd.ByteWidth = sizeof(unsigned int) * model->indexCount;
ibd.BindFlags = D3D11_BIND_INDEX_BUFFER;
ibd.CPUAccessFlags = 0;
ibd.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA isd = {};
isd.pSysMem = &indices[0];
isd.SysMemPitch = 0;
isd.SysMemSlicePitch = 0;
d3dDevice->CreateBuffer(&ibd, &isd, &model->indexBuffer);
// IA = Input Assembly
// pixel shader
D3DReadFileToBlob(L"PixelShader2.cso", &model->pBlob);
d3dDevice->CreatePixelShader(model->pBlob->GetBufferPointer(), model->pBlob->GetBufferSize(), nullptr, &model->pPixelShader);
// Vertex Shader
D3DReadFileToBlob(L"VertexShader2.cso", &model->pBlob);
d3dDevice->CreateVertexShader(model->pBlob->GetBufferPointer(), model->pBlob->GetBufferSize(), nullptr, &model->pVertexShader);
const D3D11_INPUT_ELEMENT_DESC ied[] =
{
// "Position" corresponds to Vertex Shader Semantic Name
// semantic index
// data type format
// Input slot
// Aligned byte offset
// Input slot class
// Instance data step rate
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
{ "COLOR", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
// needs vertex shader blob
d3dDevice->CreateInputLayout(ied, ARRAYSIZE(ied), model->pBlob->GetBufferPointer(), model->pBlob->GetBufferSize(), &model->pInputLayout);
}
void BLX::Model::render(ID3D11Device* d3dDevice, ID3D11DeviceContext* d3dContext, D3D11_VIEWPORT * vp)
{
const UINT stride = sizeof(Vertex);
const UINT offset[] = { 0u, 0u };
d3dContext->IASetVertexBuffers(0u, 1u, vertexBuffer.GetAddressOf(), &stride, &offset[0]);
d3dContext->IASetIndexBuffer(*indexBuffer.GetAddressOf(), DXGI_FORMAT_R32_UINT, offset[1]);
d3dContext->PSSetShader(pPixelShader.Get(), nullptr, 0u);
d3dContext->VSSetShader(pVertexShader.Get(), nullptr, 0u);
d3dContext->IASetInputLayout(pInputLayout.Get());
d3dContext->RSSetViewports(1u, vp);
//d3dContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY::D3D11_PRIMITIVE_TOPOLOGY_LINELIST);
d3dContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY::D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
d3dContext->DrawIndexed(indexCount, 0, 0);
}
When using the LineList index and topology:
When using the TriangleList index and topology:
But when I was doing this:
// tmp: just making a quad
float num = 0.5f;
std::vector<BLX::Vertex> vertices = {
BLX::Vertex { DirectX::XMFLOAT3(0.0f, num, 0.0f), DirectX::XMFLOAT3(0.0f, 0.0f, 0.5f), },
BLX::Vertex { DirectX::XMFLOAT3(num, -num, 0.0f), DirectX::XMFLOAT3(0.0f, 0.5f, 0.0f), },
BLX::Vertex { DirectX::XMFLOAT3(-num, -num, 0.0f), DirectX::XMFLOAT3(0.5f, 0.0f, 0.0f), },
};
// triangle list
std::vector<unsigned int> indices = { 0, 1, 2 };
(with everything else exactly the same) I got this:
I'm just really curious what I'm not seeing or getting when trying to render two triangles that make up a quad.
Your rectangle's indices wind its triangles counter-clockwise on screen, so they are culled by the default rasterizer (when you don't set one, D3D11 treats clockwise triangles as front faces and culls the counter-clockwise back faces).
Your standalone triangle was wound clockwise, so it was not culled.
To solve it, there are two options:
Change your index order:
std::vector<unsigned int> indices = { 0, 3, 1, 3, 2, 1 };
Disable culling in the rasterizer state:
First, create a rasterizer description:
D3D11_RASTERIZER_DESC raster_desc;
raster_desc.FillMode = D3D11_FILL_SOLID;
raster_desc.CullMode = D3D11_CULL_NONE;
raster_desc.FrontCounterClockwise = false;
raster_desc.DepthBias = 0;
raster_desc.DepthBiasClamp = 0.0f;
raster_desc.SlopeScaledDepthBias = 0.0f;
raster_desc.DepthClipEnable = true;
raster_desc.ScissorEnable = false;
raster_desc.MultisampleEnable = false;
raster_desc.AntialiasedLineEnable = false;
Then create a rasterizer state using your device:
ID3D11RasterizerState* raster_state;
HRESULT hr = d3dDevice->CreateRasterizerState(&raster_desc, &raster_state);
Before the draw, assign your rasterizer state to your context:
d3dContext->RSSetState(raster_state);
Your two meshes, the triangle and the quad, have opposite winding order. Here's why that matters.
By default, D3D11 uses CullMode=Back and FrontCounterClockwise=FALSE.
This means it only renders front faces, and a front face is one whose vertices appear in clockwise order on the render target.
If you plot the positions, your standalone triangle is indeed wound clockwise, but both triangles of your quad are counter-clockwise, so the GPU considers them back faces and skips both.
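If you want to verify the winding yourself, here is a small sketch of the signed-area test, assuming the vertex shader passes the positions straight through to clip space:
#include <DirectXMath.h>

// Positive result: counter-clockwise on screen (a back face under the default
// state, so it is culled). Negative result: clockwise (a front face, drawn).
float SignedArea(const DirectX::XMFLOAT3& a,
                 const DirectX::XMFLOAT3& b,
                 const DirectX::XMFLOAT3& c)
{
    return (b.x - a.x) * (c.y - a.y) - (b.y - a.y) * (c.x - a.x);
}

// Quad triangle {0, 1, 3}: (-0.5,-0.5), (0.5,-0.5), (-0.5,0.5) -> +1.0 (culled)
// Lone triangle {0, 1, 2}: (0,0.5), (0.5,-0.5), (-0.5,-0.5)    -> -1.0 (drawn)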
You have many ways to fix it; any of the following will do.
Reorder the vertices in the vertex buffer.
Flip the triangles in the index buffer to { 0, 3, 1, 1, 3, 2 }.
Change the rasterizer state to disable back-face culling, CullMode=D3D11_CULL_NONE.
Change the rasterizer state to switch the front-face winding direction, FrontCounterClockwise=TRUE.
Change the matrix passed to the vertex shader to include a mirroring component; e.g. scaling by the vector [ -1, 1, 1 ] is a mirror transform that flips X, which flips the winding order of the whole mesh.

Direct3D9 not drawing?

I needed to draw some simple shapes and I decided to go with D3D9. After going through a few of the tutorials on directxtutorial.com, I finally have all the code and knowledge I need to make my first shape appear on the screen. The problem is, though, that no image is appearing. I've looked over the code many times and have compared it with the code on the website, and it all checks out. Why is nothing rendering on the screen?
#define CUSTOMFVF (D3DFVF_XYZRHW | D3DFVF_DIFFUSE)
LPDIRECT3D9 d3d;
LPDIRECT3DDEVICE9 d3ddev;
LPDIRECT3DVERTEXBUFFER9 vbuffer;
struct CUSTOMVERTEX
{
float x, y, z, rhw;
DWORD color;
};
void InitD3D(HWND hWnd)
{
d3d = Direct3DCreate9(D3D_SDK_VERSION);
D3DPRESENT_PARAMETERS d3dpp;
ZeroMemory(&d3dpp, sizeof(d3dpp));
d3dpp.Windowed = true;
d3dpp.hDeviceWindow = hWnd;
d3dpp.SwapEffect = D3DSWAPEFFECT_DISCARD;
d3d->CreateDevice(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, hWnd, D3DCREATE_SOFTWARE_VERTEXPROCESSING, &d3dpp, &d3ddev);
}
void InitGraphics()
{
CUSTOMVERTEX verticies[]
{
{50, 70, 1.0f, 1.0f, D3DCOLOR_XRGB(250, 0, 0),},
{70, 50, 1.0f, 1.0f, D3DCOLOR_XRGB(0, 250, 0),},
{40, 80, 1.0f, 1.0f, D3DCOLOR_XRGB(0, 0, 250),},
};
d3ddev->CreateVertexBuffer(3 * sizeof(CUSTOMVERTEX), NULL, CUSTOMFVF, D3DPOOL_MANAGED, &vbuffer, NULL);
void* vp;
vbuffer->Lock(0, 0, (void**)&vp, NULL);
memcpy(vp, verticies, sizeof(verticies));
vbuffer->Unlock();
}
void Draw()
{
d3ddev->Clear(NULL, NULL, D3DCLEAR_TARGET, NULL, 1.0f, NULL);
d3ddev->BeginScene();
d3ddev->SetFVF(CUSTOMFVF);
d3ddev->SetStreamSource(0, vbuffer, 0, sizeof(CUSTOMVERTEX));
d3ddev->DrawPrimitive(D3DPT_TRIANGLELIST, 0, 1);
d3ddev->EndScene();
d3ddev->Present(NULL, NULL, NULL, NULL);
}
void ReleaseD3D()
{
d3d->Release();
d3ddev->Release();
vbuffer->Release();
}
Turns out my vertex positions weren't triangle enough to make a triangle: (50, 70), (70, 50) and (40, 80) all lie on the line x + y = 120, so they form a degenerate, zero-area triangle. Thanks for the help, though, it reminded me to set up error checking.
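For reference, any non-collinear set of points fixes it. A hedged example with the vertices spread out, kept clockwise in screen space since D3D9's default D3DCULL_CCW culls counter-clockwise triangles:
// x, y, z, rhw, color (pre-transformed screen coordinates)
CUSTOMVERTEX verticies[] =
{
    {  50.0f,  50.0f, 1.0f, 1.0f, D3DCOLOR_XRGB(250,   0,   0) },
    { 150.0f,  50.0f, 1.0f, 1.0f, D3DCOLOR_XRGB(  0, 250,   0) },
    { 100.0f, 150.0f, 1.0f, 1.0f, D3DCOLOR_XRGB(  0,   0, 250) },
};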

Color conversion from DXGI_FORMAT_B8G8R8A8_UNORM to NV12 in GPU using DirectX11 pixel shaders

I'm working on code that captures the desktop using Desktop Duplication and encodes it to H.264 using the Intel hardware MFT. The encoder only accepts NV12 as input. I have a DXGI_FORMAT_B8G8R8A8_UNORM to NV12 converter (https://github.com/NVIDIA/video-sdk-samples/blob/master/nvEncDXGIOutputDuplicationSample/Preproc.cpp) that works fine and is based on the DirectX VideoProcessor.
The problem is that the VideoProcessor on certain Intel graphics hardware supports conversion only from DXGI_FORMAT_B8G8R8A8_UNORM to YUY2, not to NV12; I confirmed this by enumerating the supported formats through GetVideoProcessorOutputFormats. VideoProcessorBlt succeeds without any errors, but the frames in the output video are slightly pixelated; I can notice it if I look closely.
I guess the VideoProcessor has simply fallen back to the next supported output format (YUY2), and I'm unknowingly feeding that to the encoder, which thinks the input is NV12 as configured. There is no outright failure or major frame corruption because NV12 and YUY2 differ only in details like byte order and chroma subsampling. Also, I don't have the pixelation problem on hardware that supports NV12 conversion.
So I decided to do the color conversion using pixel shaders, based on this code (https://github.com/bavulapati/DXGICaptureDXColorSpaceConversionIntelEncode/blob/master/DXGICaptureDXColorSpaceConversionIntelEncode/DuplicationManager.cpp). I'm able to make the pixel shaders work, and I have also uploaded my code here (https://codeshare.io/5PJjxP) for reference (simplified as much as possible).
Now I'm left with two channels, luma and chroma, in separate ID3D11Texture2D textures, and I'm confused about how to efficiently pack the two separate channels into one ID3D11Texture2D so that I can feed it to the encoder. Is there a way to efficiently pack the Y and UV channels into a single ID3D11Texture2D on the GPU? I'm really tired of CPU-based approaches because they are costly and don't offer the best possible frame rates; in fact, I'm reluctant to even copy the textures to the CPU. I'm looking for a way to do it on the GPU without any back-and-forth copies between CPU and GPU.
I have been researching this for quite some time without any progress, any help would be appreciated.
/**
* This method is incomplete. It's just a template of what I want to achieve.
*/
HRESULT CreateNV12TextureFromLumaAndChromaSurface(ID3D11Texture2D** pOutputTexture)
{
HRESULT hr = S_OK;
try
{
//Copying from GPU to CPU. Bad :(
m_pD3D11DeviceContext->CopyResource(m_CPUAccessibleLuminanceSurf, m_LuminanceSurf);
D3D11_MAPPED_SUBRESOURCE resource;
UINT subresource = D3D11CalcSubresource(0, 0, 0);
HRESULT hr = m_pD3D11DeviceContext->Map(m_CPUAccessibleLuminanceSurf, subresource, D3D11_MAP_READ, 0, &resource);
BYTE* sptr = reinterpret_cast<BYTE*>(resource.pData);
BYTE* dptrY = nullptr; // point to the address of Y channel in output surface
//Store Image Pitch
int m_ImagePitch = resource.RowPitch;
int height = GetImageHeight();
int width = GetImageWidth();
for (int i = 0; i < height; i++)
{
memcpy_s(dptrY, m_ImagePitch, sptr, m_ImagePitch);
sptr += m_ImagePitch;
dptrY += m_ImagePitch;
}
m_pD3D11DeviceContext->Unmap(m_CPUAccessibleLuminanceSurf, subresource);
//Copying from GPU to CPU. Bad :(
m_pD3D11DeviceContext->CopyResource(m_CPUAccessibleChrominanceSurf, m_ChrominanceSurf);
hr = m_pD3D11DeviceContext->Map(m_CPUAccessibleChrominanceSurf, subresource, D3D11_MAP_READ, 0, &resource);
sptr = reinterpret_cast<BYTE*>(resource.pData);
BYTE* dptrUV = nullptr; // point to the address of UV channel in output surface
m_ImagePitch = resource.RowPitch;
height /= 2;
width /= 2;
for (int i = 0; i < height; i++)
{
memcpy_s(dptrUV, m_ImagePitch, sptr, m_ImagePitch);
sptr += m_ImagePitch;
dptrUV += m_ImagePitch;
}
m_pD3D11DeviceContext->Unmap(m_CPUAccessibleChrominanceSurf, subresource);
}
catch(HRESULT){}
return hr;
}
Draw NV12:
//
// Draw frame for NV12 texture
//
HRESULT DrawNV12Frame(ID3D11Texture2D* inputTexture)
{
HRESULT hr;
// If window was resized, resize swapchain
if (!m_bIntialized)
{
HRESULT Ret = InitializeNV12Surfaces(inputTexture);
if (!SUCCEEDED(Ret))
{
return Ret;
}
m_bIntialized = true;
}
m_pD3D11DeviceContext->CopyResource(m_ShaderResourceSurf, inputTexture);
D3D11_TEXTURE2D_DESC FrameDesc;
m_ShaderResourceSurf->GetDesc(&FrameDesc);
D3D11_SHADER_RESOURCE_VIEW_DESC ShaderDesc;
ShaderDesc.Format = FrameDesc.Format;
ShaderDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
ShaderDesc.Texture2D.MostDetailedMip = FrameDesc.MipLevels - 1;
ShaderDesc.Texture2D.MipLevels = FrameDesc.MipLevels;
// Create new shader resource view
ID3D11ShaderResourceView* ShaderResource = nullptr;
hr = m_pD3D11Device->CreateShaderResourceView(m_ShaderResourceSurf, &ShaderDesc, &ShaderResource);
IF_FAILED_THROW(hr);
m_pD3D11DeviceContext->PSSetShaderResources(0, 1, &ShaderResource);
// Set resources
m_pD3D11DeviceContext->OMSetRenderTargets(1, &m_pLumaRT, nullptr);
m_pD3D11DeviceContext->PSSetShader(m_pPixelShaderLuma, nullptr, 0);
m_pD3D11DeviceContext->RSSetViewports(1, &m_VPLuminance);
// Draw textured quad onto render target
m_pD3D11DeviceContext->Draw(NUMVERTICES, 0);
m_pD3D11DeviceContext->OMSetRenderTargets(1, &m_pChromaRT, nullptr);
m_pD3D11DeviceContext->PSSetShader(m_pPixelShaderChroma, nullptr, 0);
m_pD3D11DeviceContext->RSSetViewports(1, &m_VPChrominance);
// Draw textured quad onto render target
m_pD3D11DeviceContext->Draw(NUMVERTICES, 0);
// Release shader resource
ShaderResource->Release();
ShaderResource = nullptr;
return S_OK;
}
Init shaders:
void SetViewPort(D3D11_VIEWPORT* VP, UINT Width, UINT Height)
{
VP->Width = static_cast<FLOAT>(Width);
VP->Height = static_cast<FLOAT>(Height);
VP->MinDepth = 0.0f;
VP->MaxDepth = 1.0f;
VP->TopLeftX = 0;
VP->TopLeftY = 0;
}
HRESULT MakeRTV(ID3D11RenderTargetView** pRTV, ID3D11Texture2D* pSurf)
{
if (*pRTV)
{
(*pRTV)->Release();
*pRTV = nullptr;
}
// Create a render target view
HRESULT hr = m_pD3D11Device->CreateRenderTargetView(pSurf, nullptr, pRTV);
IF_FAILED_THROW(hr);
return S_OK;
}
HRESULT InitializeNV12Surfaces(ID3D11Texture2D* inputTexture)
{
ReleaseSurfaces();
D3D11_TEXTURE2D_DESC lOutputDuplDesc;
inputTexture->GetDesc(&lOutputDuplDesc);
// Create shared texture for all duplication threads to draw into
D3D11_TEXTURE2D_DESC DeskTexD;
RtlZeroMemory(&DeskTexD, sizeof(D3D11_TEXTURE2D_DESC));
DeskTexD.Width = lOutputDuplDesc.Width;
DeskTexD.Height = lOutputDuplDesc.Height;
DeskTexD.MipLevels = 1;
DeskTexD.ArraySize = 1;
DeskTexD.Format = lOutputDuplDesc.Format;
DeskTexD.SampleDesc.Count = 1;
DeskTexD.Usage = D3D11_USAGE_DEFAULT;
DeskTexD.BindFlags = D3D11_BIND_SHADER_RESOURCE;
HRESULT hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, nullptr, &m_ShaderResourceSurf);
IF_FAILED_THROW(hr);
DeskTexD.Format = DXGI_FORMAT_R8_UNORM;
DeskTexD.BindFlags = D3D11_BIND_RENDER_TARGET;
hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, nullptr, &m_LuminanceSurf);
IF_FAILED_THROW(hr);
DeskTexD.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
DeskTexD.Usage = D3D11_USAGE_STAGING;
DeskTexD.BindFlags = 0;
hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, NULL, &m_CPUAccessibleLuminanceSurf);
IF_FAILED_THROW(hr);
SetViewPort(&m_VPLuminance, DeskTexD.Width, DeskTexD.Height);
HRESULT Ret = MakeRTV(&m_pLumaRT, m_LuminanceSurf);
if (!SUCCEEDED(Ret))
return Ret;
DeskTexD.Width = lOutputDuplDesc.Width / 2;
DeskTexD.Height = lOutputDuplDesc.Height / 2;
DeskTexD.Format = DXGI_FORMAT_R8G8_UNORM;
DeskTexD.Usage = D3D11_USAGE_DEFAULT;
DeskTexD.CPUAccessFlags = 0;
DeskTexD.BindFlags = D3D11_BIND_RENDER_TARGET;
hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, nullptr, &m_ChrominanceSurf);
IF_FAILED_THROW(hr);
DeskTexD.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
DeskTexD.Usage = D3D11_USAGE_STAGING;
DeskTexD.BindFlags = 0;
hr = m_pD3D11Device->CreateTexture2D(&DeskTexD, NULL, &m_CPUAccessibleChrominanceSurf);
IF_FAILED_THROW(hr);
SetViewPort(&m_VPChrominance, DeskTexD.Width, DeskTexD.Height);
return MakeRTV(&m_pChromaRT, m_ChrominanceSurf);
}
HRESULT InitVertexShader(ID3D11VertexShader** ppID3D11VertexShader)
{
HRESULT hr = S_OK;
UINT Size = ARRAYSIZE(g_VS);
try
{
IF_FAILED_THROW(m_pD3D11Device->CreateVertexShader(g_VS, Size, NULL, ppID3D11VertexShader));;
m_pD3D11DeviceContext->VSSetShader(m_pVertexShader, nullptr, 0);
// Vertices for drawing whole texture
VERTEX Vertices[NUMVERTICES] =
{
{ XMFLOAT3(-1.0f, -1.0f, 0), XMFLOAT2(0.0f, 1.0f) },
{ XMFLOAT3(-1.0f, 1.0f, 0), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(1.0f, -1.0f, 0), XMFLOAT2(1.0f, 1.0f) },
{ XMFLOAT3(1.0f, -1.0f, 0), XMFLOAT2(1.0f, 1.0f) },
{ XMFLOAT3(-1.0f, 1.0f, 0), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(1.0f, 1.0f, 0), XMFLOAT2(1.0f, 0.0f) },
};
UINT Stride = sizeof(VERTEX);
UINT Offset = 0;
D3D11_BUFFER_DESC BufferDesc;
RtlZeroMemory(&BufferDesc, sizeof(BufferDesc));
BufferDesc.Usage = D3D11_USAGE_DEFAULT;
BufferDesc.ByteWidth = sizeof(VERTEX) * NUMVERTICES;
BufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
BufferDesc.CPUAccessFlags = 0;
D3D11_SUBRESOURCE_DATA InitData;
RtlZeroMemory(&InitData, sizeof(InitData));
InitData.pSysMem = Vertices;
// Create vertex buffer
IF_FAILED_THROW(m_pD3D11Device->CreateBuffer(&BufferDesc, &InitData, &m_VertexBuffer));
m_pD3D11DeviceContext->IASetVertexBuffers(0, 1, &m_VertexBuffer, &Stride, &Offset);
m_pD3D11DeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
D3D11_INPUT_ELEMENT_DESC Layout[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 }
};
UINT NumElements = ARRAYSIZE(Layout);
hr = m_pD3D11Device->CreateInputLayout(Layout, NumElements, g_VS, Size, &m_pVertexLayout);
m_pD3D11DeviceContext->IASetInputLayout(m_pVertexLayout);
}
catch (HRESULT) {}
return hr;
}
HRESULT InitPixelShaders()
{
HRESULT hr = S_OK;
// Refer https://codeshare.io/5PJjxP for g_PS_Y & g_PS_UV blobs
try
{
UINT Size = ARRAYSIZE(g_PS_Y);
hr = m_pD3D11Device->CreatePixelShader(g_PS_Y, Size, nullptr, &m_pPixelShaderChroma);
IF_FAILED_THROW(hr);
Size = ARRAYSIZE(g_PS_UV);
hr = m_pD3D11Device->CreatePixelShader(g_PS_UV, Size, nullptr, &m_pPixelShaderLuma);
IF_FAILED_THROW(hr);
}
catch (HRESULT) {}
return hr;
}
I am experimenting with this RGBA to NV12 conversion on the GPU only, using DirectX11.
This is a good challenge. I'm not familiar with DirectX 11, so this is my first experiment.
Check this project for updates: D3D11ShaderNV12
In my current implementation (which may not be the last), here is what I do:
Step 1: use a DXGI_FORMAT_B8G8R8A8_UNORM as input texture
Step 2: make a 1st pass shader to get 3 textures (Y:Luma, U:ChromaCb and V:ChromaCr): see YCbCrPS2.hlsl
Step 3: Y is DXGI_FORMAT_R8_UNORM, and is ready for final NV12 texture
Step 4: UV needs to be downsampled in a 2nd pass shader: see ScreenPS2.hlsl (using linear filtering)
Step 5: a third pass shader to sample Y texture
Step 6: a fourth pass shader to sample the UV texture using a shift texture (I think other techniques could be used)
My final texture is not DXGI_FORMAT_NV12, but a similar DXGI_FORMAT_R8_UNORM texture. My computer runs Windows 7, so DXGI_FORMAT_NV12 is not handled; I will try later on another computer.
(The original answer illustrates each step of the process with pictures.)
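For completeness, this is not what the project above does, but on Windows 8+ hardware that reports render-target support for DXGI_FORMAT_NV12, you can skip the packing step entirely by rendering the luma and chroma passes straight into the two planes of a single NV12 texture through per-plane render-target views. A hedged sketch:
// Sketch: create one NV12 texture and one RTV per plane. The luma pass is
// drawn with a full-size viewport into lumaRTV (R8), the chroma pass with a
// half-size viewport into chromaRTV (R8G8); the resulting texture can then
// be handed to the encoder as-is.
HRESULT CreateNV12RenderTarget(ID3D11Device* device, UINT width, UINT height,
                               ID3D11Texture2D** nv12Texture,
                               ID3D11RenderTargetView** lumaRTV,
                               ID3D11RenderTargetView** chromaRTV)
{
    D3D11_TEXTURE2D_DESC desc = {};
    desc.Width = width;                    // NV12 needs even width/height
    desc.Height = height;
    desc.MipLevels = 1;
    desc.ArraySize = 1;
    desc.Format = DXGI_FORMAT_NV12;
    desc.SampleDesc.Count = 1;
    desc.Usage = D3D11_USAGE_DEFAULT;
    desc.BindFlags = D3D11_BIND_RENDER_TARGET;

    HRESULT hr = device->CreateTexture2D(&desc, nullptr, nv12Texture);
    if (FAILED(hr)) return hr;

    D3D11_RENDER_TARGET_VIEW_DESC rtvDesc = {};
    rtvDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
    rtvDesc.Format = DXGI_FORMAT_R8_UNORM;      // plane 0: Y
    hr = device->CreateRenderTargetView(*nv12Texture, &rtvDesc, lumaRTV);
    if (FAILED(hr)) return hr;

    rtvDesc.Format = DXGI_FORMAT_R8G8_UNORM;    // plane 1: interleaved UV
    return device->CreateRenderTargetView(*nv12Texture, &rtvDesc, chromaRTV);
}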

How to make spinning cube in directx?

I'm learning DirectX programming. Yesterday I succeeded in making a cube.
Now I'm trying to make a spinning cube. In fact, I succeeded, but I'm not sure I'm doing it the right way.
Is the code below a commonly used approach? I call a function that transforms the vertex positions and creates a new vertex buffer every frame. Is there a way that avoids creating a vertex buffer every frame? Is there a more effective way to improve performance?
void BoxApp::makeVertex()
{
Vertex vertices[] =
{
{ XMFLOAT3(-1.0f, -1.0f, -1.0f), (const XMFLOAT4)Colors::White },
{ XMFLOAT3(-1.0f, +1.0f, -1.0f), (const XMFLOAT4)Colors::Black },
{ XMFLOAT3(+1.0f, +1.0f, -1.0f), (const XMFLOAT4)Colors::Red },
{ XMFLOAT3(+1.0f, -1.0f, -1.0f), (const XMFLOAT4)Colors::Green },
{ XMFLOAT3(-1.0f, -1.0f, +1.0f), (const XMFLOAT4)Colors::Blue },
{ XMFLOAT3(-1.0f, +1.0f, +1.0f), (const XMFLOAT4)Colors::Yellow },
{ XMFLOAT3(+1.0f, +1.0f, +1.0f), (const XMFLOAT4)Colors::Cyan },
{ XMFLOAT3(+1.0f, -1.0f, +1.0f), (const XMFLOAT4)Colors::Magenta }
};
rotating_angle += 0.01;
for (int i = 0; i < 8; i++) {
XMStoreFloat3(&vertices[i].pos, XMVector3Transform(XMLoadFloat3(&vertices[i].pos),
XMMatrixTranslation(0.0f, 1.0f, 1.0f) *
XMMatrixRotationX(rotating_angle) *
XMMatrixTranslation(0.0f, -1.0f, -1.0f)
));
}
D3D11_BUFFER_DESC vbd;
// ... set up the BUFFER_DESC
D3D11_SUBRESOURCE_DATA vinitData;
// ... set up the SUBRESOURCE_DATA
md3dDevice->CreateBuffer(&vbd, &vinitData, &mBoxVB);
}
Keep the vertex positions in local (object) space and build a local-to-world transformation matrix each frame. Send that matrix to the vertex shader through a constant buffer instead of rebuilding the vertex buffer.
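A hedged sketch of that approach; the names mWorldCB, md3dImmediateContext, UpdateSpin and the cbuffer layout are illustrative, not taken from your framework:
#include <DirectXMath.h>

// Build the world matrix on the CPU each frame and upload it to a constant
// buffer; the vertex buffer created at startup is never touched again.
struct WorldCB
{
    DirectX::XMFLOAT4X4 world;   // must match the cbuffer in the vertex shader
};

void BoxApp::UpdateSpin(float rotatingAngle)
{
    using namespace DirectX;

    XMMATRIX world =
        XMMatrixTranslation(0.0f, 1.0f, 1.0f) *
        XMMatrixRotationX(rotatingAngle) *
        XMMatrixTranslation(0.0f, -1.0f, -1.0f);

    WorldCB cb;
    XMStoreFloat4x4(&cb.world, XMMatrixTranspose(world)); // HLSL defaults to column-major

    // mWorldCB: an ID3D11Buffer* created once with D3D11_BIND_CONSTANT_BUFFER.
    md3dImmediateContext->UpdateSubresource(mWorldCB, 0, nullptr, &cb, 0, 0);
    md3dImmediateContext->VSSetConstantBuffers(0, 1, &mWorldCB);
}
In the vertex shader, multiply each incoming position by this matrix instead of baking the rotation into the vertex data.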

2D Programming with Direct3D 9 - Test image is distorted

I am trying to build a simple 2D game using 2D sprites with DirectX 9, and I'm having problems getting the images to come out cleanly. I'd like to load BMP images and display them on the screen as-is (no interpolation, no magnification, no filtering or anti-aliasing, etc.).
I'm sure I'm missing something, but when I try to render a 100x100 BMP to the screen, it looks choppy and distorted, like pixel art normally looks when shrunk slightly. I want the BMP to look exactly as it does when loaded in MS Paint.
Does anyone have any idea why this might be the case? My code is shown below:
Initialization code:
g_DxCom = Direct3DCreate9( D3D_SDK_VERSION );
if ( g_DxCom == NULL )
{
return false;
}
D3DDISPLAYMODE d3dDisplayMode;
if ( FAILED( g_DxCom->GetAdapterDisplayMode( D3DADAPTER_DEFAULT, &d3dDisplayMode ) ) )
{
return false;
}
D3DPRESENT_PARAMETERS d3dPresentParameters;
::ZeroMemory( &d3dPresentParameters, sizeof(D3DPRESENT_PARAMETERS) );
d3dPresentParameters.Windowed = FALSE;
d3dPresentParameters.SwapEffect = D3DSWAPEFFECT_DISCARD;
d3dPresentParameters.BackBufferFormat = d3dDisplayMode.Format; // D3DFMT_X8R8G8B8
d3dPresentParameters.BackBufferWidth = d3dDisplayMode.Width;
d3dPresentParameters.BackBufferHeight = d3dDisplayMode.Height;
d3dPresentParameters.PresentationInterval = D3DPRESENT_INTERVAL_ONE;
if ( FAILED( g_DxCom->CreateDevice( D3DADAPTER_DEFAULT,
D3DDEVTYPE_HAL,
this->hWnd,
D3DCREATE_HARDWARE_VERTEXPROCESSING,
&d3dPresentParameters,
&pd3dDevice ) ) )
{
if ( FAILED( g_DxCom->CreateDevice( D3DADAPTER_DEFAULT,
D3DDEVTYPE_HAL,
this->hWnd,
D3DCREATE_SOFTWARE_VERTEXPROCESSING,
&d3dPresentParameters,
&pd3dDevice ) ) )
{
return false;
}
}
texture = NULL;
bg_texture = NULL;
Render code:
LPDIRECT3DDEVICE9 g_dxDevice;
float float1 = 99.5f; // I'd like to render my 100x100 sprite from screen coordinates 100, 100 to 200, 200
float float2 = 198.5f;
CUSTOMVERTEX OurVertices[] =
{
{ float1, float2, 1.0f, 1.0f, 0.0f, 1.0f },
{ float1, float1, 1.0f, 1.0f, 0.0f, 0.0f },
{ float2, float1, 1.0f, 1.0f, 1.0f, 0.0f },
{ float1, float2, 1.0f, 1.0f, 0.0f, 1.0f },
{ float2, float1, 1.0f, 1.0f, 1.0f, 0.0f },
{ float2, float2, 1.0f, 1.0f, 1.0f, 1.0f }
};
LPDIRECT3DVERTEXBUFFER9 v_buffer;
g_dxDevice->CreateVertexBuffer( 6 * sizeof(CUSTOMVERTEX),
0,
CUSTOMFVF,
D3DPOOL_MANAGED,
&v_buffer,
NULL );
VOID* pVoid;
// Lock the vertex buffer into memory
v_buffer->Lock( 0, 0, &pVoid, 0 );
// Copy our vertex buffer to memory
::memcpy( pVoid, OurVertices, sizeof(OurVertices) );
// Unlock buffer
v_buffer->Unlock();
LPDIRECT3DTEXTURE9 g_texture;
HRESULT hError;
DWORD dwTextureFilter = D3DTEXF_NONE;
g_dxDevice->SetSamplerState( 0, D3DSAMP_MINFILTER, dwTextureFilter );
g_dxDevice->SetSamplerState( 0, D3DSAMP_MAGFILTER, dwTextureFilter );
g_dxDevice->SetSamplerState( 0, D3DSAMP_MIPFILTER, dwTextureFilter );
g_dxDevice->SetTextureStageState(0,D3DTSS_COLOROP,D3DTOP_SELECTARG1);
g_dxDevice->SetTextureStageState(0,D3DTSS_COLORARG1,D3DTA_TEXTURE);
g_dxDevice->SetTextureStageState(0,D3DTSS_COLORARG2,D3DTA_DIFFUSE);
hError = D3DXCreateTextureFromFile( g_dxDevice, L"Test.bmp", &g_texture ); // 100x100 sprite
g_dxDevice->SetTexture( 0, g_texture );
g_dxDevice->Clear( 0,
NULL,
D3DCLEAR_TARGET,
D3DCOLOR_XRGB( 0, 40, 100 ),
1.0f,
0 );
g_dxDevice->BeginScene();
// Do rendering on the back buffer here
g_dxDevice->SetFVF( CUSTOMFVF );
g_dxDevice->SetStreamSource( 0, v_buffer, 0, sizeof(CUSTOMVERTEX) );
g_dxDevice->DrawPrimitive( D3DPT_TRIANGLELIST, 0, 6 );
g_dxDevice->EndScene();
g_dxDevice->Present( NULL, NULL, NULL, NULL );
g_texture->Release();
v_buffer->Release();
Okay, so I've finally figured it out, and I should have known this was the case.
It looks like my texture was being rounded up to a power-of-two size: D3DXCreateTextureFromFile does this when the device doesn't expose non-power-of-two support, so the 100x100 image was stretched to fill a 128x128 texture and then looked distorted when drawn back at 100x100. If I change the image so that the sprite square is 128x128 (just adding some transparency) and run the application with float2 adjusted accordingly, there is no distortion in the rendered image.
Hurrah...
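If you would rather keep the original 100x100 image, D3DXCreateTextureFromFileEx can be asked not to round the size up. A hedged sketch; it only helps on devices that support non-power-of-two textures:
LPDIRECT3DTEXTURE9 g_texture = NULL;
HRESULT hr = D3DXCreateTextureFromFileEx(
    g_dxDevice, L"Test.bmp",
    D3DX_DEFAULT_NONPOW2, D3DX_DEFAULT_NONPOW2,  // keep the source width/height
    1,                                            // a single mip level
    0, D3DFMT_UNKNOWN, D3DPOOL_MANAGED,
    D3DX_FILTER_NONE, D3DX_FILTER_NONE,           // no resampling on load
    0, NULL, NULL,
    &g_texture);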
