I am trying to use a texture embedded in a model file (it's not a .tga).
Here is my code; I don't know where the logical error is.
ID3D11ShaderResourceView* texturePtr = nullptr;
ID3D11Texture2D* texture2D = nullptr;
ID3D11SamplerState* sampleStatePtr = nullptr;
hr = CoInitialize(NULL);
assert(SUCCEEDED(hr));
devConPtr->PSSetSamplers(0, 1, &sampleStatePtr);
devConPtr->PSSetShaderResources(0, 1, &texturePtr);
// HLSL side:
Texture2D tex : TEXTURE;
SamplerState mySampler : SAMPLER;
// C++ side:
D3D11_SAMPLER_DESC sd;
ZeroMemory(&sd, sizeof(sd));
sd.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
sd.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
sd.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
sd.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
sd.MipLODBias = 0.0f;
sd.MaxLOD = D3D11_FLOAT32_MAX;
sd.ComparisonFunc = D3D11_COMPARISON_NEVER;
hr = devPtr->CreateSamplerState(&sd, &sampleStatePtr);
DXGI_SAMPLE_DESC sample;
sample.Count = 1;
sample.Quality = 0;
D3D11_TEXTURE2D_DESC textureDesc;
textureDesc.Width = w;
textureDesc.Height = h;
textureDesc.MipLevels = 1;
textureDesc.ArraySize = 1;
textureDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
textureDesc.SampleDesc = sample;
textureDesc.Usage = D3D11_USAGE_DEFAULT;
textureDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
textureDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
textureDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA subData;
subData.pSysMem = data;
subData.SysMemPitch = sizeof(*data)*w;
HRESULT hr = devPtr->CreateTexture2D(
&textureDesc,
&subData,
&texture2D
);
assert(SUCCEEDED(hr));
//(ID3D11Texture2D*)texture2D;
texturePtr->QueryInterface(IID_ID3D11Texture2D, (void**)&texture2D);
D3D11_SHADER_RESOURCE_VIEW_DESC shvD;
shvD.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
shvD.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
hr= devPtr->CreateShaderResourceView(texture2D, &shvD, &texturePtr);
assert(SUCCEEDED(hr));
hr = DirectX::CreateWICTextureFromMemory(devPtr, devConPtr,
    (const uint8_t*)&data, sizeof(*data),
    nullptr, &texturePtr, NULL);
assert(SUCCEEDED(hr));
unsigned int textureCount = mat->GetTextureCount(aiTextureType_UNKNOWN);
for (UINT j = 0; j < textureCount; j++)
{
aiString* path = nullptr;
mat->GetTexture(aiTextureType_UNKNOWN, j, path);
assert(path->length >= 2);
int index = atoi(&path->C_Str()[1]);
createTexture(scenePtr->mTextures[index]->mWidth,
    scenePtr->mTextures[index]->mHeight,
    (uint8_t*)scenePtr->mTextures[index]->pcData);
}
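For reference, this is my understanding of Assimp's embedded-texture convention (a sketch, not something I've verified here: when mHeight is 0, pcData is a compressed image of mWidth bytes and should go through CreateWICTextureFromMemory; otherwise it is an uncompressed mWidth x mHeight array of aiTexel entries):
const aiTexture* embedded = scenePtr->mTextures[index];
if (embedded->mHeight == 0)
{
    // Compressed (e.g. PNG/JPEG): mWidth is the byte size of pcData.
    hr = DirectX::CreateWICTextureFromMemory(devPtr, devConPtr,
        reinterpret_cast<const uint8_t*>(embedded->pcData),
        embedded->mWidth, nullptr, &texturePtr);
}
else
{
    // Uncompressed: mWidth x mHeight aiTexel entries (4 bytes each).
    createTexture(embedded->mWidth, embedded->mHeight,
        (uint8_t*)embedded->pcData);
}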
If you could find the logical error or help with the debugging, that would be super helpful. I tried putting a breakpoint at my HRESULTs, but I can't find the variables; the debugger does say that my resource-view pointer is always nullptr despite me trying to use it.
I am using C++ with DirectX 11 and the DirectX Tool Kit.
You are not initializing shvD completely. To fix it, initialize it like this:
D3D11_SHADER_RESOURCE_VIEW_DESC shvD;
shvD.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
shvD.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
shvD.Texture2D.MostDetailedMip = 0;
shvD.Texture2D.MipLevels = 1;
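As an aside, for a fully typed format like DXGI_FORMAT_R8G8B8A8_UNORM you can also pass a null description and let the runtime derive a view of the entire resource (a minimal sketch):
// A null desc creates a view covering the whole resource with its own format and mip count.
hr = devPtr->CreateShaderResourceView(texture2D, nullptr, &texturePtr);
assert(SUCCEEDED(hr));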
I created two devices, A and B, where A is the producer and B is the consumer. A texture is shared between the two through D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX. Now I find that every time B calls CopyResource to copy the shared texture to a local texture, the video memory keeps increasing. This problem has troubled me for a long time. I searched GitHub and other code sources, but couldn't find a solution. My machine runs Win10 with a GeForce GTX 1060.
The most streamlined code to reproduce the problem is as follows:
#include <dxgi.h>
#include <d3d11.h>
#include <atlbase.h>
#pragma comment(lib, "D3D11.lib")
#define RETURN_ON_FAIL(hr) if(FAILED(hr)) return hr;
namespace SharedTextureLeakTest
{
const UINT32 texture_width = 320;
const UINT32 texture_height = 240;
ATL::CComPtr<ID3D11Device> device_1 = nullptr;
ATL::CComPtr<ID3D11DeviceContext> context_1 = nullptr;
ATL::CComPtr<ID3D11Device> device_2 = nullptr;
ATL::CComPtr<ID3D11DeviceContext> context_2 = nullptr;
ATL::CComPtr<ID3D11Texture2D> shared_texture_dev1 = nullptr; // owned by device1
ATL::CComPtr<ID3D11Texture2D> dst_texture_dev2 = nullptr; // owned by device2
HANDLE shared_handle = nullptr;
HRESULT CheckEnvironmentValid()
{
HRESULT hr = S_OK;
//Create Device1
if (!device_1)
{
D3D_FEATURE_LEVEL pFeatureLevel;
UINT Flags = D3D11_CREATE_DEVICE_DEBUG;
hr = ::D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, Flags, nullptr, 0, D3D11_SDK_VERSION, &device_1, &pFeatureLevel, &context_1);
if (FAILED(hr))
{
device_1 = nullptr;
return hr;
}
}
//Create Shared Texture
if (!shared_texture_dev1)
{
D3D11_TEXTURE2D_DESC desc;
desc.ArraySize = 1;
desc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
desc.CPUAccessFlags = 0;
desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
desc.Width = texture_width;
desc.Height = texture_height;
desc.MipLevels = 1;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
desc.SampleDesc.Count = 1;
desc.SampleDesc.Quality = 0;
hr = device_1->CreateTexture2D(&desc, nullptr, &shared_texture_dev1);
RETURN_ON_FAIL(hr);
}
//Get Handle of Shared Texture
if (!shared_handle)
{
ATL::CComPtr<IDXGIResource> dxgi_res;
hr = shared_texture_dev1->QueryInterface(__uuidof(IDXGIResource), (void**)&dxgi_res);
RETURN_ON_FAIL(hr);
hr = dxgi_res->GetSharedHandle(&shared_handle);
RETURN_ON_FAIL(hr);
}
//Create Device2
if (!device_2)
{
D3D_FEATURE_LEVEL pFeatureLevel;
UINT Flags = D3D11_CREATE_DEVICE_DEBUG;
hr = ::D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, Flags, nullptr, 0, D3D11_SDK_VERSION, &device_2, &pFeatureLevel, &context_2);
if (FAILED(hr))
{
device_2 = nullptr;
return hr;
}
}
//Create Dst Texture
if (!dst_texture_dev2)
{
D3D11_TEXTURE2D_DESC desc;
desc.ArraySize = 1;
desc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
desc.CPUAccessFlags = 0;
desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
desc.Width = texture_width;
desc.Height = texture_height;
desc.MipLevels = 1;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.MiscFlags = 0;
desc.SampleDesc.Count = 1;
desc.SampleDesc.Quality = 0;
hr = device_2->CreateTexture2D(&desc, nullptr, &dst_texture_dev2);
RETURN_ON_FAIL(hr);
}
return hr;
}
HRESULT LoopOnceTest()
{
HRESULT hr = CheckEnvironmentValid();
RETURN_ON_FAIL(hr);
//Open Shared Texture
ATL::CComPtr<ID3D11Texture2D> shared_texture_dev2;
hr = device_2->OpenSharedResource(shared_handle, IID_PPV_ARGS(&shared_texture_dev2));
RETURN_ON_FAIL(hr);
//Copy
do
{
CComPtr<IDXGIKeyedMutex> km;
hr = shared_texture_dev2->QueryInterface(__uuidof(IDXGIKeyedMutex), (void**)&km);
RETURN_ON_FAIL(hr);
INT64 release_key = 0;
hr = km->AcquireSync(0, 10);
RETURN_ON_FAIL(hr);
if (hr == WAIT_OBJECT_0)
{
context_2->CopyResource(dst_texture_dev2, shared_texture_dev2);
// context_2->Flush();
}
hr = km->ReleaseSync(0);
RETURN_ON_FAIL(hr);
} while (FALSE);
return hr;
}
}
void DoTest()
{
for(;;)
{
SharedTextureLeakTest::LoopOnceTest();
Sleep(1000);
}
}
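For completeness, the producer side (device_1) has to take the keyed mutex around its writes in the same way; this is a sketch of my understanding of that half of the protocol, using the same 0/0 keys as the consumer loop above (it would sit inside the SharedTextureLeakTest namespace):
HRESULT ProduceOnce()
{
    using namespace SharedTextureLeakTest;
    ATL::CComPtr<IDXGIKeyedMutex> km;
    HRESULT hr = shared_texture_dev1->QueryInterface(__uuidof(IDXGIKeyedMutex), (void**)&km);
    RETURN_ON_FAIL(hr);
    hr = km->AcquireSync(0, 10);
    RETURN_ON_FAIL(hr);
    if (hr == WAIT_OBJECT_0)
    {
        // ... update shared_texture_dev1 on context_1 here ...
        context_1->Flush();
    }
    return km->ReleaseSync(0);
}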
I am learning DirectX 11 these days, and I have been stuck on the compute shader section.
So I made four resources and three corresponding views:
immutable input buffer = {1,1,1,1,1} / SRV
immutable input buffer = {2,2,2,2,2} / SRV
output buffer / UAV
staging buffer for reading / No View
And I succeeded in creating everything, dispatching the CS function, copying data from the output buffer to the staging buffer, and reading/checking the data.
// INPUT BUFFER1--------------------------------------------------
const int dataSize = 5;
D3D11_BUFFER_DESC vb_dest;
vb_dest.ByteWidth = sizeof(float) * dataSize;
vb_dest.StructureByteStride = sizeof(float);
vb_dest.BindFlags = D3D11_BIND_SHADER_RESOURCE;
vb_dest.Usage = D3D11_USAGE_IMMUTABLE;
vb_dest.CPUAccessFlags = 0;
vb_dest.MiscFlags = 0;
float v1_float[dataSize] = { 1,1,1,1,1 };
D3D11_SUBRESOURCE_DATA v1_data;
v1_data.pSysMem = static_cast<void*>(v1_float);
device->CreateBuffer(
&vb_dest,
&v1_data,
valueBuffer1.GetAddressOf());
D3D11_SHADER_RESOURCE_VIEW_DESC srv_desc;
srv_desc.Format = DXGI_FORMAT_R32_FLOAT;
srv_desc.ViewDimension = D3D11_SRV_DIMENSION_BUFFER;
srv_desc.Buffer.FirstElement = 0;
srv_desc.Buffer.NumElements = dataSize;
srv_desc.Buffer.ElementWidth = sizeof(float);
device->CreateShaderResourceView(
valueBuffer1.Get(),
&srv_desc,
inputSRV1.GetAddressOf());
// INPUT BUFFER2-----------------------------------------------------------
float v2_float[dataSize] = { 2,2,2,2,2 };
D3D11_SUBRESOURCE_DATA v2_data;
v2_data.pSysMem = static_cast<void*>(v2_float);
device->CreateBuffer(
&vb_dest,
&v2_data,
valueBuffer2.GetAddressOf());
device->CreateShaderResourceView(
valueBuffer2.Get(),
&srv_desc,
inputSRV2.GetAddressOf());
// OUTPUT BUFFER-----------------------------------------------------------
D3D11_BUFFER_DESC ov_desc;
ov_desc.ByteWidth = sizeof(float) * dataSize;
ov_desc.StructureByteStride = sizeof(float);
ov_desc.BindFlags = D3D11_BIND_UNORDERED_ACCESS;
ov_desc.Usage = D3D11_USAGE_DEFAULT;
ov_desc.CPUAccessFlags = 0;
ov_desc.MiscFlags = 0;
device->CreateBuffer(
&ov_desc,
nullptr,
outputResource.GetAddressOf());
D3D11_UNORDERED_ACCESS_VIEW_DESC outputUAV_desc;
outputUAV_desc.Format = DXGI_FORMAT_R32_FLOAT;
outputUAV_desc.ViewDimension = D3D11_UAV_DIMENSION_BUFFER;
outputUAV_desc.Buffer.FirstElement = 0;
outputUAV_desc.Buffer.NumElements = dataSize;
outputUAV_desc.Buffer.Flags = 0;
device->CreateUnorderedAccessView(
outputResource.Get(),
&outputUAV_desc,
outputUAV.GetAddressOf());
// BUFFER FOR COPY-----------------------------------------------------------
D3D11_BUFFER_DESC rb_desc;
rb_desc.ByteWidth = sizeof(float) * dataSize;
rb_desc.StructureByteStride = sizeof(float);
rb_desc.Usage = D3D11_USAGE_STAGING;
rb_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
rb_desc.BindFlags = 0;
rb_desc.MiscFlags = 0;
device->CreateBuffer(
&rb_desc,
nullptr,
readResource.GetAddressOf());
// DISPATCH and COPY and GET DATA
dContext->CSSetShaderResources(0, 1, inputSRV1.GetAddressOf());
dContext->CSSetShaderResources(1, 1, inputSRV2.GetAddressOf());
dContext->CSSetUnorderedAccessViews(0, 1, outputUAV.GetAddressOf(), nullptr);
dContext->CSSetShader(cs.Get(), nullptr, 0);
dContext->Dispatch(1, 1, 1);
dContext->CopyResource(readResource.Get(), outputResource.Get());
D3D11_MAPPED_SUBRESOURCE mappedResource2;
ZeroMemory(&mappedResource2, sizeof(D3D11_MAPPED_SUBRESOURCE));
R_CHECK(dContext->Map(readResource.Get(), 0, D3D11_MAP_READ, 0, &mappedResource2));
float* data = static_cast<float*>(mappedResource2.pData);
for (int i = 0; i < 5; ++i)
{
int a = data[i];
}
And this is the compute shader code:
StructuredBuffer<float> inputA : register(t0);
StructuredBuffer<float> inputB : register(t1);
RWStructuredBuffer<float> output : register(u0);
[numthreads(5, 1, 1)]
void main(int3 id : SV_DispatchThreadID)
{
output[id.x] = inputA[id.x] + inputB[id.x];
}
In the CS, it adds the data from the two input buffers and stores the result in the output buffer,
so the expected answer would be {3,3,3,3,3},
but the result is {3,0,0,0,0}; only the first index has the proper answer.
Any advice would be amazing.
dContext->CopyResource(readResource.Get(), outputResource.Get());
D3D11_MAPPED_SUBRESOURCE mappedResource2;
ZeroMemory(&mappedResource2, sizeof(D3D11_MAPPED_SUBRESOURCE));
R_CHECK(dContext->Map(readResource.Get(), 0, D3D11_MAP_READ, 0, &mappedResource2));
float* data = static_cast<float*>(mappedResource2.pData);
for (int i = 0; i < 5; ++i)
{
int a = data[i];
}
This code should be structured like this instead:
CopyResource();
Map();
// declare and allocate 'data'
ZeroMemory(data);
memcpy(data, /* mapped pointer */);
Unmap();
For some reason, I have to use memcpy instead of reading the resource directly through the pointer I get from mapping.
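A minimal sketch of that pattern with the buffers above (dataSize floats, as in the question):
dContext->CopyResource(readResource.Get(), outputResource.Get());
D3D11_MAPPED_SUBRESOURCE mapped = {};
R_CHECK(dContext->Map(readResource.Get(), 0, D3D11_MAP_READ, 0, &mapped));
float results[dataSize] = {};                     // CPU-side copy of the output
memcpy(results, mapped.pData, sizeof(float) * dataSize);
dContext->Unmap(readResource.Get(), 0);           // release the mapping before using the copy
// results now holds the CS output, e.g. {3,3,3,3,3}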
As I mentioned in the title, do you know how to flip an ID3D10Texture2D object horizontally/vertically?
I used this code to take a screenshot and save it to a file.
ID3D10Resource *backbufferRes;
renderTargetView->GetResource(&backbufferRes);
ID3D10Texture2D *mRenderedTexture;
// Create our texture
D3D10_TEXTURE2D_DESC texDesc;
texDesc.ArraySize = 1;
texDesc.BindFlags = 0;
texDesc.CPUAccessFlags = 0;
texDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
texDesc.Width = 640; // must be same as backbuffer
texDesc.Height = 480; // must be same as backbuffer
texDesc.MipLevels = 1;
texDesc.MiscFlags = 0;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D10_USAGE_DEFAULT;
d3d10Device->CreateTexture2D(&texDesc, 0, &mRenderedTexture);
d3d10Device->CopyResource(mRenderedTexture, backbufferRes);
D3DX10FilterTexture(mRenderedTexture, 0, D3DX10_FILTER_MIRROR_U);
D3DX10SaveTextureToFile(mRenderedTexture, D3DX10_IFF_PNG, L"test.png");
The line D3DX10FilterTexture(mRenderedTexture, 0, D3DX10_FILTER_MIRROR_U); doesn't mirror my texture. Any suggestions?
In your shader, do 1-u to flip horizontally or 1-v to flip vertically.
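For example, in the pixel shader (an HLSL sketch; tex and samp stand for whatever texture and sampler your shader already binds):
float4 PS(float2 uv : TEXCOORD0) : SV_Target
{
    // 1 - u mirrors horizontally; use float2(uv.x, 1.0f - uv.y) for a vertical flip.
    return tex.Sample(samp, float2(1.0f - uv.x, uv.y));
}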
Edit: If you aren't actually doing any rendering, then there are far better ways to do image manipulation. However, if you want to do it manually, you will have to use Map and flip the data around yourself.
You could do that as follows (the code is not tested, so please excuse any compile errors):
ID3D10Resource *backbufferRes;
renderTargetView->GetResource(&backbufferRes);
ID3D10Texture2D *mRenderedTexture;
// Create our texture
D3D10_TEXTURE2D_DESC texDesc;
texDesc.ArraySize = 1;
texDesc.BindFlags = 0;
texDesc.CPUAccessFlags = D3D10_CPU_ACCESS_READ | D3D10_CPU_ACCESS_WRITE; // Map needs CPU access
texDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
texDesc.Width = 640; // must be same as backbuffer
texDesc.Height = 480; // must be same as backbuffer
texDesc.MipLevels = 1;
texDesc.MiscFlags = 0;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D10_USAGE_STAGING; // staging so the copied texture is CPU-mappable
d3d10Device->CreateTexture2D(&texDesc, 0, &mRenderedTexture);
d3d10Device->CopyResource(mRenderedTexture, backbufferRes);
D3D10_MAPPED_TEXTURE2D d3d10MT = { 0 };
mRenderedTexture->Map( 0, D3D10_MAP_READ_WRITE, 0, &d3d10MT );
unsigned int* pPix = (unsigned int*)d3d10MT.pData;
const UINT rowStride = d3d10MT.RowPitch / sizeof(unsigned int); // the pitch may be wider than the image
int rows = 0;
int rowsMax = texDesc.Height;
while( rows < rowsMax )
{
    unsigned int* pRowStart = pPix + (rows * rowStride);
    unsigned int* pRowEnd = pRowStart + texDesc.Width;
    std::reverse( pRowStart, pRowEnd ); // mirror this scanline (needs <algorithm>)
    rows++;
}
mRenderedTexture->Unmap(0);
D3DX10SaveTextureToFile(mRenderedTexture, D3DX10_IFF_PNG, L"test.png");
From the docs, btw:
D3DX10_FILTER_MIRROR_U: Pixels off the edge of the texture on the u-axis should be mirrored, not wrapped.
So that only applies to the pixels around the edge when you are filtering the image.
I have a custom model file format that I am reading from to create a model in DX. I use
DWORD dwFVF = ( D3DFVF_XYZ | D3DFVF_NORMAL | D3DFVF_TEX1 );
D3DXCreateMeshFVF(numIndices/3, numVertices, D3DXMESH_MANAGED, dwFVF, *d3ddev, mesh);
to create the mesh, then lock, fill, unlock the index buffer, vertex buffer, and attribute buffer in turn.
void createMeshFromSkn(ifstream* fHandle, LPDIRECT3DDEVICE9 * d3ddev, LPD3DXMESH * mesh)
{
// Start reading the file
int magic = readInt(fHandle);
short version = readShort(fHandle);
short numObjects = readShort(fHandle);
SKNMaterial *materialHeaders;
if (version > 0)
{
// Read in the material headers
int numMaterialHeaders = readInt(fHandle);
fHandle->seekg((16 + MATERIAL_NAME_SIZE) * numMaterialHeaders, ios::cur);
// Read in model data.
int numIndices = readInt(fHandle);
int numVertices = readInt(fHandle);
// Create the mesh
DWORD dwFVF = ( D3DFVF_XYZ | D3DFVF_NORMAL | D3DFVF_TEX1 );
D3DXCreateMeshFVF(numIndices/3, numVertices, D3DXMESH_MANAGED, dwFVF, *d3ddev, mesh);
// Read in the index buffer
WORD* indexBuffer = 0;
(*mesh)->LockIndexBuffer(0, (void**)&indexBuffer);
for (int i = 0; i < numIndices; i++)
{
indexBuffer[i] = readShort(fHandle);
}
(*mesh)->UnlockIndexBuffer();
// Read in the vertexBuffer
D3DVERTEX* vertexBuffer;
(*mesh)->LockVertexBuffer( 0, (void**)&vertexBuffer);
for (int i = 0; i < numVertices; ++i)
{
((D3DVERTEX*)vertexBuffer)[i].position.x = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].position.y = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].position.z = readFloat(fHandle);
for (unsigned int j = 0; j < BONE_INDEX_SIZE; ++j)
{
int bone = (int) readByte(fHandle);
//data->vertices[i].boneIndex[j] = bone;
}
//////////////////////////////////////////////////////////////////////////
//
// Need to fix this to work with bones
//
//////////////////////////////////////////////////////////////////////////
D3DXVECTOR4 weight;
weight.x = readFloat(fHandle);
weight.y = readFloat(fHandle);
weight.z = readFloat(fHandle);
weight.w = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].normal.x = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].normal.y = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].normal.z = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].tu = readFloat(fHandle);
((D3DVERTEX*)vertexBuffer)[i].tv = readFloat(fHandle);
}
(*mesh)->UnlockVertexBuffer();
DWORD *pAttribBuf;
HRESULT hRslt = (*mesh)->LockAttributeBuffer(0, &pAttribBuf);
if(hRslt != D3D_OK)
return; // Add error handling
unsigned int numFaces = (*mesh)->GetNumFaces();
for(unsigned int i=0; i<numFaces; i++)
pAttribBuf[i]= 0;
hRslt = (*mesh)->UnlockAttributeBuffer();
if(hRslt != D3D_OK)
return; // Add error handling
DWORD *m_pAdjacencyBuffer = new DWORD[3 * (*mesh)->GetNumFaces()];
(*mesh)->GenerateAdjacency(0.0f, m_pAdjacencyBuffer);
(*mesh)->OptimizeInplace(D3DXMESHOPT_ATTRSORT | D3DXMESHOPT_VERTEXCACHE, m_pAdjacencyBuffer, NULL, NULL, NULL);
delete[] m_pAdjacencyBuffer; // the adjacency data is no longer needed once the mesh is optimized
}
return;
}
My problem is that the model is overlapping with itself:
http://imageshack.us/a/img210/2732/20121018181019896.png
I have CCW backface culling enabled:
d3ddev->SetRenderState(D3DRS_CULLMODE, D3DCULL_CCW);
I also have z-buffer enabled, but I'm pretty sure that's only between two meshes, not between a mesh and itself.
I've spent the last day and a half trying to Google for a solution, but I couldn't find anything. Any help or links to help would be greatly appreciated.
It turns out I hadn't actually turned on Z-buffering, because I needed to turn it on in the d3d presentation parameters:
d3dpp.EnableAutoDepthStencil = TRUE;
d3dpp.AutoDepthStencilFormat = D3DFMT_D16;
Once I did that and added a
d3ddev->Clear(0, NULL, D3DCLEAR_ZBUFFER, D3DCOLOR_XRGB(0, 0, 0), 1.0f, 0);
to the render loop, it renders correctly.
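Equivalently, the depth clear can be folded into the usual color clear; a sketch of the one-call form:
// Clear color and depth together: black background, depth reset to 1.0.
d3ddev->Clear(0, NULL, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER,
              D3DCOLOR_XRGB(0, 0, 0), 1.0f, 0);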
Wow, so glad I figured this out. I hope this helps others in their explorations of DX.
I am trying to write my first DirectX 10 program that displays a triangle. Everything compiles fine, and the render function is called, since the background changes to black. However, the triangle I'm trying to draw with a triangle strip primitive is not displayed at all.
The Initialization function:
bool InitDirect3D(HWND hWnd, int width, int height)
{
//****** D3DDevice and SwapChain *****//
DXGI_SWAP_CHAIN_DESC swapChainDesc;
ZeroMemory(&swapChainDesc, sizeof(swapChainDesc));
swapChainDesc.BufferCount = 1;
swapChainDesc.BufferDesc.Width = width;
swapChainDesc.BufferDesc.Height = height;
swapChainDesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
swapChainDesc.BufferDesc.RefreshRate.Numerator = 60;
swapChainDesc.BufferDesc.RefreshRate.Denominator = 1;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.OutputWindow = hWnd;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.Windowed = TRUE;
if (FAILED(D3D10CreateDeviceAndSwapChain( NULL,
D3D10_DRIVER_TYPE_HARDWARE,
NULL,
0,
D3D10_SDK_VERSION,
&swapChainDesc,
&pSwapChain,
&pD3DDevice)))
return fatalError(TEXT("Hardware does not support DirectX 10!"));
//***** Shader *****//
if (FAILED(D3DX10CreateEffectFromFile( TEXT("basicEffect.fx"),
NULL, NULL,
"fx_4_0",
D3D10_SHADER_ENABLE_STRICTNESS,
0,
pD3DDevice,
NULL,
NULL,
&pBasicEffect,
NULL,
NULL)))
return fatalError(TEXT("Could not load effect file!"));
pBasicTechnique = pBasicEffect->GetTechniqueByName("Render");
pViewMatrixEffectVariable = pBasicEffect->GetVariableByName( "View" )->AsMatrix();
pProjectionMatrixEffectVariable = pBasicEffect->GetVariableByName( "Projection" )->AsMatrix();
pWorldMatrixEffectVariable = pBasicEffect->GetVariableByName( "World" )->AsMatrix();
//***** Input Assembly Stage *****//
D3D10_INPUT_ELEMENT_DESC layout[] =
{
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D10_INPUT_PER_VERTEX_DATA, 0},
{"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D10_INPUT_PER_VERTEX_DATA, 0}
};
UINT numElements = 2;
D3D10_PASS_DESC PassDesc;
pBasicTechnique->GetPassByIndex(0)->GetDesc(&PassDesc);
if (FAILED( pD3DDevice->CreateInputLayout( layout,
numElements,
PassDesc.pIAInputSignature,
PassDesc.IAInputSignatureSize,
&pVertexLayout)))
return fatalError(TEXT("Could not create Input Layout."));
pD3DDevice->IASetInputLayout( pVertexLayout );
//***** Vertex buffer *****//
UINT numVertices = 100;
D3D10_BUFFER_DESC bd;
bd.Usage = D3D10_USAGE_DYNAMIC;
bd.ByteWidth = sizeof(vertex) * numVertices;
bd.BindFlags = D3D10_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = D3D10_CPU_ACCESS_WRITE;
bd.MiscFlags = 0;
if (FAILED(pD3DDevice->CreateBuffer(&bd, NULL, &pVertexBuffer)))
return fatalError(TEXT("Could not create vertex buffer!"));;
UINT stride = sizeof(vertex);
UINT offset = 0;
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
//***** Rasterizer *****//
// Set the viewport
viewPort.Width = width;
viewPort.Height = height;
viewPort.MinDepth = 0.0f;
viewPort.MaxDepth = 1.0f;
viewPort.TopLeftX = 0;
viewPort.TopLeftY = 0;
pD3DDevice->RSSetViewports(1, &viewPort);
D3D10_RASTERIZER_DESC rasterizerState;
rasterizerState.CullMode = D3D10_CULL_NONE;
rasterizerState.FillMode = D3D10_FILL_SOLID;
rasterizerState.FrontCounterClockwise = true;
rasterizerState.DepthBias = 0;
rasterizerState.DepthBiasClamp = 0;
rasterizerState.SlopeScaledDepthBias = 0;
rasterizerState.DepthClipEnable = true;
rasterizerState.ScissorEnable = false;
rasterizerState.MultisampleEnable = false;
rasterizerState.AntialiasedLineEnable = true;
ID3D10RasterizerState* pRS;
pD3DDevice->CreateRasterizerState(&rasterizerState, &pRS);
pD3DDevice->RSSetState(pRS);
//***** Output Merger *****//
// Get the back buffer from the swapchain
ID3D10Texture2D *pBackBuffer;
if (FAILED(pSwapChain->GetBuffer(0, __uuidof(ID3D10Texture2D), (LPVOID*)&pBackBuffer)))
return fatalError(TEXT("Could not get back buffer."));
// create the render target view
if (FAILED(pD3DDevice->CreateRenderTargetView(pBackBuffer, NULL, &pRenderTargetView)))
return fatalError(TEXT("Could not create the render target view."));
// release the back buffer
pBackBuffer->Release();
// set the render target
pD3DDevice->OMSetRenderTargets(1, &pRenderTargetView, NULL);
return true;
}
The render function:
void Render()
{
if (pD3DDevice != NULL)
{
pD3DDevice->ClearRenderTargetView(pRenderTargetView, D3DXCOLOR(0.0f, 0.0f, 0.0f, 0.0f));
//create world matrix
static float r;
D3DXMATRIX w;
D3DXMatrixIdentity(&w);
D3DXMatrixRotationY(&w, r);
r += 0.001f;
//set effect matrices
pWorldMatrixEffectVariable->SetMatrix(w);
pViewMatrixEffectVariable->SetMatrix(viewMatrix);
pProjectionMatrixEffectVariable->SetMatrix(projectionMatrix);
//fill vertex buffer with vertices
UINT numVertices = 3;
vertex* v = NULL;
//lock vertex buffer for CPU use
pVertexBuffer->Map(D3D10_MAP_WRITE_DISCARD, 0, (void**) &v );
v[0] = vertex( D3DXVECTOR3(-1,-1,0), D3DXVECTOR4(1,0,0,1) );
v[1] = vertex( D3DXVECTOR3(0,1,0), D3DXVECTOR4(0,1,0,1) );
v[2] = vertex( D3DXVECTOR3(1,-1,0), D3DXVECTOR4(0,0,1,1) );
pVertexBuffer->Unmap();
// Set primitive topology
pD3DDevice->IASetPrimitiveTopology( D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP );
//get technique desc
D3D10_TECHNIQUE_DESC techDesc;
pBasicTechnique->GetDesc(&techDesc);
for(UINT p = 0; p < techDesc.Passes; ++p)
{
//apply technique
pBasicTechnique->GetPassByIndex(p)->Apply(0);
//draw
pD3DDevice->Draw(numVertices, 0);
}
pSwapChain->Present(0,0);
}
}
I'm not sure, but try setting:
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
after you unmap the buffer, to get something like this:
pVertexBuffer->Unmap();
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
// Set primitive topology
pD3DDevice->IASetPrimitiveTopology( D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP );
I suspect that mapping blows away the buffer binding.
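As a side note, creating the device with the debug layer enabled makes the runtime report unbound or mismatched pipeline state, which helps track down "nothing draws" problems like this one. A sketch of the same call from InitDirect3D with the Flags argument (currently 0) changed:
if (FAILED(D3D10CreateDeviceAndSwapChain( NULL,
                                          D3D10_DRIVER_TYPE_HARDWARE,
                                          NULL,
                                          D3D10_CREATE_DEVICE_DEBUG, // was 0
                                          D3D10_SDK_VERSION,
                                          &swapChainDesc,
                                          &pSwapChain,
                                          &pD3DDevice)))
    return fatalError(TEXT("Hardware does not support DirectX 10!"));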