As everybody knows, DX11 no longer supports drawing text to the screen out of the box. I wanted to work around that by creating a texture with DX11 and sharing it with DX10, so DX10 could draw text into it with the ID3DX10Font interface, and then blend everything with DX11 shaders. I get an error when trying to open the DX11-created texture with DX10. Here is the code:
D3D11_TEXTURE2D_DESC desc;
ZeroMemory(&desc,sizeof(D3D11_TEXTURE2D_DESC));
desc.ArraySize = 1;
desc.BindFlags = D3D11_BIND_RENDER_TARGET;
desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
desc.Height = 480;
desc.Width = 640;
desc.MipLevels = 1;
desc.SampleDesc.Count = 1;
desc.SampleDesc.Quality = 0;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
Globals::hr = D3D10CreateDevice(0,D3D10_DRIVER_TYPE_HARDWARE,0,0,D3D10_SDK_VERSION,&Globals::pD3D10Device);
Globals::hr = Globals::pD3D11Device->CreateTexture2D(&desc,0,&Globals::backBuffer10);
//keyed mutex for DX11 device
Globals::hr = Globals::backBuffer10->QueryInterface(__uuidof(IDXGIKeyedMutex),(void**)(&Globals::mutex11));
//Get the shared handle so that DX10 can render on texture
IDXGIResource *sharedResource10;
Globals::hr = Globals::backBuffer10->QueryInterface(__uuidof(IDXGIResource),(void**)(&sharedResource10));
Globals::hr = sharedResource10->GetSharedHandle(&Globals::sharedHandle);
sharedResource10->Release();
//open texture for DX10
IDXGISurface *sharedSurface10;
ID3D10Texture2D *sharedTexture10;
Globals::hr = Globals::pD3D10Device->OpenSharedResource(Globals::sharedHandle,__uuidof(IDXGISurface),(void**)(&sharedSurface10));
I get E_INVALIDARG from the last line of the code. Any thoughts? Both the DX11 and DX10 devices are created with DRIVER_TYPE_HARDWARE, and both use R8G8B8A8_UNORM as the back buffer format.
You don't need to resort to such unholy hacks, because:
ID3DX10Font and ID3DX10Sprite are really slow and crappy.
There are many libraries on the web:
DirectX Tool Kit's SpriteFont from the Microsoft folks (a quick usage sketch follows below)
FW1FontWrapper - a super-fast one
Tons for OpenGL (you can adapt them easily)
Or make your own: FreeType will make it easy.
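For reference, here is a minimal DirectX Tool Kit sketch (not from the original answer); device and context stand for your existing ID3D11Device and ID3D11DeviceContext, and "myfont.spritefont" is a placeholder file produced by the MakeSpriteFont tool:
#include <memory>
#include <DirectXColors.h>
#include <DirectXMath.h>
#include "SpriteBatch.h"
#include "SpriteFont.h"
using namespace DirectX;
// Created once, after the device is ready.
auto spriteBatch = std::make_unique<SpriteBatch>(context);
auto spriteFont  = std::make_unique<SpriteFont>(device, L"myfont.spritefont");
// Called every frame to draw the text on top of the scene.
spriteBatch->Begin();
spriteFont->DrawString(spriteBatch.get(), L"Hello, DX11", XMFLOAT2(10.0f, 10.0f), Colors::White);
spriteBatch->End();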
I'm really new to graphics programming in general, so please bear with me. I am trying to add shadow mapping from a distant light (orthogonal projection) into my scene, but when I follow the (very incomplete) steps from Frank Luna's DX12 book I find that my SRV for the shadow map is just filled with depths of 1.
If it helps, here is my SRV definition:
D3D12_TEX2D_SRV texDesc = {
0,      // MostDetailedMip
-1,     // MipLevels (all levels)
0,      // PlaneSlice
0.0f    // ResourceMinLODClamp
};
D3D12_SHADER_RESOURCE_VIEW_DESC srvDesc = {
DXGI_FORMAT_R32_TYPELESS,
D3D12_SRV_DIMENSION_TEXTURE2D,
D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING,
};
srvDesc.Texture2D = texDesc;
m_device->CreateShaderResourceView(m_lightDepthTexture.Get(),&srvDesc, m_cbvHeap->GetCPUDescriptorHandleForHeapStart());
and here are my DSV heap and descriptor definitions:
D3D12_DESCRIPTOR_HEAP_DESC dsvHeapDesc = {};
dsvHeapDesc.NumDescriptors = 2;
dsvHeapDesc.Type = D3D12_DESCRIPTOR_HEAP_TYPE_DSV;
dsvHeapDesc.Flags = D3D12_DESCRIPTOR_HEAP_FLAG_NONE;
ThrowIfFailed(m_device->CreateDescriptorHeap(&dsvHeapDesc, IID_PPV_ARGS(&m_dsvHeap)));
D3D12_DEPTH_STENCIL_VIEW_DESC depthStencilDesc = {};
depthStencilDesc.Format = DXGI_FORMAT_D32_FLOAT;
depthStencilDesc.ViewDimension = D3D12_DSV_DIMENSION_TEXTURE2D;
depthStencilDesc.Flags = D3D12_DSV_FLAG_NONE;
CD3DX12_HEAP_PROPERTIES heapProps = CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_DEFAULT);
CD3DX12_RESOURCE_DESC resourceDesc = CD3DX12_RESOURCE_DESC::Tex2D(DXGI_FORMAT_R32_TYPELESS, m_width, m_height, 1, 0, 1, 0, D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL);
D3D12_CLEAR_VALUE depthOptimizedClearValue = {};
depthOptimizedClearValue.Format = DXGI_FORMAT_D32_FLOAT;
depthOptimizedClearValue.DepthStencil.Depth = 1.0f;
depthOptimizedClearValue.DepthStencil.Stencil = 0;
ThrowIfFailed(m_device->CreateCommittedResource(
&heapProps,
D3D12_HEAP_FLAG_NONE,
&resourceDesc,
D3D12_RESOURCE_STATE_DEPTH_WRITE,
&depthOptimizedClearValue,
IID_PPV_ARGS(&m_dsvBuffer)
));
D3D12_RESOURCE_DESC texDesc;
ZeroMemory(&texDesc, sizeof(D3D12_RESOURCE_DESC));
texDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D;
texDesc.Alignment = 0;
texDesc.Width = m_width;
texDesc.Height = m_height;
texDesc.DepthOrArraySize = 1;
texDesc.MipLevels = 1;
texDesc.Format = DXGI_FORMAT_R32_TYPELESS;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Layout = D3D12_TEXTURE_LAYOUT_UNKNOWN;
texDesc.Flags = D3D12_RESOURCE_FLAG_ALLOW_DEPTH_STENCIL;
ThrowIfFailed(m_device->CreateCommittedResource(
&heapProps,
D3D12_HEAP_FLAG_NONE,
&texDesc,
D3D12_RESOURCE_STATE_GENERIC_READ,
&depthOptimizedClearValue,
IID_PPV_ARGS(&m_lightDepthTexture)
));
CD3DX12_CPU_DESCRIPTOR_HANDLE dsv(m_dsvHeap->GetCPUDescriptorHandleForHeapStart());
m_device->CreateDepthStencilView(m_dsvBuffer.Get(), &depthStencilDesc, dsv);
dsv.Offset(1, m_device->GetDescriptorHandleIncrementSize(D3D12_DESCRIPTOR_HEAP_TYPE_DSV));
m_device->CreateDepthStencilView(m_lightDepthTexture.Get(), &depthStencilDesc, dsv);
I then created a basic vertex shader that just transforms the vertices for the shadow map (from Frank Luna's book, pages 648 and 650). Since I bound m_lightDepthTexture via ID3D12GraphicsCommandList::OMSetRenderTargets, I assumed the depth values would be written into m_lightDepthTexture. But simply sampling this texture in my main pass shows that the values are all 1.0f, so nothing actually happened in my shadow pass!
I really have no idea what to ask, but if anyone has a sample DX12 shadow map I could see (Google comes up with DX11 or less, or much too complicated samples), or if there's a good source to learn about this, please let me know!
EDIT: I should mention that I changed the format from DXGI_FORMAT_D24_UNORM_S8_UINT, since I think the extra 8 stencil bits are irrelevant to my case. I changed back to the book's format and nothing changed, so I think this format should be fine.
If you remove the unnecessary return ret; from your shadow vertex shader, the problem then seems to be the winding order of your sphere's vertices. You can easily verify this by setting the cull mode to D3D12_CULL_MODE_NONE for your shadow PSO.
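As an illustration (not from the original answer), disabling culling in the shadow PSO's rasterizer state could look like this; shadowPsoDesc stands for whatever D3D12_GRAPHICS_PIPELINE_STATE_DESC you fill in for the shadow pass:
// Start from the default rasterizer state, then turn culling off while debugging.
shadowPsoDesc.RasterizerState = CD3DX12_RASTERIZER_DESC(D3D12_DEFAULT);
shadowPsoDesc.RasterizerState.CullMode = D3D12_CULL_MODE_NONE; // draw both faces so winding order doesn't matter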
You can easily correct your sphere's winding order by swapping any two vertices of every triangle, so wherever you have p1, p2, p3 you write it instead as, for example, p1, p3, p2.
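A small illustrative helper (indices is a hypothetical std::vector<uint16_t> index buffer, not something from your code):
#include <cstdint>
#include <utility>
#include <vector>
// Flip the winding of every triangle in an indexed triangle list by
// swapping the last two indices of each triangle.
void FlipWinding(std::vector<uint16_t>& indices)
{
    for (size_t i = 0; i + 2 < indices.size(); i += 3)
        std::swap(indices[i + 1], indices[i + 2]);
}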
You will also need to check the matrix multiplication order in your vertex shaders. I didn't check it in detail, but it is inconsistent, and I believe it is why the sphere will appear black once you fix the issue above. You also seem to be missing the division by w for your light coordinates in the lighting vertex shader.
I have the following texture description:
D3D11_TEXTURE2D_DESC texDesc = {};
texDesc.Width = 1920;
texDesc.Height = 953;
texDesc.MipLevels = 1;
texDesc.ArraySize = 1;
texDesc.Format = DXGI_FORMAT_NV12;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.CPUAccessFlags = 0;
texDesc.Usage = D3D11_USAGE_DEFAULT;
texDesc.BindFlags = (D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE);
texDesc.MiscFlags = D3D11_RESOURCE_MISC_SHARED;
And I want to create the texture using the description with ID3D11Device::CreateTexture2D:
HRESULT hr = _pDevice->CreateTexture2D(&texDesc, 0, _ppTexOutput);
With the description given, hr is always E_INVALIDARG.
But it all works if texDesc.Height is set to, for example, 954. The texture is also created successfully for every height if texDesc.Format is set to DXGI_FORMAT_B8G8R8A8_UNORM.
Is there something about the DXGI_FORMAT_NV12 format that doesn't support certain texture widths/heights? Should I just use heights that are divisible by 2? Or is there a more complicated rule behind this?
Yes, that format requires that both width and height are even. See the DXGI_FORMAT documentation for reference; it explicitly says for DXGI_FORMAT_NV12:
Width and height must be even.
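If your source really is 1920x953, one simple workaround (illustrative, not from the original answer) is to round the texture dimensions up to the next even value and leave the extra row unused:
// Round odd dimensions up so NV12 creation succeeds.
texDesc.Width  = (texDesc.Width  + 1) & ~1u; // 1920 stays 1920
texDesc.Height = (texDesc.Height + 1) & ~1u; // 953 becomes 954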
If you had the debug layer enabled, as Simon Mourier said in the comments, you would already know this. I strongly advise you to enable it, since it makes debugging DirectX a lot easier.
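For reference, a minimal sketch of turning the debug layer on at device creation (variable names are illustrative, not from the question):
#include <d3d11.h>
UINT flags = 0;
#if defined(_DEBUG)
flags |= D3D11_CREATE_DEVICE_DEBUG; // validation messages, such as the NV12 size rule, appear in the debugger output
#endif
ID3D11Device* device = nullptr;
ID3D11DeviceContext* context = nullptr;
HRESULT hr = D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, flags,
                               nullptr, 0, D3D11_SDK_VERSION,
                               &device, nullptr, &context);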
I'm trying to make DirectX display a sprite from a sprite sheet I found on the internet, but it doesn't seem to display the texture at the position I'm specifying.
D3DXVECTOR2 scaling = D3DXVECTOR2(1.0,1.0);
D3DXVECTOR2 center = D3DXVECTOR2(64,64);
D3DXMatrixTransformation2D(&mat,NULL,0.0,&scaling,NULL,rot,&position);
sprite->SetTransform(&mat);
sprite->Draw(zombieTexture,&srcRect,NULL,NULL,0xFFFFFFFF);
srcRect is defined as follows:
srcRect.top = 384;
srcRect.bottom = 512;
srcRect.left = 512;
srcRect.right = 640;
which should give this part of the texture: http://i.imgur.com/hqv5I.png
But instead I'm getting this: http://i.imgur.com/UkOCQ.png (ignore the rotation).
What am I doing wrong?
I have a question about a pixel shader I am trying to implement. Here is what I currently do (this is just for debugging and trying to figure things out):
int3 loc;
loc.x = (int)(In.TextureUV.x * resolution_XY.x);
loc.y = (int)(In.TextureUV.x * resolution_XY.x);
loc.z = 0;
float4 r = g_txDiffuse.Load(loc);
return float4(r.x, r.y, r.z, 1);
The problem is that the result is always (0, 0, 0, 1).
The texture buffer is created:
D3D11_TEXTURE2D_DESC tDesc;
tDesc.Height = 480;
tDesc.Width = 640;
tDesc.Usage = D3D11_USAGE_DYNAMIC;
tDesc.MipLevels = 1;
tDesc.ArraySize = 1;
tDesc.SampleDesc.Count = 1;
tDesc.SampleDesc.Quality = 0;
tDesc.Format = DXGI_FORMAT_R8_UINT;
tDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
tDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
tDesc.MiscFlags = 0;
V_RETURN(pd3dDevice->CreateTexture2D(&tDesc, NULL, &g_pCurrentImage));
I upload the texture (which should eventually become a live display) via:
D3D11_MAPPED_SUBRESOURCE resource;
pd3dImmediateContext->Map(g_pCurrentImage, 0, D3D11_MAP_WRITE_DISCARD, 0, &resource);
memcpy( resource.pData, g_Images.GetData(), g_Images.GetDataSize() );
pd3dImmediateContext->Unmap( g_pCurrentImage, 0 );
I've checked the resource.pData, the data in there is a valid 8bit monochrome image. I made sure the data coming from the camera is 8bit monochrome 640x480.
There are a few things I don't fully understand:
If I run the Map / memcpy / Unmap routine every frame, the driver eventually crashes and the system becomes unresponsive. Is there a different way a complete texture should be updated every frame?
The texture I uploaded is 8-bit, so why does Texture2D.Load() return a float4? Do I have to use a different method to access the texture data? I tried to Sample it, but that didn't work either. Would I have to use an int buffer or something instead?
Is there a way to debug the GPU memory, to check whether the memcpy worked in the first place?
The Map, memcpy, Unmap sequence really ought not to crash unless you are trying to copy too much data into the texture. It would be interesting to know what GetDataSize() returns. Does it equal 307,200 (640 x 480)? If it's more than that, there lies your problem.
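As an aside (not part of the original answer), a mapped dynamic texture should be filled row by row using the RowPitch the driver returns, which may be larger than the image width; here is a sketch using the question's variable names, assuming a tightly packed 640x480 8-bit source:
D3D11_MAPPED_SUBRESOURCE resource;
if (SUCCEEDED(pd3dImmediateContext->Map(g_pCurrentImage, 0, D3D11_MAP_WRITE_DISCARD, 0, &resource)))
{
    const BYTE* src = reinterpret_cast<const BYTE*>(g_Images.GetData());
    BYTE* dst = static_cast<BYTE*>(resource.pData);
    for (UINT row = 0; row < 480; ++row)
    {
        // Copy one 640-byte row; destination rows are RowPitch bytes apart.
        memcpy(dst + row * resource.RowPitch, src + row * 640, 640);
    }
    pd3dImmediateContext->Unmap(g_pCurrentImage, 0);
}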
Texture2D.Load returns a float4 because that's what you've asked for; if you write float r = g_txDiffuse.Load(...), you get a single float instead. The 8 bits get extended to a normalised float as part of the load process. Are you sure, by the way, that your calculation of loc is correct? As you have it now, loc.x and loc.y will always be the same.
You can debug what's going on in DirectX using PIX. It's a great tool and I highly recommend you familiarise yourself with it.
I just want to enable antialiasing in DirectX 9, but it doesn't seem to do much, and the text drawn with ID3DXFont::DrawText(...) looks jagged too.
Here is the initialization part:
pDirect3D = Direct3DCreate9( D3D_SDK_VERSION);
memset(&presentParameters, 0, sizeof(_D3DPRESENT_PARAMETERS_));
presentParameters.BackBufferCount = 1;
presentParameters.BackBufferWidth = 800;
presentParameters.BackBufferHeight = 500;
presentParameters.MultiSampleType = D3DMULTISAMPLE_NONMASKABLE;
presentParameters.MultiSampleQuality = 2;
presentParameters.SwapEffect = D3DSWAPEFFECT_DISCARD;
presentParameters.hDeviceWindow = hWnd;
presentParameters.Flags = 0;
presentParameters.FullScreen_RefreshRateInHz = D3DPRESENT_RATE_DEFAULT;
presentParameters.PresentationInterval = D3DPRESENT_INTERVAL_DEFAULT;
presentParameters.BackBufferFormat = D3DFMT_R5G6B5;
presentParameters.EnableAutoDepthStencil = TRUE;
presentParameters.AutoDepthStencilFormat = D3DFMT_D16;
presentParameters.Windowed = TRUE;
pDirect3D->CreateDevice(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, hWnd,D3DCREATE_SOFTWARE_VERTEXPROCESSING, &presentParameters, &pDevice);
pDevice->SetRenderState(D3DRS_CULLMODE, D3DCULL_NONE);
pDevice->SetRenderState(D3DRS_MULTISAMPLEANTIALIAS, TRUE);
Is there something I'm doing wrong?
ShowWindow(hWnd, nCmdShow);
UpdateWindow(hWnd);
First, text isn't anti-aliased by multi-sampling. Secondly, a MultiSampleQuality of 2 is barely noticeable; try 4 or 8, and to confirm it is having an effect, try toggling it and watching the jagged edges.
You should check out the AntiAlias sample provided in the DirectX SDK for details on setting this up properly.
I am creating text with meshes (D3DXCreateTextW), and I notice a significant difference when MultiSampling, even at low quality levels. With any kind of MultiSampling, the text and other lines are smooth, whereas they are jagged without MultiSampling.
Use CheckDeviceMultiSampleType to confirm that your video card does accept the type and level that you are requesting.
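A minimal sketch of that check, using the formats from the question (illustrative only), run before filling in the present parameters and creating the device:
DWORD qualityLevels = 0;
HRESULT hr = pDirect3D->CheckDeviceMultiSampleType(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL,
                                                   D3DFMT_R5G6B5, TRUE,
                                                   D3DMULTISAMPLE_4_SAMPLES, &qualityLevels);
if (SUCCEEDED(hr))
{
    presentParameters.MultiSampleType    = D3DMULTISAMPLE_4_SAMPLES;
    presentParameters.MultiSampleQuality = 0; // any value below qualityLevels is valid
}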