Error returned when trying to copy the render target's back buffer - DirectX

I have a WDDM user-mode display driver for DX9, and I would like to dump the
render target's back buffer to a BMP file. Since the render target resource is
not lockable, I have to create a resource in system memory, blit from the
render target into it, and then save it to the BMP file. However, the blit
always returns the error code E_FAIL. I also tried pfnCaptureToSysMem, which
returned the same error code. Is anything wrong here?
// Describe a system-memory surface with the same dimensions as the render target.
D3DDDI_SURFACEINFO nfo;
nfo.Depth = 0;
nfo.Width = GetRenderSize().cx;
nfo.Height = GetRenderSize().cy;
nfo.pSysMem = NULL;
nfo.SysMemPitch = 0;
nfo.SysMemSlicePitch = 0;
// Create a lockable system-memory resource to receive the back-buffer contents.
D3DDDIARG_CREATERESOURCE resource;
resource.Format = D3DDDIFMT_A8R8G8B8;
resource.Pool = D3DDDIPOOL_SYSTEMMEM;
resource.MultisampleType = D3DDDIMULTISAMPLE_NONE;
resource.MultisampleQuality = 0;
resource.pSurfList = &nfo;
resource.SurfCount = 1;
resource.MipLevels = 1;
resource.Fvf = 0;
resource.VidPnSourceId = 0;
resource.RefreshRate.Numerator = 0;
resource.RefreshRate.Denominator = 0;
resource.hResource = NULL;
resource.Flags.Value = 0;
resource.Flags.Texture = 1;
resource.Flags.Dynamic = 1;
resource.Rotation = D3DDDI_ROTATION_IDENTITY;
HRESULT hr = m_pDevice->m_deviceFuncs.pfnCreateResource(m_pDevice->GetDrv(), &resource);
HANDLE hSysSpace = resource.hResource;
// Blit the render target into the system-memory resource.
D3DDDIARG_BLT blt;
blt.hSrcResource = m_pDevice->m_hRenderTarget;
blt.hDstResource = hSysSpace;
blt.SrcRect.left = 0;
blt.SrcRect.top = 0;
blt.SrcRect.right = GetRenderSize().cx;
blt.SrcRect.bottom = GetRenderSize().cy;
blt.DstRect = blt.SrcRect;
blt.DstSubResourceIndex = 0;
blt.SrcSubResourceIndex = 0;
blt.Flags.Value = 0;
blt.ColorKey = 0;
hr = m_pDevice->m_deviceFuncs.pfnBlt(m_pDevice, &blt);

You are on the right track, but I think you can use the public DirectX functions for this.
To copy the render target from video memory to system memory, use the IDirect3DDevice9::GetRenderTargetData() function.
This function requires that the destination surface be an offscreen plain surface created in the D3DPOOL_SYSTEMMEM pool. The surface must also have the same dimensions as the render target (no stretching allowed). Use IDirect3DDevice9::CreateOffscreenPlainSurface() to create it.
Then this surface can be locked and the color data can be accessed by the CPU.
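A minimal sketch of that approach (assuming device and renderTarget are your existing IDirect3DDevice9 and a non-multisampled render-target surface; error handling trimmed):
D3DSURFACE_DESC desc;
renderTarget->GetDesc(&desc);
// Destination must be an offscreen plain surface in D3DPOOL_SYSTEMMEM
// with the same size and format as the render target.
IDirect3DSurface9* sysSurface = NULL;
device->CreateOffscreenPlainSurface(desc.Width, desc.Height, desc.Format,
    D3DPOOL_SYSTEMMEM, &sysSurface, NULL);
HRESULT hr = device->GetRenderTargetData(renderTarget, sysSurface);
if (SUCCEEDED(hr))
{
    D3DLOCKED_RECT lr;
    if (SUCCEEDED(sysSurface->LockRect(&lr, NULL, D3DLOCK_READONLY)))
    {
        // lr.pBits holds the pixels (lr.Pitch bytes per row); write the BMP here.
        sysSurface->UnlockRect();
    }
}
sysSurface->Release();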

Related

CreateTexture2D returns black image

I am trying to make a desktop recorder, but all I get is a black screen, and I have no clue why.
I tried with DX9 first: the same thing happens when I use the back buffer. The front-buffer method does work and captures the frames correctly, but it's too slow (33 ms per frame, all because of GetFrontBuffer).
So I decided to try DX11. There are no errors returned, no errors when creating the swap chain and device, and the frames are in fact captured (I measure the time and FPS, and something is going on), but they are all black, as if they were not coming from the desktop but from somewhere else.
This is the capture method:
if(contains_errors()){return;}
m_swap_chain->GetBuffer(0, __uuidof(ID3D11Resource), (void**)&m_back_buffer_ptr);
return_if_null(m_back_buffer_ptr);
HRESULT hr = m_back_buffer_ptr->QueryInterface(__uuidof(ID3D11Resource), (void**)&m_back_buffer_data);
return_if_failed(hr);
hr = m_swap_chain->GetDevice(__uuidof(ID3D11Device), (void**)&m_device);
return_if_failed(hr);
hr = m_swap_chain->GetDesc(&m_desc);
return_if_failed(hr);
ID3D11Texture2D* texture = nullptr;
hr = m_device->CreateTexture2D(&m_tex_desc, 0, &texture);
return_if_failed(hr);
ID3D11DeviceContext* context = nullptr;
m_device->GetImmediateContext(&context);
return_if_null(context);
context->CopyResource(texture, m_back_buffer_data);
D3D11_MAPPED_SUBRESOURCE map_subres = {0, 0, 0};
hr = context->Map(texture, 0, D3D11_MAP_READ, 0, &map_subres);
return_if_failed(hr);
if (m_current_frame == 0)
{
    m_current_frame = new BYTE[map_subres.DepthPitch];
}
memcpy(m_current_frame, map_subres.pData, map_subres.DepthPitch);
context->Unmap(texture, 0);   // unmap before releasing the staging texture
context->Release();
texture->Release();
m_device->Release();
This is the texture description setup:
ZeroMemory(&m_tex_desc, sizeof(m_tex_desc));
m_tex_desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
m_tex_desc.Width = m_desc.BufferDesc.Width;
m_tex_desc.Height = m_desc.BufferDesc.Height;
m_tex_desc.MipLevels = 1;
m_tex_desc.ArraySize = 1;
m_tex_desc.SampleDesc.Count = 1;
m_tex_desc.Usage = D3D11_USAGE_STAGING;
m_tex_desc.BindFlags = 0;
m_tex_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
m_tex_desc.MiscFlags = 0;
This is the swap chain description:
m_desc.BufferDesc.Width = 1366;
m_desc.BufferDesc.Height = 768;
m_desc.BufferDesc.RefreshRate.Numerator = 1;
m_desc.BufferDesc.RefreshRate.Denominator = 60;
m_desc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
m_desc.BufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
m_desc.BufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
m_desc.SampleDesc.Count = 2;
m_desc.SampleDesc.Quality = 0;
m_desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
m_desc.BufferCount = 1;
m_desc.OutputWindow = (HWND)m_dx_win->winId();
m_desc.Windowed = true;
m_desc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
m_desc.Flags = 0;
Class members:
private:
IDXGISwapChain* m_swap_chain = 0;
ID3D11DeviceContext* m_context = 0;
Dx_Output_Window* m_dx_win = 0;
IDXGIResource* m_back_buffer_ptr = 0;
ID3D11Resource* m_back_buffer_data = 0;
ID3D11Device* m_device = 0;
D3D_FEATURE_LEVEL m_selected_feature;
DXGI_SWAP_CHAIN_DESC m_desc;
D3D11_TEXTURE2D_DESC m_tex_desc = {};
I looked up basically all the resources I could, but I could not find any info on why it runs without errors yet the image is all black. I was thinking maybe there was something up with the display, but no: I took the raw data and displayed the values, and every pixel was exactly 0, which is black.
For "m_desc.OutputWindow = (HWND)m_dx_win->winId();" I also tried GetDesktopWindow(), but it doesn't change anything; in fact I got some warnings instead.

Error when creating a 2D texture with dynamic usage

I am pretty new to DirectX and I am stumbling over resource handling.
First, I created a texture that I can read/write on the GPU, and it worked well.
Now, as you can see in my code, I wanted to read this texture on the CPU as well (from the application side), so I changed the usage from USAGE_DEFAULT to USAGE_DYNAMIC.
Microsoft::WRL::ComPtr<ID3D11Texture2D> outputTexture;
D3D11_TEXTURE2D_DESC outputTex_desc;
outputTex_desc.Format = DXGI_FORMAT_R32_FLOAT;
outputTex_desc.Width = 3;
outputTex_desc.Height = 3;
outputTex_desc.MipLevels = 1;
outputTex_desc.ArraySize = 1;
outputTex_desc.BindFlags = D3D11_BIND_UNORDERED_ACCESS |
                           D3D11_BIND_SHADER_RESOURCE;
outputTex_desc.SampleDesc.Count = msCount;
outputTex_desc.SampleDesc.Quality = msQuality;
outputTex_desc.Usage = D3D11_USAGE_DYNAMIC;
outputTex_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
outputTex_desc.MiscFlags = 0;
// CREATE 'TEXTURE'
device->CreateTexture2D( // FAIL HERE !!!
    &outputTex_desc,
    nullptr,
    outputTexture.GetAddressOf());
// CREATE 'SRV'
...
// CREATE 'UAV'
...
It starts failing exactly when it executes device->CreateTexture2D().
Any advice would be amazing.
The first two steps in debugging any Direct3D 11 program are:
Make sure you are checking every HRESULT for either success (SUCCEEDED macro) or failure (FAILED macro). If it is safe to ignore the return value at runtime, then the function returns void. See ThrowIfFailed.
Enable the Direct3D debug device and look for debug output (a.k.a. use D3D11_CREATE_DEVICE_DEBUG in your Debug configuration).
If you enable the Direct3D debug device, you'll get detailed information on why the API returns failure codes in many cases.
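For example, a minimal sketch of creating the device with the debug layer enabled (hedged: this assumes the SDK debug layers are installed on the machine):
UINT flags = 0;
#ifdef _DEBUG
flags |= D3D11_CREATE_DEVICE_DEBUG;   // the Direct3D debug device
#endif
Microsoft::WRL::ComPtr<ID3D11Device> device;
Microsoft::WRL::ComPtr<ID3D11DeviceContext> context;
HRESULT hr = D3D11CreateDevice(
    nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, flags,
    nullptr, 0, D3D11_SDK_VERSION,
    device.GetAddressOf(), nullptr, context.GetAddressOf());
// On failure, retry without D3D11_CREATE_DEVICE_DEBUG (the layers may be absent).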
If you did that, you'd see the error for this code:
D3D11 ERROR: ID3D11Device::CreateTexture2D: A D3D11_USAGE_DYNAMIC Resource
may only have the D3D11_CPU_ACCESS_WRITE CPUAccessFlags set.
[ STATE_CREATION ERROR #98: CREATETEXTURE2D_INVALIDCPUACCESSFLAGS]
In order to READ it on the CPU, you must copy to a D3D11_USAGE_STAGING Resource first. For example source code on doing that, see the ScreenGrab code from DirectX Tool Kit.
You don't mention what your msCount or msQuality values are here; I assumed 1 and 0, respectively. If you use any other values, you'll get:
D3D11 ERROR: ID3D11Device::CreateTexture2D: Multisampling is not supported
with the D3D11_BIND_UNORDERED_ACCESS BindFlag. SampleDesc.Count must be 1
and SampleDesc.Quality must be 0.
[ STATE_CREATION ERROR #99: CREATETEXTURE2D_INVALIDBINDFLAGS]
In order to access a texture from the CPU (read mode), you need to create a separate staging texture, then copy your texture into it.
These are the flags for the GPU-only texture (note that I force the sample count to 1, as UAV access is not allowed for multisampled textures):
D3D11_TEXTURE2D_DESC gpuTexDesc;
gpuTexDesc.Format = DXGI_FORMAT_R32_FLOAT;
gpuTexDesc.Width = 3;
gpuTexDesc.Height = 3;
gpuTexDesc.MipLevels = 1;
gpuTexDesc.ArraySize = 1;
gpuTexDesc.BindFlags = D3D11_BIND_UNORDERED_ACCESS |
                       D3D11_BIND_SHADER_RESOURCE;
gpuTexDesc.SampleDesc.Count = 1;
gpuTexDesc.SampleDesc.Quality = 0;
gpuTexDesc.Usage = D3D11_USAGE_DEFAULT;
gpuTexDesc.CPUAccessFlags = 0; // no CPU access (D3D11 has no D3D11_CPU_ACCESS_NONE constant)
gpuTexDesc.MiscFlags = 0;
Then you create a second, staging texture for reading:
D3D11_TEXTURE2D_DESC readTexDesc;
readTexDesc.Format = DXGI_FORMAT_R32_FLOAT;
readTexDesc.Width = 3;
readTexDesc.Height = 3;
readTexDesc.MipLevels = 1;
readTexDesc.ArraySize = 1;
readTexDesc.BindFlags = 0; //No bind flags allowed for staging
readTexDesc.SampleDesc.Count = 1;
readTexDesc.SampleDesc.Quality = 0;
readTexDesc.Usage = D3D11_USAGE_STAGING; //need staging flag for read
readTexDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
readTexDesc.MiscFlags = 0;
Then you can use CopyResource:
deviceContext->CopyResource(readTex, gpuTex);
Once done, you can finally access the texture data for reading using Map:
D3D11_MAPPED_SUBRESOURCE MappedResource;
deviceContext->Map(readTex, 0, D3D11_MAP_READ, 0, &MappedResource);
MappedResource gives you access to the data on the CPU. Once you are done processing, don't forget to Unmap the resource:
deviceContext->Unmap(readTex, 0);
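One caveat when reading the mapped data: RowPitch is often larger than width * bytes-per-pixel, so it is safer to copy row by row. A hedged sketch for the 3x3 R32_FLOAT texture above, reusing the MappedResource variable from the snippet:
const int width = 3, height = 3;
float pixels[width * height];
const BYTE* src = static_cast<const BYTE*>(MappedResource.pData);
for (int y = 0; y < height; ++y)
{
    // each source row starts RowPitch bytes after the previous one
    memcpy(&pixels[y * width], src + y * MappedResource.RowPitch,
           width * sizeof(float));
}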

ParamValidationExt error causing WelsInitEncoderExt to fail while setting up the OpenH264 encoder

Scenario:
I am using OpenH264 in my app to encode into a video_file.mp4.
Environment:
Platform: macOS Sierra
Compiler: Clang++
The code:
Following is the crux of the code I have:
void EncodeVideoFile() {
    ISVCEncoder* encoder_;
    std::string video_file_name = "/Path/to/some/folder/video_file.mp4";
    EncodeFileParam* pEncFileParam;
    SEncParamExt* pEnxParamExt;
    float frameRate = 1000;
    EUsageType usageType = EUsageType::CAMERA_VIDEO_REAL_TIME;
    bool denoise = false;
    bool lossless = true;
    bool enable_ltr = false;
    int layers = 1;
    bool cabac = false;
    int sliceMode = 1;

    pEncFileParam = new EncodeFileParam;
    pEncFileParam->eUsageType = EUsageType::CAMERA_VIDEO_REAL_TIME;
    pEncFileParam->pkcFileName = video_file_name.c_str();
    pEncFileParam->iWidth = frame_width;
    pEncFileParam->iHeight = frame_height;
    pEncFileParam->fFrameRate = frameRate;
    pEncFileParam->iLayerNum = layers;
    pEncFileParam->bDenoise = denoise;
    pEncFileParam->bLossless = lossless;
    pEncFileParam->bEnableLtr = enable_ltr;
    pEncFileParam->bCabac = cabac;

    int rv = WelsCreateSVCEncoder (&encoder_);

    pEnxParamExt = new SEncParamExt;
    pEnxParamExt->iUsageType = pEncFileParam->eUsageType;
    pEnxParamExt->iPicWidth = pEncFileParam->iWidth;
    pEnxParamExt->iPicHeight = pEncFileParam->iHeight;
    pEnxParamExt->fMaxFrameRate = pEncFileParam->fFrameRate;
    pEnxParamExt->iSpatialLayerNum = pEncFileParam->iLayerNum;
    pEnxParamExt->bEnableDenoise = pEncFileParam->bDenoise;
    pEnxParamExt->bIsLosslessLink = pEncFileParam->bLossless;
    pEnxParamExt->bEnableLongTermReference = pEncFileParam->bEnableLtr;
    pEnxParamExt->iEntropyCodingModeFlag = pEncFileParam->bCabac ? 1 : 0;
    for (int i = 0; i < pEnxParamExt->iSpatialLayerNum; i++) {
        pEnxParamExt->sSpatialLayers[i].sSliceArgument.uiSliceMode = pEncFileParam->eSliceMode;
    }
    encoder_->InitializeExt(pEnxParamExt);

    int videoFormat = videoFormatI420;
    encoder_->SetOption (ENCODER_OPTION_DATAFORMAT, &videoFormat);

    int frameSize = frame_width * frame_height * 3 / 2;
    int total_num = 500;
    BufferedData buf;
    buf.SetLength (frameSize);
    // check the buffer before proceeding
    if (buf.Length() != (size_t)frameSize) {
        CloseEncoder();
        return;
    }

    SFrameBSInfo info;
    memset (&info, 0, sizeof (SFrameBSInfo));
    SSourcePicture pic;
    memset (&pic, 0, sizeof (SSourcePicture));
    pic.iPicWidth = frame_width;
    pic.iPicHeight = frame_height;
    pic.iColorFormat = videoFormatI420;
    pic.iStride[0] = pic.iPicWidth;
    pic.iStride[1] = pic.iStride[2] = pic.iPicWidth >> 1;
    pic.pData[0] = buf.data();
    pic.pData[1] = pic.pData[0] + frame_width * frame_height;
    pic.pData[2] = pic.pData[1] + (frame_width * frame_height >> 2);
    for (int num = 0; num < total_num; num++) {
        // try to encode the frame
        rv = encoder_->EncodeFrame (&pic, &info);
    }

    if (encoder_) {
        encoder_->Uninitialize();
        WelsDestroySVCEncoder (encoder_);
    }
}
The above code is something I pulled from the official usage examples of OpenH264, where BufferedData.h is a class I reused from the OpenH264 utils.
Issue:
But, I am getting the following error:
[OpenH264] this = 0x0x1038bc8c0, Error:ParamValidationExt(), width > 0, height > 0, width * height <= 9437184, invalid 0 x 0 in dependency layer settings!
[OpenH264] this = 0x0x1038bc8c0, Error:WelsInitEncoderExt(), ParamValidationExt failed return 2.
[OpenH264] this = 0x0x1038bc8c0, Error:CWelsH264SVCEncoder::Initialize(), WelsInitEncoderExt failed.
The above does not crash the application, but it goes through a blank run without creating video_file.mp4 from the dummy data that I am trying to write into it.
Question:
There seems to be something wrong with the setup config I am applying to pEnxParamExt, which goes into encoder_->InitializeExt.
What am I doing wrong with the setup of the encoder?
Note:
I am not trying to hook up to any camera device. I am just trying to create a .mp4 video out of some dummy image data.
If you want a complete and working OpenH264 encoder initialization procedure, you can click... here.
According to your problem scenario, you are trying to create a video file (.mp4/.avi) from some dummy images. This task requires two different kinds of library: i) a codec library, and ii) a container library.
i) Codec library: It's quite easy to use OpenH264 to compress data. One thing I must mention is that OpenH264 always works with raw frames, e.g. yuv420 data. So if you want to compress your image data, you first have to convert it into the yuv420 color format. To get OpenH264 click... here. (A minimal initialization sketch follows this list.)
ii) Container library: After getting the encoded data, you have to use another library to create the container with the extension .mp4, .avi, .flv, etc. There are a lot of libraries on GitHub that do this, such as FFmpeg, OpenCV, Bento4, MP4Maker, mp4parser, etc. Before using these libraries, please check their licenses in detail. If you use FFmpeg, you will not need OpenH264, because FFmpeg itself ships with several codecs. You will also find a lot more working examples, since so many developers work with video data out there.
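As an aside on the original error: the "invalid 0 x 0 in dependency layer settings" message suggests the per-spatial-layer dimensions were never filled in. A minimal, hedged initialization sketch (based on the public SEncParamExt fields; the frame size and bitrate values are placeholder assumptions):
#include <wels/codec_api.h>  // OpenH264 public API; header path may vary

ISVCEncoder* encoder = NULL;
if (WelsCreateSVCEncoder(&encoder) != 0 || !encoder) return;
SEncParamExt param;
encoder->GetDefaultParams(&param);            // start from sane defaults
param.iUsageType = CAMERA_VIDEO_REAL_TIME;
param.iPicWidth = 640;                        // placeholder frame size
param.iPicHeight = 480;
param.fMaxFrameRate = 30.0f;                  // placeholder frame rate
param.iTargetBitrate = 500000;                // placeholder bitrate (bits/s)
param.iSpatialLayerNum = 1;
for (int i = 0; i < param.iSpatialLayerNum; ++i) {
    // Crucial: each layer needs its own non-zero dimensions, otherwise
    // ParamValidationExt rejects the 0 x 0 defaults.
    param.sSpatialLayers[i].iVideoWidth = param.iPicWidth;
    param.sSpatialLayers[i].iVideoHeight = param.iPicHeight;
    param.sSpatialLayers[i].fFrameRate = param.fMaxFrameRate;
    param.sSpatialLayers[i].iSpatialBitrate = param.iTargetBitrate;
}
int rv = encoder->InitializeExt(&param);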
Hope it helps. :)

LibSvm add features using the JAVA api

I have a text and I want to train by adding features using the Java API. Looking at the examples, the main class for building the training set is svm_problem. It appears that svm_node represents a feature (the index is the feature and the value is the weight of the feature).
What I have done is to keep a map (just to simplify the problem) that associates each feature with an index. For each of my <feature, weight> pairs I create a new node:
svm_node currentNode = new svm_node();
int index = feature.getIndexInMap();
double value = feature.getWeight();
currentNode.index = index;
currentNode.value = value;
Is my intuition correct? What does svm_problem.y refer to? Does it refer to the index of the label? Is svm_problem.l just the length of the two vectors?
Your intuition is very close: each svm_node holds a single feature (an index/value pair), and each entry of svm_problem.x is a pattern, i.e. an array of svm_node terminated by a node with index -1. The variable svm_problem.y is an array containing the label of each pattern, and svm_problem.l is the size of the training set.
Also, beware that svm_parameter.nr_weight is the number of per-label penalty weights (useful if you have an unbalanced training set); if you are not going to use them, you must set that value to zero.
Let me show you a simple example in C++:
#include "svm.h"
#include <iostream>
using namespace std;
int main()
{
    svm_parameter params;
    params.svm_type = C_SVC;
    params.kernel_type = RBF;
    params.C = 1;
    params.gamma = 1;
    params.nr_weight = 0;      // no per-label weights
    params.p = 0.0001;
    // The remaining fields must be initialized too, or svm_train reads garbage:
    params.degree = 3;
    params.coef0 = 0;
    params.nu = 0.5;
    params.cache_size = 100;   // in MB
    params.eps = 1e-3;         // stopping tolerance
    params.shrinking = 1;
    params.probability = 0;
    params.weight_label = NULL;
    params.weight = NULL;

    svm_problem problem;
    problem.l = 4;
    problem.y = new double[4] {1, -1, -1, 1};
    problem.x = new svm_node*[4];
    // Each pattern is an array of (index, value) nodes terminated by index -1.
    {
        problem.x[0] = new svm_node[3];
        problem.x[0][0].index = 1;
        problem.x[0][0].value = 0;
        problem.x[0][1].index = 2;
        problem.x[0][1].value = 0;
        problem.x[0][2].index = -1;   // terminator
    }
    {
        problem.x[1] = new svm_node[3];
        problem.x[1][0].index = 1;
        problem.x[1][0].value = 1;
        problem.x[1][1].index = 2;
        problem.x[1][1].value = 0;
        problem.x[1][2].index = -1;
    }
    {
        problem.x[2] = new svm_node[3];
        problem.x[2][0].index = 1;
        problem.x[2][0].value = 0;
        problem.x[2][1].index = 2;
        problem.x[2][1].value = 1;
        problem.x[2][2].index = -1;
    }
    {
        problem.x[3] = new svm_node[3];
        problem.x[3][0].index = 1;
        problem.x[3][0].value = 1;
        problem.x[3][1].index = 2;
        problem.x[3][1].value = 1;
        problem.x[3][2].index = -1;
    }

    for (int i = 0; i < 4; i++)
    {
        cout << problem.y[i] << endl;
    }

    svm_model* model = svm_train(&problem, &params);
    svm_save_model("mymodel.svm", model);

    for (int i = 0; i < 4; i++)
    {
        double d = svm_predict(model, problem.x[i]);
        cout << "Prediction " << d << endl;
    }

    /* We should free the memory at this point,
       but this example is large enough already. */
}

Creating a Texture2D array with CPU write access

I'm trying to create a Texture2D array with CPU write access. In detail, the code I'm using looks like this:
D3D10_TEXTURE2D_DESC texDesc;
texDesc.Width = 32;
texDesc.Height = 32;
texDesc.MipLevels = 1;
texDesc.ArraySize = 2;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D10_USAGE_DYNAMIC;
texDesc.CPUAccessFlags = D3D10_CPU_ACCESS_WRITE;
texDesc.Format = DXGI_FORMAT_R32G32B32_FLOAT;
texDesc.BindFlags = D3D10_BIND_SHADER_RESOURCE;
texDesc.MiscFlags = 0;
ID3D10Texture2D* texture2D;
CHECK_SUCCESS(device->CreateTexture2D(&texDesc, NULL, &texture2D));
This, however, fails with E_INVALIDARG. Creating a 3D texture with the same dimensions (i.e. 32x32x2) and otherwise identical parameters works fine, but a 3D texture is not desired in this case.
Does anyone know why this setup would not be valid?
You need to provide an array of D3D10_SUBRESOURCE_DATA of length texDesc.ArraySize as the second parameter of CreateTexture2D().
For more help see this example.
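A hedged sketch of that suggestion (assumes 32x32 DXGI_FORMAT_R32G32B32_FLOAT slices, i.e. 12 bytes per pixel; initialData is a hypothetical buffer you would fill yourself):
const UINT width = 32, height = 32, arraySize = 2;
const UINT bytesPerPixel = 12;                       // R32G32B32_FLOAT
std::vector<float> initialData(width * height * 3 * arraySize, 0.0f); // needs <vector>
// One D3D10_SUBRESOURCE_DATA entry per subresource (ArraySize * MipLevels in general).
D3D10_SUBRESOURCE_DATA initData[arraySize];
for (UINT i = 0; i < arraySize; ++i)
{
    initData[i].pSysMem = &initialData[i * width * height * 3];
    initData[i].SysMemPitch = width * bytesPerPixel; // bytes per row
    initData[i].SysMemSlicePitch = 0;                // unused for 2D textures
}
ID3D10Texture2D* texture2D = NULL;
HRESULT hr = device->CreateTexture2D(&texDesc, initData, &texture2D);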
