Why is there a video memory leak when using CopyResource to share a texture across ID3D11Devices?

I created two devices A and B, where A is the producer and B is the consumer. A texture is shared between the two through D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX. Now I find that every time B calls CopyResource to copy the shared texture to a local texture, video memory keeps increasing. This problem has troubled me for a long time; I searched GitHub and other code sources but couldn't find a solution. My machine is Win10 with a GeForce GTX 1060.
The most streamlined code to reproduce the problem is as follows:
#include <dxgi.h>
#include <d3d11.h>
#include <atlbase.h>

#pragma comment(lib, "D3D11.lib")

#define RETURN_ON_FAIL(hr) if(FAILED(hr)) return hr;

namespace SharedTextureLeakTest
{
    const UINT32 texture_width = 320;
    const UINT32 texture_height = 240;

    ATL::CComPtr<ID3D11Device> device_1 = nullptr;
    ATL::CComPtr<ID3D11DeviceContext> context_1 = nullptr;
    ATL::CComPtr<ID3D11Device> device_2 = nullptr;
    ATL::CComPtr<ID3D11DeviceContext> context_2 = nullptr;
    ATL::CComPtr<ID3D11Texture2D> shared_texture_dev1 = nullptr; // owned by device_1
    ATL::CComPtr<ID3D11Texture2D> dst_texture_dev2 = nullptr;    // owned by device_2
    HANDLE shared_handle = nullptr;

    HRESULT CheckEnvironmentValid()
    {
        HRESULT hr = S_OK;
        // Create Device1
        if (!device_1)
        {
            D3D_FEATURE_LEVEL pFeatureLevel;
            UINT Flags = D3D11_CREATE_DEVICE_DEBUG;
            hr = ::D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, Flags, nullptr, 0, D3D11_SDK_VERSION, &device_1, &pFeatureLevel, &context_1);
            if (FAILED(hr))
            {
                device_1 = nullptr;
                return hr;
            }
        }
        // Create Shared Texture
        if (!shared_texture_dev1)
        {
            D3D11_TEXTURE2D_DESC desc;
            desc.ArraySize = 1;
            desc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
            desc.CPUAccessFlags = 0;
            desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
            desc.Width = texture_width;
            desc.Height = texture_height;
            desc.MipLevels = 1;
            desc.Usage = D3D11_USAGE_DEFAULT;
            desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED_KEYEDMUTEX;
            desc.SampleDesc.Count = 1;
            desc.SampleDesc.Quality = 0;
            hr = device_1->CreateTexture2D(&desc, nullptr, &shared_texture_dev1);
            RETURN_ON_FAIL(hr);
        }
        // Get Handle of Shared Texture
        if (!shared_handle)
        {
            ATL::CComPtr<IDXGIResource> dxgi_res;
            hr = shared_texture_dev1->QueryInterface(__uuidof(IDXGIResource), (void**)&dxgi_res);
            RETURN_ON_FAIL(hr);
            hr = dxgi_res->GetSharedHandle(&shared_handle);
            RETURN_ON_FAIL(hr);
        }
        // Create Device2
        if (!device_2)
        {
            D3D_FEATURE_LEVEL pFeatureLevel;
            UINT Flags = D3D11_CREATE_DEVICE_DEBUG;
            hr = ::D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, Flags, nullptr, 0, D3D11_SDK_VERSION, &device_2, &pFeatureLevel, &context_2);
            if (FAILED(hr))
            {
                device_2 = nullptr;
                return hr;
            }
        }
        // Create Dst Texture
        if (!dst_texture_dev2)
        {
            D3D11_TEXTURE2D_DESC desc;
            desc.ArraySize = 1;
            desc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
            desc.CPUAccessFlags = 0;
            desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
            desc.Width = texture_width;
            desc.Height = texture_height;
            desc.MipLevels = 1;
            desc.Usage = D3D11_USAGE_DEFAULT;
            desc.MiscFlags = 0;
            desc.SampleDesc.Count = 1;
            desc.SampleDesc.Quality = 0;
            hr = device_2->CreateTexture2D(&desc, nullptr, &dst_texture_dev2);
            RETURN_ON_FAIL(hr);
        }
        return hr;
    }

    HRESULT LoopOnceTest()
    {
        HRESULT hr = CheckEnvironmentValid();
        RETURN_ON_FAIL(hr);
        // Open Shared Texture
        ATL::CComPtr<ID3D11Texture2D> shared_texture_dev2;
        hr = device_2->OpenSharedResource(shared_handle, IID_PPV_ARGS(&shared_texture_dev2));
        RETURN_ON_FAIL(hr);
        // Copy
        do
        {
            CComPtr<IDXGIKeyedMutex> km;
            hr = shared_texture_dev2->QueryInterface(__uuidof(IDXGIKeyedMutex), (void**)&km);
            RETURN_ON_FAIL(hr);
            INT64 release_key = 0;
            hr = km->AcquireSync(0, 10);
            RETURN_ON_FAIL(hr);
            if (hr == WAIT_OBJECT_0)
            {
                context_2->CopyResource(dst_texture_dev2, shared_texture_dev2);
                // context_2->Flush();
            }
            hr = km->ReleaseSync(0);
            RETURN_ON_FAIL(hr);
        } while (FALSE);
        return hr;
    }
}

void DoTest()
{
    for (;;)
    {
        SharedTextureLeakTest::LoopOnceTest();
        Sleep(1000);
    }
}
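The commented-out Flush call above is worth highlighting: Direct3D 11 defers object destruction, and per the ID3D11DeviceContext::Flush documentation, flushing is what actually destroys objects the app has already released. Here is a minimal variation of LoopOnceTest (a sketch of mine, not verified on the poster's machine) that opens the shared texture once instead of every iteration and flushes the consumer context after the copy, so released per-iteration objects do not pile up in video memory:

HRESULT LoopOnceTest_Flushed()
{
    HRESULT hr = CheckEnvironmentValid();
    RETURN_ON_FAIL(hr);
    // Open the shared texture once and cache it instead of reopening per iteration.
    static ATL::CComPtr<ID3D11Texture2D> shared_texture_dev2;
    if (!shared_texture_dev2)
    {
        hr = device_2->OpenSharedResource(shared_handle, IID_PPV_ARGS(&shared_texture_dev2));
        RETURN_ON_FAIL(hr);
    }
    ATL::CComPtr<IDXGIKeyedMutex> km;
    hr = shared_texture_dev2->QueryInterface(__uuidof(IDXGIKeyedMutex), (void**)&km);
    RETURN_ON_FAIL(hr);
    hr = km->AcquireSync(0, 10);
    RETURN_ON_FAIL(hr);
    if (hr == WAIT_OBJECT_0) // acquired; not WAIT_TIMEOUT or WAIT_ABANDONED
    {
        context_2->CopyResource(dst_texture_dev2, shared_texture_dev2);
        km->ReleaseSync(0);
        context_2->Flush(); // submit the copy and let deferred destruction run on device_2
    }
    return hr;
}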

Related

Can't set Texture WIC C++ DirectX

I am trying to use a texture embedded in a file; it's not a TGA.
Here is my code; I don't know where the logical error is.
ID3D11ShaderResourceView* texturePtr = nullptr;
ID3D11Texture2D* texture2D = nullptr;
ID3D11SamplerState* sampleStatePtr = nullptr;

hr = CoInitialize(NULL);
assert(SUCCEEDED(hr));

devConPtr->PSSetSamplers(0, 1, &sampleStatePtr);
devConPtr->PSSetShaderResources(0, 1, &texturePtr);

// HLSL side:
Texture2D tex : TEXTURE;
SamplerState mySampler : SAMPLER;

D3D11_SAMPLER_DESC sd;
ZeroMemory(&sd, sizeof(sd));
sd.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
sd.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
sd.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
sd.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
sd.MipLODBias = 0.0f;
sd.MaxLOD = D3D11_FLOAT32_MAX;
sd.ComparisonFunc = D3D11_COMPARISON_NEVER;
hr = devPtr->CreateSamplerState(&sd, &sampleStatePtr);

DXGI_SAMPLE_DESC sample;
sample.Count = 1;
sample.Quality = 0;

D3D11_TEXTURE2D_DESC textureDesc;
textureDesc.Width = w;
textureDesc.Height = h;
textureDesc.MipLevels = 1;
textureDesc.ArraySize = 1;
textureDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
textureDesc.SampleDesc = sample;
textureDesc.Usage = D3D11_USAGE_DEFAULT;
textureDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
textureDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
textureDesc.MiscFlags = 0;

D3D11_SUBRESOURCE_DATA subData;
subData.pSysMem = data;
subData.SysMemPitch = sizeof(*data) * w;

HRESULT hr = devPtr->CreateTexture2D(&textureDesc, &subData, &texture2D);
assert(SUCCEEDED(hr));

//(ID3D11Texture2D*)texture2D;
texturePtr->QueryInterface(IID_ID3D11Texture2D, (void**)&texture2D);

D3D11_SHADER_RESOURCE_VIEW_DESC shvD;
shvD.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
shvD.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
hr = devPtr->CreateShaderResourceView(texture2D, &shvD, &texturePtr);
assert(SUCCEEDED(hr));

hr = DirectX::CreateWICTextureFromMemory(devPtr, devConPtr,
    (const uint8_t*)&data, sizeof(*data),
    nullptr, &texturePtr, NULL);
assert(SUCCEEDED(hr));

unsigned int textureCount = mat->GetTextureCount(aiTextureType_UNKNOWN);
for (UINT j = 0; j < textureCount; j++)
{
    aiString* path = nullptr;
    mat->GetTexture(aiTextureType_UNKNOWN, j, path);
    assert(path->length >= 2);
    int index = atoi(&path->C_Str()[1]);
    createTexture(scenePtr->mTextures[index]->mWidth, scenePtr->mTextures[index]->mHeight,
        (uint8_t*)scenePtr->mTextures[index]->pcData);
}
If you could find some kind of logical error or help with the debugging, that would be super helpful. I tried to put a breakpoint at my HRESULTs, but I can't find the variables; however, it does say that my resource view pointer is always nullptr despite me trying to use it.
I am using C++, DirectX, the DirectX Toolkit, etc.
You are not initializing shvD completely. To fix it, initialize it like this:
D3D11_SHADER_RESOURCE_VIEW_DESC shvD;
shvD.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
shvD.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
shvD.Texture2D.MostDetailedMip = 0;
shvD.Texture2D.MipLevels = 1;
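Equivalently (a small variant of the same fix, shown here as a sketch), you can zero the whole descriptor first so no field is ever left uninitialized:

D3D11_SHADER_RESOURCE_VIEW_DESC shvD = {}; // zero every field up front
shvD.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
shvD.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
shvD.Texture2D.MipLevels = 1; // MostDetailedMip stays 0 from the zero-init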

Reading GPU resource data with the CPU

I am learning DirectX 11 these days, and I have been stuck on the compute shader section.
I made four resources and three corresponding views:
immutable input buffer = {1,1,1,1,1} / SRV
immutable input buffer = {2,2,2,2,2} / SRV
output buffer / UAV
staging buffer for reading / no view
I succeeded in creating all of them, dispatching the CS function, copying data from the output buffer to the staging buffer, and reading/checking the data.
// INPUT BUFFER1 --------------------------------------------------
const int dataSize = 5;
D3D11_BUFFER_DESC vb_dest;
vb_dest.ByteWidth = sizeof(float) * dataSize;
vb_dest.StructureByteStride = sizeof(float);
vb_dest.BindFlags = D3D11_BIND_SHADER_RESOURCE;
vb_dest.Usage = D3D11_USAGE_IMMUTABLE;
vb_dest.CPUAccessFlags = 0;
vb_dest.MiscFlags = 0;

float v1_float[dataSize] = { 1,1,1,1,1 };
D3D11_SUBRESOURCE_DATA v1_data;
v1_data.pSysMem = static_cast<void*>(v1_float);

device->CreateBuffer(&vb_dest, &v1_data, valueBuffer1.GetAddressOf());

D3D11_SHADER_RESOURCE_VIEW_DESC srv_desc;
srv_desc.Format = DXGI_FORMAT_R32_FLOAT;
srv_desc.ViewDimension = D3D11_SRV_DIMENSION_BUFFER;
srv_desc.Buffer.FirstElement = 0;
srv_desc.Buffer.NumElements = dataSize;
srv_desc.Buffer.ElementWidth = sizeof(float);

device->CreateShaderResourceView(valueBuffer1.Get(), &srv_desc, inputSRV1.GetAddressOf());

// INPUT BUFFER2 -----------------------------------------------------------
float v2_float[dataSize] = { 2,2,2,2,2 };
D3D11_SUBRESOURCE_DATA v2_data;
v2_data.pSysMem = static_cast<void*>(v2_float);

device->CreateBuffer(&vb_dest, &v2_data, valueBuffer2.GetAddressOf());
device->CreateShaderResourceView(valueBuffer2.Get(), &srv_desc, inputSRV2.GetAddressOf());

// OUTPUT BUFFER -----------------------------------------------------------
D3D11_BUFFER_DESC ov_desc;
ov_desc.ByteWidth = sizeof(float) * dataSize;
ov_desc.StructureByteStride = sizeof(float);
ov_desc.BindFlags = D3D11_BIND_UNORDERED_ACCESS;
ov_desc.Usage = D3D11_USAGE_DEFAULT;
ov_desc.CPUAccessFlags = 0;
ov_desc.MiscFlags = 0;

device->CreateBuffer(&ov_desc, nullptr, outputResource.GetAddressOf());

D3D11_UNORDERED_ACCESS_VIEW_DESC outputUAV_desc;
outputUAV_desc.Format = DXGI_FORMAT_R32_FLOAT;
outputUAV_desc.ViewDimension = D3D11_UAV_DIMENSION_BUFFER;
outputUAV_desc.Buffer.FirstElement = 0;
outputUAV_desc.Buffer.NumElements = dataSize;
outputUAV_desc.Buffer.Flags = 0;

device->CreateUnorderedAccessView(outputResource.Get(), &outputUAV_desc, outputUAV.GetAddressOf());

// BUFFER FOR COPY -----------------------------------------------------------
D3D11_BUFFER_DESC rb_desc;
rb_desc.ByteWidth = sizeof(float) * dataSize;
rb_desc.StructureByteStride = sizeof(float);
rb_desc.Usage = D3D11_USAGE_STAGING;
rb_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
rb_desc.BindFlags = 0;
rb_desc.MiscFlags = 0;

device->CreateBuffer(&rb_desc, nullptr, readResource.GetAddressOf());

// DISPATCH and COPY and GET DATA
dContext->CSSetShaderResources(0, 1, inputSRV1.GetAddressOf());
dContext->CSSetShaderResources(1, 1, inputSRV2.GetAddressOf());
dContext->CSSetUnorderedAccessViews(0, 1, outputUAV.GetAddressOf(), nullptr);
dContext->CSSetShader(cs.Get(), nullptr, 0);
dContext->Dispatch(1, 1, 1);
dContext->CopyResource(readResource.Get(), outputResource.Get());

D3D11_MAPPED_SUBRESOURCE mappedResource2;
ZeroMemory(&mappedResource2, sizeof(D3D11_MAPPED_SUBRESOURCE));
R_CHECK(dContext->Map(readResource.Get(), 0, D3D11_MAP_READ, 0, &mappedResource2));
float* data = static_cast<float*>(mappedResource2.pData);
for (int i = 0; i < 5; ++i)
{
    int a = data[i];
}
And this is the compute shader code:
StructuredBuffer<float> inputA : register(t0);
StructuredBuffer<float> inputB : register(t1);
RWStructuredBuffer<float> output : register(u0);

[numthreads(5, 1, 1)]
void main(int3 id : SV_DispatchThreadID)
{
    output[id.x] = inputA[id.x] + inputB[id.x];
}
In the CS, it adds the two input buffers and stores the result in the output buffer, so the expected answer would be {3,3,3,3,3}. But the result is {3,0,0,0,0}; only the first index has the proper answer.
Any advice would be amazing.
dContext->CopyResource(readResource.Get(), outputResource.Get());
D3D11_MAPPED_SUBRESOURCE mappedResource2;
ZeroMemory(&mappedResource2, sizeof(D3D11_MAPPED_SUBRESOURCE));
R_CHECK(dContext->Map(readResource.Get(), 0, D3D11_MAP_READ, 0, &mappedResource2));
float* data = static_cast<float*>(mappedResource2.pData);
for (int i = 0; i < 5; ++i)
{
    int a = data[i];
}
This code should instead look like this:
dContext->CopyResource(readResource.Get(), outputResource.Get());
D3D11_MAPPED_SUBRESOURCE mappedResource2;
ZeroMemory(&mappedResource2, sizeof(D3D11_MAPPED_SUBRESOURCE));
R_CHECK(dContext->Map(readResource.Get(), 0, D3D11_MAP_READ, 0, &mappedResource2));
// Declare and zero a CPU-side array, then copy out of the mapped pointer.
float results[5];
ZeroMemory(results, sizeof(results));
memcpy(results, mappedResource2.pData, sizeof(results));
dContext->Unmap(readResource.Get(), 0);
For some reason, I have to use memcpy instead of just reading the resource directly through the pointer I get from mapping.
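As an aside (my addition, not part of the original answer): the HLSL above declares StructuredBuffer/RWStructuredBuffer, and buffers backing those are normally created with D3D11_RESOURCE_MISC_BUFFER_STRUCTURED and viewed with format DXGI_FORMAT_UNKNOWN rather than DXGI_FORMAT_R32_FLOAT. A minimal sketch of that creation path, reusing the names from the question:

// Sketch: a buffer that matches an HLSL StructuredBuffer<float>.
D3D11_BUFFER_DESC sb_desc = {};
sb_desc.ByteWidth = sizeof(float) * dataSize;
sb_desc.StructureByteStride = sizeof(float);
sb_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
sb_desc.Usage = D3D11_USAGE_IMMUTABLE;
sb_desc.MiscFlags = D3D11_RESOURCE_MISC_BUFFER_STRUCTURED; // required for StructuredBuffer

D3D11_SHADER_RESOURCE_VIEW_DESC sb_srv = {};
sb_srv.Format = DXGI_FORMAT_UNKNOWN; // structured buffers use UNKNOWN, not a typed format
sb_srv.ViewDimension = D3D11_SRV_DIMENSION_BUFFER;
sb_srv.Buffer.FirstElement = 0;
sb_srv.Buffer.NumElements = dataSize;

device->CreateBuffer(&sb_desc, &v1_data, valueBuffer1.GetAddressOf());
device->CreateShaderResourceView(valueBuffer1.Get(), &sb_srv, inputSRV1.GetAddressOf());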

How to resample PCM data in iOS

I want to use AudioConverterFillComplexBuffer to convert the sample rate of a PCM buffer (32 kHz to 44.1 kHz), but I don't know why the voice sounds wrong (there is a lot of noise). Here is the main code:
struct AudioFrame {
    int samples;        // number of samples in this frame, e.g. 320
    int bytesPerSample; // number of bytes per sample: 2 for PCM16
    int channels;       // number of channels (data are interleaved if stereo)
    int samplesPerSec;  // sampling rate
    void* buffer;       // data buffer
};

- (void)convertAudioFrame:(AudioFrame *)buffer outPutData:(unsigned char **)outPutData outPutDataSize:(UInt32 *)outPutDataSize {
    if (buffer->bytesPerSample != self.unitDescription.mBitsPerChannel ||
        buffer->channels != self.unitDescription.mChannelsPerFrame ||
        buffer->samplesPerSec != self.unitDescription.mSampleRate) {
        // describe the input format
        AudioStreamBasicDescription inputDescription = {0};
        inputDescription.mFormatID = kAudioFormatLinearPCM;
        inputDescription.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger;
        inputDescription.mChannelsPerFrame = buffer->channels;
        inputDescription.mSampleRate = buffer->samplesPerSec;
        inputDescription.mBitsPerChannel = 16;
        inputDescription.mBytesPerFrame = (inputDescription.mBitsPerChannel / 8) * inputDescription.mChannelsPerFrame;
        inputDescription.mFramesPerPacket = 1;
        inputDescription.mBytesPerPacket = inputDescription.mBytesPerFrame;

        // describe the output format
        AudioStreamBasicDescription outputDescription = {0};
        outputDescription.mSampleRate = 44100;
        outputDescription.mFormatID = kAudioFormatLinearPCM;
        outputDescription.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        outputDescription.mChannelsPerFrame = 1;
        outputDescription.mFramesPerPacket = 1;
        outputDescription.mBitsPerChannel = 16;
        outputDescription.mBytesPerFrame = (outputDescription.mBitsPerChannel / 8) * outputDescription.mChannelsPerFrame;
        outputDescription.mBytesPerPacket = outputDescription.mBytesPerFrame;

        // create an audio converter
        AudioConverterRef audioConverter;
        OSStatus status = AudioConverterNew(&inputDescription, &outputDescription, &audioConverter);
        [self checkError:status errorMsg:@"AudioConverterNew error"];
        if (!audioConverter) {
            *outPutDataSize = 0;
            return;
        }

        UInt32 outputBytes = outputDescription.mBytesPerPacket * (buffer->samples * buffer->bytesPerSample / inputDescription.mBytesPerPacket);
        unsigned char *outputBuffer = (unsigned char *)malloc(outputBytes);
        memset(outputBuffer, 0, outputBytes);

        AudioBuffer inputBuffer;
        inputBuffer.mNumberChannels = inputDescription.mChannelsPerFrame;
        inputBuffer.mDataByteSize = buffer->samples * buffer->bytesPerSample;
        inputBuffer.mData = buffer->buffer;

        AudioBufferList outputBufferList;
        outputBufferList.mNumberBuffers = 1;
        outputBufferList.mBuffers[0].mNumberChannels = outputDescription.mChannelsPerFrame;
        outputBufferList.mBuffers[0].mDataByteSize = outputBytes;
        outputBufferList.mBuffers[0].mData = outputBuffer;

        UInt32 outputDataPacketSize = outputBytes / outputDescription.mBytesPerPacket;
        self.currentBuffer = &inputBuffer;
        self.currentInputDescription = inputDescription;

        // convert
        OSStatus result = AudioConverterFillComplexBuffer(audioConverter,
                                                          converterComplexInputDataProc,
                                                          (__bridge void *)self,
                                                          &outputDataPacketSize,
                                                          &outputBufferList,
                                                          NULL);
        [self checkError:result errorMsg:@"AudioConverterConvertBuffer error"];
        *outPutData = outputBuffer;
        *outPutDataSize = outputBytes;
        AudioConverterDispose(audioConverter);
    }
}

// convert callback
OSStatus converterComplexInputDataProc(AudioConverterRef inAudioConverter,
                                       UInt32 *ioNumberDataPackets,
                                       AudioBufferList *ioData,
                                       AudioStreamPacketDescription **ioDataPacketDescription,
                                       void *inUserData) {
    XMMicAudioManager *self = (__bridge XMMicAudioManager *)inUserData;
    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0] = *(self.currentBuffer);
    *ioNumberDataPackets = ioData->mBuffers[0].mDataByteSize / self.currentInputDescription.mBytesPerPacket;
    return 0;
}
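One point worth noting about this callback pattern (my note, not from the original post): AudioConverterFillComplexBuffer keeps invoking the input proc until the requested number of output packets has been produced, and the documented way to signal "no more input" is to set *ioNumberDataPackets to 0 and return a distinctive non-zero status. A callback that hands back the same AudioBuffer on every invocation, as above, re-feeds the same samples, which may be related to the noise. A minimal sketch of the contract, using a hypothetical InputState struct:

#include <AudioToolbox/AudioToolbox.h>

static const OSStatus kNoMoreDataErr = -2222; // custom sentinel, not a system constant

// Hypothetical per-conversion state tracking how much input remains.
typedef struct {
    AudioBuffer buffer;    // the single input buffer for this conversion
    UInt32 bytesPerPacket; // from the input AudioStreamBasicDescription
    Boolean consumed;      // set once the buffer has been handed out
} InputState;

static OSStatus InputProc(AudioConverterRef inConverter,
                          UInt32 *ioNumberDataPackets,
                          AudioBufferList *ioData,
                          AudioStreamPacketDescription **outPacketDesc,
                          void *inUserData)
{
    InputState *state = (InputState *)inUserData;
    if (state->consumed) {
        // Out of data: report zero packets and a recognizable status so the
        // converter stops asking instead of reprocessing stale input.
        *ioNumberDataPackets = 0;
        return kNoMoreDataErr;
    }
    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0] = state->buffer;
    *ioNumberDataPackets = state->buffer.mDataByteSize / state->bytesPerPacket;
    state->consumed = true; // hand the buffer out exactly once
    return noErr;
}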

AudioConverterFillComplexBuffer returns 1852797029 (kAudioCodecIllegalOperationError)

I'm trying to decode AAC data with AudioToolbox in an iOS environment. I consulted this thread.
The AudioConverterNew function call succeeds, but AudioConverterFillComplexBuffer returns error code 1852797029, kAudioCodecIllegalOperationError.
I'm trying to find my mistake. Thank you for reading.
- (void)initAudioToolBox {
    HCAudioAsset *asset = [self.provider getAudioAsset];

    AudioStreamBasicDescription outFormat;
    memset(&outFormat, 0, sizeof(outFormat));
    outFormat.mSampleRate = 44100;
    outFormat.mFormatID = kAudioFormatLinearPCM;
    outFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
    outFormat.mBytesPerPacket = 2;
    outFormat.mFramesPerPacket = 1;
    outFormat.mBytesPerFrame = 2;
    outFormat.mChannelsPerFrame = 1;
    outFormat.mBitsPerChannel = 16;
    outFormat.mReserved = 0;

    AudioStreamBasicDescription inFormat;
    memset(&inFormat, 0, sizeof(inFormat));
    inFormat.mSampleRate = [asset sampleRate];
    inFormat.mFormatID = kAudioFormatMPEG4AAC;
    inFormat.mFormatFlags = kMPEG4Object_AAC_LC;
    inFormat.mBytesPerPacket = 0;
    inFormat.mFramesPerPacket = (UInt32)[asset framePerPacket];
    inFormat.mBytesPerFrame = 0;
    inFormat.mChannelsPerFrame = (UInt32)[asset channelCount];
    inFormat.mBitsPerChannel = 0;
    inFormat.mReserved = 0;

    OSStatus status = AudioConverterNew(&inFormat, &outFormat, &audioConverter);
    if (status != noErr) {
        NSLog(@"setup converter error, status: %i\n", (int)status);
    } else {
        NSLog(@"Audio Converter is initialized successfully.");
    }
}

typedef struct _PassthroughUserData PassthroughUserData;
struct _PassthroughUserData {
    UInt32 mChannels;
    UInt32 mDataSize;
    const void* mData;
    AudioStreamPacketDescription mPacket;
};

int inInputDataProc(AudioConverterRef aAudioConverter,
                    UInt32* aNumDataPackets,
                    AudioBufferList* aData,
                    AudioStreamPacketDescription** aPacketDesc,
                    void* aUserData)
{
    PassthroughUserData* userData = (PassthroughUserData*)aUserData;
    if (!userData->mDataSize) {
        *aNumDataPackets = 0;
        NSLog(@"inInputDataProc returns -1");
        return -1;
    }
    if (aPacketDesc) {
        userData->mPacket.mStartOffset = 0;
        userData->mPacket.mVariableFramesInPacket = 0;
        userData->mPacket.mDataByteSize = userData->mDataSize;
        NSLog(@"mDataSize:%d", userData->mDataSize);
        *aPacketDesc = &userData->mPacket;
    }
    aData->mBuffers[0].mNumberChannels = userData->mChannels;
    aData->mBuffers[0].mDataByteSize = userData->mDataSize;
    aData->mBuffers[0].mData = (void*)(userData->mData);
    NSLog(@"buffer[0] - channel:%d, byte size:%u, data:%p",
          aData->mBuffers[0].mNumberChannels,
          (unsigned int)aData->mBuffers[0].mDataByteSize,
          aData->mBuffers[0].mData);
    // No more data to provide following this run.
    userData->mDataSize = 0;
    NSLog(@"inInputDataProc returns 0");
    return 0;
}

- (void)decodeAudioFrame:(NSData *)frame withPts:(NSInteger)pts {
    if (!audioConverter) {
        [self initAudioToolBox];
    }
    HCAudioAsset *asset = [self.provider getAudioAsset];
    PassthroughUserData userData = { (UInt32)[asset channelCount], (UInt32)frame.length, [frame bytes] };
    NSMutableData *decodedData = [NSMutableData new];
    const uint32_t MAX_AUDIO_FRAMES = 128;
    const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * 1;
    do {
        uint8_t *buffer = (uint8_t *)malloc(maxDecodedSamples * sizeof(short int));
        AudioBufferList decBuffer;
        memset(&decBuffer, 0, sizeof(AudioBufferList));
        decBuffer.mNumberBuffers = 1;
        decBuffer.mBuffers[0].mNumberChannels = 2;
        decBuffer.mBuffers[0].mDataByteSize = maxDecodedSamples * sizeof(short int);
        decBuffer.mBuffers[0].mData = buffer;

        UInt32 numFrames = MAX_AUDIO_FRAMES;
        AudioStreamPacketDescription outPacketDescription;
        memset(&outPacketDescription, 0, sizeof(AudioStreamPacketDescription));
        outPacketDescription.mDataByteSize = MAX_AUDIO_FRAMES;
        outPacketDescription.mStartOffset = 0;
        outPacketDescription.mVariableFramesInPacket = 0;

        NSLog(@"frame - size:%lu, buffer:%p", [frame length], [frame bytes]);
        OSStatus rv = AudioConverterFillComplexBuffer(audioConverter,
                                                      inInputDataProc,
                                                      &userData,
                                                      &numFrames,
                                                      &decBuffer,
                                                      &outPacketDescription);
        NSLog(@"num frames:%d, dec buffer [0] channels:%d, dec buffer [0] data byte size:%d, rv:%d",
              numFrames, decBuffer.mBuffers[0].mNumberChannels,
              decBuffer.mBuffers[0].mDataByteSize, (int)rv);
        if (rv && rv != noErr) {
            NSLog(@"Error decoding audio stream: %d\n", rv);
            break;
        }
        if (numFrames) {
            [decodedData appendBytes:decBuffer.mBuffers[0].mData length:decBuffer.mBuffers[0].mDataByteSize];
        }
    } while (true);
    //void *pData = (void *)[decodedData bytes];
    //audioRenderer->Render(&pData, decodedData.length, pts);
}

DirectX 10 Primitive is not displayed

I am trying to write my first DirectX 10 program that displays a triangle. Everything compiles fine, and the render function is called, since the background changes to black. However, the triangle I'm trying to draw with a triangle strip primitive is not displayed at all.
The Initialization function:
bool InitDirect3D(HWND hWnd, int width, int height)
{
    //****** D3DDevice and SwapChain *****//
    DXGI_SWAP_CHAIN_DESC swapChainDesc;
    ZeroMemory(&swapChainDesc, sizeof(swapChainDesc));
    swapChainDesc.BufferCount = 1;
    swapChainDesc.BufferDesc.Width = width;
    swapChainDesc.BufferDesc.Height = height;
    swapChainDesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    swapChainDesc.BufferDesc.RefreshRate.Numerator = 60;
    swapChainDesc.BufferDesc.RefreshRate.Denominator = 1;
    swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    swapChainDesc.OutputWindow = hWnd;
    swapChainDesc.SampleDesc.Count = 1;
    swapChainDesc.SampleDesc.Quality = 0;
    swapChainDesc.Windowed = TRUE;

    if (FAILED(D3D10CreateDeviceAndSwapChain(NULL,
                                             D3D10_DRIVER_TYPE_HARDWARE,
                                             NULL,
                                             0,
                                             D3D10_SDK_VERSION,
                                             &swapChainDesc,
                                             &pSwapChain,
                                             &pD3DDevice)))
        return fatalError(TEXT("Hardware does not support DirectX 10!"));

    //***** Shader *****//
    if (FAILED(D3DX10CreateEffectFromFile(TEXT("basicEffect.fx"),
                                          NULL, NULL,
                                          "fx_4_0",
                                          D3D10_SHADER_ENABLE_STRICTNESS,
                                          0,
                                          pD3DDevice,
                                          NULL,
                                          NULL,
                                          &pBasicEffect,
                                          NULL,
                                          NULL)))
        return fatalError(TEXT("Could not load effect file!"));

    pBasicTechnique = pBasicEffect->GetTechniqueByName("Render");
    pViewMatrixEffectVariable = pBasicEffect->GetVariableByName("View")->AsMatrix();
    pProjectionMatrixEffectVariable = pBasicEffect->GetVariableByName("Projection")->AsMatrix();
    pWorldMatrixEffectVariable = pBasicEffect->GetVariableByName("World")->AsMatrix();

    //***** Input Assembly Stage *****//
    D3D10_INPUT_ELEMENT_DESC layout[] =
    {
        {"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D10_INPUT_PER_VERTEX_DATA, 0},
        {"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D10_INPUT_PER_VERTEX_DATA, 0}
    };
    UINT numElements = 2;

    D3D10_PASS_DESC PassDesc;
    pBasicTechnique->GetPassByIndex(0)->GetDesc(&PassDesc);
    if (FAILED(pD3DDevice->CreateInputLayout(layout,
                                             numElements,
                                             PassDesc.pIAInputSignature,
                                             PassDesc.IAInputSignatureSize,
                                             &pVertexLayout)))
        return fatalError(TEXT("Could not create Input Layout."));
    pD3DDevice->IASetInputLayout(pVertexLayout);

    //***** Vertex buffer *****//
    UINT numVertices = 100;
    D3D10_BUFFER_DESC bd;
    bd.Usage = D3D10_USAGE_DYNAMIC;
    bd.ByteWidth = sizeof(vertex) * numVertices;
    bd.BindFlags = D3D10_BIND_VERTEX_BUFFER;
    bd.CPUAccessFlags = D3D10_CPU_ACCESS_WRITE;
    bd.MiscFlags = 0;
    if (FAILED(pD3DDevice->CreateBuffer(&bd, NULL, &pVertexBuffer)))
        return fatalError(TEXT("Could not create vertex buffer!"));

    UINT stride = sizeof(vertex);
    UINT offset = 0;
    pD3DDevice->IASetVertexBuffers(0, 1, &pVertexBuffer, &stride, &offset);

    //***** Rasterizer *****//
    // Set the viewport
    viewPort.Width = width;
    viewPort.Height = height;
    viewPort.MinDepth = 0.0f;
    viewPort.MaxDepth = 1.0f;
    viewPort.TopLeftX = 0;
    viewPort.TopLeftY = 0;
    pD3DDevice->RSSetViewports(1, &viewPort);

    D3D10_RASTERIZER_DESC rasterizerState;
    rasterizerState.CullMode = D3D10_CULL_NONE;
    rasterizerState.FillMode = D3D10_FILL_SOLID;
    rasterizerState.FrontCounterClockwise = true;
    rasterizerState.DepthBias = false;
    rasterizerState.DepthBiasClamp = 0;
    rasterizerState.SlopeScaledDepthBias = 0;
    rasterizerState.DepthClipEnable = true;
    rasterizerState.ScissorEnable = false;
    rasterizerState.MultisampleEnable = false;
    rasterizerState.AntialiasedLineEnable = true;

    ID3D10RasterizerState* pRS;
    pD3DDevice->CreateRasterizerState(&rasterizerState, &pRS);
    pD3DDevice->RSSetState(pRS);

    //***** Output Merger *****//
    // Get the back buffer from the swapchain
    ID3D10Texture2D *pBackBuffer;
    if (FAILED(pSwapChain->GetBuffer(0, __uuidof(ID3D10Texture2D), (LPVOID*)&pBackBuffer)))
        return fatalError(TEXT("Could not get back buffer."));

    // create the render target view
    if (FAILED(pD3DDevice->CreateRenderTargetView(pBackBuffer, NULL, &pRenderTargetView)))
        return fatalError(TEXT("Could not create the render target view."));

    // release the back buffer
    pBackBuffer->Release();

    // set the render target
    pD3DDevice->OMSetRenderTargets(1, &pRenderTargetView, NULL);

    return true;
}
The render function:
void Render()
{
    if (pD3DDevice != NULL)
    {
        pD3DDevice->ClearRenderTargetView(pRenderTargetView, D3DXCOLOR(0.0f, 0.0f, 0.0f, 0.0f));

        // create world matrix
        static float r;
        D3DXMATRIX w;
        D3DXMatrixIdentity(&w);
        D3DXMatrixRotationY(&w, r);
        r += 0.001f;

        // set effect matrices
        pWorldMatrixEffectVariable->SetMatrix(w);
        pViewMatrixEffectVariable->SetMatrix(viewMatrix);
        pProjectionMatrixEffectVariable->SetMatrix(projectionMatrix);

        // fill vertex buffer with vertices
        UINT numVertices = 3;
        vertex* v = NULL;

        // lock vertex buffer for CPU use
        pVertexBuffer->Map(D3D10_MAP_WRITE_DISCARD, 0, (void**)&v);
        v[0] = vertex(D3DXVECTOR3(-1, -1, 0), D3DXVECTOR4(1, 0, 0, 1));
        v[1] = vertex(D3DXVECTOR3(0, 1, 0), D3DXVECTOR4(0, 1, 0, 1));
        v[2] = vertex(D3DXVECTOR3(1, -1, 0), D3DXVECTOR4(0, 0, 1, 1));
        pVertexBuffer->Unmap();

        // Set primitive topology
        pD3DDevice->IASetPrimitiveTopology(D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);

        // get technique desc
        D3D10_TECHNIQUE_DESC techDesc;
        pBasicTechnique->GetDesc(&techDesc);
        for (UINT p = 0; p < techDesc.Passes; ++p)
        {
            // apply technique
            pBasicTechnique->GetPassByIndex(p)->Apply(0);
            // draw
            pD3DDevice->Draw(numVertices, 0);
        }

        pSwapChain->Present(0, 0);
    }
}
I'm not sure, but try setting:
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
after you unmap the buffer, to get something like this:
pVertexBuffer->Unmap();
pD3DDevice->IASetVertexBuffers( 0, 1, &pVertexBuffer, &stride, &offset );
// Set primitive topology
pD3DDevice->IASetPrimitiveTopology( D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP );
I suspect that locking blows away the buffer binding.
