Portaudio MME device behaviour issue - portaudio

I am using the multiple-output-device feature provided by the paMME host API to output audio through multiple stereo devices. I also need to use a single multichannel input device via MME.
- When I configure just the output device and play internally generated audio, there is no problem.
- However, the problem starts to occur when I configure both the input device and the multiple stereo output devices. The application crashes when I try to use more than two channels on the output. That is, if I increment the 'out' pointer beyond 2*frames_per_buffer, it crashes, which indicates that the buffer has been allocated for only two output channels.
Can anybody throw some light on what the problem could be? The configuration code is given below:
outputParameters.device = paUseHostApiSpecificDeviceSpecification;
outputParameters.channelCount = 8;
outputParameters.sampleFormat = paInt16;
outputParameters.hostApiSpecificStreamInfo = NULL;
wmmeStreamInfo.size = sizeof(PaWinMmeStreamInfo);
wmmeStreamInfo.hostApiType = paMME;
wmmeStreamInfo.version = 1;
wmmeStreamInfo.flags = paWinMmeUseMultipleDevices;
wmmeDeviceAndNumChannels[0].device = selectedDeviceIndex[0];
wmmeDeviceAndNumChannels[0].channelCount = 2;
wmmeDeviceAndNumChannels[1].device = selectedDeviceIndex[1];
wmmeDeviceAndNumChannels[1].channelCount = 2;
wmmeDeviceAndNumChannels[2].device = selectedDeviceIndex[2];
wmmeDeviceAndNumChannels[2].channelCount = 2;
wmmeDeviceAndNumChannels[3].device = selectedDeviceIndex[3];
wmmeDeviceAndNumChannels[3].channelCount = 2;
wmmeStreamInfo.devices = wmmeDeviceAndNumChannels;
wmmeStreamInfo.deviceCount = 4;
outputParameters.suggestedLatency = Pa_GetDeviceInfo( selectedDeviceIndex[0] )->defaultLowOutputLatency;
outputParameters.hostApiSpecificStreamInfo = &wmmeStreamInfo;
inputParameters.device = selectedInputDeviceIndex; /* multichannel MME input device */
inputParameters.channelCount = 8;
inputParameters.sampleFormat = paInt16;
inputParameters.suggestedLatency = Pa_GetDeviceInfo( inputParameters.device )->defaultLowInputLatency;
inputParameters.hostApiSpecificStreamInfo = NULL;
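For completeness, the stream is then opened roughly like this (only a sketch: the sample rate, frames-per-buffer value and the callback name myCallback are placeholders, not my actual values):
PaStream *stream = NULL;
PaError err = Pa_OpenStream(&stream,
                            &inputParameters,   /* 8-channel MME input device */
                            &outputParameters,  /* 8 channels spread over 4 stereo MME devices */
                            44100.0,            /* assumed sample rate */
                            256,                /* assumed frames per buffer */
                            paClipOff,
                            myCallback,         /* hypothetical stream callback */
                            NULL);
if (err != paNoError)
    printf("Pa_OpenStream failed: %s\n", Pa_GetErrorText(err));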
Thanks and regards,
Siddharth Kumar.

Related

Problem reading data from multiple ADC channels with stm32f4-discovery board using DMA

I am trying to read data from channels 0, 1, 2 and 3 of ADC1. The issue is that when I read from channel 1 and channel 2 and run the code in debug mode, it shows the correct values, but when I modified it to do the same for channels 3 and 4 as well, it does not show any value. Following is the code and a screenshot of the memory map:
#include "stm32f4xx.h"
#include "stm32f4_discovery.h"
uint16_t ADC1ConvertedValue[4] = {0,0,0,0};//Stores converted vals [2] = {0,0}
void DMA_config()
{
RCC_AHB1PeriphClockCmd(RCC_AHB1Periph_DMA2 ,ENABLE);
DMA_InitTypeDef DMA_InitStruct;
DMA_InitStruct.DMA_Channel = DMA_Channel_0;
DMA_InitStruct.DMA_PeripheralBaseAddr = (uint32_t) 0x4001204C;//ADC1's data register
DMA_InitStruct.DMA_Memory0BaseAddr = (uint32_t)&ADC1ConvertedValue;
DMA_InitStruct.DMA_DIR = DMA_DIR_PeripheralToMemory;
DMA_InitStruct.DMA_BufferSize = 4;//2
DMA_InitStruct.DMA_PeripheralInc = DMA_PeripheralInc_Disable;
DMA_InitStruct.DMA_MemoryInc = DMA_MemoryInc_Enable;
DMA_InitStruct.DMA_PeripheralDataSize = DMA_PeripheralDataSize_HalfWord;//Reads 16 bit values _HalfWord
DMA_InitStruct.DMA_MemoryDataSize = DMA_MemoryDataSize_HalfWord;//Stores 16 bit values _Halfword
DMA_InitStruct.DMA_Mode = DMA_Mode_Circular;
DMA_InitStruct.DMA_Priority = DMA_Priority_High;
DMA_InitStruct.DMA_FIFOMode = DMA_FIFOMode_Enable;
DMA_InitStruct.DMA_FIFOThreshold = DMA_FIFOThreshold_HalfFull;//_HalfFull
DMA_InitStruct.DMA_MemoryBurst = DMA_MemoryBurst_Single;
DMA_InitStruct.DMA_PeripheralBurst = DMA_PeripheralBurst_Single;
DMA_Init(DMA2_Stream0, &DMA_InitStruct);
DMA_Cmd(DMA2_Stream0, ENABLE);
}
void ADC_config()
{
/* Configure GPIO pins ******************************************************/
ADC_InitTypeDef ADC_InitStruct;
ADC_CommonInitTypeDef ADC_CommonInitStruct;
GPIO_InitTypeDef GPIO_InitStruct;
RCC_AHB1PeriphClockCmd( RCC_AHB1Periph_GPIOA, ENABLE);
RCC_APB2PeriphClockCmd(RCC_APB2Periph_ADC1, ENABLE);//ADC1 is connected to the APB2 peripheral bus
GPIO_InitStruct.GPIO_Pin = GPIO_Pin_0 | GPIO_Pin_1 | GPIO_Pin_2 | GPIO_Pin_3;// PA0, PA1, PA2, PA3
GPIO_InitStruct.GPIO_Mode = GPIO_Mode_AN;//The pins are configured in analog mode
GPIO_InitStruct.GPIO_PuPd = GPIO_PuPd_NOPULL ;//We don't need any pull up or pull down
GPIO_Init(GPIOA, &GPIO_InitStruct);//Initialize GPIOA pins with the configuration
/* ADC Common Init **********************************************************/
ADC_CommonInitStruct.ADC_Mode = ADC_Mode_Independent;
ADC_CommonInitStruct.ADC_Prescaler = ADC_Prescaler_Div2;
ADC_CommonInitStruct.ADC_DMAAccessMode = ADC_DMAAccessMode_Disabled;
ADC_CommonInitStruct.ADC_TwoSamplingDelay = ADC_TwoSamplingDelay_5Cycles;
ADC_CommonInit(&ADC_CommonInitStruct);
/* ADC1 Init ****************************************************************/
ADC_InitStruct.ADC_Resolution = ADC_Resolution_12b;//Input voltage is converted into a 12bit int (max 4095)
ADC_InitStruct.ADC_ScanConvMode = ENABLE;//The scan is configured in multiple channels
ADC_InitStruct.ADC_ContinuousConvMode = ENABLE;//Continuous conversion: input signal is sampled more than once
ADC_InitStruct.ADC_ExternalTrigConv = DISABLE;
ADC_InitStruct.ADC_ExternalTrigConvEdge = ADC_ExternalTrigConvEdge_None;
ADC_InitStruct.ADC_DataAlign = ADC_DataAlign_Right;//Data converted will be shifted to right
ADC_InitStruct.ADC_NbrOfConversion = 4;
ADC_Init(ADC1, &ADC_InitStruct);//Initialize ADC with the configuration
/* Select the channels to be read from **************************************/
ADC_RegularChannelConfig(ADC1, ADC_Channel_0, 1, ADC_SampleTime_144Cycles);//PA0
ADC_RegularChannelConfig(ADC1, ADC_Channel_1, 2, ADC_SampleTime_144Cycles);//PA1
ADC_RegularChannelConfig(ADC1, ADC_Channel_2, 3, ADC_SampleTime_144Cycles);//PA2
ADC_RegularChannelConfig(ADC1, ADC_Channel_3, 4, ADC_SampleTime_144Cycles);//PA3
/* Enable DMA request after last transfer (Single-ADC mode) */
ADC_DMARequestAfterLastTransferCmd(ADC1, ENABLE);
/* Enable ADC1 DMA */
ADC_DMACmd(ADC1, ENABLE);
/* Enable ADC1 */
ADC_Cmd(ADC1, ENABLE);
}
int main(void)
{
DMA_config();
ADC_config();
while(1)
{
ADC_SoftwareStartConv(ADC1);
//value=ADC_Read();
}
return 0;
}
You shouldn't start the ADC conversion multiple times (in your while(1) loop).
Since you've configured the ADC with .ADC_ContinuousConvMode = ENABLE, it should be enough to start it once before the loop and do nothing within it.
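A minimal sketch along those lines, reusing the DMA_config() and ADC_config() from the question:
int main(void)
{
DMA_config();
ADC_config();
ADC_SoftwareStartConv(ADC1); // start once; continuous conversion mode keeps it running
while(1)
{
// DMA keeps ADC1ConvertedValue[0..3] updated in the background
}
}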

error when creating 2d texture with dynamic format

I am pretty new to DirectX,
and I am stumbling over resource handling.
First, I created a texture that I can read/write on the GPU, and it worked well.
Now, as you can see in my code, I wanted to read this texture on the CPU as well (from the application side), so I changed the usage from USAGE_DEFAULT to USAGE_DYNAMIC.
Microsoft::WRL::ComPtr<ID3D11Texture2D> outputTexture;
D3D11_TEXTURE2D_DESC outputTex_desc;
outputTex_desc.Format = DXGI_FORMAT_R32_FLOAT;
outputTex_desc.Width = 3;
outputTex_desc.Height = 3;
outputTex_desc.MipLevels = 1;
outputTex_desc.ArraySize = 1;
outputTex_desc.BindFlags = D3D11_BIND_UNORDERED_ACCESS |
D3D11_BIND_SHADER_RESOURCE;
outputTex_desc.SampleDesc.Count = msCount;
outputTex_desc.SampleDesc.Quality = msQuality;
outputTex_desc.Usage = D3D11_USAGE_DYNAMIC;
outputTex_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
outputTex_desc.MiscFlags = 0;
// CREATE 'TEXTURE'
device->CreateTexture2D( // FAIL HERE !!!
&outputTex_desc,
nullptr,
outputTexture.GetAddressOf());
// CREATE 'SRV'
...
// CREATE 'UAV'
...
It starts failing exactly when it executes device->CreateTexture2D().
Any advice would be amazing.
The first two steps in debugging any Direct3D 11 program are:
Make sure you are checking every HRESULT for either success (SUCCEEDED macro) or failure (FAILED macro). If it is safe to ignore the return value at runtime, then the function returns void. See ThrowIfFailed.
Enable the Direct3D debug device and look for debug output (a.k.a. use D3D11_CREATE_DEVICE_DEBUG in your Debug configuration).
If you enable the Direct3D debug device, you'll get detailed information on why the API returns failure codes in many cases.
If you did that, you'd see the error for this code:
D3D11 ERROR: ID3D11Device::CreateTexture2D: A D3D11_USAGE_DYNAMIC Resource
may only have the D3D11_CPU_ACCESS_WRITE CPUAccessFlags set.
[ STATE_CREATION ERROR #98: CREATETEXTURE2D_INVALIDCPUACCESSFLAGS]
In order to READ it on the CPU, you must copy to a D3D11_USAGE_STAGING Resource first. For example source code on doing that, see the ScreenGrab code from DirectX Tool Kit.
You don't mention what your msCount or msQuality values are here. I assumed 1, and 0 respectively. If you use any other values, you'll get:
D3D11 ERROR: ID3D11Device::CreateTexture2D: Multisampling is not supported
with the D3D11_BIND_UNORDERED_ACCESS BindFlag. SampleDesc.Count must be 1
and SampleDesc.Quality must be 0.
[ STATE_CREATION ERROR #99: CREATETEXTURE2D_INVALIDBINDFLAGS]
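Checking every HRESULT can be as simple as a small helper in the spirit of ThrowIfFailed (just a sketch; the DirectX Tool Kit version does more):
#include <stdexcept>
inline void ThrowIfFailed(HRESULT hr)
{
    if (FAILED(hr))
    {
        throw std::runtime_error("Direct3D call failed");
    }
}
// usage with the call from the question:
ThrowIfFailed(device->CreateTexture2D(&outputTex_desc, nullptr, outputTexture.GetAddressOf()));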
In order to access a texture from the CPU (read mode), you need to create a separate staging texture, then copy your texture into it.
These are the flags for the GPU-only texture (please note I force the sample count to 1, as UAV access is not allowed for multisampled textures):
D3D11_TEXTURE2D_DESC gpuTexDesc;
gpuTexDesc.Format = DXGI_FORMAT_R32_FLOAT;
gpuTexDesc.Width = 3;
gpuTexDesc.Height = 3;
gpuTexDesc.MipLevels = 1;
gpuTexDesc.ArraySize = 1;
gpuTexDesc.BindFlags = D3D11_BIND_UNORDERED_ACCESS |
D3D11_BIND_SHADER_RESOURCE;
gpuTexDesc.SampleDesc.Count = 1;
gpuTexDesc.SampleDesc.Quality = 0;
gpuTexDesc.Usage = D3D11_USAGE_DEFAULT;
gpuTexDesc.CPUAccessFlags = 0; // no CPU access for the GPU-only texture
gpuTexDesc.MiscFlags = 0;
Then you create a second texture for reading
D3D11_TEXTURE2D_DESC readTexDesc;
readTexDesc.Format = DXGI_FORMAT_R32_FLOAT;
readTexDesc.Width = 3;
readTexDesc.Height = 3;
readTexDesc.MipLevels = 1;
readTexDesc.ArraySize = 1;
readTexDesc.BindFlags = 0; //No bind flags allowed for staging
readTexDesc.SampleDesc.Count = 1;
readTexDesc.SampleDesc.Quality = 0;
readTexDesc.Usage = D3D11_USAGE_STAGING; //need staging flag for read
readTexDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
readTexDesc.MiscFlags = 0;
Then you can use CopyResource:
deviceContext->CopyResource(readTex, gpuTex);
Once done, you can finally access texture data for reading using Map
D3D11_MAPPED_SUBRESOURCE MappedResource;
deviceContext->Map(readTex, 0, D3D11_MAP_READ, 0, &MappedResource);
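With the resource mapped, you can walk the pixels through MappedResource.pData, stepping by RowPitch for each row. A minimal sketch for the 3x3 R32_FLOAT texture above (variable names are placeholders):
const uint8_t* src = static_cast<const uint8_t*>(MappedResource.pData);
for (UINT y = 0; y < 3; ++y)
{
    const float* row = reinterpret_cast<const float*>(src + y * MappedResource.RowPitch);
    for (UINT x = 0; x < 3; ++x)
    {
        float value = row[x]; // one texel of the R32_FLOAT texture
        // ... use value ...
    }
}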
MappedResource gives you access to the data on the CPU; once you are done processing, don't forget to Unmap the resource:
deviceContext->Unmap(readTex, 0);

ParamValidationExt error with WelsInitEncoderExt failed while setting up OpenH264 encoder

Scenario:
I am using OpenH264 with my App to encode into a video_file.mp4.
Environment:
Platform : MacOs Sierra
Compiler : Clang++
The code:
Following is the crux of the code I have:
void EncodeVideoFile() {
ISVCEncoder * encoder_;
std::string video_file_name = "/Path/to/some/folder/video_file.mp4";
EncodeFileParam * pEncFileParam;
SEncParamExt * pEnxParamExt;
float frameRate = 1000;
EUsageType usageType = EUsageType::CAMERA_VIDEO_REAL_TIME;
bool denoise = false;
bool lossless = true;
bool enable_ltr = false;
int layers = 1;
bool cabac = false;
int sliceMode = 1;
pEncFileParam = new EncodeFileParam;
pEncFileParam->eUsageType = EUsageType::CAMERA_VIDEO_REAL_TIME;
pEncFileParam->pkcFileName = video_file_name.c_str();
pEncFileParam->iWidth = frame_width;
pEncFileParam->iHeight = frame_height;
pEncFileParam->fFrameRate = frameRate;
pEncFileParam->iLayerNum = layers;
pEncFileParam->bDenoise = denoise;
pEncFileParam->bLossless = lossless;
pEncFileParam->bEnableLtr = enable_ltr;
pEncFileParam->bCabac = cabac;
int rv = WelsCreateSVCEncoder (&encoder_);
pEnxParamExt = new SEncParamExt;
pEnxParamExt->iUsageType = pEncFileParam->eUsageType;
pEnxParamExt->iPicWidth = pEncFileParam->iWidth;
pEnxParamExt->iPicHeight = pEncFileParam->iHeight;
pEnxParamExt->fMaxFrameRate = pEncFileParam->fFrameRate;
pEnxParamExt->iSpatialLayerNum = pEncFileParam->iLayerNum;
pEnxParamExt->bEnableDenoise = pEncFileParam->bDenoise;
pEnxParamExt->bIsLosslessLink = pEncFileParam->bLossless;
pEnxParamExt->bEnableLongTermReference = pEncFileParam->bEnableLtr;
pEnxParamExt->iEntropyCodingModeFlag = pEncFileParam->bCabac ? 1 : 0;
for (int i = 0; i < pEnxParamExt->iSpatialLayerNum; i++) {
pEnxParamExt->sSpatialLayers[i].sSliceArgument.uiSliceMode = pEncFileParam->eSliceMode;
}
encoder_->InitializeExt(pEnxParamExt);
int videoFormat = videoFormatI420;
encoder_->SetOption (ENCODER_OPTION_DATAFORMAT, &videoFormat);
int frameSize = frame_width * frame_height * 3 / 2;
int total_num = 500;
BufferedData buf;
buf.SetLength (frameSize);
// check the buffer before proceeding
if (buf.Length() != (size_t)frameSize) {
CloseEncoder();
return;
}
SFrameBSInfo info;
memset (&info, 0, sizeof (SFrameBSInfo));
SSourcePicture pic;
memset (&pic, 0, sizeof (SSourcePicture));
pic.iPicWidth = frame_width;
pic.iPicHeight = frame_height;
pic.iColorFormat = videoFormatI420;
pic.iStride[0] = pic.iPicWidth;
pic.iStride[1] = pic.iStride[2] = pic.iPicWidth >> 1;
pic.pData[0] = buf.data();
pic.pData[1] = pic.pData[0] + frame_width * frame_height;
pic.pData[2] = pic.pData[1] + (frame_width * frame_height >> 2);
for(int num = 0; num < total_num; num++) {
// try to encode the frame
rv = encoder_->EncodeFrame (&pic, &info);
}
if (encoder_) {
encoder_->Uninitialize();
WelsDestroySVCEncoder (encoder_);
}
}
The above code is something I pulled from the official usage examples of OpenH264, where BufferedData.h is a class I reused from the OpenH264 utils.
Issue:
But, I am getting the following error:
[OpenH264] this = 0x0x1038bc8c0, Error:ParamValidationExt(), width > 0, height > 0, width * height <= 9437184, invalid 0 x 0 in dependency layer settings!
[OpenH264] this = 0x0x1038bc8c0, Error:WelsInitEncoderExt(), ParamValidationExt failed return 2.
[OpenH264] this = 0x0x1038bc8c0, Error:CWelsH264SVCEncoder::Initialize(), WelsInitEncoderExt failed.
The above does not crash the application, but it goes through a blank run without creating video_file.mp4 with the dummy data that I am trying to write into it.
Question:
There seems to be something wrong with the setup config I am applying to pEnxParamExt, which goes into encoder_->InitializeExt.
What am I doing wrong with the set up of the encoder?
Note:
I am not trying to hook up to any camera device. I am just trying to create a .mp4 video out of some dummy image data.
If you want a complete and working OpenH264 encoder initialization procedure, you can click... here.
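The error in the question complains that the dependency-layer dimensions are 0 x 0. With SEncParamExt, each spatial layer carries its own size and rate, so a working initialization typically also fills the per-layer fields, along these lines (a sketch built from the question's own variables, assuming frame_width and frame_height themselves are non-zero; not taken from the linked procedure):
for (int i = 0; i < pEnxParamExt->iSpatialLayerNum; i++) {
pEnxParamExt->sSpatialLayers[i].iVideoWidth = pEncFileParam->iWidth;   // per-layer size must be non-zero
pEnxParamExt->sSpatialLayers[i].iVideoHeight = pEncFileParam->iHeight;
pEnxParamExt->sSpatialLayers[i].fFrameRate = pEncFileParam->fFrameRate;
pEnxParamExt->sSpatialLayers[i].sSliceArgument.uiSliceMode = pEncFileParam->eSliceMode;
}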
According to your problem scenario, you are trying to create a video file (.mp4/.avi) from some dummy images. This task can be accomplished using two different libraries: i) a library for the codec, ii) a library for the container.
i) Library for the codec: It's quite easy to use OpenH264 to compress data. One thing I must mention is that OpenH264 always works with raw frames, e.g. yuv420 data. So, if you want to compress your image data, you have to convert it into the yuv420 color format first. To get OpenH264 click... here
ii) Library for the container: After getting the encoded data, you have to use another library to create the container with extension .mp4, .avi, .flv etc. There are a lot of libraries on GitHub that do this, like FFmpeg, OpenCV, Bento4, MP4Maker, mp4parser etc. Before using these libraries, please check the license issues in detail. If you use FFmpeg, you will not need OpenH264, because FFmpeg itself works with several codecs. You will also find a lot more working examples, since so many developers out there work with video data.
Hope it helps. :)

D3D11CreateDeviceAndSwapChain slow

I've lately been bothered by the slow startup of my D3D11 app, so I started investigating and found that the culprit is D3D11CreateDeviceAndSwapChain. This single call takes roughly 1.5 seconds, which seems crazy slow to me. Is this also your experience?
This is the setup code:
DXGI_SWAP_CHAIN_DESC swap_chain_desc = {};
swap_chain_desc.BufferDesc.Width = window->window_width;
swap_chain_desc.BufferDesc.Height = window->window_height;
swap_chain_desc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
swap_chain_desc.SampleDesc.Count = 1;
swap_chain_desc.SampleDesc.Quality = 0;
swap_chain_desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swap_chain_desc.BufferCount = 2;
swap_chain_desc.OutputWindow = window->window_handle;
swap_chain_desc.BufferDesc.RefreshRate.Numerator = 60;
swap_chain_desc.BufferDesc.RefreshRate.Denominator = 1;
swap_chain_desc.Windowed = true;
swap_chain_desc.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
swap_chain_desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
D3D_FEATURE_LEVEL feature_level = D3D_FEATURE_LEVEL_11_0;
D3D_FEATURE_LEVEL supported_feature_level;
UINT flags = 0;
#ifdef DEBUG
flags = D3D11_CREATE_DEVICE_DEBUG;
#endif
HRESULT hr = D3D11CreateDeviceAndSwapChain(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, flags, &feature_level, 1, D3D11_SDK_VERSION,
&swap_chain_desc, &context.swapChain, &context.device, &supported_feature_level, &context.context);
First, you should consider D3D11CreateDeviceAndSwapChain deprecated; the device should be created separately, and there are a few variants of swap chain creation depending on what your application is (HWND vs. core window, for example).
There is no justification for a slow device creation, but past experiences with slow device creation were due to D3D hooking from applications like Steam. The easiest way to confirm is to take a profiling capture of your initialization and look at the call stacks and DLLs involved as children of the device creation.
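A sketch of the suggested split for the HWND case, reusing the question's flags and window variables (error checks omitted for brevity; this is not a drop-in replacement):
Microsoft::WRL::ComPtr<ID3D11Device> device;
Microsoft::WRL::ComPtr<ID3D11DeviceContext> immediateContext;
D3D_FEATURE_LEVEL feature_level = D3D_FEATURE_LEVEL_11_0;
HRESULT hr = D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, flags,
                               &feature_level, 1, D3D11_SDK_VERSION,
                               &device, nullptr, &immediateContext);
// reach the DXGI factory that owns the device, then create the swap chain from it
Microsoft::WRL::ComPtr<IDXGIDevice> dxgiDevice;
Microsoft::WRL::ComPtr<IDXGIAdapter> adapter;
Microsoft::WRL::ComPtr<IDXGIFactory2> factory;
device.As(&dxgiDevice);
dxgiDevice->GetAdapter(&adapter);
adapter->GetParent(IID_PPV_ARGS(&factory));
DXGI_SWAP_CHAIN_DESC1 scd = {};
scd.Width = window->window_width;
scd.Height = window->window_height;
scd.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
scd.SampleDesc.Count = 1;
scd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
scd.BufferCount = 2;
scd.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
Microsoft::WRL::ComPtr<IDXGISwapChain1> swapChain;
hr = factory->CreateSwapChainForHwnd(device.Get(), window->window_handle,
                                     &scd, nullptr, nullptr, &swapChain);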

How do I use -analyzeduration from ffmpeg with FFmpegDecoder.framework from mooncatventures?

I have been playing with RtspPlay1 from mooncatventures to try to stream a live stream from an ffmpeg streaming source with as little delay as possible. The problem is that even when I modify the code to pass the -analyzeduration 0 flag in RtspPlay1, it does not seem to do anything. I came to this conclusion because the delay on the iOS device is the same as on my computer without the -analyzeduration 0 flag. Any thoughts would be helpful.
Here is the command I am trying to emulate on the iPhone:
ffplay rtp:///224.1.1.1:11326 -analyzeduration 0
Here is the modified code I tried with RtspPlay1:
forward_argc=1;
forward_argv[1] = "-analyzeduration";
forward_argv[2] = "0";
//forward_argv[3] = "30";
//forward_argv[4] = "-fast";
//forward_argv[5] = "-sync";
//forward_argv[6] = "video";
//forward_argv[7] = "-drp";
//forward_argv[8] = "-skipidct";
//forward_argv[9] = "10";
//forward_argv[10] = "-skiploop";
//forward_argv[11] = "50";
//forward_argv[12] = "-threads";
//forward_argv[13] = "5";
//argv[14] = "-an";
forward_argv[3] = cString;
NSLog(@"glflag %@\n ",[parms objectForKey:@"glflag"] );
if ([parms objectForKey:@"glflag"]!=@"1") {
forward_argv[4]="0";
}else {
forward_argv[4]="1";
}
forward_argc += 4;
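For reference, when calling the FFmpeg APIs directly rather than going through ffplay-style argv, the same option can be set programmatically. This is only a sketch with plain FFmpeg calls, not mooncatventures-specific code, and the URL is a placeholder:
// #include "libavformat/avformat.h"
AVDictionary *opts = NULL;
av_dict_set(&opts, "analyzeduration", "0", 0); // same effect as -analyzeduration 0
AVFormatContext *fmt_ctx = NULL;
if (avformat_open_input(&fmt_ctx, "rtp://224.1.1.1:11326", NULL, &opts) < 0) {
    // handle error
}
av_dict_free(&opts);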

Resources