I am working on an iOS project and need to capture input from the microphone and convert it to ULaw (to send out a data stream). I am using an AUGraph with a converter node to accomplish this. The graph is created and initialized successfully; however, in my render notify callback, the ioData buffer always contains NULL even though inNumberFrames contains a value of 93. I think it might have something to do with an incorrect size of the format converter buffers, but I can't figure out what is happening.
Here is the code:
OSStatus status;
// ************************** DEFINE AUDIO STREAM FORMATS ******************************
double currentSampleRate;
currentSampleRate = [[AVAudioSession sharedInstance] sampleRate];
// Describe stream format
AudioStreamBasicDescription streamAudioFormat = {0};
streamAudioFormat.mSampleRate = 8000.00;
streamAudioFormat.mFormatID = kAudioFormatULaw;
streamAudioFormat.mFormatFlags = kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
streamAudioFormat.mFramesPerPacket = 1;
streamAudioFormat.mChannelsPerFrame = 1;
streamAudioFormat.mBitsPerChannel = 8;
streamAudioFormat.mBytesPerPacket = 1;
streamAudioFormat.mBytesPerFrame = streamAudioFormat.mBytesPerPacket * streamAudioFormat.mFramesPerPacket;
// ************************** SETUP SEND AUDIO ******************************
AUNode ioSendNode;
AUNode convertToULAWNode;
AUNode convertToLPCMNode;
AudioUnit convertToULAWUnit;
AudioUnit convertToLPCMUnit;
status = NewAUGraph(&singleChannelSendGraph);
if (status != noErr)
{
NSLog(@"Unable to create send audio graph.");
return;
}
AudioComponentDescription ioDesc = {0};
ioDesc.componentType = kAudioUnitType_Output;
ioDesc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
ioDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
ioDesc.componentFlags = 0;
ioDesc.componentFlagsMask = 0;
status = AUGraphAddNode(singleChannelSendGraph, &ioDesc, &ioSendNode);
if (status != noErr)
{
NSLog(@"Unable to add IO node.");
return;
}
AudioComponentDescription converterDesc = {0};
converterDesc.componentType = kAudioUnitType_FormatConverter;
converterDesc.componentSubType = kAudioUnitSubType_AUConverter;
converterDesc.componentManufacturer = kAudioUnitManufacturer_Apple;
converterDesc.componentFlags = 0;
converterDesc.componentFlagsMask = 0;
status = AUGraphAddNode(singleChannelSendGraph, &converterDesc, &convertToULAWNode);
if (status != noErr)
{
NSLog(@"Unable to add ULAW converter node.");
return;
}
status = AUGraphAddNode(singleChannelSendGraph, &converterDesc, &convertToLPCMNode);
if (status != noErr)
{
NSLog(@"Unable to add LPCM converter node.");
return;
}
status = AUGraphOpen(singleChannelSendGraph);
if (status != noErr)
{
return;
}
// get the io audio unit
status = AUGraphNodeInfo(singleChannelSendGraph, ioSendNode, NULL, &ioSendUnit);
if (status != noErr)
{
NSLog(@"Unable to get IO unit.");
return;
}
UInt32 enableInput = 1;
status = AudioUnitSetProperty (ioSendUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
1, // microphone bus
&enableInput,
sizeof (enableInput)
);
if (status != noErr)
{
return;
}
UInt32 sizeASBD = sizeof(AudioStreamBasicDescription);
AudioStreamBasicDescription ioASBDin;
AudioStreamBasicDescription ioASBDout;
status = AudioUnitGetProperty(ioSendUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &ioASBDin, &sizeASBD);
if (status != noErr)
{
NSLog(@"Unable to get IO stream input format.");
return;
}
status = AudioUnitGetProperty(ioSendUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &ioASBDout, &sizeASBD);
if (status != noErr)
{
NSLog(@"Unable to get IO stream output format.");
return;
}
ioASBDin.mSampleRate = currentSampleRate;
ioASBDout.mSampleRate = currentSampleRate;
status = AudioUnitSetProperty(ioSendUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &ioASBDin, sizeof(ioASBDin));
if (status != noErr)
{
NSLog(@"Unable to set IO stream output format.");
return;
}
status = AudioUnitSetProperty(ioSendUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &ioASBDin, sizeof(ioASBDin));
if (status != noErr)
{
NSLog(@"Unable to set IO stream input format.");
return;
}
// get the converter audio unit
status = AUGraphNodeInfo(singleChannelSendGraph, convertToULAWNode, NULL, &convertToULAWUnit);
if (status != noErr)
{
NSLog(@"Unable to get ULAW converter unit.");
return;
}
status = AudioUnitSetProperty(convertToULAWUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &ioASBDin, sizeof(ioASBDin));
if (status != noErr)
{
NSLog(@"Unable to set ULAW stream input format.");
return;
}
status = AudioUnitSetProperty(convertToULAWUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &streamAudioFormat, sizeof(streamAudioFormat));
if (status != noErr)
{
NSLog(@"Unable to set ULAW stream output format.");
return;
}
// get the converter audio unit
status = AUGraphNodeInfo(singleChannelSendGraph, convertToLPCMNode, NULL, &convertToLPCMUnit);
if (status != noErr)
{
NSLog(@"Unable to get LPCM converter unit.");
return;
}
status = AudioUnitSetProperty(convertToLPCMUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &streamAudioFormat, sizeof(streamAudioFormat));
if (status != noErr)
{
NSLog(@"Unable to set LPCM stream input format.");
return;
}
status = AudioUnitSetProperty(convertToLPCMUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &ioASBDin, sizeof(ioASBDin));
if (status != noErr)
{
NSLog(@"Unable to set LPCM stream output format.");
return;
}
status = AUGraphConnectNodeInput(singleChannelSendGraph, ioSendNode, 1, convertToULAWNode, 0);
if (status != noErr)
{
NSLog(@"Unable to set ULAW node input.");
return;
}
status = AUGraphConnectNodeInput(singleChannelSendGraph, convertToULAWNode, 0, convertToLPCMNode, 0);
if (status != noErr)
{
NSLog(@"Unable to set LPCM node input.");
return;
}
status = AUGraphConnectNodeInput(singleChannelSendGraph, convertToLPCMNode, 0, ioSendNode, 0);
if (status != noErr)
{
NSLog(@"Unable to set IO node input.");
return;
}
status = AudioUnitAddRenderNotify(convertToULAWUnit, &outputULAWCallback, (__bridge void*)self);
if (status != noErr)
{
NSLog(@"Unable to add ULAW render notify.");
return;
}
status = AUGraphInitialize(singleChannelSendGraph);
if (status != noErr)
{
NSLog(@"Unable to initialize send graph.");
return;
}
CAShow (singleChannelSendGraph);
}
And the graph nodes are initialized as:
Member Nodes:
node 1: 'auou' 'vpio' 'appl', instance 0x7fd5faf8fac0 O I
node 2: 'aufc' 'conv' 'appl', instance 0x7fd5fad05420 O I
node 3: 'aufc' 'conv' 'appl', instance 0x7fd5fad05810 O I
Connections:
node 1 bus 1 => node 2 bus 0 [ 1 ch, 44100 Hz, 'lpcm' (0x0000000C) 16-bit little-endian signed integer]
node 2 bus 0 => node 3 bus 0 [ 1 ch, 8000 Hz, 'ulaw' (0x0000000C) 8 bits/channel, 1 bytes/packet, 1 frames/packet, 1 bytes/frame]
node 3 bus 0 => node 1 bus 0 [ 1 ch, 44100 Hz, 'lpcm' (0x0000000C) 16-bit little-endian signed integer]
And the render notify callback:
static OSStatus outputULAWCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
AudioManager *audioManager = (__bridge AudioManager*)inRefCon;
if ((*ioActionFlags) & kAudioUnitRenderAction_PostRender)
{
if (!audioManager.mute && ioData->mBuffers[0].mData != NULL)
{
TPCircularBufferProduceBytes(audioManager.activeChannel == 0 ? audioManager.channel1StreamOutBufferPtr : audioManager.channel2StreamOutBufferPtr,
ioData->mBuffers[0].mData, ioData->mBuffers[0].mDataByteSize);
// do not want to playback our audio into local speaker
SilenceData(ioData);
}
}
return noErr;
}
Note: if I send the microphone input to the output directly (skipping the converter nodes), I do hear output, so I know the AUGraph is working.
I have a receive AUGraph setup to receive ULaw from a stream and run through a converter to play through the speakers and that is working without an issue.
Just can't figure out why the converter is failing and returning no data.
Has anyone had any experience with this type of issue?
UPDATE
So you're calling AUGraphStart elsewhere, but the ulaw converter is refusing to do general rate conversion for you :( You could add another rate converter to the graph or simply get the vpio unit to do it for you. Changing this code
ioASBDin.mSampleRate = currentSampleRate; // change me to 8000Hz
ioASBDout.mSampleRate = currentSampleRate; // delete me, I'm ignored
status = AudioUnitSetProperty(ioSendUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &ioASBDin, sizeof(ioASBDin));
into
ioASBDin.mSampleRate = streamAudioFormat.mSampleRate; // a.k.a 8000Hz
status = AudioUnitSetProperty(ioSendUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &ioASBDin, sizeof(ioASBDin));
will make the whole graph do 8kHz and give you non-null ioData buffers:
AudioUnitGraph 0xCA51000:
Member Nodes:
node 1: 'auou' 'vpio' 'appl', instance 0x7b5bb320 O I
node 2: 'aufc' 'conv' 'appl', instance 0x7c878d50 O I
node 3: 'aufc' 'conv' 'appl', instance 0x7c875eb0 O I
Connections:
node 1 bus 1 => node 2 bus 0 [ 1 ch, 8000 Hz, 'lpcm' (0x0000000C) 16-bit little-endian signed integer]
node 2 bus 0 => node 3 bus 0 [ 1 ch, 8000 Hz, 'ulaw' (0x0000000C) 8 bits/channel, 1 bytes/packet, 1 frames/packet, 1 bytes/frame]
node 3 bus 0 => node 1 bus 0 [ 1 ch, 8000 Hz, 'lpcm' (0x0000000C) 16-bit little-endian signed integer]
CurrentState:
mLastUpdateError=0, eventsToProcess=F, isInitialized=T, isRunning=T (1)
old answer
You need to
1. AUGraphStart your graph
2. Change your ulaw mSampleRate to 11025, 22050 or 44100
Then you will see non-null ioData in the kAudioUnitRenderAction_PostRender phase.
Converting to 8kHz or even 16kHz ulaw seems like something an audio converter should be able to do. I have no idea why it doesn't work, but when you do set the sample rate to anything other than the values in point 2., the ulaw converter reports kAUGraphErr_CannotDoInCurrentContext (-10863) errors, which makes no sense to me.
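For illustration, the two changes from this old answer would look roughly like the sketch below when applied to the question's code (a sketch only; the sample rate has to be changed before the converter formats are set and the graph is initialized):
// 1. Pick a sample rate the ulaw converter accepted in testing (see point 2 above)
streamAudioFormat.mSampleRate = 44100.0; // 11025 and 22050 also worked
// ... set the formats, connect the nodes, and AUGraphInitialize exactly as in the question ...
// 2. Start the graph so the render (and render-notify) callbacks begin firing
status = AUGraphStart(singleChannelSendGraph);
if (status != noErr)
{
    NSLog(@"Unable to start send graph.");
    return;
}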
Related
I'm looking to pan a mono signal using MTAudioProcessingTap and a Multichannel Mixer audio unit, but am getting a mono output instead of a panned, stereo output. The documentation states:
"The Multichannel Mixer unit (subtype
kAudioUnitSubType_MultiChannelMixer) takes any number of mono or
stereo streams and combines them into a single stereo output."
So, the mono output was unexpected. Any way around this? I ran a stereo signal through the exact same code and everything worked great: stereo output, panned as expected. Here's the code from my tap's prepare callback:
static void tap_PrepareCallback(MTAudioProcessingTapRef tap,
CMItemCount maxFrames,
const AudioStreamBasicDescription *processingFormat) {
AVAudioTapProcessorContext *context = (AVAudioTapProcessorContext *)MTAudioProcessingTapGetStorage(tap);
// Store sample rate for -setCenterFrequency:.
context->sampleRate = processingFormat->mSampleRate;
/* Verify processing format (this is not needed for Audio Unit, but for RMS calculation). */
context->supportedTapProcessingFormat = true;
if (processingFormat->mFormatID != kAudioFormatLinearPCM) {
NSLog(@"Unsupported audio format ID for audioProcessingTap. LinearPCM only.");
context->supportedTapProcessingFormat = false;
}
if (!(processingFormat->mFormatFlags & kAudioFormatFlagIsFloat)) {
NSLog(@"Unsupported audio format flag for audioProcessingTap. Float only.");
context->supportedTapProcessingFormat = false;
}
if (processingFormat->mFormatFlags & kAudioFormatFlagIsNonInterleaved) {
context->isNonInterleaved = true;
}
AudioUnit audioUnit;
AudioComponentDescription audioComponentDescription;
audioComponentDescription.componentType = kAudioUnitType_Mixer;
audioComponentDescription.componentSubType = kAudioUnitSubType_MultiChannelMixer;
audioComponentDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
audioComponentDescription.componentFlags = 0;
audioComponentDescription.componentFlagsMask = 0;
AudioComponent audioComponent = AudioComponentFindNext(NULL, &audioComponentDescription);
if (audioComponent) {
if (noErr == AudioComponentInstanceNew(audioComponent, &audioUnit)) {
OSStatus status = noErr;
// Set audio unit input/output stream format to processing format.
if (noErr == status) {
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
processingFormat,
sizeof(AudioStreamBasicDescription));
}
if (noErr == status) {
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
0,
processingFormat,
sizeof(AudioStreamBasicDescription));
}
// Set audio unit render callback.
if (noErr == status) {
AURenderCallbackStruct renderCallbackStruct;
renderCallbackStruct.inputProc = AU_RenderCallback;
renderCallbackStruct.inputProcRefCon = (void *)tap;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&renderCallbackStruct,
sizeof(AURenderCallbackStruct));
}
// Set audio unit maximum frames per slice to max frames.
if (noErr == status) {
UInt32 maximumFramesPerSlice = (UInt32)maxFrames;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_MaximumFramesPerSlice,
kAudioUnitScope_Global,
0,
&maximumFramesPerSlice,
(UInt32)sizeof(UInt32));
}
// Initialize audio unit.
if (noErr == status) {
status = AudioUnitInitialize(audioUnit);
}
if (noErr != status) {
AudioComponentInstanceDispose(audioUnit);
audioUnit = NULL;
}
context->audioUnit = audioUnit;
}
}
NSLog(@"Tap channels: %d", processingFormat->mChannelsPerFrame); // = 1 for mono source file
}
I've tried a few different options for the output stream format, e.g., AVAudioFormat *outFormat = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:processingFormat->mSampleRate channels:2];, but get this error each time: "Client did not see 20 I/O cycles; giving up." Here's the code that creates the exact same ASBD as the input format except for 2 channels instead of one, and this gives the same "20 I/O cycles" error too:
AudioStreamBasicDescription asbd;
asbd.mFormatID = kAudioFormatLinearPCM;
asbd.mFormatFlags = 0x29;
asbd.mSampleRate = 44100;
asbd.mBitsPerChannel = 32;
asbd.mChannelsPerFrame = 2;
asbd.mBytesPerFrame = 4;
asbd.mFramesPerPacket = 1;
asbd.mBytesPerPacket = 4;
asbd.mReserved = 0;
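(For completeness, a sketch of how that two-channel ASBD would be applied to the mixer's output scope, with the mono processingFormat left on the input scope; the set-property call itself was omitted above, and as noted it still produces the same "20 I/O cycles" error.)
// audioUnit is the Multichannel Mixer instance created in the prepare callback above
OSStatus status = AudioUnitSetProperty(audioUnit,
                                       kAudioUnitProperty_StreamFormat,
                                       kAudioUnitScope_Output,
                                       0,
                                       &asbd,
                                       sizeof(asbd));
if (status != noErr) {
    NSLog(@"Failed to set stereo output format on the mixer");
}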
I have an IPCamera that requires the use of a custom library for connecting and communication. I have the video all taken care of, but I also want to give the user the option to listen to the audio that is recorded by the camera.
I receive the audio in the form of a byte stream (the audio is PCM u-law).
Since I don't read the data from a file or have a URL I can connect to, I think I would have to use something like Audio Units or OpenAL to play my audio.
I tried to implement it with AudioUnits based on the examples I found online and this is what I have so far:
-(void) audioThread
{
char buffer[1024];
int size = 0;
bool audioConfigured = false;
AudioComponentInstance audioUnit;
while (running) {
getAudioData(buffer,size); //fill buffer with my audio
int16_t* tempChar = (int16_t *)calloc(size, sizeof(int16_t));
for (int i = 0; i < size; i++) {
tempChar[i] = MuLaw_Decode(buffer[i]);
}
uint8_t *data = NULL;
data = malloc(size);
data = memcpy(data, tempChar, size);
CMBlockBufferRef blockBuffer = NULL;
OSStatus status = CMBlockBufferCreateWithMemoryBlock(NULL, data,
size,
kCFAllocatorNull, NULL,
0,
size,
0, &blockBuffer);
CMSampleBufferRef sampleBuffer = NULL;
// now I create my samplebuffer from the block buffer
if(status == noErr)
{
const size_t sampleSize = size;
status = CMSampleBufferCreate(kCFAllocatorDefault,
blockBuffer, true, NULL, NULL,
formatDesc, 1, 0, NULL, 1,
&sampleSize, &sampleBuffer);
}
AudioStreamBasicDescription audioBasic;
audioBasic.mBitsPerChannel = 16;
audioBasic.mBytesPerPacket = 2;
audioBasic.mBytesPerFrame = 2;
audioBasic.mChannelsPerFrame = 1;
audioBasic.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioBasic.mFormatID = kAudioFormatLinearPCM;
audioBasic.mFramesPerPacket = 1;
audioBasic.mSampleRate = 48000;
audioBasic.mReserved = 0;
if(!audioConfigured)
{
//initialize the circular buffer
if(instance.decodingBuffer == NULL)
instance.decodingBuffer = malloc(sizeof(TPCircularBuffer));
if(!TPCircularBufferInit(instance.decodingBuffer, 1024))
continue;
AudioComponentDescription componentDescription;
componentDescription.componentType = kAudioUnitType_Output;
componentDescription.componentSubType = kAudioUnitSubType_RemoteIO;
componentDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
componentDescription.componentFlags = 0;
componentDescription.componentFlagsMask = 0;
AudioComponent component = AudioComponentFindNext(NULL, &componentDescription);
if(AudioComponentInstanceNew(component, &audioUnit) != noErr) {
NSLog(@"Failed to initialize the AudioComponent");
continue;
}
//enable IO for playback
UInt32 flag = 1;
if(AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &flag, sizeof(flag)) != noErr) {
NSLog(@"Failed to enable IO for playback");
continue;
}
// set the format for the outputstream
if(AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output, 1, &audioBasic, sizeof(audioBasic)) != noErr) {
NSLog(@"Failed to set the format for the outputstream");
continue;
}
// set output callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = playbackCallback;
callbackStruct.inputProcRefCon = (__bridge void*) self;
if(AudioUnitSetProperty(audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, 0, &callbackStruct, sizeof(callbackStruct))!= noErr) {
NSLog(@"Failed to Set output callback");
continue;
}
// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
flag = 0;
status = AudioUnitSetProperty(audioUnit, kAudioUnitProperty_ShouldAllocateBuffer, kAudioUnitScope_Output, 1, &flag, sizeof(flag));
if(AudioUnitInitialize(audioUnit) != noErr) {
NSLog(@"Failed to initialize audioUnits");
}
if(AudioOutputUnitStart(audioUnit)!= noErr) {
NSLog(@"[thread_ReceiveAudio] Failed to start audio");
}
audioConfigured = true;
}
AudioBufferList bufferList ;
if (sampleBuffer!=NULL) {
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &bufferList, sizeof(bufferList), NULL, NULL, kCMSampleBufferFlag_AudioBufferList_Assure16ByteAlignment, &blockBuffer);
UInt64 size = CMSampleBufferGetTotalSampleSize(sampleBuffer);
// Put audio into circular buffer
TPCircularBufferProduceBytes(self.decodingBuffer, bufferList.mBuffers[0].mData, size);
//TPCircularBufferCopyAudioBufferList(self.decodingBuffer, &bufferList, NULL, kTPCircularBufferCopyAll, NULL);
CFRelease(sampleBuffer);
CFRelease(blockBuffer);
}
}
//stop playing audio
if(audioConfigured){
if(AudioOutputUnitStop(audioUnit)!= noErr) {
NSLog(@"[thread_ReceiveAudio] Failed to stop audio");
}
else{
//clean up audio
AudioComponentInstanceDispose(audioUnit);
}
}
}
int16_t MuLaw_Decode(int8_t number)
{
const uint16_t MULAW_BIAS = 33;
uint8_t sign = 0, position = 0;
int16_t decoded = 0;
number = ~number;
if (number & 0x80)
{
number &= ~(1 << 7);
sign = -1;
}
position = ((number & 0xF0) >> 4) + 5;
decoded = ((1 << position) | ((number & 0x0F) << (position - 4))
| (1 << (position - 5))) - MULAW_BIAS;
return (sign == 0) ? (decoded) : (-(decoded));
}
static OSStatus playbackCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
int bytesToCopy = ioData->mBuffers[0].mDataByteSize;
SInt16 *targetBuffer = (SInt16*)ioData->mBuffers[0].mData;
int32_t availableBytes;
SInt16 *buffer = TPCircularBufferTail(instance.decodingBuffer, &availableBytes);
int sampleCount = MIN(bytesToCopy, availableBytes);
memcpy(targetBuffer, buffer, MIN(bytesToCopy, availableBytes));
TPCircularBufferConsume(self.decodingBuffer, sampleCount);
return noErr;
}
The code above doesn't produce any errors, but won't play any sound. I thought I could set the audio through the bufferList in the recordCallback, but it is never called.
So my question is: How do I play audio from a byte stream on iOS?
I decided to look at the project with fresh eyes. I got rid of most of the code and got it to work. It is not pretty, but at least it runs for now. For example: I had to set my sample rate to 4000, otherwise it would play too fast, and I still have performance issues. Anyway, this is what I came up with:
#define BUFFER_SIZE 1024
#define NUM_CHANNELS 2
#define kOutputBus 0
#define kInputBus 1
-(void) main
{
char buf[BUFFER_SIZE];
int size;
runloop: while (self.running) {
getAudioData(&buf, size);
if(!self.configured) {
if(![self activateAudioSession])
continue;
self.configured = true;
}
TPCircularBufferProduceBytes(self.decodingBuffer, buf, size);
}
//stop audiounits
AudioOutputUnitStop(self.audioUnit);
AudioComponentInstanceDispose(self.audioUnit);
if (self.decodingBuffer != NULL) {
TPCircularBufferCleanup(self.decodingBuffer);
}
}
static void audioSessionInterruptionCallback(void *inUserData, UInt32 interruptionState) {
if (interruptionState == kAudioSessionEndInterruption) {
AudioSessionSetActive(YES);
AudioOutputUnitStart(self.audioUnit);
}
if (interruptionState == kAudioSessionBeginInterruption) {
AudioOutputUnitStop(self.audioUnit);
}
}
static OSStatus playbackCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// Notes: ioData contains buffers (may be more than one!)
// Fill them up as much as you can. Remember to set the size value in each buffer to match how much data is in the buffer.
if (!self.running ) {
return -1;
}
int bytesToCopy = ioData->mBuffers[0].mDataByteSize;
SInt16 *targetBuffer = (SInt16*)ioData->mBuffers[0].mData;
// Pull audio from playthrough buffer
int32_t availableBytes;
if(self.decodingBuffer == NULL || self.decodingBuffer->length < 1) {
NSLog(@"buffer is empty");
return 0;
}
SInt16 *buffer = TPCircularBufferTail(self.decodingBuffer, &availableBytes);
int sampleCount = MIN(bytesToCopy, availableBytes);
memcpy(targetBuffer, buffer, sampleCount);
TPCircularBufferConsume(self.decodingBuffer, sampleCount);
return noErr;
}
- (BOOL) activateAudioSession {
if (!self.activated_) {
OSStatus result;
result = AudioSessionInitialize(NULL,
NULL,
audioSessionInterruptionCallback,
(__bridge void *)(self));
if (kAudioSessionAlreadyInitialized != result)
[self checkError:result message:@"Couldn't initialize audio session"];
[self setupAudio];
self.activated_ = YES;
}
return self.activated_;
}
- (BOOL) setupAudio
{
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
AudioComponentInstanceNew(inputComponent, &_audioUnit);
// // Enable IO for recording
// UInt32 flag = 1;
// status = AudioUnitSetProperty(audioUnit,
// kAudioOutputUnitProperty_EnableIO,
// kAudioUnitScope_Input,
// kInputBus,
// &flag,
// sizeof(flag));
// Enable IO for playback
UInt32 flag = 1;
AudioUnitSetProperty(_audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
// Describe format
AudioStreamBasicDescription format;
format.mSampleRate = 4000;
format.mFormatID = kAudioFormatULaw; //kAudioFormatULaw
format.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;//
format.mBitsPerChannel = 8 * sizeof(char);
format.mChannelsPerFrame = NUM_CHANNELS;
format.mBytesPerFrame = sizeof(char) * NUM_CHANNELS;
format.mFramesPerPacket = 1;
format.mBytesPerPacket = format.mBytesPerFrame * format.mFramesPerPacket;
format.mReserved = 0;
self.audioFormat = format;
// Apply format
AudioUnitSetProperty(_audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&_audioFormat,
sizeof(_audioFormat));
AudioUnitSetProperty(_audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&_audioFormat,
sizeof(_audioFormat));
// // Set input callback
// AURenderCallbackStruct callbackStruct;
// callbackStruct.inputProc = recordingCallback;
// callbackStruct.inputProcRefCon = self;
// status = AudioUnitSetProperty(audioUnit,
// kAudioOutputUnitProperty_SetInputCallback,
// kAudioUnitScope_Global,
// kInputBus,
// &callbackStruct,
// sizeof(callbackStruct));
// checkStatus(status);
// Set output callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = playbackCallback;
callbackStruct.inputProcRefCon = (__bridge void * _Nullable)(self);
AudioUnitSetProperty(_audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
flag = 0;
status = AudioUnitSetProperty(_audioUnit,
kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output,
kInputBus,
&flag,
sizeof(flag));
//initialize the circular buffer
if(self.decodingBuffer == NULL)
self.decodingBuffer = malloc(sizeof(TPCircularBuffer));
if(!TPCircularBufferInit(self.decodingBuffer, 512*1024))
return NO;
// Initialise
status = AudioUnitInitialize(self.audioUnit);
AudioOutputUnitStart(self.audioUnit);
return YES;
}
I found most of this by looking through GitHub and at A Tasty Pixel.
If the AVAudioSession is configured to use short buffers, you can use the RemoteIO Audio Unit to play received audio with low additional latency.
Check errors during audio configuration. Some iOS devices only support a 48 kHz sample rate, so you may need to resample your audio PCM data from 8 kHz to another rate.
RemoteIO only supports linear PCM, so you will need to first convert all your incoming 8-bit u-law PCM samples to 16-bit linear PCM format before storing them in a lock-free circular buffer.
You need to call AudioOutputUnitStart so the OS starts invoking the audio callbacks. Your code should not call these callbacks directly; the OS will call them.
AudioUnitRender is used for recording callbacks, not for playing audio. So you don't need to use it. Just fill the AudioBufferList buffers with the requested number of frames in the play callback.
Then you can use the play audio callback to check your circular buffer and pull the requested number of samples, if enough are available. You should not do any memory management (such as a free() call) inside this callback.
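Putting those points together, a rough sketch of the receive path might look like this (names such as getAudioData, MuLaw_Decode and decodingBuffer are borrowed from the question; the exact signatures are assumptions, not part of the original answer):
// Receive thread: convert each incoming 8-bit u-law byte to a 16-bit linear PCM
// sample before handing it to the lock-free circular buffer.
uint8_t ulawBytes[1024];
int size = 0; // number of u-law bytes received for this chunk
getAudioData((char *)ulawBytes, size); // camera-stream helper from the question (signature assumed)
SInt16 pcm[1024];
for (int i = 0; i < size; i++) {
    pcm[i] = MuLaw_Decode(ulawBytes[i]); // MuLaw_Decode from the question
}
TPCircularBufferProduceBytes(self.decodingBuffer, pcm, size * (int32_t)sizeof(SInt16));
// The RemoteIO render callback then copies from TPCircularBufferTail() into
// ioData->mBuffers[0].mData and calls TPCircularBufferConsume(), as in playbackCallback above.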
I am trying to create an audio graph with a mixer and an io unit. The io unit will receive audio from the microphone and send it to the mixer, which will mix it with an external sound and play it back out the speaker. I have created my audio graph as below and tried to follow the guidelines as much as possible. However, I keep getting error -10865 (kAudioUnitErr_PropertyNotWritable) when I try to connect the mixer node to the output node. Could somebody clarify for me what is going wrong? I will include more code, such as my callback and private variables, if needed.
NSLog(#"Creating audio graph");
sampleRate = 44100.0;
// Will check results
OSStatus result;
// Create the graph
result = NewAUGraph(&graph);
if(result != noErr)
NSLog(@"Failed creating graph");
result = AUGraphInitialize(graph);
if(result != noErr)
NSLog(@"Failed to initialize audio graph. Error code: %d '%.4s'", (int) result, (const char *)&result);
// Create audio nodes
AudioComponentDescription ioDescription;
ioDescription.componentType = kAudioUnitType_Output;
ioDescription.componentSubType = kAudioUnitSubType_RemoteIO;
ioDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
ioDescription.componentFlagsMask = 0;
ioDescription.componentFlags = 0;
AudioComponentDescription mixerDescription;
mixerDescription.componentType = kAudioUnitType_Mixer;
mixerDescription.componentSubType = kAudioUnitSubType_MultiChannelMixer;
mixerDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
mixerDescription.componentFlagsMask = 0;
mixerDescription.componentFlags = 0;
// Add nodes to the graph
AUNode ioNode;
AUNode mixerNode;
result = AUGraphAddNode(graph, &ioDescription, &ioNode);
if(result != noErr)
NSLog(@"Failed to add microphone node");
result = AUGraphAddNode(graph, &mixerDescription, &mixerNode);
if(result != noErr)
NSLog(@"Failed to add mixer node");
// Open the graph
result = AUGraphOpen(graph);
if(result != noErr)
NSLog(@"Failed to open graph");
// Get the IO node
result = AUGraphNodeInfo(graph, ioNode, NULL, &ioUnit);
if(result != noErr)
NSLog(@"Failed to fetch info from io node");
// Enable IO on the io node
UInt32 flag = 1;
result = AudioUnitSetProperty(ioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &flag, sizeof(flag));
if(result != noErr)
NSLog(@"Failed enabling IO on io unit");
// Get the mixer unit
result = AUGraphNodeInfo(graph, mixerNode, NULL, &mixerUnit);
if(result != noErr)
NSLog(@"Failed to fetch info from mixer node");
// Set up the mixer unit bus count
UInt32 busCount = 2;
result = AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &busCount, sizeof(busCount));
if(result != noErr)
NSLog(@"Failed to set property mixer input bus count");
// Attach render callback to sound effect bus
UInt32 soundEffectBus = 1;
AURenderCallbackStruct inputCallbackStruct;
inputCallbackStruct.inputProc = &inputRenderCallback;
inputCallbackStruct.inputProcRefCon = soundStruct;
result = AUGraphSetNodeInputCallback(graph, mixerNode, soundEffectBus, &inputCallbackStruct);
if(result != noErr)
NSLog(@"Failed to set mixer node input callback for sound effect bus");
// Set stream format for sound effect bus
UInt32 bytesPerSample = sizeof (AudioUnitSampleType);
stereoDescription.mFormatID = kAudioFormatLinearPCM;
stereoDescription.mFormatFlags = kAudioFormatFlagsAudioUnitCanonical;
stereoDescription.mBytesPerPacket = bytesPerSample;
stereoDescription.mFramesPerPacket = 1;
stereoDescription.mBytesPerFrame = bytesPerSample;
stereoDescription.mChannelsPerFrame = 2;
stereoDescription.mBitsPerChannel = 8 * bytesPerSample;
stereoDescription.mSampleRate = sampleRate;
result = AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, soundEffectBus, &stereoDescription, sizeof(stereoDescription));
if(result != noErr)
NSLog(@"Failed to set stream description");
// Set mixer output sample rate
result = AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_SampleRate, kAudioUnitScope_Output, 0, &sampleRate, sizeof(sampleRate));
if(result != noErr)
NSLog(@"Failed to set mixer output sample rate");
// Connect input to mixer
result = AUGraphConnectNodeInput(graph, ioNode, 1, mixerNode, 0);
if(result != noErr)
NSLog(@"Failed to connect microphone node to mixer node");
Error occurs here
// Connect mixer to output
result = AUGraphConnectNodeInput(graph, mixerNode, 0, ioNode, 0);
if(result != noErr)
NSLog(@"Failed to connect mixer to output node %d", result);
// Initialize the audio graph
CAShow(graph);
// Start the graph
result = AUGraphStart(graph);
if(result != noErr)
NSLog(@"Failed to start audio graph");
NSLog(@"Graph started");
EDIT
I was able to understand why I got this error: I think I was assigning my mixer output to the input element of the io unit (which is read-only, of course, because it comes from the mic). However, after switching that, as I changed in the code above, I now get this error:
ERROR: [0x196f982a0] 308: input bus 0 sample rate is 0
Could anyone help me? Is there something I am forgetting to set?
According to the error message, I suggest you explicitly set the stream format on every bus of the mixer (both input and output), just to be sure; see the sketch below. Personally, I don't set kAudioUnitProperty_SampleRate on the mixer; I don't think it has a meaning there (IMHO, it is meaningful on a hardware IO unit to choose the sample rate of the DAC, and it might also have a meaning for format converter units).
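For example, reusing the stereoDescription and busCount from the question, setting an explicit format on every mixer bus could look like this (a sketch, not tested against this exact graph):
// Explicitly set the same ASBD on both mixer input buses and on the mixer output.
for (UInt32 bus = 0; bus < busCount; ++bus) {
    result = AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Input, bus, &stereoDescription, sizeof(stereoDescription));
    if (result != noErr)
        NSLog(@"Failed to set mixer input format on bus %u", (unsigned)bus);
}
result = AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Output, 0, &stereoDescription, sizeof(stereoDescription));
if (result != noErr)
    NSLog(@"Failed to set mixer output format");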
I have implemented an AUGraph containing a single AudioUnit for handling IO from the mic and headsets. The issue I'm having is that there are missing chunks of audio input.
I believe the samples are being lost during the hardware to software buffer exchange. I tried slowing down the sample rate of the iPhone, from 44.1 kHz to 20 kHz, to see if this would give me the missing data, but it did not produce the output I expected.
The AUGraph is setup as follows:
// Audio component description
AudioComponentDescription desc;
bzero(&desc, sizeof(AudioComponentDescription));
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
// Stereo ASBD
AudioStreamBasicDescription stereoStreamFormat;
bzero(&stereoStreamFormat, sizeof(AudioStreamBasicDescription));
stereoStreamFormat.mSampleRate = kSampleRate;
stereoStreamFormat.mFormatID = kAudioFormatLinearPCM;
stereoStreamFormat.mFormatFlags = kAudioFormatFlagsCanonical;
stereoStreamFormat.mBytesPerPacket = 4;
stereoStreamFormat.mBytesPerFrame = 4;
stereoStreamFormat.mFramesPerPacket = 1;
stereoStreamFormat.mChannelsPerFrame = 2;
stereoStreamFormat.mBitsPerChannel = 16;
OSErr err = noErr;
@try {
// Create new AUGraph
err = NewAUGraph(&auGraph);
NSAssert1(err == noErr, @"Error creating AUGraph: %hd", err);
// Add node to AUGraph
err = AUGraphAddNode(auGraph,
&desc,
&ioNode);
NSAssert1(err == noErr, @"Error adding AUNode: %hd", err);
// Open AUGraph
err = AUGraphOpen(auGraph);
NSAssert1(err == noErr, @"Error opening AUGraph: %hd", err);
// Add AUGraph node info
err = AUGraphNodeInfo(auGraph,
ioNode,
&desc,
&_ioUnit);
NSAssert1(err == noErr, @"Error adding node info to AUGraph: %hd", err);
// Enable input, which is disabled by default.
UInt32 enabled = 1;
err = AudioUnitSetProperty(_ioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&enabled,
sizeof(enabled));
NSAssert1(err == noErr, @"Error enabling input: %hd", err);
// Apply format to input of ioUnit
err = AudioUnitSetProperty(_ioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&stereoStreamFormat,
sizeof(stereoStreamFormat));
NSAssert1(err == noErr, @"Error setting input ASBD: %hd", err);
// Apply format to output of ioUnit
err = AudioUnitSetProperty(_ioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&stereoStreamFormat,
sizeof(stereoStreamFormat));
NSAssert1(err == noErr, @"Error setting output ASBD: %hd", err);
// Set hardware IO callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = hardwareIOCallback;
callbackStruct.inputProcRefCon = (__bridge void *)(self);
err = AUGraphSetNodeInputCallback(auGraph,
ioNode,
kOutputBus,
&callbackStruct);
NSAssert1(err == noErr, @"Error setting IO callback: %hd", err);
// Initialize AudioGraph
err = AUGraphInitialize(auGraph);
NSAssert1(err == noErr, @"Error initializing AUGraph: %hd", err);
// Start audio unit
err = AUGraphStart(auGraph);
NSAssert1(err == noErr, @"Error starting AUGraph: %hd", err);
}
@catch (NSException *exception) {
NSLog(@"Failed with exception: %@", exception);
}
Where kOutputBus is defined to be 0, kInputBus is 1, and kSampleRate is 44100.
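Spelled out as the defines the code expects (these lines are not shown in the question; they simply restate the values above):
#define kOutputBus 0
#define kInputBus 1
#define kSampleRate 44100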
IO Callback Function
static OSStatus hardwareIOCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// Scope reference to GSFSensorIOController class
GSFSensorIOController *sensorIO = (__bridge GSFSensorIOController *) inRefCon;
// Grab the samples and place them in the buffer list
AudioUnit ioUnit = sensorIO.ioUnit;
OSStatus result = AudioUnitRender(ioUnit,
ioActionFlags,
inTimeStamp,
kInputBus,
inNumberFrames,
ioData);
if (result != noErr) NSLog(@"Blowing it in interrupt");
// Process input data
[sensorIO processIO:ioData];
// Set up power tone attributes
float freq = 20000.00f;
float sampleRate = kSampleRate;
float phase = sensorIO.sinPhase;
float sinSignal;
double phaseInc = 2 * M_PI * freq / sampleRate;
// Write to output buffers
for(size_t i = 0; i < ioData->mNumberBuffers; ++i) {
AudioBuffer buffer = ioData->mBuffers[i];
for(size_t sampleIdx = 0; sampleIdx < inNumberFrames; ++sampleIdx) {
// Grab sample buffer
SInt16 *sampleBuffer = buffer.mData;
// Generate power tone on left channel
sinSignal = sin(phase);
sampleBuffer[2 * sampleIdx] = (SInt16)((sinSignal * 32767.0f) /2);
// Write to commands to micro on right channel as necessary
if(sensorIO.newDataOut)
sampleBuffer[2*sampleIdx + 1] = (SInt16)((sinSignal * 32767.0f) /2);
else
sampleBuffer[2*sampleIdx + 1] = 0;
phase += phaseInc;
if (phase >= 2 * M_PI * freq) {
phase -= (2 * M_PI * freq);
}
}
}
// Store sine wave phase for next callback
sensorIO.sinPhase = phase;
return result;
}
The processIO function called within hardwareIOCallback is used to process the input and create the response for the output. For debugging purposes I just have it pushing each sample of the input buffer into an NSMutableArray.
Process IO
- (void) processIO: (AudioBufferList*) bufferList {
for (int j = 0 ; j < bufferList->mNumberBuffers ; j++) {
AudioBuffer sourceBuffer = bufferList->mBuffers[j];
SInt16 *buffer = (SInt16 *) bufferList->mBuffers[j].mData;
for (int i = 0; i < (sourceBuffer.mDataByteSize / sizeof(sourceBuffer)); i++) {
// DEBUG: Array of raw data points for printing to a file
[self.rawInputData addObject:[NSNumber numberWithInt:buffer[i]]];
}
}
}
I then write the contents of this input buffer to a file after I have stopped the AUGraph and have all samples in the rawInputData array. I open this file in MATLAB and plot it. Here I see that the audio input is missing data (circled in red in the plotted image).
I'm out of ideas as to how to fix this issue and could really use some help understanding and fixing this problem.
Your callback may be too slow. It's usually not recommended to use any Objective-C methods (such as adding to a mutable array, or anything else that could allocate memory) inside an Audio Unit callback.
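One way to keep the render thread lightweight is to copy the raw samples into preallocated storage (or a TPCircularBuffer) inside the callback and do the NSMutableArray work later, off the audio thread. A rough sketch of processIO along those lines; rawSamples and rawSampleCount are hypothetical preallocated ivars, not part of the original code:
- (void) processIO: (AudioBufferList*) bufferList {
    for (UInt32 j = 0; j < bufferList->mNumberBuffers; j++) {
        AudioBuffer sourceBuffer = bufferList->mBuffers[j];
        SInt16 *samples = (SInt16 *)sourceBuffer.mData;
        UInt32 count = sourceBuffer.mDataByteSize / sizeof(SInt16);
        // Copy into a preallocated C array instead of boxing every sample
        // into an NSNumber on the real-time thread.
        memcpy(self->rawSamples + self->rawSampleCount, samples, count * sizeof(SInt16));
        self->rawSampleCount += count;
    }
}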
I'm looking at this example http://teragonaudio.com/article/How-to-do-realtime-recording-with-effect-processing-on-iOS.html
and I want to turn off my output. I tried changing kAudioSessionCategory_PlayAndRecord to kAudioSessionCategory_RecordAudio, but this is not working. I also tried to get rid of:
if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output, 1, &streamDescription, sizeof(streamDescription)) != noErr) {
return 1;
}
Because I want to get sound from the microphone but not play it. But no matter what I do, when my sound gets to the renderCallback method there is a -50 error. When audio is automatically played on the output, everything works fine...
Update with code:
using namespace std;
AudioUnit *audioUnit = NULL;
float *convertedSampleBuffer = NULL;
int initAudioSession() {
audioUnit = (AudioUnit*)malloc(sizeof(AudioUnit));
if(AudioSessionInitialize(NULL, NULL, NULL, NULL) != noErr) {
return 1;
}
if(AudioSessionSetActive(true) != noErr) {
return 1;
}
UInt32 sessionCategory = kAudioSessionCategory_PlayAndRecord;
if(AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
sizeof(UInt32), &sessionCategory) != noErr) {
return 1;
}
Float32 bufferSizeInSec = 0.02f;
if(AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
sizeof(Float32), &bufferSizeInSec) != noErr) {
return 1;
}
UInt32 overrideCategory = 1;
if(AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
sizeof(UInt32), &overrideCategory) != noErr) {
return 1;
}
// There are many properties you might want to provide callback functions for:
// kAudioSessionProperty_AudioRouteChange
// kAudioSessionProperty_OverrideCategoryEnableBluetoothInput
// etc.
return 0;
}
OSStatus renderCallback(void *userData, AudioUnitRenderActionFlags *actionFlags,
const AudioTimeStamp *audioTimeStamp, UInt32 busNumber,
UInt32 numFrames, AudioBufferList *buffers) {
OSStatus status = AudioUnitRender(*audioUnit, actionFlags, audioTimeStamp,
1, numFrames, buffers);
int doOutput = 0;
if(status != noErr) {
return status;
}
if(convertedSampleBuffer == NULL) {
// Lazy initialization of this buffer is necessary because we don't
// know the frame count until the first callback
convertedSampleBuffer = (float*)malloc(sizeof(float) * numFrames);
baseTime = (float)QRealTimer::getUptimeInMilliseconds();
}
SInt16 *inputFrames = (SInt16*)(buffers->mBuffers->mData);
// If your DSP code can use integers, then don't bother converting to
// floats here, as it just wastes CPU. However, most DSP algorithms rely
// on floating point, and this is especially true if you are porting a
// VST/AU to iOS.
int i;
for( i = numFrames; i < fftlength; i++ ) // Shifting buffer
x_inbuf[i - numFrames] = x_inbuf[i];
for( i = 0; i < numFrames; i++) {
x_inbuf[i + x_phase] = (float)inputFrames[i] / (float)32768;
}
if( x_phase + numFrames == fftlength )
{
x_alignment.SigProc_frontend(x_inbuf); // Signal processing front-end (FFT!)
doOutput = x_alignment.Align();
/// Output as text! In the real-time version,
// this is where we update visualisation callbacks and launch other services
if ((doOutput) & (x_netscore.isEvent(x_alignment.Position()))
&(x_alignment.lastAction()<x_alignment.Position()) )
{
// here i want to do something with my input!
}
}
else
x_phase += numFrames;
return noErr;
}
int initAudioStreams(AudioUnit *audioUnit) {
UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
if(AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
sizeof(UInt32), &audioCategory) != noErr) {
return 1;
}
UInt32 overrideCategory = 1;
if(AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
sizeof(UInt32), &overrideCategory) != noErr) {
// Less serious error, but you may want to handle it and bail here
}
AudioComponentDescription componentDescription;
componentDescription.componentType = kAudioUnitType_Output;
componentDescription.componentSubType = kAudioUnitSubType_RemoteIO;
componentDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
componentDescription.componentFlags = 0;
componentDescription.componentFlagsMask = 0;
AudioComponent component = AudioComponentFindNext(NULL, &componentDescription);
if(AudioComponentInstanceNew(component, audioUnit) != noErr) {
return 1;
}
UInt32 enable = 1;
if(AudioUnitSetProperty(*audioUnit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input, 1, &enable, sizeof(UInt32)) != noErr) {
return 1;
}
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = renderCallback; // Render function
callbackStruct.inputProcRefCon = NULL;
if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input, 0, &callbackStruct,
sizeof(AURenderCallbackStruct)) != noErr) {
return 1;
}
AudioStreamBasicDescription streamDescription;
// You might want to replace this with a different value, but keep in mind that the
// iPhone does not support all sample rates. 8kHz, 22kHz, and 44.1kHz should all work.
streamDescription.mSampleRate = 44100;
// Yes, I know you probably want floating point samples, but the iPhone isn't going
// to give you floating point data. You'll need to make the conversion by hand from
// linear PCM <-> float.
streamDescription.mFormatID = kAudioFormatLinearPCM;
// This part is important!
streamDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger |
kAudioFormatFlagsNativeEndian |
kAudioFormatFlagIsPacked;
streamDescription.mBitsPerChannel = 16;
// 1 sample per frame, will always be 2 as long as 16-bit samples are being used
streamDescription.mBytesPerFrame = 2;
streamDescription.mChannelsPerFrame = 1;
streamDescription.mBytesPerPacket = streamDescription.mBytesPerFrame *
streamDescription.mChannelsPerFrame;
// Always should be set to 1
streamDescription.mFramesPerPacket = 1;
// Always set to 0, just to be sure
streamDescription.mReserved = 0;
// Set up input stream with above properties
if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input, 0, &streamDescription, sizeof(streamDescription)) != noErr) {
return 1;
}
// Ditto for the output stream, which we will be sending the processed audio to
if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output, 1, &streamDescription, sizeof(streamDescription)) != noErr) {
return 1;
}
return 0;
}
int startAudioUnit(AudioUnit *audioUnit) {
if(AudioUnitInitialize(*audioUnit) != noErr) {
return 1;
}
if(AudioOutputUnitStart(*audioUnit) != noErr) {
return 1;
}
return 0;
}
And calling from my VC:
initAudioSession();
initAudioStreams( audioUnit);
startAudioUnit( audioUnit);
If you want only recording, no playback, simply comment out the line that sets renderCallback:
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = renderCallback; // Render function
callbackStruct.inputProcRefCon = NULL;
if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input, 0, &callbackStruct,
sizeof(AURenderCallbackStruct)) != noErr) {
return 1;
}
Update after seeing code:
As I suspected, you're missing the input callback. Add these lines:
// at top:
#define kInputBus 1
AURenderCallbackStruct callbackStruct;
/**/
callbackStruct.inputProc = &ALAudioUnit::recordingCallback;
callbackStruct.inputProcRefCon = this;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
Now in your recordingCallback:
OSStatus ALAudioUnit::recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
// TODO: Use inRefCon to access our interface object to do stuff
// Then, use inNumberFrames to figure out how much data is available, and make
// that much space available in buffers in an AudioBufferList.
// Then:
// Obtain recorded samples
OSStatus status;
ALAudioUnit *pThis = reinterpret_cast<ALAudioUnit*>(inRefCon);
if (!pThis)
return noErr;
//assert (pThis->m_nMaxSliceFrames >= inNumberFrames);
pThis->recorderBufferList->GetBufferList().mBuffers[0].mDataByteSize = inNumberFrames * pThis->m_recorderSBD.mBytesPerFrame;
status = AudioUnitRender(pThis->audioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&pThis->recorderBufferList->GetBufferList());
THROW_EXCEPTION_IF_ERROR(status, "error rendering audio unit");
// If we're not playing, I don't care about the data, simply discard it
if (!pThis->playbackState || pThis->isSeeking) return noErr;
// Now, we have the samples we just read sitting in buffers in bufferList
pThis->DoStuffWithTheRecordedAudio(inNumberFrames, pThis->recorderBufferList, inTimeStamp);
return noErr;
}
Btw, I'm allocating my own buffer instead of using the one provided by AudioUnit. You might want to change those parts if you want to use AudioUnit allocated buffer.
Update:
How to allocate own buffer:
recorderBufferList = new AUBufferList();
recorderBufferList->Allocate(m_recorderSBD, m_nMaxSliceFrames);
recorderBufferList->PrepareBuffer(m_recorderSBD, m_nMaxSliceFrames);
Also, if you're doing this, tell AudioUnit to not allocate buffers:
// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
flag = 0;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
You'll need to include CoreAudio utility classes
Thanks for @Mar0ux's answer. Whoever got here looking for complete sample code doing this can take a look here:
https://code.google.com/p/ios-coreaudio-example/
I am working on a similar app with the same code, and I found that you can end playback by changing the enumeration kAudioSessionCategory_PlayAndRecord to kAudioSessionCategory_RecordAudio:
int initAudioStreams(AudioUnit *audioUnit) {
UInt32 audioCategory = kAudioSessionCategory_RecordAudio;
if(AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
sizeof(UInt32), &audioCategory) != noErr) {
return 1;
}
This stopped the feedback between mic and speaker on my hardware.
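If you need to keep the PlayAndRecord category and the render callback active (so AudioUnitRender keeps pulling microphone data) but want nothing to reach the speaker, another option, along the lines of the SilenceData(ioData) call in the first question above, is to zero the output buffers before returning. A sketch against the renderCallback shown earlier (not part of the original answers):
OSStatus renderCallback(void *userData, AudioUnitRenderActionFlags *actionFlags,
                        const AudioTimeStamp *audioTimeStamp, UInt32 busNumber,
                        UInt32 numFrames, AudioBufferList *buffers) {
    OSStatus status = AudioUnitRender(*audioUnit, actionFlags, audioTimeStamp,
                                      1, numFrames, buffers);
    if (status != noErr) {
        return status;
    }
    // ...analyse the microphone samples here, as in the original callback...
    // Silence the output buffers so nothing is played back to the speaker.
    for (UInt32 i = 0; i < buffers->mNumberBuffers; ++i) {
        memset(buffers->mBuffers[i].mData, 0, buffers->mBuffers[i].mDataByteSize);
    }
    *actionFlags |= kAudioUnitRenderAction_OutputIsSilence;
    return noErr;
}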