How do you save generated audio to a file in iOS?

I have successfully generated a tone using iOS with the following code. After that, I want to save the generated tone to an audio file. How can I do this?
- (void)createToneUnit
{
// Configure the search parameters to find the default playback output unit
// (called the kAudioUnitSubType_RemoteIO on iOS but
// kAudioUnitSubType_DefaultOutput on Mac OS X)
AudioComponentDescription defaultOutputDescription;
defaultOutputDescription.componentType = kAudioUnitType_Output;
defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
defaultOutputDescription.componentFlags = 0;
defaultOutputDescription.componentFlagsMask = 0;
// Get the default playback output unit
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
NSAssert(defaultOutput, @"Can't find default output");
// Create a new unit based on this that we'll use for output
OSErr err = AudioComponentInstanceNew(defaultOutput, &toneUnit);
NSAssert1(toneUnit, @"Error creating unit: %ld", err);
// Set our tone rendering function on the unit
AURenderCallbackStruct input;
input.inputProc = RenderTone;
input.inputProcRefCon = (__bridge void *)(self);
err = AudioUnitSetProperty(toneUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&input,
sizeof(input));
NSAssert1(err == noErr, @"Error setting callback: %ld", err);
// Set the format to 32 bit, single channel, floating point, linear PCM
const int four_bytes_per_float = 4;
const int eight_bits_per_byte = 8;
AudioStreamBasicDescription streamFormat;
streamFormat.mSampleRate = sampleRate;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags =
kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
streamFormat.mBytesPerPacket = four_bytes_per_float;
streamFormat.mFramesPerPacket = 1;
streamFormat.mBytesPerFrame = four_bytes_per_float;
streamFormat.mChannelsPerFrame = 1;
streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
err = AudioUnitSetProperty (toneUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&streamFormat,
sizeof(AudioStreamBasicDescription));
NSAssert1(err == noErr, @"Error setting stream format: %ld", err);
}
The render code:
OSStatus RenderTone(
void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
// Fixed amplitude is good enough for our purposes
const double amplitude = 0.25;
// Get the tone parameters out of the view controller
ViewController *viewController =
(__bridge ViewController *)inRefCon;
double theta = viewController->theta;
double theta_increment = 2.0 * M_PI * viewController->frequency / viewController->sampleRate;
// This is a mono tone generator so we only need the first buffer
const int channel = 0;
Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;
// Generate the samples
for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
buffer[frame] = sin(theta) * amplitude;
theta += theta_increment;
if (theta > 2.0 * M_PI)
{
theta -= 2.0 * M_PI;
}
}
// Store the theta back in the view controller
viewController->theta = theta;
return noErr;
}
And to play the generated tone, I just:
OSErr err = AudioUnitInitialize(toneUnit);
err = AudioOutputUnitStart(toneUnit);

The Extended Audio File API (ExtAudioFile) provides an easy way to write audio files to disk.
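For example, here is a rough sketch of writing the tone offline with ExtAudioFile. This is not from the original answer: the writeToneToFile:seconds: method name and the 1024-frame chunk size are arbitrary, and it reuses the sampleRate, the tone unit's stream format, and the RenderTone callback from the question, calling RenderTone directly to fill the buffers instead of tapping the live output.
// Requires <AudioToolbox/AudioToolbox.h>
- (void)writeToneToFile:(NSURL *)fileURL seconds:(double)seconds
{
// Client format: what we hand to ExtAudioFileWrite. It matches the
// 32-bit float, mono, non-interleaved format used by the tone unit.
AudioStreamBasicDescription clientFormat = {0};
clientFormat.mSampleRate = sampleRate;
clientFormat.mFormatID = kAudioFormatLinearPCM;
clientFormat.mFormatFlags = kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
clientFormat.mBytesPerPacket = 4;
clientFormat.mFramesPerPacket = 1;
clientFormat.mBytesPerFrame = 4;
clientFormat.mChannelsPerFrame = 1;
clientFormat.mBitsPerChannel = 32;
// File format: a mono AAC file in an .m4a container. Only the sample rate,
// format ID and channel count are set; the encoder fills in the rest.
AudioStreamBasicDescription fileFormat = {0};
fileFormat.mSampleRate = sampleRate;
fileFormat.mFormatID = kAudioFormatMPEG4AAC;
fileFormat.mChannelsPerFrame = 1;
ExtAudioFileRef file = NULL;
OSStatus err = ExtAudioFileCreateWithURL((__bridge CFURLRef)fileURL,
kAudioFileM4AType,
&fileFormat,
NULL,
kAudioFileFlags_EraseFile,
&file);
NSAssert1(err == noErr, @"Error creating file: %ld", (long)err);
err = ExtAudioFileSetProperty(file,
kExtAudioFileProperty_ClientDataFormat,
sizeof(clientFormat),
&clientFormat);
NSAssert1(err == noErr, @"Error setting client format: %ld", (long)err);
// Render the tone in chunks and hand each chunk to ExtAudioFileWrite,
// which converts from the client PCM format to AAC as it writes.
const UInt32 framesPerChunk = 1024;
Float32 *samples = malloc(framesPerChunk * sizeof(Float32));
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mData = samples;
AudioTimeStamp timeStamp = {0};
UInt32 framesLeft = (UInt32)(seconds * sampleRate);
while (framesLeft > 0) {
UInt32 frames = MIN(framesPerChunk, framesLeft);
bufferList.mBuffers[0].mDataByteSize = frames * sizeof(Float32);
RenderTone((__bridge void *)self, NULL, &timeStamp, 0, frames, &bufferList);
ExtAudioFileWrite(file, frames, &bufferList);
framesLeft -= frames;
}
free(samples);
ExtAudioFileDispose(file);
}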

Related

Using CMSampleTimingInfo, CMSampleBuffer and AudioBufferList from raw PCM 16000 sample rate stream

I receive audio data and its size from an external source. The audio appears to be linear PCM, signed int16, but when I record it using an AssetWriter the saved audio file is highly distorted and higher pitched.
#define kSamplingRate 16000
#define kNumberChannels 1
UInt32 framesAlreadyWritten = 0;
-(AudioStreamBasicDescription) getAudioFormat {
AudioStreamBasicDescription format;
format.mSampleRate = kSamplingRate;
format.mFormatID = kAudioFormatLinearPCM;
format.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
format.mChannelsPerFrame = 1; // mono
format.mBitsPerChannel = 16;
format.mBytesPerFrame = sizeof(SInt16);
format.mFramesPerPacket = 1;
format.mBytesPerPacket = format.mBytesPerFrame * format.mFramesPerPacket;
format.mReserved = 0;
return format;
}
- (CMSampleBufferRef)createAudioSample:(const void *)audioData frames: (UInt32)len {
AudioStreamBasicDescription asbd = [self getAudioFormat];
CMSampleBufferRef buff = NULL;
static CMFormatDescriptionRef format = NULL;
OSStatus error = 0;
if(format == NULL) {
AudioChannelLayout acl;
bzero(&acl, sizeof(acl));
acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono;
error = CMAudioFormatDescriptionCreate(kCFAllocatorDefault, &asbd, sizeof(acl), &acl, 0, NULL, NULL, &format);
}
CMTime duration = CMTimeMake(1, kSamplingRate);
CMTime pts = CMTimeMake(framesAlreadyWritten, kSamplingRate);
NSLog(#"-----------pts");
CMTimeShow(pts);
CMSampleTimingInfo timing = {duration , pts, kCMTimeInvalid };
error = CMSampleBufferCreate(kCFAllocatorDefault, NULL, false, NULL, NULL, format, len, 1, &timing, 0, NULL, &buff);
framesAlreadyWritten += len;
if (error) {
NSLog(#"CMSampleBufferCreate returned error: %ld", (long)error);
return NULL;
}
AudioBufferList audioBufferList;
audioBufferList.mNumberBuffers = 1;
audioBufferList.mBuffers[0].mNumberChannels = asbd.mChannelsPerFrame;
audioBufferList.mBuffers[0].mDataByteSize = (UInt32)(number_of_frames * audioFormat.mBytesPerFrame);
audioBufferList.mBuffers[0].mData = audioData;
error = CMSampleBufferSetDataBufferFromAudioBufferList(buff, kCFAllocatorDefault, kCFAllocatorDefault, 0, &audioBufferList);
if(error) {
NSLog(#"CMSampleBufferSetDataBufferFromAudioBufferList returned error: %ld", (long)error);
return NULL;
}
return buff;
}
Not sure why you're dividing len by two, but your presentation time should progress instead of staying constant, something like:
CMTime time = CMTimeMake(framesAlreadyWritten , kSamplingRate);
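For completeness, a short sketch of how the buffer-size and timing lines might look without any extra division (assuming number_of_frames and audioFormat in the question are meant to be the len parameter and the asbd returned by getAudioFormat):
// Presentation time advances by the number of frames already written.
CMTime pts = CMTimeMake(framesAlreadyWritten, kSamplingRate);
CMSampleTimingInfo timing = { CMTimeMake(1, kSamplingRate), pts, kCMTimeInvalid };
// Byte size is simply frames * bytes-per-frame.
audioBufferList.mBuffers[0].mDataByteSize = (UInt32)(len * asbd.mBytesPerFrame);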

in iOS Extended Audio File Services and Streaming Audio

First, export an m4a file from the iPod library.
Second, convert it to AAC using Extended Audio File Services.
Third, transfer the music file to another iPhone.
Fourth, stream the audio using Audio File Stream Services.
This usually works, but when the audio file's play time is longer than about 5 minutes it fails.
However, when I added 15-minute mp3 and AAC files directly to my project and streamed them, it worked.
So I think the cause is the convert step. Is something wrong with my code?
Could you give me some advice?
-(void)convertFrom:(NSURL*)fromURL
toURL:(NSURL*)toURL{
ExtAudioFileRef infile,outfile;
OSStatus err;
// Create the ExtAudioFile
err = ExtAudioFileOpenURL((__bridge CFURLRef)fromURL, &infile);
checkError(err,"ExtAudioFileOpenURL");
AudioStreamBasicDescription inputFormat;
AudioStreamBasicDescription outputFormat;
AVAudioSession * audiosession =[AVAudioSession sharedInstance];
[audiosession setActive:YES error:nil];
[audiosession setCategory:AVAudioSessionCategoryAudioProcessing error:nil];
// Target format for the conversion (AAC)
memset(&outputFormat, 0, sizeof(AudioStreamBasicDescription));
outputFormat.mSampleRate = 44100.0;
outputFormat.mFormatID = kAudioFormatMPEG4AAC;//AAC
outputFormat.mChannelsPerFrame = 1;
UInt32 size = sizeof(AudioStreamBasicDescription);
AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
0, NULL,
&size,
&outputFormat);//変換後のフォーマット
err = ExtAudioFileGetProperty(infile,//変換前のファイルのプロパティを取得
kExtAudioFileProperty_FileDataFormat,
&size,
&inputFormat);
checkError(err,"ExtAudioFileGetProperty");
// If converting from something other than linear PCM, read it in as linear PCM
if(inputFormat.mFormatID != kAudioFormatLinearPCM){
// Intermediate format to convert through (linear PCM, little endian)
AudioStreamBasicDescription linearPCMFormat;
linearPCMFormat.mSampleRate = outputFormat.mSampleRate;
linearPCMFormat.mFormatID = kAudioFormatLinearPCM;
linearPCMFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
linearPCMFormat.mFramesPerPacket = 1;
linearPCMFormat.mChannelsPerFrame = outputFormat.mChannelsPerFrame;
linearPCMFormat.mBitsPerChannel = 16;
linearPCMFormat.mBytesPerPacket = 2 * outputFormat.mChannelsPerFrame;
linearPCMFormat.mBytesPerFrame = 2 * outputFormat.mChannelsPerFrame;
linearPCMFormat.mReserved = 0;
// Make the read format linear PCM (the intermediate format)
inputFormat = linearPCMFormat;
}
// Set the format used for reading
// Data is always read out as linear PCM
err = ExtAudioFileSetProperty(infile,
kExtAudioFileProperty_ClientDataFormat,
sizeof(AudioStreamBasicDescription),
&inputFormat);
checkError(err,"ExtAudioFileSetProperty");
err = ExtAudioFileCreateWithURL((__bridge CFURLRef)toURL,
kAudioFileM4AType, //AAC
&outputFormat,
NULL,
kAudioFileFlags_EraseFile,
&outfile);
checkError(err,"ExtAudioFileCreateWithURL");
// Tell the output file that its input will be linear PCM
err = ExtAudioFileSetProperty(outfile,
kExtAudioFileProperty_ClientDataFormat,
sizeof(AudioStreamBasicDescription),
&inputFormat);
checkError(err,"kExtAudioFileProperty_ClientDataFormat");
// Move the read position to 0
err = ExtAudioFileSeek(infile, 0);
checkError(err,"ExtAudioFileSeek");
// Number of frames to read at a time
UInt32 readFrameSize = 1024;
// Allocate the read buffer
UInt32 bufferByteSize = sizeof(char) * readFrameSize * inputFormat.mBytesPerPacket;
char *buffer = malloc(bufferByteSize);
// Create the AudioBufferList
AudioBufferList audioBufferList;
audioBufferList.mNumberBuffers = 1;
audioBufferList.mBuffers[0].mNumberChannels = inputFormat.mChannelsPerFrame;
audioBufferList.mBuffers[0].mDataByteSize = bufferByteSize;
audioBufferList.mBuffers[0].mData = buffer;
while(1){
UInt32 numPacketToRead = readFrameSize;
err = ExtAudioFileRead(infile, &numPacketToRead, &audioBufferList);
checkError(err,"ExtAudioFileRead");
// Stop when there are no more frames to read
if(numPacketToRead == 0){
NSLog(#"変換完了");
break;
}
err = ExtAudioFileWrite(outfile,
numPacketToRead,
&audioBufferList);
checkError(err,"ExtAudioFileWrite");
}
ExtAudioFileDispose(infile);
ExtAudioFileDispose(outfile);
free(buffer);
}
void propertyListenerProc(
void * inClientData,
AudioFileStreamID inAudioFileStream,
AudioFileStreamPropertyID inPropertyID,
UInt32 * ioFlags
){
StreamInfo* streamInfo = (StreamInfo*)inClientData;
OSStatus err;
NSLog(#"property%u",(unsigned int)inPropertyID);

Only play audio from array once without looping

I'm a complete beginner when it comes to audio programming, and right now I'm playing around with AudioUnit. I'm following http://www.cocoawithlove.com/2010/10/ios-tone-generator-introduction-to.html and I've ported the code to work with iOS 7. The problem is that I only want it to play the generated sine wave once, not keep playing it in a loop. I'm not sure how to accomplish this, though.
Generating audio samples:
OSStatus RenderTone(
void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
// Fixed amplitude is good enough for our purposes
const double amplitude = 0.25;
// Get the tone parameters out of the view controller
ToneGeneratorViewController *viewController =
(ToneGeneratorViewController *)inRefCon;
double theta = viewController->theta;
double theta_increment =
2.0 * M_PI * viewController->frequency / viewController->sampleRate;
// This is a mono tone generator so we only need the first buffer
const int channel = 0;
Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;
// Generate the samples
for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
buffer[frame] = sin(theta) * amplitude;
theta += theta_increment;
if (theta > 2.0 * M_PI)
{
theta -= 2.0 * M_PI;
}
}
// Store the updated theta back in the view controller
viewController->theta = theta;
return noErr;
}
Creating AudioUnit:
// Configure the search parameters to find the default playback output unit
// (called the kAudioUnitSubType_RemoteIO on iOS but
// kAudioUnitSubType_DefaultOutput on Mac OS X)
AudioComponentDescription defaultOutputDescription;
defaultOutputDescription.componentType = kAudioUnitType_Output;
defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
defaultOutputDescription.componentFlags = 0;
defaultOutputDescription.componentFlagsMask = 0;
// Get the default playback output unit
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
NSAssert(defaultOutput, @"Can't find default output");
// Create a new unit based on this that we'll use for output
OSErr err = AudioComponentInstanceNew(defaultOutput, &toneUnit);
NSAssert1(toneUnit, @"Error creating unit: %ld", err);
// Set our tone rendering function on the unit
AURenderCallbackStruct input;
input.inputProc = RenderTone;
input.inputProcRefCon = self;
err = AudioUnitSetProperty(toneUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&input,
sizeof(input));
NSAssert1(err == noErr, @"Error setting callback: %ld", err);
// Set the format to 32 bit, single channel, floating point, linear PCM
const int four_bytes_per_float = 4;
const int eight_bits_per_byte = 8;
AudioStreamBasicDescription streamFormat;
streamFormat.mSampleRate = sampleRate;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags =
kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
streamFormat.mBytesPerPacket = four_bytes_per_float;
streamFormat.mFramesPerPacket = 1;
streamFormat.mBytesPerFrame = four_bytes_per_float;
streamFormat.mChannelsPerFrame = 1;
streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
err = AudioUnitSetProperty (toneUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&streamFormat,
sizeof(AudioStreamBasicDescription));
NSAssert1(err == noErr, @"Error setting stream format: %ld", err);
Thanks!
The problem is that I only want it to play the generated sine wave once
What you should do is stop the audio unit after a certain time.
You could, e.g., start an NSTimer when you call AudioOutputUnitStart and, when the timer fires, call AudioOutputUnitStop (or rather, your audio unit disposal code). Even simpler, you could use performSelector:withObject:afterDelay: and call your audio unit disposal method, as in the sketch below.
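A minimal sketch of the performSelector:withObject:afterDelay: approach (the playToneForOneSecond / stopTone method names and the 1.0-second duration are placeholders, not from the original code):
// Start the tone, then schedule the stop after the desired duration.
- (void)playToneForOneSecond
{
OSErr err = AudioUnitInitialize(toneUnit);
err = AudioOutputUnitStart(toneUnit);
NSAssert1(err == noErr, @"Error starting unit: %ld", (long)err);
[self performSelector:@selector(stopTone) withObject:nil afterDelay:1.0];
}
// Disposal method invoked once the delay fires; after this the render
// callback is no longer called, so the sine wave plays only once.
- (void)stopTone
{
AudioOutputUnitStop(toneUnit);
AudioUnitUninitialize(toneUnit);
AudioComponentInstanceDispose(toneUnit);
toneUnit = NULL;
}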
Hope this helps.

iOS AudioUnit garbage input and output callback error on writing to right channel

I'm trying to output a sine wave on the left channel and silence on the right channel of an AudioUnit. I receive the following error when trying to write zero to the right channel,
Thread 5: EXC_BAD_ACCESS(code=1, address=0x0)
The callback function where this occurs is below, with the offending line marked by the comment // **** ERROR HERE **** at the end of the line.
Output Callback
static OSStatus outputCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// Scope reference to GSFSensorIOController class
GSFSensorIOController *THIS = (__bridge GSFSensorIOController *) inRefCon;
// Communication out on left and right channel if new communication out
AudioSampleType *outLeftSamples = (AudioSampleType *) ioData->mBuffers[0].mData;
AudioSampleType *outRightSamples = (AudioSampleType *) ioData->mBuffers[1].mData;
// Set up power tone attributes
float freq = 20000.00f;
float sampleRate = 44100.00f;
float phase = THIS.sinPhase;
float sinSignal;
double phaseInc = 2 * M_PI * freq / sampleRate;
for (UInt32 curFrame = 0; curFrame < inNumberFrames; ++curFrame) {
// Generate power tone on left channel
sinSignal = sin(phase);
outLeftSamples[curFrame] = (SInt16) ((sinSignal * 32767.0f) /2);
outRightSamples[curFrame] = (SInt16) (0); // **** ERROR HERE ****
phase += phaseInc;
if (phase >= 2 * M_PI * freq) {
phase = phase - (2 * M_PI * freq);
}
}
// Save sine wave phase wave for next callback
THIS.sinPhase = phase;
return noErr;
}
At the time the error is thrown, curFrame is 0 and outRightSamples is NULL. This leads me to believe that I'm setting up the channels incorrectly. Here is where I set up the IO of my AudioUnit,
Audio Unit Set Up
// Audio component description
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Mono ASBD
AudioStreamBasicDescription monoStreamFormat;
monoStreamFormat.mSampleRate = 44100.00;
monoStreamFormat.mFormatID = kAudioFormatLinearPCM;
monoStreamFormat.mFormatFlags = kAudioFormatFlagsCanonical;
monoStreamFormat.mBytesPerPacket = 2;
monoStreamFormat.mBytesPerFrame = 2;
monoStreamFormat.mFramesPerPacket = 1;
monoStreamFormat.mChannelsPerFrame = 1;
monoStreamFormat.mBitsPerChannel = 16;
// Stereo ASBD
AudioStreamBasicDescription stereoStreamFormat;
stereoStreamFormat.mSampleRate = 44100.00;
stereoStreamFormat.mFormatID = kAudioFormatLinearPCM;
stereoStreamFormat.mFormatFlags = kAudioFormatFlagsCanonical;
stereoStreamFormat.mBytesPerPacket = 4;
stereoStreamFormat.mBytesPerFrame = 4;
stereoStreamFormat.mFramesPerPacket = 1;
stereoStreamFormat.mChannelsPerFrame = 2;
stereoStreamFormat.mBitsPerChannel = 16;
OSErr err;
@try {
// Get Audio units
err = AudioComponentInstanceNew(inputComponent, &_ioUnit);
NSAssert1(err == noErr, @"Error setting input component: %hd", err);
// Enable input, which is disabled by default. Output is enabled by default
UInt32 enableInput = 1;
err = AudioUnitSetProperty(_ioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&enableInput,
sizeof(enableInput));
NSAssert1(err == noErr, @"Error enable input: %hd", err);
err = AudioUnitSetProperty(_ioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&enableInput,
sizeof(enableInput));
NSAssert1(err == noErr, @"Error setting output: %hd", err);
// Apply format to input of ioUnit
err = AudioUnitSetProperty(self.ioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&monoStreamFormat,
sizeof(monoStreamFormat));
NSAssert1(err == noErr, @"Error setting input ASBD: %hd", err);
// Apply format to output of ioUnit
err = AudioUnitSetProperty(self.ioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&stereoStreamFormat,
sizeof(stereoStreamFormat));
NSAssert1(err == noErr, @"Error setting output ASBD: %hd", err);
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = inputCallback;
callbackStruct.inputProcRefCon = (__bridge void *)(self);
err = AudioUnitSetProperty(self.ioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
NSAssert1(err == noErr, @"Error setting input callback: %hd", err);
// Set output callback
callbackStruct.inputProc = outputCallback;
callbackStruct.inputProcRefCon = (__bridge void *)(self);
err = AudioUnitSetProperty(self.ioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
NSAssert1(err == noErr, @"Error setting output callback: %hd", err);
// Disable buffer allocation
UInt32 disableBufferAlloc = 0;
err = AudioUnitSetProperty(self.ioUnit,
kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output,
kInputBus,
&disableBufferAlloc,
sizeof(disableBufferAlloc));
// Allocate input buffers (1 channel, 16 bits per sample, thus 16 bits per frame and therefore 2 bytes per frame
_inBuffer.mNumberChannels = 1;
_inBuffer.mDataByteSize = 512 * 2;
_inBuffer.mData = malloc( 512 * 2 );
// Initialize audio unit
err = AudioUnitInitialize(self.ioUnit);
NSAssert1(err == noErr, @"Error initializing unit: %hd", err);
//AudioUnitInitialize(self.ioUnit);
// Start audio IO
err = AudioOutputUnitStart(self.ioUnit);
NSAssert1(err == noErr, @"Error starting unit: %hd", err);
//AudioOutputUnitStart(self.ioUnit);
}
@catch (NSException *exception) {
NSLog(@"Failed with exception: %@", exception);
}
I don't believe I'm setting up the AudioUnit correctly, because I'm getting random values for my input on the mic line (i.e. printing the input buffers to the console gives values that do not change with ambient noise). Here's how I'm using my input callback,
Input Callback
static OSStatus inputCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// Scope reference to GSFSensorIOController class
GSFSensorIOController *THIS = (__bridge GSFSensorIOController *) inRefCon;
// Set up buffer to hold input data
AudioBuffer buffer;
buffer.mNumberChannels = 1;
buffer.mDataByteSize = inNumberFrames * 2;
buffer.mData = malloc( inNumberFrames * 2 );
// Place buffer in an AudioBufferList
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0] = buffer;
// Grab the samples and place them in the buffer list
AudioUnitRender(THIS.ioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&bufferList);
// Process data
[THIS processIO:&bufferList];
// Free allocated buffer
free(bufferList.mBuffers[0].mData);
return noErr;
}
I've searched for example projects as a reference, and I can't see a difference in the overall implementation. Any help is greatly appreciated.
The Audio Unit's default setting may be interleaved stereo channel data rather than separate buffers for left and right.
The problem here looks to be that you're writing to unallocated memory. ioData->mBuffers[1] isn't valid for interleaved format. Both left and right channels are interleaved in ioData->mBuffers[0]. If you want non-interleaved data, mBytesPerFrame and mBytesPerPacket should be 2, not 4. That's probably why you're failing on AudioUnitInitialize for that format.
It's easier to deal with setting up these formats if you use the CAStreamBasicDescription utility class. See https://developer.apple.com/library/mac/samplecode/CoreAudioUtilityClasses/Introduction/Intro.html.
Setting up AudioStreamBasicDescription would then be as easy as:
CAStreamBasicDescription stereoStreamFormat(44100.0, 2, CAStreamBasicDescription::kPCMFormatInt16, false);
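For the interleaved case, here is a sketch of how the loop in outputCallback could write the tone to the left channel and silence to the right, assuming the 16-bit interleaved stereo format described above (4 bytes per frame, both channels packed into mBuffers[0]):
// Interleaved layout: L0, R0, L1, R1, ... so each frame occupies two SInt16s.
SInt16 *samples = (SInt16 *)ioData->mBuffers[0].mData;
for (UInt32 curFrame = 0; curFrame < inNumberFrames; ++curFrame) {
float sinSignal = sin(phase);
samples[2 * curFrame] = (SInt16)((sinSignal * 32767.0f) / 2); // left channel: tone
samples[2 * curFrame + 1] = 0; // right channel: silence
phase += phaseInc;
if (phase >= 2 * M_PI) phase -= 2 * M_PI;
}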

How can I modify this AudioUnit code so that it has stereo output?

I can't seem to find what I'm looking for in the documentation. This code works great, but I want stereo output.
- (void)createToneUnit
{
// Configure the search parameters to find the default playback output unit
// (called the kAudioUnitSubType_RemoteIO on iOS but
// kAudioUnitSubType_DefaultOutput on Mac OS X)
AudioComponentDescription defaultOutputDescription;
defaultOutputDescription.componentType = kAudioUnitType_Output;
defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
defaultOutputDescription.componentFlags = 0;
defaultOutputDescription.componentFlagsMask = 0;
// Get the default playback output unit
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
NSAssert(defaultOutput, @"Can't find default output");
// Create a new unit based on this that we'll use for output
OSErr err = AudioComponentInstanceNew(defaultOutput, &_toneUnit);
NSAssert1(_toneUnit, @"Error creating unit: %d", err);
// Set our tone rendering function on the unit
AURenderCallbackStruct input;
input.inputProc = RenderTone;
input.inputProcRefCon = (__bridge void*)self;
err = AudioUnitSetProperty(_toneUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&input,
sizeof(input));
NSAssert1(err == noErr, @"Error setting callback: %d", err);
// Set the format to 32 bit, single channel, floating point, linear PCM
const int four_bytes_per_float = 4;
const int eight_bits_per_byte = 8;
AudioStreamBasicDescription streamFormat;
streamFormat.mSampleRate = kSampleRate;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags =
kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
streamFormat.mBytesPerPacket = four_bytes_per_float;
streamFormat.mFramesPerPacket = 1;
streamFormat.mBytesPerFrame = four_bytes_per_float;
streamFormat.mChannelsPerFrame = 1;
streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
err = AudioUnitSetProperty (_toneUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&streamFormat,
sizeof(AudioStreamBasicDescription));
NSAssert1(err == noErr, @"Error setting stream format: %d", err);
}
And here is the callback:
OSStatus RenderTone( void* inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData){
// Get the tone parameters out of the view controller
VWWSynthesizerC *synth = (__bridge VWWSynthesizerC *)inRefCon;
double theta = synth.theta;
double theta_increment = 2.0 * M_PI * synth.frequency / kSampleRate;
// This is a mono tone generator so we only need the first buffer
const int channel = 0;
Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;
// Generate the samples
for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
if(synth.muted){
buffer[frame] = 0;
}
else{
switch(synth.waveType){
case VWWWaveTypeSine:{
buffer[frame] = sin(theta) * synth.amplitude;
break;
}
case VWWWaveTypeSquare:{
buffer[frame] = square(theta) * synth.amplitude;
break;
}
case VWWWaveTypeSawtooth:{
buffer[frame] = sawtooth(theta) * synth.amplitude;
break;
}
case VWWWaveTypeTriangle:{
buffer[frame] = triangle(theta) * synth.amplitude;
break;
}
default:
break;
}
}
theta += theta_increment;
if (theta > 2.0 * M_PI)
{
theta -= 2.0 * M_PI;
}
}
synth.theta = theta;
return noErr;
}
If there is a different or better way to render this data, I'm open to suggestions. I'm rendering sine, square, triangle, sawtooth, and other waves.
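In case it helps, one way to get stereo out of this code is sketched below under the non-interleaved float format already used (this is not an accepted answer, just an illustration): request two channels in the stream format and fill both buffers in the callback.
// In createToneUnit: two non-interleaved Float32 channels instead of one.
// mBytesPerFrame and mBytesPerPacket stay 4, because with the
// kAudioFormatFlagIsNonInterleaved flag each channel gets its own buffer.
streamFormat.mChannelsPerFrame = 2;
// In RenderTone: ioData now carries one buffer per channel.
Float32 *left = (Float32 *)ioData->mBuffers[0].mData;
Float32 *right = (Float32 *)ioData->mBuffers[1].mData;
for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
Float32 sample = sin(theta) * synth.amplitude;
left[frame] = sample;
right[frame] = sample; // or a different signal for true stereo
theta += theta_increment;
if (theta > 2.0 * M_PI)
{
theta -= 2.0 * M_PI;
}
}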
