AudioConverterFillComplexBuffer returns 1852797029 (kAudioCodecIllegalOperationError) - iOS

I'm trying to decode AAC data with AudioToolbox on iOS. I consulted this thread.
The AudioConverterNew call succeeds, but AudioConverterFillComplexBuffer returns error code 1852797029 (kAudioCodecIllegalOperationError).
I'm trying to find my mistake. Thank you for reading.
- (void)initAudioToolBox {
HCAudioAsset* asset = [self.provider getAudioAsset];
AudioStreamBasicDescription outFormat;
memset(&outFormat, 0, sizeof(outFormat));
outFormat.mSampleRate = 44100;
outFormat.mFormatID = kAudioFormatLinearPCM;
outFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
outFormat.mBytesPerPacket = 2;
outFormat.mFramesPerPacket = 1;
outFormat.mBytesPerFrame = 2;
outFormat.mChannelsPerFrame = 1;
outFormat.mBitsPerChannel = 16;
outFormat.mReserved = 0;
AudioStreamBasicDescription inFormat;
memset(&inFormat, 0, sizeof(inFormat));
inFormat.mSampleRate = [asset sampleRate];
inFormat.mFormatID = kAudioFormatMPEG4AAC;
inFormat.mFormatFlags = kMPEG4Object_AAC_LC;
inFormat.mBytesPerPacket = 0;
inFormat.mFramesPerPacket = (UInt32)[asset framePerPacket];
inFormat.mBytesPerFrame = 0;
inFormat.mChannelsPerFrame = (UInt32)[asset channelCount];
inFormat.mBitsPerChannel = 0;
inFormat.mReserved = 0;
OSStatus status = AudioConverterNew(&inFormat, &outFormat, &audioConverter);
if (status != noErr) {
NSLog(#"setup converter error, status: %i\n", (int)status);
} else {
NSLog(#"Audio Converter is initialized successfully.");
}
}
typedef struct _PassthroughUserData PassthroughUserData;
struct _PassthroughUserData {
UInt32 mChannels;
UInt32 mDataSize;
const void* mData;
AudioStreamPacketDescription mPacket;
};
int inInputDataProc(AudioConverterRef aAudioConverter,
UInt32* aNumDataPackets,
AudioBufferList* aData,
AudioStreamPacketDescription** aPacketDesc,
void* aUserData)
{
PassthroughUserData* userData = (PassthroughUserData*)aUserData;
if (!userData->mDataSize) {
*aNumDataPackets = 0;
NSLog(#"inInputDataProc returns -1");
return -1;
}
if (aPacketDesc) {
userData->mPacket.mStartOffset = 0;
userData->mPacket.mVariableFramesInPacket = 0;
userData->mPacket.mDataByteSize = userData->mDataSize;
NSLog(#"mDataSize:%d", userData->mDataSize);
*aPacketDesc = &userData->mPacket;
}
aData->mBuffers[0].mNumberChannels = userData->mChannels;
aData->mBuffers[0].mDataByteSize = userData->mDataSize;
aData->mBuffers[0].mData = (void*)(userData->mData);
NSLog(#"buffer[0] - channel:%d, byte size:%u, data:%p",
aData->mBuffers[0].mNumberChannels,
(unsigned int)aData->mBuffers[0].mDataByteSize,
aData->mBuffers[0].mData);
// No more data to provide following this run.
userData->mDataSize = 0;
NSLog(#"inInputDataProc returns 0");
return 0;
}
- (void)decodeAudioFrame:(NSData *)frame withPts:(NSInteger)pts{
if(!audioConverter){
[self initAudioToolBox];
}
HCAudioAsset* asset = [self.provider getAudioAsset];
PassthroughUserData userData = { (UInt32)[asset channelCount], (UInt32)frame.length, [frame bytes]};
NSMutableData *decodedData = [NSMutableData new];
const uint32_t MAX_AUDIO_FRAMES = 128;
const uint32_t maxDecodedSamples = MAX_AUDIO_FRAMES * 1;
do {
uint8_t *buffer = (uint8_t *)malloc(maxDecodedSamples * sizeof(short int));
AudioBufferList decBuffer;
memset(&decBuffer, 0, sizeof(AudioBufferList));
decBuffer.mNumberBuffers = 1;
decBuffer.mBuffers[0].mNumberChannels = 2;
decBuffer.mBuffers[0].mDataByteSize = maxDecodedSamples * sizeof(short int);
decBuffer.mBuffers[0].mData = buffer;
UInt32 numFrames = MAX_AUDIO_FRAMES;
AudioStreamPacketDescription outPacketDescription;
memset(&outPacketDescription, 0, sizeof(AudioStreamPacketDescription));
outPacketDescription.mDataByteSize = MAX_AUDIO_FRAMES;
outPacketDescription.mStartOffset = 0;
outPacketDescription.mVariableFramesInPacket = 0;
NSLog(#"frame - size:%lu, buffer:%p", [frame length], [frame bytes]);
OSStatus rv = AudioConverterFillComplexBuffer(audioConverter,
inInputDataProc,
&userData,
&numFrames,
&decBuffer,
&outPacketDescription);
NSLog(#"num frames:%d, dec buffer [0] channels:%d, dec buffer [0] data byte size:%d, rv:%d",
numFrames, decBuffer.mBuffers[0].mNumberChannels,
decBuffer.mBuffers[0].mDataByteSize, (int)rv);
if (rv && rv != noErr) {
NSLog(#"Error decoding audio stream: %d\n", rv);
break;
}
if (numFrames) {
[decodedData appendBytes:decBuffer.mBuffers[0].mData length:decBuffer.mBuffers[0].mDataByteSize];
}
} while (true);
//void *pData = (void *)[decodedData bytes];
//audioRenderer->Render(&pData, decodedData.length, pts);
}
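For what it's worth, two things worth checking first (educated guesses from reading the code, not a confirmed fix): the output ASBD uses packed byte counts but omits kLinearPCMFormatFlagIsPacked, and decBuffer in decodeAudioFrame declares 2 channels per buffer even though the converter was created for mono output. Unrelated to the error, the buffer malloc'd inside the loop is also never freed. A sketch of a consistent output setup:
// Hedged sketch: packed 16-bit mono PCM, matching the byte counts already used above.
AudioStreamBasicDescription outFormat = {0};
outFormat.mSampleRate = 44100;
outFormat.mFormatID = kAudioFormatLinearPCM;
outFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
outFormat.mFramesPerPacket = 1;      // PCM is always one frame per packet
outFormat.mChannelsPerFrame = 1;     // mono
outFormat.mBitsPerChannel = 16;
outFormat.mBytesPerFrame = 2;        // 1 channel * 16 bits / 8
outFormat.mBytesPerPacket = 2;       // mBytesPerFrame * mFramesPerPacket
// ...and the decode buffer should claim the same channel count as the output format:
decBuffer.mBuffers[0].mNumberChannels = 1;
decBuffer.mBuffers[0].mDataByteSize = maxDecodedSamples * sizeof(SInt16);
decBuffer.mBuffers[0].mData = buffer;   // remember to free(buffer) after appending to decodedData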

Related

AES CBC encrypt/decrypt results are different (Objective-C, C#)

I have C# code for AES decryption, and I want to produce the matching encryption result in Objective-C, but I failed. I can change the Objective-C code; what should I do?
The C# code for decryption:
private static readonly string AES_KEY = "asdfasdfasdfasdf";
private static readonly int BUFFER_SIZE = 1024 * 4;
private static readonly int KEY_SIZE = 128;
private static readonly int BLOCK_SIZE = 128;
static public string Composite(string value)
{
using (AesManaged aes = new AesManaged())
using (MemoryStream ims = new MemoryStream(Convert.FromBase64String(value), false))
{
aes.KeySize = KEY_SIZE;
aes.BlockSize = BLOCK_SIZE;
aes.Mode = CipherMode.CBC;
aes.Key = Encoding.UTF8.GetBytes(AES_KEY);
byte[] iv = new byte[aes.IV.Length];
ims.Read(iv, 0, iv.Length);
aes.IV = iv;
using (ICryptoTransform ce = aes.CreateDecryptor(aes.Key, aes.IV))
using (CryptoStream cs = new CryptoStream(ims, ce, CryptoStreamMode.Read))
using (DeflateStream ds = new DeflateStream(cs, CompressionMode.Decompress))
using (MemoryStream oms = new MemoryStream())
{
byte[] buf = new byte[BUFFER_SIZE];
for (int size = ds.Read(buf, 0, buf.Length); size > 0; size = ds.Read(buf, 0, buf.Length))
{
oms.Write(buf, 0, size);
}
return Encoding.UTF8.GetString(oms.ToArray());
}
}
}
The Objective-C code for encryption:
- (NSString *)AES128EncryptWithKey:(NSString *)key
{
NSData *plainData = [self dataUsingEncoding:NSUTF8StringEncoding];
NSData *encryptedData = [plainData AES128EncryptWithKey:key];
NSString *encryptedString = [encryptedData stringUsingEncodingBase64];
return encryptedString;
}
#import "NSData+AESCrypt.h"
#import <CommonCrypto/CommonCryptor.h>
static char encodingTable[64] =
{
'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P',
'Q','R','S','T','U','V','W','X','Y','Z','a','b','c','d','e','f',
'g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v',
'w','x','y','z','0','1','2','3','4','5','6','7','8','9','+','/'
};
@implementation NSData (AESCrypt)
- (NSData *)AES128EncryptWithKey:(NSString *)key
{
// 'key' should be 16 bytes for AES128
char keyPtr[kCCKeySizeAES128 + 1]; // room for terminator (unused)
bzero( keyPtr, sizeof( keyPtr ) ); // fill with zeroes (for padding)
// fetch key data
[key getCString:keyPtr maxLength:sizeof( keyPtr ) encoding:NSUTF8StringEncoding];
NSUInteger dataLength = [self length];
//See the doc: For block ciphers, the output size will always be less than or
//equal to the input size plus the size of one block.
//That's why we need to add the size of one block here
size_t bufferSize = dataLength + kCCBlockSizeAES128;
void *buffer = malloc( bufferSize );
size_t numBytesEncrypted = 0;
CCCryptorStatus cryptStatus = CCCrypt( kCCEncrypt, kCCAlgorithmAES128, kCCModeCBC | kCCOptionPKCS7Padding,
keyPtr, kCCKeySizeAES128,
NULL /* initialization vector (optional) */,
[self bytes], dataLength, /* input */
buffer, bufferSize, /* output */
&numBytesEncrypted );
if( cryptStatus == kCCSuccess )
{
//the returned NSData takes ownership of the buffer and will free it on deallocation
return [NSData dataWithBytesNoCopy:buffer length:numBytesEncrypted];
}
free( buffer ); //free the buffer
return nil;
}
- (NSString *)base64Encoding
{
const unsigned char *bytes = [self bytes];
NSMutableString *result = [NSMutableString stringWithCapacity:self.length];
unsigned long ixtext = 0;
unsigned long lentext = self.length;
long ctremaining = 0;
unsigned char inbuf[3], outbuf[4];
unsigned short i = 0;
unsigned short charsonline = 0, ctcopy = 0;
unsigned long ix = 0;
while( YES )
{
ctremaining = lentext - ixtext;
if( ctremaining <= 0 ) break;
for( i = 0; i < 3; i++ )
{
ix = ixtext + i;
if( ix < lentext ) inbuf[i] = bytes[ix];
else inbuf [i] = 0;
}
outbuf [0] = (inbuf [0] & 0xFC) >> 2;
outbuf [1] = ((inbuf [0] & 0x03) << 4) | ((inbuf [1] & 0xF0) >> 4);
outbuf [2] = ((inbuf [1] & 0x0F) << 2) | ((inbuf [2] & 0xC0) >> 6);
outbuf [3] = inbuf [2] & 0x3F;
ctcopy = 4;
switch( ctremaining )
{
case 1:
ctcopy = 2;
break;
case 2:
ctcopy = 3;
break;
}
for( i = 0; i < ctcopy; i++ )
[result appendFormat:@"%c", encodingTable[outbuf[i]]];
for( i = ctcopy; i < 4; i++ )
[result appendString:@"="];
ixtext += 3;
charsonline += 4;
}
return [NSString stringWithString:result];
}
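Three mismatches between the two sides stand out (observations from reading the code, not a verified fix). First, the C# Composite() reads the IV from the first 16 bytes of the stream, while the Objective-C encryptor passes NULL (an all-zero IV) and never prepends one. Second, CCCrypt takes CCOptions flags, and kCCModeCBC (value 2) is not one of them — ORing it in actually sets kCCOptionECBMode, so the Objective-C code silently encrypts in ECB mode. Third, the C# side wraps the decrypted bytes in a DeflateStream, so it expects the plaintext to have been DEFLATE-compressed before encryption. A sketch of an IV-prefixed CBC encryptor whose output the C# read loop could consume (compression would still have to happen upstream):
#import <CommonCrypto/CommonCryptor.h>
#import <Security/Security.h>
// Hedged sketch: CBC with a random IV prepended to the ciphertext, so the C#
// Composite() method above can read the IV from the first block of the stream.
// Note kCCOptionPKCS7Padding alone: CBC is CCCrypt's default mode, and passing
// kCCModeCBC (value 2) here would actually set kCCOptionECBMode.
NSData *AES128CBCEncryptWithIVPrefix(NSData *plain, NSData *key) {
    uint8_t iv[kCCBlockSizeAES128];
    if (SecRandomCopyBytes(kSecRandomDefault, sizeof(iv), iv) != 0) return nil;
    size_t bufferSize = plain.length + kCCBlockSizeAES128;
    NSMutableData *cipher = [NSMutableData dataWithLength:bufferSize];
    size_t outLength = 0;
    CCCryptorStatus status = CCCrypt(kCCEncrypt, kCCAlgorithmAES128,
                                     kCCOptionPKCS7Padding,      // CBC by default
                                     key.bytes, kCCKeySizeAES128,
                                     iv,                         // real IV, not NULL
                                     plain.bytes, plain.length,
                                     cipher.mutableBytes, bufferSize,
                                     &outLength);
    if (status != kCCSuccess) return nil;
    cipher.length = outLength;
    NSMutableData *out = [NSMutableData dataWithBytes:iv length:sizeof(iv)];
    [out appendData:cipher];   // stream layout: IV || ciphertext
    return out;
}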

Using CMSampleTimingInfo, CMSampleBuffer and AudioBufferList from raw PCM 16000 sample rate stream

I receive audio data and its size from an external source. The audio appears to be linear PCM, signed int16, but when I record it using an AVAssetWriter, the saved audio file is highly distorted and higher-pitched.
#define kSamplingRate 16000
#define kNumberChannels 1
UInt32 framesAlreadyWritten = 0;
-(AudioStreamBasicDescription) getAudioFormat {
AudioStreamBasicDescription format;
format.mSampleRate = kSamplingRate;
format.mFormatID = kAudioFormatLinearPCM;
format.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
format.mChannelsPerFrame = 1; // mono
format.mBitsPerChannel = 16;
format.mBytesPerFrame = sizeof(SInt16);
format.mFramesPerPacket = 1;
format.mBytesPerPacket = format.mBytesPerFrame * format.mFramesPerPacket;
format.mReserved = 0;
return format;
}
- (CMSampleBufferRef)createAudioSample:(const void *)audioData frames: (UInt32)len {
AudioStreamBasicDescription asbd = [self getAudioFormat];
CMSampleBufferRef buff = NULL;
static CMFormatDescriptionRef format = NULL;
OSStatus error = 0;
if(format == NULL) {
AudioChannelLayout acl;
bzero(&acl, sizeof(acl));
acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono;
error = CMAudioFormatDescriptionCreate(kCFAllocatorDefault, &asbd, sizeof(acl), &acl, 0, NULL, NULL, &format);
}
CMTime duration = CMTimeMake(1, kSamplingRate);
CMTime pts = CMTimeMake(framesAlreadyWritten, kSamplingRate);
NSLog(#"-----------pts");
CMTimeShow(pts);
CMSampleTimingInfo timing = {duration , pts, kCMTimeInvalid };
error = CMSampleBufferCreate(kCFAllocatorDefault, NULL, false, NULL, NULL, format, len, 1, &timing, 0, NULL, &buff);
framesAlreadyWritten += len;
if (error) {
NSLog(#"CMSampleBufferCreate returned error: %ld", (long)error);
return NULL;
}
AudioBufferList audioBufferList;
audioBufferList.mNumberBuffers = 1;
audioBufferList.mBuffers[0].mNumberChannels = asbd.mChannelsPerFrame;
audioBufferList.mBuffers[0].mDataByteSize = (UInt32)(len * asbd.mBytesPerFrame);
audioBufferList.mBuffers[0].mData = (void *)audioData;
error = CMSampleBufferSetDataBufferFromAudioBufferList(buff, kCFAllocatorDefault, kCFAllocatorDefault, 0, &audioBufferList);
if(error) {
NSLog(#"CMSampleBufferSetDataBufferFromAudioBufferList returned error: %ld", (long)error);
return NULL;
}
return buff;
}
Not sure why you're dividing len by two, but your time should progress instead of staying constant, something like:
CMTime time = CMTimeMake(framesAlreadyWritten , kSamplingRate);
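One more thing worth ruling out (an assumption; the writer setup isn't shown in the question): distorted, higher-pitched playback is the classic symptom of 16 kHz samples being interpreted at a higher sample rate somewhere downstream. The AVAssetWriterInput settings should agree with the 16 kHz mono ASBD, for example:
// Hedged sketch: AAC output settings that match the incoming 16 kHz mono PCM.
NSDictionary *settings = @{
    AVFormatIDKey: @(kAudioFormatMPEG4AAC),
    AVSampleRateKey: @(kSamplingRate),         // 16000, same as the ASBD
    AVNumberOfChannelsKey: @(kNumberChannels)  // 1, same as the ASBD
};
AVAssetWriterInput *audioInput =
    [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeAudio
                                       outputSettings:settings];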

How to resample PCM data on iOS

I want to use AudioConverterFillComplexBuffer to convert the sample rate of a PCM buffer (32 kHz to 44.1 kHz), but I don't know why the voice sounds changed (a lot of noise). Here is the main code:
struct AudioFrame {
int samples; //number of samples in this frame. e.g. 320
int bytesPerSample; //number of bytes per sample: 2 for PCM16.
int channels; //number of channels (data are interleaved if stereo)
int samplesPerSec; //sampling rate
void* buffer; //data buffer
};
-(void)convertAudioFrame:(AudioFrame *)buffer outPutData:(unsigned char **)outPutData outPutDataSize:(UInt32 *)outPutDataSize{
if (buffer->bytesPerSample != self.unitDescription.mBitsPerChannel ||
buffer->channels != self.unitDescription.mChannelsPerFrame ||
buffer->samplesPerSec != self.unitDescription.mSampleRate){
// describe the input format's description
AudioStreamBasicDescription inputDescription = {0};
inputDescription.mFormatID = kAudioFormatLinearPCM;
inputDescription.mFormatFlags = kLinearPCMFormatFlagIsPacked | kLinearPCMFormatFlagIsSignedInteger;
inputDescription.mChannelsPerFrame = buffer->channels;
inputDescription.mSampleRate = buffer->samplesPerSec;
inputDescription.mBitsPerChannel = 16;
inputDescription.mBytesPerFrame = (inputDescription.mBitsPerChannel/8) * inputDescription.mChannelsPerFrame;
inputDescription.mFramesPerPacket = 1;
inputDescription.mBytesPerPacket = inputDescription.mBytesPerFrame;
AudioStreamBasicDescription outputDescription = {0};
outputDescription.mSampleRate = 44100;
outputDescription.mFormatID = kAudioFormatLinearPCM;
outputDescription.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
outputDescription.mChannelsPerFrame = 1;
outputDescription.mFramesPerPacket = 1;
outputDescription.mBitsPerChannel = 16;
outputDescription.mBytesPerFrame = (outputDescription.mBitsPerChannel/8) * outputDescription.mChannelsPerFrame;
outputDescription.mBytesPerPacket = outputDescription.mBytesPerFrame;
// create an audio converter
AudioConverterRef audioConverter;
OSStatus status = AudioConverterNew(&inputDescription, &outputDescription, &audioConverter);
[self checkError:status errorMsg:@"AudioConverterNew error"];
if(!audioConverter)
{
*outPutDataSize = 0;
return;
}
UInt32 outputBytes = outputDescription.mBytesPerPacket * (buffer->samples*buffer->bytesPerSample / inputDescription.mBytesPerPacket);
unsigned char *outputBuffer = (unsigned char*)malloc(outputBytes);
memset(outputBuffer, 0, outputBytes);
AudioBuffer inputBuffer;
inputBuffer.mNumberChannels = inputDescription.mChannelsPerFrame;
inputBuffer.mDataByteSize = buffer->samples*buffer->bytesPerSample;
inputBuffer.mData = buffer->buffer;
AudioBufferList outputBufferList;
outputBufferList.mNumberBuffers = 1;
outputBufferList.mBuffers[0].mNumberChannels = outputDescription.mChannelsPerFrame;
outputBufferList.mBuffers[0].mDataByteSize = outputBytes;
outputBufferList.mBuffers[0].mData = outputBuffer;
UInt32 outputDataPacketSize = outputBytes / outputDescription.mBytesPerPacket;
self.currentBuffer = &inputBuffer;
self.currentInputDescription = inputDescription;
// convert
OSStatus result = AudioConverterFillComplexBuffer(audioConverter,
converterComplexInputDataProc,
(__bridge void*)self,
&outputDataPacketSize,
&outputBufferList,
NULL);
[self checkError:result errorMsg:@"AudioConverterConvertBuffer error"];
*outPutData = outputBuffer;
*outPutDataSize = outputBytes;
AudioConverterDispose(audioConverter);
}
}
//convert callback
OSStatus converterComplexInputDataProc(AudioConverterRef inAudioConverter,
UInt32* ioNumberDataPackets, AudioBufferList* ioData, AudioStreamPacketDescription** ioDataPacketDescription, void* inUserData){
XMMicAudioManager *self = (__bridge XMMicAudioManager *)inUserData;
ioData->mNumberBuffers = 1;
ioData->mBuffers[0] = *(self.currentBuffer);
*ioNumberDataPackets = ioData->mBuffers[0].mDataByteSize / self.currentInputDescription.mBytesPerPacket;
return 0;
}
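One plausible cause of the noise (an assumption, not a verified diagnosis): the callback above hands the converter the same buffer on every call and never signals that the input is exhausted, so the converter may pull the same bytes repeatedly. A sketch of the callback modified to supply the buffer exactly once per conversion; currentBufferUsed is a hypothetical flag added for this sketch:
// Hedged sketch: report input exhaustion instead of re-serving the same buffer.
OSStatus converterComplexInputDataProc(AudioConverterRef inAudioConverter,
                                       UInt32 *ioNumberDataPackets,
                                       AudioBufferList *ioData,
                                       AudioStreamPacketDescription **ioDataPacketDescription,
                                       void *inUserData)
{
    XMMicAudioManager *self = (__bridge XMMicAudioManager *)inUserData;
    if (self.currentBufferUsed) {      // nothing left: a nonzero status ends the pull
        *ioNumberDataPackets = 0;
        return 'done';
    }
    ioData->mNumberBuffers = 1;
    ioData->mBuffers[0] = *(self.currentBuffer);
    *ioNumberDataPackets = ioData->mBuffers[0].mDataByteSize / self.currentInputDescription.mBytesPerPacket;
    self.currentBufferUsed = YES;      // hand the buffer out only once
    return noErr;
}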

No output is generated when using AudioConverterFillComplexBuffer to convert from AAC to PCM?

Hi, I am trying to convert an AAC buffer to PCM using AudioConverterFillComplexBuffer. Here is my code:
-(void)initDecoder{
AudioStreamBasicDescription outAudioStreamBasicDescription;
outAudioStreamBasicDescription.mSampleRate = 44100.0;
outAudioStreamBasicDescription.mFormatID = kAudioFormatLinearPCM;
outAudioStreamBasicDescription.mFormatFlags = 0xc;
outAudioStreamBasicDescription.mBytesPerPacket = 2;
outAudioStreamBasicDescription.mFramesPerPacket = 1;
outAudioStreamBasicDescription.mBytesPerFrame = 2;
outAudioStreamBasicDescription.mChannelsPerFrame = 1;
outAudioStreamBasicDescription.mBitsPerChannel = 16;
AudioStreamBasicDescription inAudioStreamBasicDescription;
inAudioStreamBasicDescription.mSampleRate = 44100;
inAudioStreamBasicDescription.mFormatID = kAudioFormatMPEG4AAC;
inAudioStreamBasicDescription.mFormatFlags = kMPEG4Object_AAC_SSR;
inAudioStreamBasicDescription.mBytesPerPacket = 0;
inAudioStreamBasicDescription.mFramesPerPacket = 1024;
inAudioStreamBasicDescription.mBytesPerFrame = 0;
inAudioStreamBasicDescription.mChannelsPerFrame = 1;
inAudioStreamBasicDescription.mBitsPerChannel = 0;
inAudioStreamBasicDescription.mReserved = 0;
AudioClassDescription audioClassDescription;
memset(&audioClassDescription, 0, sizeof(audioClassDescription));
audioClassDescription.mManufacturer = kAppleSoftwareAudioCodecManufacturer;
audioClassDescription.mSubType = outAudioStreamBasicDescription.mFormatID;
audioClassDescription.mType = kAudioFormatLinearPCM;
NSAssert(audioClassDescription.mSubType == outAudioStreamBasicDescription.mFormatID && audioClassDescription.mManufacturer == kAppleSoftwareAudioCodecManufacturer, nil);
NSAssert(AudioConverterNewSpecific(&inAudioStreamBasicDescription, &outAudioStreamBasicDescription, 1, &audioClassDescription, &audioConverterDecode) == 0, nil);
}
OSStatus inInputDataProc(AudioConverterRef inAudioConverter, UInt32 *ioNumberDataPackets, AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData)
{
AudioBufferList audioBufferList = *(AudioBufferList *)inUserData;
ioData->mBuffers[0].mData = audioBufferList.mBuffers[0].mData;
ioData->mBuffers[0].mDataByteSize = audioBufferList.mBuffers[0].mDataByteSize;
return noErr;
}
-(void)decodeSample:(AudioBufferList)inAaudioBufferList{
//inAaudioBufferList is the AAC buffer
if (!audioConverterDecode) {
[self initDecoder];
}
NSAssert(inAaudioBufferList.mNumberBuffers == 1, nil);
uint32_t bufferSize = 1024*2;//inAaudioBufferList.mBuffers[0].mDataByteSize;
uint8_t *buffer = (uint8_t *)malloc(1024*2);
memset(buffer, 0, bufferSize);
AudioBufferList outAudioBufferList;
outAudioBufferList.mNumberBuffers = 1;
outAudioBufferList.mBuffers[0].mNumberChannels = 1;
outAudioBufferList.mBuffers[0].mDataByteSize = bufferSize;
outAudioBufferList.mBuffers[0].mData = buffer;
UInt32 ioOutputDataPacketSize = bufferSize;
OSStatus ret = AudioConverterFillComplexBuffer(audioConverterDecode, inInputDataProc, &inAaudioBufferList, &ioOutputDataPacketSize, &outAudioBufferList, NULL) ;//== 0, nil);
NSData *data = [NSData dataWithBytes:outAudioBufferList.mBuffers[0].mData length:outAudioBufferList.mBuffers[0].mDataByteSize];
DLog(#"Rev Size = %d",(unsigned int)outAudioBufferList.mBuffers[0].mDataByteSize);
free(buffer);
}
The decoded output length is zero, and the OSStatus returned by AudioConverterFillComplexBuffer is 561015652 (the FourCC '!pkd', i.e. kAudioConverterErr_RequiresPacketDescriptionsError).
So what could be wrong?
This is a shot in the dark, and you probably already found the solution, but I believe you need to change this
UInt32 ioOutputDataPacketSize = bufferSize;
to this
UInt32 ioOutputDataPacketSize = bufferSize/2;
Personally, I handle this in the inInputDataProc method: I always pass
UInt32 ioOutputDataPacketSize = 1;
into the AudioConverterFillComplexBuffer call and then set the packet count inside inInputDataProc like this:
UInt32 maxPackets = audioBufferList.mBuffers[0].mDataByteSize / 2;
*ioNumberDataPackets = maxPackets;
Hope this helps.
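Two more things stand out (assumptions from reading the code, but consistent with the '!pkd' status above): inInputDataProc never sets *ioNumberDataPackets or supplies a packet description for the compressed AAC input, and the AudioClassDescription looks inverted — for a decoder, mType is normally kAudioDecoder and mSubType the compressed format (kAudioFormatMPEG4AAC). A sketch of a callback that reports one packet per call; the static description is purely illustrative:
// Hedged sketch: provide a packet count and description for the AAC input.
static AudioStreamPacketDescription sPacketDesc;
OSStatus inInputDataProc(AudioConverterRef inAudioConverter,
                         UInt32 *ioNumberDataPackets,
                         AudioBufferList *ioData,
                         AudioStreamPacketDescription **outDataPacketDescription,
                         void *inUserData)
{
    AudioBufferList *audioBufferList = (AudioBufferList *)inUserData;
    ioData->mBuffers[0].mData = audioBufferList->mBuffers[0].mData;
    ioData->mBuffers[0].mDataByteSize = audioBufferList->mBuffers[0].mDataByteSize;
    *ioNumberDataPackets = 1;                 // one AAC packet per callback
    if (outDataPacketDescription) {           // '!pkd' means the converter wants this
        sPacketDesc.mStartOffset = 0;
        sPacketDesc.mVariableFramesInPacket = 0;
        sPacketDesc.mDataByteSize = ioData->mBuffers[0].mDataByteSize;
        *outDataPacketDescription = &sPacketDesc;
    }
    return noErr;
}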

How can I modify this AudioUnit code so that it has stereo output?

I can't seem to find what I'm looking for in the documentation. This code works great, but I want stereo output.
- (void)createToneUnit
{
// Configure the search parameters to find the default playback output unit
// (called the kAudioUnitSubType_RemoteIO on iOS but
// kAudioUnitSubType_DefaultOutput on Mac OS X)
AudioComponentDescription defaultOutputDescription;
defaultOutputDescription.componentType = kAudioUnitType_Output;
defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
defaultOutputDescription.componentFlags = 0;
defaultOutputDescription.componentFlagsMask = 0;
// Get the default playback output unit
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
NSAssert(defaultOutput, @"Can't find default output");
// Create a new unit based on this that we'll use for output
OSErr err = AudioComponentInstanceNew(defaultOutput, &_toneUnit);
NSAssert1(_toneUnit, @"Error creating unit: %d", err);
// Set our tone rendering function on the unit
AURenderCallbackStruct input;
input.inputProc = RenderTone;
input.inputProcRefCon = (__bridge void*)self;
err = AudioUnitSetProperty(_toneUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&input,
sizeof(input));
NSAssert1(err == noErr, #"Error setting callback: %d", err);
// Set the format to 32 bit, single channel, floating point, linear PCM
const int four_bytes_per_float = 4;
const int eight_bits_per_byte = 8;
AudioStreamBasicDescription streamFormat;
streamFormat.mSampleRate = kSampleRate;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags =
kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
streamFormat.mBytesPerPacket = four_bytes_per_float;
streamFormat.mFramesPerPacket = 1;
streamFormat.mBytesPerFrame = four_bytes_per_float;
streamFormat.mChannelsPerFrame = 1;
streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
err = AudioUnitSetProperty (_toneUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&streamFormat,
sizeof(AudioStreamBasicDescription));
NSAssert1(err == noErr, #"Error setting stream format: %dd", err);
}
And here is the callback:
OSStatus RenderTone( void* inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData){
// Get the tone parameters out of the view controller
VWWSynthesizerC *synth = (__bridge VWWSynthesizerC *)inRefCon;
double theta = synth.theta;
double theta_increment = 2.0 * M_PI * synth.frequency / kSampleRate;
// This is a mono tone generator so we only need the first buffer
const int channel = 0;
Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;
// Generate the samples
for (UInt32 frame = 0; frame < inNumberFrames; frame++)
{
if(synth.muted){
buffer[frame] = 0;
}
else{
switch(synth.waveType){
case VWWWaveTypeSine:{
buffer[frame] = sin(theta) * synth.amplitude;
break;
}
case VWWWaveTypeSquare:{
buffer[frame] = square(theta) * synth.amplitude;
break;
}
case VWWWaveTypeSawtooth:{
buffer[frame] = sawtooth(theta) * synth.amplitude;
break;
}
case VWWWaveTypeTriangle:{
buffer[frame] = triangle(theta) * synth.amplitude;
break;
}
default:
break;
}
}
theta += theta_increment;
if (theta > 2.0 * M_PI)
{
theta -= 2.0 * M_PI;
}
}
synth.theta = theta;
return noErr;
}
If there is a different or better way to render this data, I'm open to suggestions. I'm rendering sine, square, triangle, sawtooth, etc. waves.
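Untested, but one minimal path to stereo with the code above (an assumption, not the only approach): keep the non-interleaved float format, declare two channels, and fill both buffers in the callback. With kAudioFormatFlagIsNonInterleaved, the per-frame byte counts stay at one float because each channel lives in its own buffer.
// In createToneUnit: only the channel count changes for non-interleaved audio.
streamFormat.mChannelsPerFrame = 2;   // stereo
// In RenderTone: ioData->mNumberBuffers will be 2, one buffer per channel.
Float32 *left  = (Float32 *)ioData->mBuffers[0].mData;
Float32 *right = (Float32 *)ioData->mBuffers[1].mData;
for (UInt32 frame = 0; frame < inNumberFrames; frame++) {
    Float32 sample = sin(theta) * synth.amplitude;  // or square/sawtooth/triangle
    left[frame]  = sample;
    right[frame] = sample;     // duplicate for center; scale differently to pan
    theta += theta_increment;
    if (theta > 2.0 * M_PI) theta -= 2.0 * M_PI;
}
synth.theta = theta;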
