Playing PCM data using Audio Queues - iOS

I have referred to this to play a PCM file using Audio Queues.
The code is as follows:
#import "PlayPCM.h"
AudioFileID audioFile;
SInt64 inStartingPacket = 0;
AudioQueueRef audioQueue;
@implementation PlayPCM
void AudioOutputCallback(
void* inUserData,
AudioQueueRef outAQ,
AudioQueueBufferRef outBuffer)
{
AudioStreamPacketDescription* packetDescs;
UInt32 bytesRead;
UInt32 numPackets = 8000;
OSStatus status;
status = AudioFileReadPackets(audioFile,
false,
&bytesRead,
packetDescs,
inStartingPacket,
&numPackets,
outBuffer->mAudioData);
if(numPackets)
{
outBuffer->mAudioDataByteSize = bytesRead;
status = AudioQueueEnqueueBuffer(audioQueue,
outBuffer,
0,
packetDescs);
inStartingPacket += numPackets;
}
else
{
NSLog(#"number of packets = null ") ;
AudioQueueFreeBuffer(audioQueue, outBuffer);
}
}
-(id)init{
if (self = [super init]) {
}
return self;
}
- (void)setupAudioFormat
{
NSLog(#"setting format");
format.mFormatID = kAudioFormatLinearPCM;
format.mSampleRate = 44100;
format.mFramesPerPacket = 1;
format.mChannelsPerFrame = 1;
format.mBytesPerFrame = 2;
format.mBytesPerPacket = 2;
format.mBitsPerChannel = 16;
format.mFormatFlags = kLinearPCMFormatFlagIsBigEndian |
kLinearPCMFormatFlagIsSignedInteger |
kLinearPCMFormatFlagIsPacked;
}
- (void)startPlayback
{
int counter = 0;
[self setupAudioFormat];
OSStatus status;
NSString *documentsDirectory = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) objectAtIndex:0];
NSString *filePath = [documentsDirectory stringByAppendingPathComponent:@"test1.wav"];
NSLog(@"file path = %@",filePath);
//fUrl = [NSURL URLWithPath:@"file:///Users/Inscripts/Desktop/test1.wav"];
fUrl = [NSURL fileURLWithPath:filePath];
//CFURLRef fileURL = (__bridge CFURLRef)(fUrl);
CFURLRef fileURL = CFURLCreateWithString(NULL, (CFStringRef) filePath, NULL);
status = AudioFileOpenURL(fileURL, kAudioFileReadPermission, 0,&audioFile);
NSLog(#"file opening status = %d",(int)status);
if(status == 0)
{ NSLog(#"file opened");
status = AudioQueueNewOutput(&(format),
AudioOutputCallback,
(__bridge void *)(self),
CFRunLoopGetCurrent(),
kCFRunLoopCommonModes,
0,
&audioQueue);
NSLog(#"audio queue create status = %d",(int)status);
if(status == 0)
{
AudioQueueAllocateBuffer(audioQueue, 1600000, &audioQueueBuffer);
AudioOutputCallback((__bridge void *)(self), audioQueue, audioQueueBuffer);
[self performSelector:@selector(startQueue) withObject:self afterDelay:50];
}
}
if(status != 0)
{
NSLog(#"failed");
// labelStatus.text = #"Play failed";
}
}
-(void)startQueue{
NSLog(#"start queue called");
OSStatus status = AudioQueueStart(audioQueue, NULL);
if(status == 0)
{
NSLog(#"ok");
// labelStatus.text = #"Playing";
}
}
The test1.wav file is PCM encoded, 16 bits per sample, 44100 Hz sampling rate, stereo.
I can successfully create the audio queue and read the file, but all I can hear is crackling noise.
Can someone tell me what the issue is?

Is the sound really big-endian data? I doubt it with WAVE files.
Look at your format flags and change them to little-endian data, i.e. without kLinearPCMFormatFlagIsBigEndian.
Also consider using AudioFileOpenURL or related calls, since they can read the actual WAVE format, so you don't have to rely on your hand-built audio stream description.
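For reference, once the file has been opened with AudioFileOpenURL, its real format can be queried with AudioFileGetProperty instead of hand-building the ASBD — a minimal sketch, not part of the original answer:
// Sketch (assumption, not from the original post): read the file's own
// AudioStreamBasicDescription after opening it and hand that to AudioQueueNewOutput.
AudioStreamBasicDescription fileFormat;
UInt32 propSize = sizeof(fileFormat);
OSStatus result = AudioFileGetProperty(audioFile,
                                       kAudioFilePropertyDataFormat,
                                       &propSize,
                                       &fileFormat);
if (result == noErr) {
    // Use fileFormat (little-endian for a typical WAVE file) instead of the
    // manually filled-in `format` when creating the output queue.
}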

After preparing more audio queue buffers, there is no more crackling noise.
Please refer to Apple's documentation.
...
/* AudioQueueAllocateBuffer(audioQueue, 1600000, &audioQueueBuffer);
AudioOutputCallback((__bridge void *)(self), audioQueue, audioQueueBuffer);*/
/* add more audio queue buffers, ex:3 */
int kNumberOfBuffers = 3;
AudioQueueBufferRef audioQueueBuffer[kNumberOfBuffers];
for (int i = 0; i<kNumberOfBuffers; i++) {
AudioQueueAllocateBuffer(audioQueue, 1600000, &audioQueueBuffer[i]);
AudioOutputCallback((__bridge void *)(self), audioQueue, audioQueueBuffer[i]);
}
[self performSelector:@selector(startQueue) withObject:self afterDelay:50];
...

Related

Audio playback with Audio Queue produces only white noise

I'm trying to simply play back an audio file in my bundle with Audio Queue.
I got this code from a tutorial where audio was first recorded and then played back, which worked fine.
After modifying the sound file path to point at my own sound file, there is only white noise on playback. I've tried different formats and played around with my audio format settings, but I'm obviously missing something.
Also, I've been learning iOS with Swift, and I failed to translate this code to Swift, so I bridged it into my Swift app.
There don't seem to be many examples online, and the links to Apple's example projects are dead.
Any advice much appreciated!
#import "ViewController.h"
typedef NS_ENUM(NSUInteger, AudioQueueState) {
AudioQueueState_Idle,
AudioQueueState_Recording,
AudioQueueState_Playing,
};
@import AVFoundation;
@interface ViewController ()
@property AudioQueueState currentState;
@property (strong, nonatomic) NSURL *audioFileURL;
@end
#define NUM_BUFFERS 10
@implementation ViewController
void AudioOutputCallback(void *inUserData,
AudioQueueRef outAQ, // a reference to the audio queue
AudioQueueBufferRef outBuffer) { // the buffers
ViewController *viewController = (__bridge ViewController*)inUserData;
if (viewController.currentState != AudioQueueState_Playing) {
return;
}
// Read the data out of the audio file in order to fill the buffers with it.
UInt32 numBytes = 16000;
OSStatus status = AudioFileReadBytes(audioFileID, false, currentByte, &numBytes, outBuffer->mAudioData);
if (status != noErr && status != kAudioFileEndOfFileError) {
printf("Error\n");
return;
}
// If data has been read successfully tell the audio queue that the buffer is ready to play.
if (numBytes > 0) {
outBuffer->mAudioDataByteSize = numBytes;
OSStatus statusOfEnqueue = AudioQueueEnqueueBuffer(queue, outBuffer, 0, NULL);
if (statusOfEnqueue != noErr) {
printf("Error\n");
return;
}
currentByte += numBytes;
}
// Check if it's at the end of the file.
if (numBytes == 0 || status == kAudioFileEndOfFileError) {
AudioQueueStop(queue, false);
AudioFileClose(audioFileID);
viewController.currentState = AudioQueueState_Idle;
}
}
static SInt64 currentByte;
static AudioStreamBasicDescription audioFormat;
static AudioQueueRef queue;
static AudioQueueBufferRef buffers [NUM_BUFFERS];
static AudioFileID audioFileID;
- (void)viewDidLoad {
[super viewDidLoad];
// Do any additional setup after loading the view, typically from a nib.
[self setupAudio];
}
- (void) setupAudio {
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerFrame = audioFormat.mChannelsPerFrame * sizeof(SInt16);
audioFormat.mBytesPerPacket = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame;
self.currentState = AudioQueueState_Idle;
}
- (IBAction)playButtonPressed:(id)sender {
// Set up the audio session.
NSError *error = nil;
[[AVAudioSession sharedInstance] setActive:YES error:&error];
[[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayback error:&error];
[self startPlayback];
}
- (void) startPlayback {
NSString *resourcePath = [[NSBundle mainBundle] pathForResource:@"MyAudioFileName" ofType:@"wav"];
NSLog(@"path: %@", resourcePath);
self.audioFileURL = [NSURL fileURLWithPath:resourcePath];
currentByte = 0;
OSStatus status = AudioFileOpenURL((__bridge CFURLRef) (self.audioFileURL), kAudioFileReadPermission, kAudioFileWAVEType, &audioFileID);
status = AudioQueueNewOutput(&audioFormat, AudioOutputCallback, (__bridge void*)self, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &queue);
self.currentState = AudioQueueState_Playing;
for (int i = 0; i < NUM_BUFFERS && self.currentState == AudioQueueState_Playing; i++) {
status = AudioQueueAllocateBuffer(queue, 16000, &buffers[i]);
AudioOutputCallback((__bridge void*)self, queue, buffers[i]);
}
status = AudioQueueStart(queue, NULL);
}
- (void) stopPlayback {
self.currentState = AudioQueueState_Idle;
for (int i = 0; i < NUM_BUFFERS; i++) {
AudioQueueFreeBuffer(queue, buffers[i]);
}
AudioQueueDispose(queue, true);
AudioFileClose(audioFileID);
}
@end

Can I use AVAudioEngine to read from a file, process with an audio unit and write to a file, faster than real-time?

I am working on an iOS app that uses AVAudioEngine for various things, including recording audio to a file, applying effects to that audio using audio units, and playing back the audio with the effect applied. I use a tap to also write the output to a file. When this is done it writes to the file in real time as the audio is playing back.
Is it possible to set up an AVAudioEngine graph that reads from a file, processes the sound with an audio unit, and outputs to a file, but faster than real time (i.e., as fast as the hardware can process it)? The use case for this would be to output a few minutes of audio with effects applied, and I certainly wouldn't want to wait several minutes for it to be processed.
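The tap mentioned above is not shown in the question; a minimal sketch of how such a real-time tap might look (the output URL and the tap location on the main mixer are assumptions, not taken from the question):
// Hypothetical sketch: write whatever the main mixer produces to a file, in real time.
AVAudioFormat *tapFormat = [self.engine.mainMixerNode outputFormatForBus:0];
NSError *fileError = nil;
AVAudioFile *outputFile = [[AVAudioFile alloc] initForWriting:outputURL
                                                     settings:tapFormat.settings
                                                        error:&fileError];
[self.engine.mainMixerNode installTapOnBus:0
                                bufferSize:4096
                                    format:tapFormat
                                     block:^(AVAudioPCMBuffer *buffer, AVAudioTime *when) {
    NSError *writeError = nil;
    [outputFile writeFromBuffer:buffer error:&writeError]; // happens as the audio plays back
}];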
Edit: here's the code that I'm using to set up the AVAudioEngine's graph, and play a sound file:
AVAudioEngine* engine = [[AVAudioEngine alloc] init];
AVAudioPlayerNode* player = [[AVAudioPlayerNode alloc] init];
[engine attachNode:player];
self.player = player;
self.engine = engine;
if (!self.distortionEffect) {
self.distortionEffect = [[AVAudioUnitDistortion alloc] init];
[self.engine attachNode:self.distortionEffect];
[self.engine connect:self.player to:self.distortionEffect format:[self.distortionEffect outputFormatForBus:0]];
AVAudioMixerNode* mixer = [self.engine mainMixerNode];
[self.engine connect:self.distortionEffect to:mixer format:[mixer outputFormatForBus:0]];
}
[self.distortionEffect loadFactoryPreset:AVAudioUnitDistortionPresetDrumsBitBrush];
NSError* error;
if (![self.engine startAndReturnError:&error]) {
NSLog(#"error: %#", error);
} else {
NSURL* fileURL = [[NSBundle mainBundle] URLForResource:#"test2" withExtension:#"mp3"];
AVAudioFile* file = [[AVAudioFile alloc] initForReading:fileURL error:&error];
if (error) {
NSLog(#"error: %#", error);
} else {
[self.player scheduleFile:file atTime:nil completionHandler:nil];
[self.player play];
}
}
The above code plays the sound in the test2.mp3 file, with the AVAudioUnitDistortionPresetDrumsBitBrush distortion preset applied, in real time.
I then modified the above code by adding these lines after [self.player play]:
[self.engine stop];
[self renderAudioAndWriteToFile];
I modified the renderAudioAndWriteToFile method that Vladimir provided so that instead of allocating a new AVAudioEngine in the first line, it simply uses self.engine that has already been set up.
However, in renderAudioAndWriteToFile, it's logging "Can not render audio unit" because AudioUnitRender is returning a status of kAudioUnitErr_Uninitialized.
Edit 2: I should mention that I'm perfectly happy to convert the AVAudioEngine code I posted to use the C APIs if that would make things easier. However, I would want the code to produce the same output as the AVAudioEngine code (including the use of the factory preset shown above).
1. Configure your engine and player node.
2. Call the play method on your player node.
3. Pause your engine.
4. Get an audio unit from your AVAudioOutputNode (audioEngine.outputNode) via its audioUnit property.
5. Render from the audio unit with AudioUnitRender in a loop, and write the audio buffer list to a file with Extended Audio File Services.
Example:
Audio engine configuration
- (void)configureAudioEngine {
self.engine = [[AVAudioEngine alloc] init];
self.playerNode = [[AVAudioPlayerNode alloc] init];
[self.engine attachNode:self.playerNode];
AVAudioUnitDistortion *distortionEffect = [[AVAudioUnitDistortion alloc] init];
[self.engine attachNode:distortionEffect];
[self.engine connect:self.playerNode to:distortionEffect format:[distortionEffect outputFormatForBus:0]];
self.mixer = [self.engine mainMixerNode];
[self.engine connect:distortionEffect to:self.mixer format:[self.mixer outputFormatForBus:0]];
[distortionEffect loadFactoryPreset:AVAudioUnitDistortionPresetDrumsBitBrush];
NSError* error;
if (![self.engine startAndReturnError:&error])
NSLog(#"Can't start engine: %#", error);
else
[self scheduleFileToPlay];
}
- (void)scheduleFileToPlay {
NSError* error;
NSURL *fileURL = [[NSBundle mainBundle] URLForResource:@"filename" withExtension:@"m4a"];
self.file = [[AVAudioFile alloc] initForReading:fileURL error:&error];
if (self.file)
[self.playerNode scheduleFile:self.file atTime:nil completionHandler:nil];
else
NSLog(@"Can't read file: %@", error);
}
Rendering methods
- (void)renderAudioAndWriteToFile {
[self.playerNode play];
[self.engine pause];
AVAudioOutputNode *outputNode = self.engine.outputNode;
AudioStreamBasicDescription const *audioDescription = [outputNode outputFormatForBus:0].streamDescription;
NSString *path = [self filePath];
ExtAudioFileRef audioFile = [self createAndSetupExtAudioFileWithASBD:audioDescription andFilePath:path];
if (!audioFile)
return;
AVURLAsset *asset = [AVURLAsset assetWithURL:self.file.url];
NSTimeInterval duration = CMTimeGetSeconds(asset.duration);
NSUInteger lengthInFrames = duration * audioDescription->mSampleRate;
const NSUInteger kBufferLength = 4096;
AudioBufferList *bufferList = AEAllocateAndInitAudioBufferList(*audioDescription, kBufferLength);
AudioTimeStamp timeStamp;
memset (&timeStamp, 0, sizeof(timeStamp));
timeStamp.mFlags = kAudioTimeStampSampleTimeValid;
OSStatus status = noErr;
for (NSUInteger i = kBufferLength; i < lengthInFrames; i += kBufferLength) {
status = [self renderToBufferList:bufferList writeToFile:audioFile bufferLength:kBufferLength timeStamp:&timeStamp];
if (status != noErr)
break;
}
if (status == noErr && timeStamp.mSampleTime < lengthInFrames) {
NSUInteger restBufferLength = (NSUInteger) (lengthInFrames - timeStamp.mSampleTime);
AudioBufferList *restBufferList = AEAllocateAndInitAudioBufferList(*audioDescription, restBufferLength);
status = [self renderToBufferList:restBufferList writeToFile:audioFile bufferLength:restBufferLength timeStamp:&timeStamp];
AEFreeAudioBufferList(restBufferList);
}
AEFreeAudioBufferList(bufferList);
ExtAudioFileDispose(audioFile);
if (status != noErr)
NSLog(#"An error has occurred");
else
NSLog(#"Finished writing to file at path: %#", path);
}
- (NSString *)filePath {
NSArray *documentsFolders =
NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *fileName = [NSString stringWithFormat:@"%@.m4a", [[NSUUID UUID] UUIDString]];
NSString *path = [documentsFolders[0] stringByAppendingPathComponent:fileName];
return path;
}
- (ExtAudioFileRef)createAndSetupExtAudioFileWithASBD:(AudioStreamBasicDescription const *)audioDescription
andFilePath:(NSString *)path {
AudioStreamBasicDescription destinationFormat;
memset(&destinationFormat, 0, sizeof(destinationFormat));
destinationFormat.mChannelsPerFrame = audioDescription->mChannelsPerFrame;
destinationFormat.mSampleRate = audioDescription->mSampleRate;
destinationFormat.mFormatID = kAudioFormatMPEG4AAC;
ExtAudioFileRef audioFile;
OSStatus status = ExtAudioFileCreateWithURL(
(__bridge CFURLRef) [NSURL fileURLWithPath:path],
kAudioFileM4AType,
&destinationFormat,
NULL,
kAudioFileFlags_EraseFile,
&audioFile
);
if (status != noErr) {
NSLog(#"Can not create ext audio file");
return nil;
}
UInt32 codecManufacturer = kAppleSoftwareAudioCodecManufacturer;
status = ExtAudioFileSetProperty(
audioFile, kExtAudioFileProperty_CodecManufacturer, sizeof(UInt32), &codecManufacturer
);
status = ExtAudioFileSetProperty(
audioFile, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), audioDescription
);
status = ExtAudioFileWriteAsync(audioFile, 0, NULL);
if (status != noErr) {
NSLog(#"Can not setup ext audio file");
return nil;
}
return audioFile;
}
- (OSStatus)renderToBufferList:(AudioBufferList *)bufferList
writeToFile:(ExtAudioFileRef)audioFile
bufferLength:(NSUInteger)bufferLength
timeStamp:(AudioTimeStamp *)timeStamp {
[self clearBufferList:bufferList];
AudioUnit outputUnit = self.engine.outputNode.audioUnit;
OSStatus status = AudioUnitRender(outputUnit, 0, timeStamp, 0, bufferLength, bufferList);
if (status != noErr) {
NSLog(#"Can not render audio unit");
return status;
}
timeStamp->mSampleTime += bufferLength;
status = ExtAudioFileWrite(audioFile, bufferLength, bufferList);
if (status != noErr)
NSLog(#"Can not write audio to file");
return status;
}
- (void)clearBufferList:(AudioBufferList *)bufferList {
for (int bufferIndex = 0; bufferIndex < bufferList->mNumberBuffers; bufferIndex++) {
memset(bufferList->mBuffers[bufferIndex].mData, 0, bufferList->mBuffers[bufferIndex].mDataByteSize);
}
}
I used some functions from this cool framework:
AudioBufferList *AEAllocateAndInitAudioBufferList(AudioStreamBasicDescription audioFormat, int frameCount) {
int numberOfBuffers = audioFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved ? audioFormat.mChannelsPerFrame : 1;
int channelsPerBuffer = audioFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved ? 1 : audioFormat.mChannelsPerFrame;
int bytesPerBuffer = audioFormat.mBytesPerFrame * frameCount;
AudioBufferList *audio = malloc(sizeof(AudioBufferList) + (numberOfBuffers-1)*sizeof(AudioBuffer));
if ( !audio ) {
return NULL;
}
audio->mNumberBuffers = numberOfBuffers;
for ( int i=0; i<numberOfBuffers; i++ ) {
if ( bytesPerBuffer > 0 ) {
audio->mBuffers[i].mData = calloc(bytesPerBuffer, 1);
if ( !audio->mBuffers[i].mData ) {
for ( int j=0; j<i; j++ ) free(audio->mBuffers[j].mData);
free(audio);
return NULL;
}
} else {
audio->mBuffers[i].mData = NULL;
}
audio->mBuffers[i].mDataByteSize = bytesPerBuffer;
audio->mBuffers[i].mNumberChannels = channelsPerBuffer;
}
return audio;
}
void AEFreeAudioBufferList(AudioBufferList *bufferList ) {
for ( int i=0; i<bufferList->mNumberBuffers; i++ ) {
if ( bufferList->mBuffers[i].mData ) free(bufferList->mBuffers[i].mData);
}
free(bufferList);
}
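If I read the answer correctly, the intended call order is simply the configuration method followed by the offline render, along these lines:
// Assumed usage of the methods above: configure/start the engine first,
// then render offline and write the result to disk.
[self configureAudioEngine];
[self renderAudioAndWriteToFile];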

Playing raw PCM audio data coming from NSStream

I am trying to play PCM data from an NSInputStream. Can anyone suggest the right approach or code to do so?
I get the audio in the stream's has-data event with the following code.
uint8_t bytes[self.audioStreamReadMaxLength];
UInt32 length = [audioStream readData:bytes maxLength:self.audioStreamReadMaxLength];
Now how can I play this audio data on the iPhone?
I worked on a similar problem and in the end solved it.
Here are the basics of what I did. I am using a library (GCDAsyncSocket) for the sockets.
The class below is responsible for capturing the audio and making it available to connected clients.
#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import <AudioToolbox/AudioToolbox.h>
@interface AudioServer : NSObject <GCDAsyncSocketDelegate>
@property (nonatomic, strong)GCDAsyncSocket * serverSocket;
@property (nonatomic, strong)NSMutableArray *connectedClients;
@property (nonatomic) AudioComponentInstance audioUnit;
-(void) start;
-(void) stop;
-(void) writeDataToClients:(NSData*)data;
@end
#define kOutputBus 0
#define kInputBus 1
#import "AudioServer.h"
#import "SM_Utils.h"
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// TODO: Use inRefCon to access our interface object to do stuff
// Then, use inNumberFrames to figure out how much data is available, and make
// that much space available in buffers in an AudioBufferList.
AudioServer *server = (__bridge AudioServer*)inRefCon;
AudioBufferList bufferList;
SInt16 samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
memset (&samples, 0, sizeof (samples));
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mData = samples;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mDataByteSize = inNumberFrames*sizeof(SInt16);
// Then:
// Obtain recorded samples
OSStatus status;
status = AudioUnitRender(server.audioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&bufferList);
NSData *dataToSend = [NSData dataWithBytes:bufferList.mBuffers[0].mData length:bufferList.mBuffers[0].mDataByteSize];
[server writeDataToClients:dataToSend];
return noErr;
}
@implementation AudioServer
-(id) init
{
return [super init];
}
-(void) start
{
[UIApplication sharedApplication].idleTimerDisabled = YES;
// Create a new instance of AURemoteIO
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp = AudioComponentFindNext(NULL, &desc);
AudioComponentInstanceNew(comp, &_audioUnit);
// Enable input and output on AURemoteIO
// Input is enabled on the input scope of the input element
// Output is enabled on the output scope of the output element
UInt32 one = 1;
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one));
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &one, sizeof(one));
// Explicitly set the input and output client formats
// sample rate = 44100, num channels = 1, format = 32 bit floating point
AudioStreamBasicDescription audioFormat = [self getAudioDescription];
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &audioFormat, sizeof(audioFormat));
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &audioFormat, sizeof(audioFormat));
// Set the MaximumFramesPerSlice property. This property is used to describe to an audio unit the maximum number
// of samples it will be asked to produce on any single given call to AudioUnitRender
UInt32 maxFramesPerSlice = 4096;
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
// Get the property value back from AURemoteIO. We are going to use this value to allocate buffers accordingly
UInt32 propSize = sizeof(UInt32);
AudioUnitGetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, &propSize);
AURenderCallbackStruct renderCallback;
renderCallback.inputProc = recordingCallback;
renderCallback.inputProcRefCon = (__bridge void *)(self);
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &renderCallback, sizeof(renderCallback));
// Initialize the AURemoteIO instance
AudioUnitInitialize(_audioUnit);
AudioOutputUnitStart(_audioUnit);
_connectedClients = [[NSMutableArray alloc] init];
_serverSocket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue:dispatch_get_main_queue()];
[self startAcceptingConnections];
}
- (AudioStreamBasicDescription)getAudioDescription {
AudioStreamBasicDescription audioDescription = {0};
audioDescription.mFormatID = kAudioFormatLinearPCM;
audioDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
audioDescription.mChannelsPerFrame = 1;
audioDescription.mBytesPerPacket = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mFramesPerPacket = 1;
audioDescription.mBytesPerFrame = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mBitsPerChannel = 8 * sizeof(SInt16);
audioDescription.mSampleRate = 44100.0;
return audioDescription;
}
-(void) startAcceptingConnections
{
NSError *error = nil;
if(_serverSocket)
[_serverSocket acceptOnPort:[SM_Utils serverPort] error:&error];
}
-(void)socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
{
if(_connectedClients)
[_connectedClients removeObject:sock];
}
- (void)socket:(GCDAsyncSocket *)socket didAcceptNewSocket:(GCDAsyncSocket *)newSocket {
NSLog(#"Accepted New Socket from %#:%hu", [newSocket connectedHost], [newSocket connectedPort]);
#synchronized(_connectedClients)
{
dispatch_async(dispatch_get_main_queue(), ^{
if(_connectedClients)
[_connectedClients addObject:newSocket];
});
}
NSError *error = nil;
if(_serverSocket)
[_serverSocket acceptOnPort:[SM_Utils serverPort] error:&error];
}
-(void) writeDataToClients:(NSData *)data
{
if(_connectedClients)
{
for (GCDAsyncSocket *socket in _connectedClients) {
if([socket isConnected])
{
[socket writeData:data withTimeout:-1 tag:0];
}
else{
if([_connectedClients containsObject:socket])
[_connectedClients removeObject:socket];
}
}
}
}
-(void) stop
{
if(_serverSocket)
{
_serverSocket = nil;
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
AudioOutputUnitStop(_audioUnit);
}
-(void) dealloc
{
if(_serverSocket)
{
_serverSocket = nil;
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
AudioOutputUnitStop(_audioUnit);
}
@end
The following class is then responsible for retrieving the audio from the server and playing it:
#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import <AudioToolbox/AudioToolbox.h>
#import "TPCircularBuffer.h"
@protocol AudioClientDelegate <NSObject>
-(void) connected;
-(void) animateSoundIndicator:(float) rms;
@end
@interface AudioClient : NSObject<GCDAsyncSocketDelegate>
{
NSString *ipAddress;
BOOL stopped;
}
@property (nonatomic) TPCircularBuffer circularBuffer;
@property (nonatomic) AudioComponentInstance audioUnit;
@property (nonatomic, strong) GCDAsyncSocket *socket;
@property (nonatomic, strong) id<AudioClientDelegate> delegate;
-(id) initWithDelegate:(id)delegate;
-(void) start:(NSString *)ip;
-(void) stop;
-(TPCircularBuffer *) outputShouldUseCircularBuffer;
@end
static OSStatus OutputRenderCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData){
AudioClient *output = (__bridge AudioClient*)inRefCon;
TPCircularBuffer *circularBuffer = [output outputShouldUseCircularBuffer];
if( !circularBuffer ){
AudioUnitSampleType *left = (AudioUnitSampleType*)ioData->mBuffers[0].mData;
for(int i = 0; i < inNumberFrames; i++ ){
left[ i ] = 0.0f;
}
return noErr;
};
int32_t bytesToCopy = ioData->mBuffers[0].mDataByteSize;
SInt16* outputBuffer = ioData->mBuffers[0].mData;
int32_t availableBytes;
SInt16 *sourceBuffer = TPCircularBufferTail(circularBuffer, &availableBytes);
int32_t amount = MIN(bytesToCopy,availableBytes);
memcpy(outputBuffer, sourceBuffer, amount);
TPCircularBufferConsume(circularBuffer,amount);
return noErr;
}
@implementation AudioClient
-(id) initWithDelegate:(id)delegate
{
self = [super init];
if(self)
{
[self circularBuffer:&_circularBuffer withSize:24576*5];
_delegate = delegate;
stopped = NO;
}
return self;
}
-(void) start:(NSString *)ip
{
_socket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue: dispatch_get_main_queue()];
NSError *err;
ipAddress = ip;
[UIApplication sharedApplication].idleTimerDisabled = YES;
if(![_socket connectToHost:ipAddress onPort:[SM_Utils serverPort] error:&err])
{
}
[self setupAudioUnit];
}
-(void) setupAudioUnit
{
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp = AudioComponentFindNext(NULL, &desc);
OSStatus status;
status = AudioComponentInstanceNew(comp, &_audioUnit);
if(status != noErr)
{
NSLog(#"Error creating AudioUnit instance");
}
// Enable input and output on AURemoteIO
// Input is enabled on the input scope of the input element
// Output is enabled on the output scope of the output element
UInt32 one = 1;
status = AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &one, sizeof(one));
if(status != noErr)
{
NSLog(#"Error enableling AudioUnit output bus");
}
// Explicitly set the input and output client formats
// sample rate = 44100, num channels = 1, format = 16 bit int point
AudioStreamBasicDescription audioFormat = [self getAudioDescription];
status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &audioFormat, sizeof(audioFormat));
if(status != noErr)
{
NSLog(#"Error setting audio format");
}
AURenderCallbackStruct renderCallback;
renderCallback.inputProc = OutputRenderCallback;
renderCallback.inputProcRefCon = (__bridge void *)(self);
status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, kOutputBus, &renderCallback, sizeof(renderCallback));
if(status != noErr)
{
NSLog(#"Error setting rendering callback");
}
// Initialize the AURemoteIO instance
status = AudioUnitInitialize(_audioUnit);
if(status != noErr)
{
NSLog(#"Error initializing audio unit");
}
}
- (AudioStreamBasicDescription)getAudioDescription {
AudioStreamBasicDescription audioDescription = {0};
audioDescription.mFormatID = kAudioFormatLinearPCM;
audioDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
audioDescription.mChannelsPerFrame = 1;
audioDescription.mBytesPerPacket = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mFramesPerPacket = 1;
audioDescription.mBytesPerFrame = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mBitsPerChannel = 8 * sizeof(SInt16);
audioDescription.mSampleRate = 44100.0;
return audioDescription;
}
-(void) socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
{
if(!stopped)
if(![_socket connectToHost:ipAddress onPort:[SM_Utils serverPort] error:&err])
{
}
}
-(void) socket:(GCDAsyncSocket *)socket didReadData:(NSData *)data withTag:(long)tag
{
if(data.length > 0)
{
unsigned long len = [data length];
SInt16* byteData = (SInt16*)malloc(len);
memcpy(byteData, [data bytes], len);
double sum = 0.0;
for(int i = 0; i < len/2; i++) {
sum += byteData[i] * byteData[i];
}
double average = sum / len;
double rms = sqrt(average);
[_delegate animateSoundIndicator:rms];
Byte* soundData = (Byte*)malloc(len);
memcpy(soundData, [data bytes], len);
if(soundData)
{
AudioBufferList *theDataBuffer = (AudioBufferList*) malloc(sizeof(AudioBufferList) *1);
theDataBuffer->mNumberBuffers = 1;
theDataBuffer->mBuffers[0].mDataByteSize = (UInt32)len;
theDataBuffer->mBuffers[0].mNumberChannels = 1;
theDataBuffer->mBuffers[0].mData = (SInt16*)soundData;
[self appendDataToCircularBuffer:&_circularBuffer fromAudioBufferList:theDataBuffer];
}
}
[socket readDataToLength:18432 withTimeout:-1 tag:0];
}
-(void)circularBuffer:(TPCircularBuffer *)circularBuffer withSize:(int)size {
TPCircularBufferInit(circularBuffer,size);
}
-(void)appendDataToCircularBuffer:(TPCircularBuffer*)circularBuffer
fromAudioBufferList:(AudioBufferList*)audioBufferList {
TPCircularBufferProduceBytes(circularBuffer,
audioBufferList->mBuffers[0].mData,
audioBufferList->mBuffers[0].mDataByteSize);
}
-(void)freeCircularBuffer:(TPCircularBuffer *)circularBuffer {
TPCircularBufferClear(circularBuffer);
TPCircularBufferCleanup(circularBuffer);
}
-(void) socket:(GCDAsyncSocket *)socket didConnectToHost:(NSString *)host port:(uint16_t)port
{
OSStatus status = AudioOutputUnitStart(_audioUnit);
if(status != noErr)
{
NSLog(#"Error starting audio unit");
}
[socket readDataToLength:18432 withTimeout:-1 tag:0];
[_delegate connected];
}
-(TPCircularBuffer *) outputShouldUseCircularBuffer
{
return &_circularBuffer;
}
-(void) stop
{
OSStatus status = AudioOutputUnitStop(_audioUnit);
if(status != noErr)
{
NSLog(#"Error stopping audio unit");
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
TPCircularBufferClear(&_circularBuffer);
_audioUnit = nil;
stopped = YES;
}
-(void) dealloc {
OSStatus status = AudioOutputUnitStop(_audioUnit);
if(status != noErr)
{
NSLog(#"Error stopping audio unit");
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
TPCircularBufferClear(&_circularBuffer);
_audioUnit = nil;
stopped = YES;
}
@end
Some of the code is specific to my requirements, but most of it can simply be re-used. I hope this helps.
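A hedged usage sketch of the two classes above (the delegate object and the IP address are placeholders, not from the original answer):
// On the capturing device:
AudioServer *server = [[AudioServer alloc] init];
[server start];

// On the playback device (self is assumed to adopt AudioClientDelegate):
AudioClient *client = [[AudioClient alloc] initWithDelegate:self];
[client start:@"192.168.1.10"];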
Apple has an example doing the same kind of thing:
void* MyGetOpenALAudioData(CFURLRef inFileURL, ALsizei *outDataSize, ALenum *outDataFormat, ALsizei* outSampleRate)
{
OSStatus err = noErr;
SInt64 theFileLengthInFrames = 0;
AudioStreamBasicDescription theFileFormat;
UInt32 thePropertySize = sizeof(theFileFormat);
ExtAudioFileRef extRef = NULL;
void* theData = NULL;
AudioStreamBasicDescription theOutputFormat;
// Open a file with ExtAudioFileOpen()
err = ExtAudioFileOpenURL(inFileURL, &extRef);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileOpenURL FAILED, Error = %ld\n", err); goto Exit; }
// Get the audio data format
err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileDataFormat, &thePropertySize, &theFileFormat);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileDataFormat) FAILED, Error = %ld\n", err); goto Exit; }
if (theFileFormat.mChannelsPerFrame > 2) { printf("MyGetOpenALAudioData - Unsupported Format, channel count is greater than stereo\n"); goto Exit;}
// Set the client format to 16 bit signed integer (native-endian) data
// Maintain the channel count and sample rate of the original source format
theOutputFormat.mSampleRate = theFileFormat.mSampleRate;
theOutputFormat.mChannelsPerFrame = theFileFormat.mChannelsPerFrame;
theOutputFormat.mFormatID = kAudioFormatLinearPCM;
theOutputFormat.mBytesPerPacket = 2 * theOutputFormat.mChannelsPerFrame;
theOutputFormat.mFramesPerPacket = 1;
theOutputFormat.mBytesPerFrame = 2 * theOutputFormat.mChannelsPerFrame;
theOutputFormat.mBitsPerChannel = 16;
theOutputFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
// Set the desired client (output) data format
err = ExtAudioFileSetProperty(extRef, kExtAudioFileProperty_ClientDataFormat, sizeof(theOutputFormat), &theOutputFormat);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileSetProperty(kExtAudioFileProperty_ClientDataFormat) FAILED, Error = %ld\n", err); goto Exit; }
// Get the total frame count
thePropertySize = sizeof(theFileLengthInFrames);
err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileLengthFrames, &thePropertySize, &theFileLengthInFrames);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileLengthFrames) FAILED, Error = %ld\n", err); goto Exit; }
// Read all the data into memory
UInt32 theFramesToRead = (UInt32)theFileLengthInFrames;
UInt32 dataSize = theFramesToRead * theOutputFormat.mBytesPerFrame;
theData = malloc(dataSize);
if (theData)
{
AudioBufferList theDataBuffer;
theDataBuffer.mNumberBuffers = 1;
theDataBuffer.mBuffers[0].mDataByteSize = dataSize;
theDataBuffer.mBuffers[0].mNumberChannels = theOutputFormat.mChannelsPerFrame;
theDataBuffer.mBuffers[0].mData = theData;
// Read the data into an AudioBufferList
err = ExtAudioFileRead(extRef, &theFramesToRead, &theDataBuffer);
if(err == noErr)
{
// success
*outDataSize = (ALsizei)dataSize;
*outDataFormat = (theOutputFormat.mChannelsPerFrame > 1) ? AL_FORMAT_STEREO16 : AL_FORMAT_MONO16;
*outSampleRate = (ALsizei)theOutputFormat.mSampleRate;
}
else
{
// failure
free (theData);
theData = NULL; // make sure to return NULL
printf("MyGetOpenALAudioData: ExtAudioFileRead FAILED, Error = %ld\n", err); goto Exit;
}
}
Exit:
// Dispose the ExtAudioFileRef, it is no longer needed
if (extRef) ExtAudioFileDispose(extRef);
return theData;
}
You can find the sample code here; hope this helps.
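A hedged usage sketch for the function above (the resource name and the OpenAL buffer handling are assumptions, not part of Apple's sample; assumes <OpenAL/al.h> is imported):
ALsizei dataSize = 0;
ALsizei sampleRate = 0;
ALenum dataFormat = 0;
NSURL *soundURL = [[NSBundle mainBundle] URLForResource:@"beep" withExtension:@"wav"]; // hypothetical resource
void *pcmData = MyGetOpenALAudioData((__bridge CFURLRef)soundURL, &dataSize, &dataFormat, &sampleRate);
if (pcmData) {
    ALuint buffer;
    alGenBuffers(1, &buffer);
    alBufferData(buffer, dataFormat, pcmData, dataSize, sampleRate);
    free(pcmData); // alBufferData copies the samples, so the malloc'd block can be freed
}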

Convert linear PCM audio to MP3 (using LAME) with the help of an Audio Queue Services example in iOS

I am new to iOS development. I am encoding linear PCM to MP3 in iOS: I'm trying to encode the raw PCM data from the microphone to MP3 using the AudioToolbox framework and LAME. Although everything seems to run fine if I record in .caf format, I am getting only noise and distortion in the encoded stream. I'm not sure that I set up the AudioQueue correctly, or that I process the encoded buffer in the right way.
Sample project: https://github.com/vecter/Audio-Queue-Services-Example
My code to set up audio recording:
- (void)setupAudioFormat:(AudioStreamBasicDescription*)format
{
format->mSampleRate = 16000;
format->mFormatID = kAudioFormatLinearPCM;
format->mFramesPerPacket = 1;
format->mChannelsPerFrame = 1;
format->mBytesPerFrame = 2;
format->mBytesPerPacket = 2;
format->mBitsPerChannel = 16;
format->mReserved = 0;
format->mFormatFlags = kLinearPCMFormatFlagIsBigEndian |
kLinearPCMFormatFlagIsSignedInteger |
kLinearPCMFormatFlagIsPacked;
}
- (void)recordPressed:(id)sender
{
if (!playState.playing)
{
if (!recordState.recording)
{
printf("Starting recording\n");
self.mergedData =[[NSMutableData alloc] init];
[self startRecording];
}
else
{
printf("Stopping recording\n");
[self stopRecording];
}
}
else
{
printf("Can't start recording, currently playing\n");
}
}
- (void)startRecording
{
[self setupAudioFormat:&recordState.dataFormat];
recordState.currentPacket = 0;
recordState.pThis=self;
OSStatus status;
status = AudioQueueNewInput(&recordState.dataFormat,
AudioInputCallback,
&recordState,
CFRunLoopGetCurrent(),
kCFRunLoopCommonModes,
0,
&recordState.queue);
if (status == 0)
{
// Prime recording buffers with empty data
for (int i = 0; i < NUM_BUFFERS; i++)
{
AudioQueueAllocateBuffer(recordState.queue, 16000, &recordState.buffers[i]);
AudioQueueEnqueueBuffer (recordState.queue, recordState.buffers[i], 0, NULL);
}
status = AudioFileCreateWithURL(fileURL,
kAudioFileAIFFType,
&recordState.dataFormat,
kAudioFileFlags_EraseFile,
&recordState.audioFile);
gfp = lame_init();
lame_set_num_channels(gfp, 1);
lame_set_in_samplerate(gfp, recordState.dataFormat.mSampleRate);
lame_set_VBR(gfp, vbr_default);
lame_init_params(gfp);
if (status == 0)
{
recordState.recording = true;
status = AudioQueueStart(recordState.queue, NULL);
if (status == 0)
{
mergeData =[[NSMutableData alloc]init];
labelStatus.text = #"Recording";
}
}
}
if (status != 0)
{
[self stopRecording];
labelStatus.text = #"Record Failed";
}
}
- (void)stopRecording
{
recordState.recording = false;
AudioQueueStop(recordState.queue, true);
for(int i = 0; i < NUM_BUFFERS; i++)
{
AudioQueueFreeBuffer(recordState.queue, recordState.buffers[i]);
}
AudioQueueDispose(recordState.queue, true);
AudioFileClose(recordState.audioFile);
labelStatus.text = #"Idle";
}
Then the AudioQueue callback function calls lame_encode_buffer and writes the encoded buffer to a file:
void AudioInputCallback(void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumberPacketDescriptions,
const AudioStreamPacketDescription * inPacketDescs)
{
RecordState * recordState = (RecordState*)inUserData;
if (!recordState->recording)
{
printf("Not recording, returning\n");
return;
}
printf("Writing buffer %lld\n", recordState->currentPacket);
OSStatus status = AudioFileWritePackets(recordState->audioFile,
false,
inBuffer->mAudioDataByteSize,
inPacketDescs,
recordState->currentPacket,
&inNumberPacketDescriptions,
inBuffer->mAudioData);
if (status == 0)
{
recordState->currentPacket += inNumberPacketDescriptions;
}
AudioRecorderAppDelegate *this = recordState->pThis;
const int MP3_BUFFER_SIZE=inBuffer->mAudioDataByteSize*4;
unsigned char mEncodedBuffer[MP3_BUFFER_SIZE];
int encodedBytes=lame_encode_buffer_interleaved(this->gfp, (short int *)inBuffer->mAudioData , inNumberPacketDescriptions, mEncodedBuffer, MP3_BUFFER_SIZE);
NSData* data = [NSData dataWithBytes:mEncodedBuffer length:encodedBytes];
[this writeData:data];
lame_encode_flush(this->gfp, mEncodedBuffer, MP3_BUFFER_SIZE);
memset(&mEncodedBuffer, 0, sizeof(mEncodedBuffer));
AudioQueueEnqueueBuffer(recordState->queue, inBuffer, 0, NULL);
}
Appending data
- (void) writeData:(NSData *)data
{
[mergeData appendData:data];
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,
NSUserDomainMask, YES);
NSString* docDir = [paths objectAtIndex:0];
NSString* file = [docDir stringByAppendingString:@"/lame.mp3"];
[mergeData writeToFile:file atomically:YES];
NSLog(@"%@",file);
}
Can anybody advise what's wrong here, or post a complete sample project?
Try this:
void AQRecorder::MyInputBufferHandler( void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
AQRecorder *aqr = (AQRecorder *)inUserData;
// NSLog(#"%f",inStartTime->mSampleTime);
try
{
if (inNumPackets > 0)
{
AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize, inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData);
aqr->mRecordPacket += inNumPackets;
int MP3_SIZE =inBuffer->mAudioDataByteSize * 4;
unsigned char mp3_buffer[MP3_SIZE];
AppDelegate *delegate =[[UIApplication sharedApplication]delegate];
lame_t lame = lame_init();
lame_set_in_samplerate(lame, 44100);
lame_set_VBR(lame, vbr_default);
lame_init_params(lame);
// int encodedBytes=lame_encode_buffer_interleaved(lame, (short int *)inBuffer->mAudioData , inNumPackets, mp3_buffer, MP3_SIZE);
int encodedBytes = lame_encode_buffer(lame, (short*)inBuffer->mAudioData, (short*)inBuffer->mAudioData, inNumPackets, mp3_buffer, MP3_SIZE);
[delegate.mp3AudioData appendBytes:mp3_buffer length:encodedBytes];
if (inBuffer->mAudioDataByteSize != 0) {
}
else
{
int encode=lame_encode_flush(lame, mp3_buffer, MP3_SIZE);
[delegate.mp3AudioData appendBytes:mp3_buffer length:encode];
}
lame_close(lame);
}
if (aqr->IsRunning())
{
AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}
} catch (CAXException e)
{
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
In my case this logic worked:
int encodedBytes = lame_encode_buffer_interleaved(lame, (short int *)inBuffer->mAudioData, inNumPackets, mp3_buffer, MP3_SIZE);
NSMutableData *data1 = [[NSMutableData alloc] initWithBytes:mp3_buffer length:encodedBytes];
[this writeData:data1];
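One observation (mine, not part of the original answer): the encoder above is created and destroyed on every callback. LAME is normally initialized once when recording starts and flushed and closed once when it stops, roughly like this:
// Hedged sketch of a one-time teardown, assuming a single lame_t was created when recording started.
unsigned char tail[7200]; // LAME's documentation suggests ~7200 bytes is enough for the final flush
int flushedBytes = lame_encode_flush(lame, tail, sizeof(tail));
if (flushedBytes > 0) {
    [delegate.mp3AudioData appendBytes:tail length:flushedBytes];
}
lame_close(lame);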

Audio service won't play back before AudioQueueNewInput

I need to play a beep sound when audio recording starts, like the iPhone's native memo app.
I used the AudioServicesPlaySystemSound(SystemSoundID inSystemSoundID) function to play a short beep sound (the sound file is a WAV file).
After playing the beep sound I start AudioQueueNewInput (defined in the AudioToolbox framework) to record audio.
Audio recording works fine, but the short beep sound does not play.
This is my code.
typedef struct
{
AudioStreamBasicDescription dataFormat;
AudioQueueRef queue;
AudioQueueBufferRef buffers[NUM_BUFFERS];
AudioFileID audioFile;
SInt64 currentPacket;
bool recording;
} RecordState;
-(void)startRecording
{
CFBundleRef mainBundle = CFBundleGetMainBundle();
CFURLRef fileRef = CFBundleCopyResourceURL(mainBundle, CFSTR("startBeepSound"), CFSTR("wav"), NULL);
AudioServicesCreateSystemSoundID(fileRef, &startRecordingSound);
AudioServicesPlaySystemSound(startRecordingSound);
RecordState recordState;
recordState.dataFormat.mSampleRate = 12000.0;
recordState.dataFormat.mFormatID = kAudioFormatLinearPCM;
recordState.dataFormat.mFramesPerPacket = 1;
recordState.dataFormat.mChannelsPerFrame = 1;
recordState.dataFormat.mBytesPerFrame = 2;
recordState.dataFormat.mBytesPerPacket = 2;
recordState.dataFormat.mBitsPerChannel = 16;
recordState.dataFormat.mReserved = 0;
recordState.dataFormat.mFormatFlags = kLinearPCMFormatFlagIsBigEndian |
kLinearPCMFormatFlagIsSignedInteger |
kLinearPCMFormatFlagIsPacked;
recordState.currentPacket = 0;
OSStatus status;
status = AudioQueueNewInput(&recordState.dataFormat,
AudioInputCallback,
&recordState,
CFRunLoopGetCurrent(),
kCFRunLoopCommonModes,
0,
&recordState.queue);
if(status == 0)
{
for(int i = 0; i < NUM_BUFFERS; i++)
{
AudioQueueAllocateBuffer(recordState.queue,
16000, &recordState.buffers[i]);
AudioQueueEnqueueBuffer(recordState.queue,
recordState.buffers[i], 0, NULL);
}
status = AudioFileCreateWithURL(fileURL,
kAudioFileAIFFType,
&recordState.dataFormat,
kAudioFileFlags_EraseFile,
&recordState.audioFile);
if(status == 0)
{
recordState.recording = true;
status = AudioQueueStart(recordState.queue, NULL);
if(status == 0)
{
}
}
}
}
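For reference, the System Sound Services API also offers a completion hook, so the queue setup can be deferred until the beep has actually finished — a hedged sketch only (AudioServicesPlaySystemSoundWithCompletion requires iOS 9+; -beginAudioQueueRecording is a hypothetical wrapper around the AudioQueueNewInput code above):
AudioServicesCreateSystemSoundID(fileRef, &startRecordingSound);
AudioServicesPlaySystemSoundWithCompletion(startRecordingSound, ^{
    // The beep has finished; it is now safe to spin up the input queue.
    [self beginAudioQueueRecording]; // hypothetical method containing the AudioQueueNewInput / AudioQueueStart code
});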
