How to create an AudioBuffer/Audio from NSData - iOS

I am a beginner with streaming applications. I create NSData from an AudioBuffer and send that NSData to the client (receiver), but I don't know how to convert the NSData back into an AudioBuffer.
I am using the following code to convert an AudioBuffer to NSData (this part is working well):
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
AudioStreamBasicDescription audioFormat;
memset(&audioFormat, 0, sizeof(audioFormat));
audioFormat.mSampleRate = 8000.0;
audioFormat.mFormatID = kAudioFormatiLBC;
audioFormat.mFormatFlags = kAudioFormatFlagIsBigEndian | kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked | kAudioFormatFlagIsAlignedHigh;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mReserved = 0;
audioFormat.mBytesPerFrame = audioFormat.mBytesPerPacket = audioFormat.mChannelsPerFrame* sizeof(SInt16);
AudioBufferList audioBufferList;
NSMutableData *data=[[NSMutableData alloc] init];
CMBlockBufferRef blockBuffer;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);
for( int y=0; y<audioBufferList.mNumberBuffers; y++ )
{
AudioBuffer audioBuffer = audioBufferList.mBuffers[y];
Float32 *frame = (Float32*)audioBuffer.mData;
[data appendBytes:frame length:audioBuffer.mDataByteSize];
}
}
If this is not the proper way, please help me. Thanks.

You can create NSData from the CMSampleBufferRef using the following code and then play it with AVAudioPlayer.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
AudioBufferList audioBufferList;
NSMutableData *data= [NSMutableData data];
CMBlockBufferRef blockBuffer;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);
for( int y=0; y< audioBufferList.mNumberBuffers; y++ ){
AudioBuffer audioBuffer = audioBufferList.mBuffers[y];
Float32 *frame = (Float32*)audioBuffer.mData;
[data appendBytes:frame length:audioBuffer.mDataByteSize];
}
CFRelease(blockBuffer);
AVAudioPlayer *player = [[AVAudioPlayer alloc] initWithData:data error:nil];
[player play];
}
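For context, here is a hedged sketch of how such a delegate method is typically wired up; the session and queue choices below are illustrative assumptions, not part of the answer above.
AVCaptureSession *session = [[AVCaptureSession alloc] init];
AVCaptureDevice *mic = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
NSError *error = nil;
AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:mic error:&error];
if (input && [session canAddInput:input]) [session addInput:input];
AVCaptureAudioDataOutput *output = [[AVCaptureAudioDataOutput alloc] init];
// The delegate must implement captureOutput:didOutputSampleBuffer:fromConnection: as shown above.
[output setSampleBufferDelegate:self queue:dispatch_get_main_queue()];
if ([session canAddOutput:output]) [session addOutput:output];
[session startRunning];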

This is how I did it, in case anyone else is caught by the same issue. You don't need to pull the raw bytes out of the AudioBufferList; you can use it as it is. To re-create the AudioBufferList from the NSData again I also need the number of samples, so I append that just before the actual data.
Here's how to get data out of CMSampleBufferRef:
AudioBufferList audioBufferList;
CMBlockBufferRef blockBuffer;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);
CMItemCount numSamples = CMSampleBufferGetNumSamples(sampleBuffer);
NSUInteger size = sizeof(audioBufferList);
char buffer[size + 4];
((int*)buffer)[0] = (int)numSamples;
memcpy(buffer +4, &audioBufferList, size);
//This is the Audio data.
NSData *bufferData = [NSData dataWithBytes:buffer length:size + 4];
This is how you create the CMSampleBufferRef out of this data:
const void *buffer = [bufferData bytes];
buffer = (char *)buffer;
CMSampleBufferRef sampleBuffer = NULL;
OSStatus status = -1;
/* Format Description */
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = 0xc; // == kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked
audioFormat.mBytesPerPacket= 2;
audioFormat.mFramesPerPacket= 1;
audioFormat.mBytesPerFrame= 2;
audioFormat.mChannelsPerFrame= 1;
audioFormat.mBitsPerChannel= 16;
audioFormat.mReserved= 0;
CMFormatDescriptionRef format = NULL;
status = CMAudioFormatDescriptionCreate(kCFAllocatorDefault, &audioFormat, 0, nil, 0, nil, nil, &format);
CMFormatDescriptionRef formatdes = NULL;
status = CMFormatDescriptionCreate(NULL, kCMMediaType_Audio, 'lpcm', NULL, &formatdes);
if (status != noErr)
{
NSLog(#"Error in CMAudioFormatDescriptionCreater");
return;
}
/* Create sample Buffer */
CMSampleTimingInfo timing = {.duration= CMTimeMake(1, 44100), .presentationTimeStamp= kCMTimeZero, .decodeTimeStamp= kCMTimeInvalid};
CMItemCount framesCount = ((int*)buffer)[0];
status = CMSampleBufferCreate(kCFAllocatorDefault, nil , NO,nil,nil,format, framesCount, 1, &timing, 0, nil, &sampleBuffer);
if( status != noErr)
{
NSLog(#"Error in CMSampleBufferCreate");
return;
}
/* Copy BufferList to Sample Buffer */
AudioBufferList receivedAudioBufferList;
memcpy(&receivedAudioBufferList, buffer + 4, sizeof(receivedAudioBufferList));
status = CMSampleBufferSetDataBufferFromAudioBufferList(sampleBuffer, kCFAllocatorDefault , kCFAllocatorDefault, 0, &receivedAudioBufferList);
if (status != noErr) {
NSLog(#"Error in CMSampleBufferSetDataBufferFromAudioBufferList");
return;
}
//Use your sampleBuffer.
Let me know of any questions.
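One hedged addition: CMAudioFormatDescriptionCreate and CMSampleBufferCreate both follow the Core Foundation "Create" rule, so once you are finished with the rebuilt buffer the created objects should be released. A minimal cleanup sketch:
// Release the CF objects created above when they are no longer needed.
if (format) CFRelease(format);
if (formatdes) CFRelease(formatdes);
if (sampleBuffer) CFRelease(sampleBuffer);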

This is the code I used to convert my audio data (an audio file) into a floating-point representation and save it into an array. First I read the audio data into an AudioBufferList and then take the float values of the audio data. Check the code below in case it helps:
-(void) PrintFloatDataFromAudioFile {
NSString * name = @"Filename"; //YOUR FILE NAME
NSString * source = [[NSBundle mainBundle] pathForResource:name ofType:@"m4a"]; // SPECIFY YOUR FILE FORMAT
const char *cString = [source cStringUsingEncoding:NSASCIIStringEncoding];
CFStringRef str = CFStringCreateWithCString(
NULL,
cString,
kCFStringEncodingMacRoman
);
CFURLRef inputFileURL = CFURLCreateWithFileSystemPath(
kCFAllocatorDefault,
str,
kCFURLPOSIXPathStyle,
false
);
ExtAudioFileRef fileRef;
ExtAudioFileOpenURL(inputFileURL, &fileRef);
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100; // GIVE YOUR SAMPLING RATE
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat;
audioFormat.mBitsPerChannel = sizeof(Float32) * 8;
audioFormat.mChannelsPerFrame = 1; // Mono
audioFormat.mBytesPerFrame = audioFormat.mChannelsPerFrame * sizeof(Float32); // == sizeof(Float32)
audioFormat.mFramesPerPacket = 1;
audioFormat.mBytesPerPacket = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame; // = sizeof(Float32)
// 3) Apply audio format to the Extended Audio File
ExtAudioFileSetProperty(
fileRef,
kExtAudioFileProperty_ClientDataFormat,
sizeof (AudioStreamBasicDescription), //= audioFormat
&audioFormat);
int numSamples = 1024; //How many samples to read in at a time
UInt32 sizePerPacket = audioFormat.mBytesPerPacket; // = sizeof(Float32) = 4 bytes
UInt32 packetsPerBuffer = numSamples;
UInt32 outputBufferSize = packetsPerBuffer * sizePerPacket;
// So the lvalue of outputBuffer is the memory location where we have reserved space
UInt8 *outputBuffer = (UInt8 *)malloc(sizeof(UInt8) * outputBufferSize);
AudioBufferList convertedData ;//= malloc(sizeof(convertedData));
convertedData.mNumberBuffers = 1; // Set this to 1 for mono
convertedData.mBuffers[0].mNumberChannels = audioFormat.mChannelsPerFrame; //also = 1
convertedData.mBuffers[0].mDataByteSize = outputBufferSize;
convertedData.mBuffers[0].mData = outputBuffer; //
UInt32 frameCount = numSamples;
float *samplesAsCArray;
int j =0;
double floatDataArray[882000] ; // SPECIFY YOUR DATA LIMIT MINE WAS 882000 , SHOULD BE EQUAL TO OR MORE THAN DATA LIMIT
while (frameCount > 0) {
ExtAudioFileRead(
fileRef,
&frameCount,
&convertedData
);
if (frameCount > 0) {
AudioBuffer audioBuffer = convertedData.mBuffers[0];
samplesAsCArray = (float *)audioBuffer.mData; // CAST YOUR mData INTO FLOAT
for (int i =0; i<1024 /*numSamples */; i++) { //YOU CAN PUT numSamples INTEAD OF 1024
floatDataArray[j] = (double)samplesAsCArray[i] ; //PUT YOUR DATA INTO FLOAT ARRAY
printf("\n%f",floatDataArray[j]); //PRINT YOUR ARRAY'S DATA IN FLOAT FORM RANGING -1 TO +1
j++;
}
}
}}
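A small hedged follow-up: the routine above allocates outputBuffer and opens fileRef (plus two CF objects) but never releases them, so a cleanup along these lines before returning avoids the leaks.
// Cleanup sketch for the allocations made above.
free(outputBuffer);
ExtAudioFileDispose(fileRef);
CFRelease(inputFileURL);
CFRelease(str);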

I used the following code snippet to convert NSData (in my case an 800-byte packet, but arguably it could be of any size) to an AudioBufferList:
-(AudioBufferList *) getBufferListFromData: (NSData *) data
{
if (data.length > 0)
{
NSUInteger len = [data length];
//I guess you can use Byte*, void* or Float32*. I am not sure if that makes any difference.
Byte * byteData = (Byte*) malloc (len);
memcpy (byteData, [data bytes], len);
if (byteData)
{
AudioBufferList * theDataBuffer =(AudioBufferList*)malloc(sizeof(AudioBufferList) * 1);
theDataBuffer->mNumberBuffers = 1;
theDataBuffer->mBuffers[0].mDataByteSize = len;
theDataBuffer->mBuffers[0].mNumberChannels = 1;
theDataBuffer->mBuffers[0].mData = byteData;
// Read the data into an AudioBufferList
return theDataBuffer;
}
}
return NULL;
}
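A brief usage sketch to go with it (receivedData is a hypothetical NSData instance): both the list and its mData are malloc'd above, so the caller owns them and must free both.
AudioBufferList *list = [self getBufferListFromData:receivedData];
if (list) {
    // ... hand list to an audio unit, a circular buffer, etc. ...
    free(list->mBuffers[0].mData);
    free(list);
}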

Related

Decoding AAC ADTS to PCM (weird sound)

I'm trying to decode AAC to PCM and everything seems to be working okay. The problem is that I can hear a strange sound in my audio file (like a snake) :)
The audio file is this one: https://www.dropbox.com/s/2a6c1m38w1cu0km/myaudiofileWrite.m4a?dl=0
My method to decode AAC ADTS to PCM is this:
-(void) audioFileReaderWithData: (NSData *) audioData {
AudioFileID refAudioFileID;
ExtAudioFileRef inputFileID;
OSStatus result = AudioFileOpenWithCallbacks((__bridge void * _Nonnull)(audioData), readProc, NULL, getSizeProc, NULL, kAudioFileAAC_ADTSType, &refAudioFileID);
if(result != noErr){
NSLog(#"problem in theAudioFileReaderWithData function: result code %i \n", result);
}
//kAudioFilePermissionsError
result = ExtAudioFileWrapAudioFileID(refAudioFileID, false, &inputFileID);
if (result != noErr){
NSLog(#"problem in theAudioFileReaderWithData function Wraping the audio FileID: result code %i \n", result);
}
// Client Audio Format Description
AudioStreamBasicDescription clientFormat =[self getAudioDescription];
//Output Audio Format Description
AudioStreamBasicDescription outputFormat =[self getAACAudioDescription];
UInt32 codec = kAppleHardwareAudioCodecManufacturer;
int size = sizeof(codec);
result = ExtAudioFileSetProperty(outputFileID,
kExtAudioFileProperty_CodecManufacturer,
size,
&codec);
if(result) printf("ExtAudioFileSetProperty %d \n", (int)result);
// create the outputFile that we're writing to here....
UInt32 outputFormatSize = sizeof(outputFormat);
result = 0;
result = AudioFormatGetProperty(kAudioFormatProperty_FormatInfo, 0, NULL, &outputFormatSize, &outputFormat);
if(result != noErr)
NSLog(#"could not set the output format with status code %i \n",result);
size = sizeof(clientFormat);
result = 0;
result = ExtAudioFileSetProperty(inputFileID, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
if(result != noErr)
NSLog(#"error on ExtAudioFileSetProperty for input File with result code %i \n", result);
size = sizeof(clientFormat);
result = 0;
result = ExtAudioFileSetProperty(outputFileID, kExtAudioFileProperty_ClientDataFormat, size, &clientFormat);
if(result != noErr)
NSLog(#"error on ExtAudioFileSetProperty for output File with result code %i \n", result);
int totalFrames = 0;
UInt32 outputFilePacketPosition = 0; //in bytes
UInt32 encodedBytes = 0;
while (1) {
UInt32 bufferByteSize = 22050 * 4 * 2;
char srcBuffer[bufferByteSize];
UInt32 numFrames = (bufferByteSize/clientFormat.mBytesPerFrame);
AudioBufferList fillBufList;
fillBufList.mNumberBuffers = 1;
fillBufList.mBuffers[0].mNumberChannels = clientFormat.mChannelsPerFrame;
fillBufList.mBuffers[0].mDataByteSize = bufferByteSize;
fillBufList.mBuffers[0].mData = srcBuffer;
result = 0;
result = ExtAudioFileRead(inputFileID, &numFrames, &fillBufList);
if (result != noErr) {
NSLog(#"Error on ExtAudioFileRead with result code %i \n", result);
totalFrames = 0;
break;
}
if (!numFrames)
break;
totalFrames = totalFrames + numFrames;
result = 0;
result = ExtAudioFileWrite(outputFileID,
numFrames,
&fillBufList);
if(result!= noErr){
NSLog(#"ExtAudioFileWrite failed with code %i \n", result);
}
encodedBytes += numFrames * clientFormat.mBytesPerFrame;
}
//Clean up
ExtAudioFileDispose(inputFileID);
//ExtAudioFileDispose(outputFileID); I'm calling this in other method
AudioFileClose(refAudioFileID);
}
static OSStatus readProc(void* clientData,
SInt64 position,
UInt32 requestCount,
void* buffer,
UInt32* actualCount)
{
NSData *inAudioData = (__bridge NSData *) clientData;
size_t dataSize = inAudioData.length;
size_t bytesToRead = 0;
if(position < dataSize) {
size_t bytesAvailable = dataSize - position;
bytesToRead = requestCount <= bytesAvailable ? requestCount : bytesAvailable;
[inAudioData getBytes: buffer range:NSMakeRange(position, bytesToRead)];
} else {
NSLog(#"data was not read \n");
bytesToRead = 0;
}
if(actualCount)
*actualCount = bytesToRead;
return noErr;
}
static SInt64 getSizeProc(void* clientData) {
NSData *inAudioData = (__bridge NSData *)(clientData);
size_t dataSize = inAudioData.length;
return dataSize;
}
formats:
- (AudioStreamBasicDescription)getAudioDescription {
AudioStreamBasicDescription audioDescription = {0};
audioDescription.mFormatID = kAudioFormatLinearPCM;
audioDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
audioDescription.mChannelsPerFrame = 1;
audioDescription.mBytesPerPacket = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mFramesPerPacket = 1;
audioDescription.mBytesPerFrame = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mBitsPerChannel = 8 * sizeof(SInt16);
audioDescription.mSampleRate = SAMPLE_RATE_AUDIO;
return audioDescription;
}
- (AudioStreamBasicDescription)getAACAudioDescription {
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 44100;
audioFormat.mFormatID = kAudioFormatMPEG4AAC;
audioFormat.mFormatFlags = kMPEG4Object_AAC_LC;
audioFormat.mBytesPerPacket= 0;
audioFormat.mFramesPerPacket= 1024;
audioFormat.mBytesPerFrame= 0;
audioFormat.mChannelsPerFrame= 1;
audioFormat.mBitsPerChannel= 0;
audioFormat.mReserved= 0;
return audioFormat;
}
Do you know what could be happening?

AudioUnitRender returns null into AudioBufferList on iPad

I'm creating an audio file from AVAudioEngine output using AudioUnitRender. On iPhone this works fine, but on iPad I get a silent audio file with the right duration. Why can this happen?
Main method
NSTimeInterval duration = CMTimeGetSeconds(asset.duration);
NSUInteger lengthInFrames = (NSUInteger) (duration * audioDescription->mSampleRate);
const NSUInteger kBufferLength = 1024; //3756;
AudioBufferList *bufferList = AEAllocateAndInitAudioBufferList(*audioDescription, kBufferLength);
AudioTimeStamp timeStamp;
memset (&timeStamp, 0, sizeof(timeStamp));
timeStamp.mFlags = kAudioTimeStampSampleTimeValid;
OSStatus status = noErr;
for (NSUInteger i = kBufferLength; i < lengthInFrames; i += kBufferLength) {
status = [self renderToBufferList:bufferList writeToFile:audioFile bufferLength:kBufferLength timeStamp:&timeStamp];
if (status != noErr)
break;
}
if (status == noErr && timeStamp.mSampleTime < lengthInFrames) {
NSUInteger restBufferLength = (NSUInteger) (lengthInFrames - timeStamp.mSampleTime);
AudioBufferList *restBufferList = AEAllocateAndInitAudioBufferList(*audioDescription, (Float32)restBufferLength);
status = [self renderToBufferList:restBufferList writeToFile:audioFile bufferLength:restBufferLength timeStamp:&timeStamp];
AEFreeAudioBufferList(restBufferList);
}
SInt64 fileLengthInFrames;
UInt32 size = sizeof(SInt64);
ExtAudioFileGetProperty(audioFile, kExtAudioFileProperty_FileLengthFrames, &size, &fileLengthInFrames);
AEFreeAudioBufferList(bufferList);
ExtAudioFileDispose(audioFile);
if (status != noErr)
[self showAlertWithTitle:#"Error" message:#"See logs for details"];
else {
NSLog(#"Finished writing to file at path: %# \n File size must be %f Mb", path,(tmpData.length/1024.0)/1024.0);
[self showAlertWithTitle:#"Success!" message:#"Now you can play a result file"];
}
Allocating of buffer
AudioBufferList *AEAllocateAndInitAudioBufferList(AudioStreamBasicDescription audioFormat, int frameCount) {
int numberOfBuffers = audioFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved ? audioFormat.mChannelsPerFrame : 1;
int channelsPerBuffer = audioFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved ? 1 : audioFormat.mChannelsPerFrame;
int bytesPerBuffer = audioFormat.mBytesPerFrame * frameCount;
AudioBufferList *audio = malloc(sizeof(AudioBufferList) + (numberOfBuffers - 1) * sizeof(AudioBuffer));
if (!audio) {
return NULL;
}
audio->mNumberBuffers = numberOfBuffers;
for (int i = 0; i < numberOfBuffers; i++) {
if (bytesPerBuffer > 0) {
audio->mBuffers[i].mData = calloc(bytesPerBuffer, 1);
if (!audio->mBuffers[i].mData) {
for (int j = 0; j < i; j++) free(audio->mBuffers[j].mData);
free(audio);
return NULL;
}
} else {
audio->mBuffers[i].mData = NULL;
}
audio->mBuffers[i].mDataByteSize = bytesPerBuffer;
audio->mBuffers[i].mNumberChannels = channelsPerBuffer;
}
return audio;
}
Rendering method
- (OSStatus)renderToBufferList:(AudioBufferList *)bufferList
writeToFile:(ExtAudioFileRef)audioFile
bufferLength:(NSUInteger)bufferLength
timeStamp:(AudioTimeStamp *)timeStamp {
[self clearBufferList:bufferList];
AudioUnit outputUnit = self.engine.outputNode.audioUnit;
OSStatus status =AudioUnitRender(outputUnit, 0, timeStamp, 0, (UInt32)bufferLength, bufferList);
[tmpData appendBytes:bufferList->mBuffers[0].mData length:bufferLength];
float *data1 = bufferList->mBuffers[0].mData;
float *data2 = bufferList->mBuffers[1].mData;
for(int i=0; i<bufferLength/4; i++)
{
//On iPad data[i]==0 and data2[i] == 0
if(data1[i]!=0||data2[i]!=0)
NSLog(#"%f - %f",data1[i],data2[i]);
}
if (status != noErr) {
NSLog(#"Can not render audio unit");
return status;
}
timeStamp->mSampleTime += bufferLength;
status = ExtAudioFileWrite(audioFile, (UInt32)bufferLength, bufferList);
if (status != noErr)
NSLog(#"Can not write audio to file");
return status;
}
Problem occurs in the Rendering method

Playing Raw pcm audio data coming from NSStream

I am trying to play PCM data from an NSInputStream. Can anyone provide the right approach or code to do so?
I get the audio in the stream's has-bytes-available event with the following code:
uint8_t bytes[self.audioStreamReadMaxLength];
UInt32 length = [audioStream readData:bytes maxLength:self.audioStreamReadMaxLength];
Now how can I play this audio data on the iPhone?
I worked on a similar problem, and in the end I solved it.
Here are the basics of what I did. I am using the GCDAsyncSocket library for the sockets.
The class below is responsible for getting the audio and making it available to connected clients.
#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import <AudioToolbox/AudioToolbox.h>
@interface AudioServer : NSObject <GCDAsyncSocketDelegate>
@property (nonatomic, strong)GCDAsyncSocket * serverSocket;
@property (nonatomic, strong)NSMutableArray *connectedClients;
@property (nonatomic) AudioComponentInstance audioUnit;
-(void) start;
-(void) stop;
-(void) writeDataToClients:(NSData*)data;
@end
#define kOutputBus 0
#define kInputBus 1
#import "AudioServer.h"
#import "SM_Utils.h"
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// TODO: Use inRefCon to access our interface object to do stuff
// Then, use inNumberFrames to figure out how much data is available, and make
// that much space available in buffers in an AudioBufferList.
AudioServer *server = (__bridge AudioServer*)inRefCon;
AudioBufferList bufferList;
SInt16 samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
memset (&samples, 0, sizeof (samples));
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mData = samples;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mDataByteSize = inNumberFrames*sizeof(SInt16);
// Then:
// Obtain recorded samples
OSStatus status;
status = AudioUnitRender(server.audioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&bufferList);
NSData *dataToSend = [NSData dataWithBytes:bufferList.mBuffers[0].mData length:bufferList.mBuffers[0].mDataByteSize];
[server writeDataToClients:dataToSend];
return noErr;
}
@implementation AudioServer
-(id) init
{
return [super init];
}
-(void) start
{
[UIApplication sharedApplication].idleTimerDisabled = YES;
// Create a new instance of AURemoteIO
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp = AudioComponentFindNext(NULL, &desc);
AudioComponentInstanceNew(comp, &_audioUnit);
// Enable input and output on AURemoteIO
// Input is enabled on the input scope of the input element
// Output is enabled on the output scope of the output element
UInt32 one = 1;
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one));
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &one, sizeof(one));
// Explicitly set the input and output client formats
// sample rate = 44100, num channels = 1, format = 32 bit floating point
AudioStreamBasicDescription audioFormat = [self getAudioDescription];
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &audioFormat, sizeof(audioFormat));
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &audioFormat, sizeof(audioFormat));
// Set the MaximumFramesPerSlice property. This property is used to describe to an audio unit the maximum number
// of samples it will be asked to produce on any single given call to AudioUnitRender
UInt32 maxFramesPerSlice = 4096;
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
// Get the property value back from AURemoteIO. We are going to use this value to allocate buffers accordingly
UInt32 propSize = sizeof(UInt32);
AudioUnitGetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, &propSize);
AURenderCallbackStruct renderCallback;
renderCallback.inputProc = recordingCallback;
renderCallback.inputProcRefCon = (__bridge void *)(self);
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &renderCallback, sizeof(renderCallback));
// Initialize the AURemoteIO instance
AudioUnitInitialize(_audioUnit);
AudioOutputUnitStart(_audioUnit);
_connectedClients = [[NSMutableArray alloc] init];
_serverSocket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue:dispatch_get_main_queue()];
[self startAcceptingConnections];
}
- (AudioStreamBasicDescription)getAudioDescription {
AudioStreamBasicDescription audioDescription = {0};
audioDescription.mFormatID = kAudioFormatLinearPCM;
audioDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
audioDescription.mChannelsPerFrame = 1;
audioDescription.mBytesPerPacket = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mFramesPerPacket = 1;
audioDescription.mBytesPerFrame = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mBitsPerChannel = 8 * sizeof(SInt16);
audioDescription.mSampleRate = 44100.0;
return audioDescription;
}
-(void) startAcceptingConnections
{
NSError *error = nil;
if(_serverSocket)
[_serverSocket acceptOnPort:[SM_Utils serverPort] error:&error];
}
-(void)socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
{
if(_connectedClients)
[_connectedClients removeObject:sock];
}
- (void)socket:(GCDAsyncSocket *)socket didAcceptNewSocket:(GCDAsyncSocket *)newSocket {
NSLog(#"Accepted New Socket from %#:%hu", [newSocket connectedHost], [newSocket connectedPort]);
#synchronized(_connectedClients)
{
dispatch_async(dispatch_get_main_queue(), ^{
if(_connectedClients)
[_connectedClients addObject:newSocket];
});
}
NSError *error = nil;
if(_serverSocket)
[_serverSocket acceptOnPort:[SM_Utils serverPort] error:&error];
}
-(void) writeDataToClients:(NSData *)data
{
if(_connectedClients)
{
for (GCDAsyncSocket *socket in _connectedClients) {
if([socket isConnected])
{
[socket writeData:data withTimeout:-1 tag:0];
}
else{
if([_connectedClients containsObject:socket])
[_connectedClients removeObject:socket];
}
}
}
}
-(void) stop
{
if(_serverSocket)
{
_serverSocket = nil;
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
AudioOutputUnitStop(_audioUnit);
}
-(void) dealloc
{
if(_serverSocket)
{
_serverSocket = nil;
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
AudioOutputUnitStop(_audioUnit);
}
@end
The following class is then responsible for retrieving the audio from the server and playing it
#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import <AudioToolbox/AudioToolbox.h>
#import "TPCircularBuffer.h"
@protocol AudioClientDelegate <NSObject>
-(void) connected;
-(void) animateSoundIndicator:(float) rms;
@end
@interface AudioClient : NSObject<GCDAsyncSocketDelegate>
{
NSString *ipAddress;
BOOL stopped;
}
@property (nonatomic) TPCircularBuffer circularBuffer;
@property (nonatomic) AudioComponentInstance audioUnit;
@property (nonatomic, strong) GCDAsyncSocket *socket;
@property (nonatomic, strong) id<AudioClientDelegate> delegate;
-(id) initWithDelegate:(id)delegate;
-(void) start:(NSString *)ip;
-(void) stop;
-(TPCircularBuffer *) outputShouldUseCircularBuffer;
@end
static OSStatus OutputRenderCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData){
AudioClient *output = (__bridge AudioClient*)inRefCon;
TPCircularBuffer *circularBuffer = [output outputShouldUseCircularBuffer];
if( !circularBuffer ){
AudioUnitSampleType *left = (AudioUnitSampleType*)ioData->mBuffers[0].mData;
for(int i = 0; i < inNumberFrames; i++ ){
left[ i ] = 0.0f;
}
return noErr;
};
int32_t bytesToCopy = ioData->mBuffers[0].mDataByteSize;
SInt16* outputBuffer = ioData->mBuffers[0].mData;
int32_t availableBytes;
SInt16 *sourceBuffer = TPCircularBufferTail(circularBuffer, &availableBytes);
int32_t amount = MIN(bytesToCopy,availableBytes);
memcpy(outputBuffer, sourceBuffer, amount);
TPCircularBufferConsume(circularBuffer,amount);
return noErr;
}
@implementation AudioClient
-(id) initWithDelegate:(id)delegate
{
self = [super init];
if (self)
{
[self circularBuffer:&_circularBuffer withSize:24576*5];
_delegate = delegate;
stopped = NO;
}
return self;
}
-(void) start:(NSString *)ip
{
_socket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue: dispatch_get_main_queue()];
NSError *err;
ipAddress = ip;
[UIApplication sharedApplication].idleTimerDisabled = YES;
if(![_socket connectToHost:ipAddress onPort:[SM_Utils serverPort] error:&err])
{
}
[self setupAudioUnit];
}
-(void) setupAudioUnit
{
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp = AudioComponentFindNext(NULL, &desc);
OSStatus status;
status = AudioComponentInstanceNew(comp, &_audioUnit);
if(status != noErr)
{
NSLog(#"Error creating AudioUnit instance");
}
// Enable input and output on AURemoteIO
// Input is enabled on the input scope of the input element
// Output is enabled on the output scope of the output element
UInt32 one = 1;
status = AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &one, sizeof(one));
if(status != noErr)
{
NSLog(#"Error enableling AudioUnit output bus");
}
// Explicitly set the input and output client formats
// sample rate = 44100, num channels = 1, format = 16 bit int point
AudioStreamBasicDescription audioFormat = [self getAudioDescription];
status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &audioFormat, sizeof(audioFormat));
if(status != noErr)
{
NSLog(#"Error setting audio format");
}
AURenderCallbackStruct renderCallback;
renderCallback.inputProc = OutputRenderCallback;
renderCallback.inputProcRefCon = (__bridge void *)(self);
status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, kOutputBus, &renderCallback, sizeof(renderCallback));
if(status != noErr)
{
NSLog(#"Error setting rendering callback");
}
// Initialize the AURemoteIO instance
status = AudioUnitInitialize(_audioUnit);
if(status != noErr)
{
NSLog(#"Error initializing audio unit");
}
}
- (AudioStreamBasicDescription)getAudioDescription {
AudioStreamBasicDescription audioDescription = {0};
audioDescription.mFormatID = kAudioFormatLinearPCM;
audioDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
audioDescription.mChannelsPerFrame = 1;
audioDescription.mBytesPerPacket = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mFramesPerPacket = 1;
audioDescription.mBytesPerFrame = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mBitsPerChannel = 8 * sizeof(SInt16);
audioDescription.mSampleRate = 44100.0;
return audioDescription;
}
-(void) socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
{
if(!stopped)
if(![_socket connectToHost:ipAddress onPort:[SM_Utils serverPort] error:&err])
{
}
}
-(void) socket:(GCDAsyncSocket *)socket didReadData:(NSData *)data withTag:(long)tag
{
if(data.length > 0)
{
unsigned long len = [data length];
SInt16* byteData = (SInt16*)malloc(len);
memcpy(byteData, [data bytes], len);
double sum = 0.0;
for(int i = 0; i < len/2; i++) {
sum += byteData[i] * byteData[i];
}
double average = sum / len;
double rms = sqrt(average);
[_delegate animateSoundIndicator:rms];
Byte* soundData = (Byte*)malloc(len);
memcpy(soundData, [data bytes], len);
if(soundData)
{
AudioBufferList *theDataBuffer = (AudioBufferList*) malloc(sizeof(AudioBufferList) *1);
theDataBuffer->mNumberBuffers = 1;
theDataBuffer->mBuffers[0].mDataByteSize = (UInt32)len;
theDataBuffer->mBuffers[0].mNumberChannels = 1;
theDataBuffer->mBuffers[0].mData = (SInt16*)soundData;
[self appendDataToCircularBuffer:&_circularBuffer fromAudioBufferList:theDataBuffer];
}
}
[socket readDataToLength:18432 withTimeout:-1 tag:0];
}
-(void)circularBuffer:(TPCircularBuffer *)circularBuffer withSize:(int)size {
TPCircularBufferInit(circularBuffer,size);
}
-(void)appendDataToCircularBuffer:(TPCircularBuffer*)circularBuffer
fromAudioBufferList:(AudioBufferList*)audioBufferList {
TPCircularBufferProduceBytes(circularBuffer,
audioBufferList->mBuffers[0].mData,
audioBufferList->mBuffers[0].mDataByteSize);
}
-(void)freeCircularBuffer:(TPCircularBuffer *)circularBuffer {
TPCircularBufferClear(circularBuffer);
TPCircularBufferCleanup(circularBuffer);
}
-(void) socket:(GCDAsyncSocket *)socket didConnectToHost:(NSString *)host port:(uint16_t)port
{
OSStatus status = AudioOutputUnitStart(_audioUnit);
if(status != noErr)
{
NSLog(#"Error starting audio unit");
}
[socket readDataToLength:18432 withTimeout:-1 tag:0];
[_delegate connected];
}
-(TPCircularBuffer *) outputShouldUseCircularBuffer
{
return &_circularBuffer;
}
-(void) stop
{
OSStatus status = AudioOutputUnitStop(_audioUnit);
if(status != noErr)
{
NSLog(#"Error stopping audio unit");
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
TPCircularBufferClear(&_circularBuffer);
_audioUnit = nil;
stopped = YES;
}
-(void) dealloc {
OSStatus status = AudioOutputUnitStop(_audioUnit);
if(status != noErr)
{
NSLog(#"Error stopping audio unit");
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
TPCircularBufferClear(&_circularBuffer);
_audioUnit = nil;
stopped = YES;
}
@end
Some of the code is specific to my requirements, but most of it can simply be reused. I hope this helps.
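As a quick usage sketch (hedged; the IP address and the calling object are illustrative), the two classes above are driven roughly like this:
AudioServer *server = [[AudioServer alloc] init];
[server start]; // capture microphone audio and accept client connections

AudioClient *client = [[AudioClient alloc] initWithDelegate:self];
[client start:@"192.168.1.10"]; // connect to the server's IP and start playback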
Apple has an example doing the same kind of thing:
void* MyGetOpenALAudioData(CFURLRef inFileURL, ALsizei *outDataSize, ALenum *outDataFormat, ALsizei* outSampleRate)
{
OSStatus err = noErr;
SInt64 theFileLengthInFrames = 0;
AudioStreamBasicDescription theFileFormat;
UInt32 thePropertySize = sizeof(theFileFormat);
ExtAudioFileRef extRef = NULL;
void* theData = NULL;
AudioStreamBasicDescription theOutputFormat;
// Open a file with ExtAudioFileOpen()
err = ExtAudioFileOpenURL(inFileURL, &extRef);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileOpenURL FAILED, Error = %ld\n", err); goto Exit; }
// Get the audio data format
err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileDataFormat, &thePropertySize, &theFileFormat);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileDataFormat) FAILED, Error = %ld\n", err); goto Exit; }
if (theFileFormat.mChannelsPerFrame > 2) { printf("MyGetOpenALAudioData - Unsupported Format, channel count is greater than stereo\n"); goto Exit;}
// Set the client format to 16 bit signed integer (native-endian) data
// Maintain the channel count and sample rate of the original source format
theOutputFormat.mSampleRate = theFileFormat.mSampleRate;
theOutputFormat.mChannelsPerFrame = theFileFormat.mChannelsPerFrame;
theOutputFormat.mFormatID = kAudioFormatLinearPCM;
theOutputFormat.mBytesPerPacket = 2 * theOutputFormat.mChannelsPerFrame;
theOutputFormat.mFramesPerPacket = 1;
theOutputFormat.mBytesPerFrame = 2 * theOutputFormat.mChannelsPerFrame;
theOutputFormat.mBitsPerChannel = 16;
theOutputFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
// Set the desired client (output) data format
err = ExtAudioFileSetProperty(extRef, kExtAudioFileProperty_ClientDataFormat, sizeof(theOutputFormat), &theOutputFormat);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileSetProperty(kExtAudioFileProperty_ClientDataFormat) FAILED, Error = %ld\n", err); goto Exit; }
// Get the total frame count
thePropertySize = sizeof(theFileLengthInFrames);
err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileLengthFrames, &thePropertySize, &theFileLengthInFrames);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileLengthFrames) FAILED, Error = %ld\n", err); goto Exit; }
// Read all the data into memory
UInt32 theFramesToRead = (UInt32)theFileLengthInFrames;
UInt32 dataSize = theFramesToRead * theOutputFormat.mBytesPerFrame;
theData = malloc(dataSize);
if (theData)
{
AudioBufferList theDataBuffer;
theDataBuffer.mNumberBuffers = 1;
theDataBuffer.mBuffers[0].mDataByteSize = dataSize;
theDataBuffer.mBuffers[0].mNumberChannels = theOutputFormat.mChannelsPerFrame;
theDataBuffer.mBuffers[0].mData = theData;
// Read the data into an AudioBufferList
err = ExtAudioFileRead(extRef, &theFramesToRead, &theDataBuffer);
if(err == noErr)
{
// success
*outDataSize = (ALsizei)dataSize;
*outDataFormat = (theOutputFormat.mChannelsPerFrame > 1) ? AL_FORMAT_STEREO16 : AL_FORMAT_MONO16;
*outSampleRate = (ALsizei)theOutputFormat.mSampleRate;
}
else
{
// failure
free (theData);
theData = NULL; // make sure to return NULL
printf("MyGetOpenALAudioData: ExtAudioFileRead FAILED, Error = %ld\n", err); goto Exit;
}
}
Exit:
// Dispose the ExtAudioFileRef, it is no longer needed
if (extRef) ExtAudioFileDispose(extRef);
return theData;
}
Find the sample code there; hope this helps.
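For reference, a hedged sketch of how the return values are typically consumed with OpenAL (fileURL and bufferID are assumed to exist already; alBufferData copies the bytes, so the malloc'd data can be freed afterwards):
ALsizei size, freq;
ALenum format;
void *data = MyGetOpenALAudioData((__bridge CFURLRef)fileURL, &size, &format, &freq);
if (data) {
    alBufferData(bufferID, format, data, size, freq);
    free(data); // MyGetOpenALAudioData malloc'd this; the caller owns it
}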

Playing PCM data using Audio Queues

I have referred to this to play a PCM file using Audio Queues.
The code is as follows:
#import "PlayPCM.h"
AudioFileID audioFile;
SInt64 inStartingPacket = 0;
AudioQueueRef audioQueue;
@implementation PlayPCM
void AudioOutputCallback(
void* inUserData,
AudioQueueRef outAQ,
AudioQueueBufferRef outBuffer)
{
AudioStreamPacketDescription* packetDescs;
UInt32 bytesRead;
UInt32 numPackets = 8000;
OSStatus status;
status = AudioFileReadPackets(audioFile,
false,
&bytesRead,
packetDescs,
inStartingPacket,
&numPackets,
outBuffer->mAudioData);
if(numPackets)
{
outBuffer->mAudioDataByteSize = bytesRead;
status = AudioQueueEnqueueBuffer(audioQueue,
outBuffer,
0,
packetDescs);
inStartingPacket += numPackets;
}
else
{
NSLog(#"number of packets = null ") ;
AudioQueueFreeBuffer(audioQueue, outBuffer);
}
}
-(id)init{
if (self = [super init]) {
}
return self;
}
- (void)setupAudioFormat
{
NSLog(#"setting format");
format.mFormatID = kAudioFormatLinearPCM;
format.mSampleRate = 44100;
format.mFramesPerPacket = 1;
format.mChannelsPerFrame = 1;
format.mBytesPerFrame = 2;
format.mBytesPerPacket = 2;
format.mBitsPerChannel = 16;
format.mFormatFlags = kLinearPCMFormatFlagIsBigEndian |
kLinearPCMFormatFlagIsSignedInteger |
kLinearPCMFormatFlagIsPacked;
}
- (void)startPlayback
{
int counter = 0;
[self setupAudioFormat];
OSStatus status;
NSString *documentsDirectory = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) objectAtIndex:0];
NSString *filePath = [documentsDirectory stringByAppendingPathComponent:@"test1.wav"];
NSLog(@"file path = %@",filePath);
//fUrl = [NSURL URLWithPath:@"file:///Users/Inscripts/Desktop/test1.wav"];
fUrl = [NSURL fileURLWithPath:filePath];
//CFURLRef fileURL = (__bridge CFURLRef)(fUrl);
CFURLRef fileURL = CFURLCreateWithString(NULL, (CFStringRef) filePath, NULL);
status = AudioFileOpenURL(fileURL, kAudioFileReadPermission, 0,&audioFile);
NSLog(@"file opening status = %d",(int)status);
if(status == 0)
{ NSLog(@"file opened");
status = AudioQueueNewOutput(&(format),
AudioOutputCallback,
(__bridge void *)(self),
CFRunLoopGetCurrent(),
kCFRunLoopCommonModes,
0,
&audioQueue);
NSLog(#"audio queue create status = %d",(int)status);
if(status == 0)
{
AudioQueueAllocateBuffer(audioQueue, 1600000, &audioQueueBuffer);
AudioOutputCallback((__bridge void *)(self), audioQueue, audioQueueBuffer);
[self performSelector:@selector(startQueue) withObject:self afterDelay:50];
}
}
if(status != 0)
{
NSLog(#"failed");
// labelStatus.text = #"Play failed";
}
}
-(void)startQueue{
NSLog(#"start queue called");
OSStatus status = AudioQueueStart(audioQueue, NULL);
if(status == 0)
{
NSLog(#"ok");
// labelStatus.text = #"Playing";
}
}
test1.wav file is PCM encoded 16 bits per sample, sampling rate 44100 Hertz, stereo.
I can successfully create audio queue and read the file but all I can hear is crackling noise.
Can someone tell me what's the issue?
Is the sound really big-endian data? I doubt it is with WAVE files.
Check your format flags and change them to use little-endian data, i.e. drop kLinearPCMFormatFlagIsBigEndian.
Also consider using AudioFileOpenURL or a related call, since that will read the actual WAVE format and you won't have to rely on your hand-built audio stream description.
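As a hedged sketch of that suggestion, the format flags in setupAudioFormat would become native-endian (little-endian on iOS) 16-bit signed PCM:
format.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger |
                      kLinearPCMFormatFlagIsPacked; // no kLinearPCMFormatFlagIsBigEndian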
After preparing more audio queue buffers, there was no more crackling noise.
Please refer to Apple's documentation.
...
/* AudioQueueAllocateBuffer(audioQueue, 1600000, &audioQueueBuffer);
AudioOutputCallback((__bridge void *)(self), audioQueue, audioQueueBuffer);*/
/* add more audio queue buffers, ex:3 */
int kNumberOfBuffers = 3;
AudioQueueBufferRef audioQueueBuffer[kNumberOfBuffers];
for (int i = 0; i<kNumberOfBuffers; i++) {
AudioQueueAllocateBuffer(audioQueue, 1600000, &audioQueueBuffer[i]);
AudioOutputCallback((__bridge void *)(self), audioQueue, audioQueueBuffer[i]);
}
[self performSelector:@selector(startQueue) withObject:self afterDelay:50];
...

Convert audio linear pcm to mp3 ( using LAME ) with the help of AudioQueueServices example in iOS

I am new to iOS development. I am encoding linear PCM to MP3 in iOS. I'm trying to encode the raw PCM data from the microphone to MP3 using the AudioToolbox framework and LAME. Everything seems to run fine if I record in .caf format, but the encoded stream contains only noise and distortion. I'm not sure that I set up the AudioQueue correctly, or that I process the encoded buffer in the right way... My code to set up audio recording:
sample project https://github.com/vecter/Audio-Queue-Services-Example
- (void)setupAudioFormat:(AudioStreamBasicDescription*)format
{
format->mSampleRate = 16000;
format->mFormatID = kAudioFormatLinearPCM;
format->mFramesPerPacket = 1;
format->mChannelsPerFrame = 1;
format->mBytesPerFrame = 2;
format->mBytesPerPacket = 2;
format->mBitsPerChannel = 16;
format->mReserved = 0;
format->mFormatFlags = kLinearPCMFormatFlagIsBigEndian |
kLinearPCMFormatFlagIsSignedInteger |
kLinearPCMFormatFlagIsPacked;
}
- (void)recordPressed:(id)sender
{
if (!playState.playing)
{
if (!recordState.recording)
{
printf("Starting recording\n");
self.mergedData =[[NSMutableData alloc] init];
[self startRecording];
}
else
{
printf("Stopping recording\n");
[self stopRecording];
}
}
else
{
printf("Can't start recording, currently playing\n");
}
}
- (void)startRecording
{
[self setupAudioFormat:&recordState.dataFormat];
recordState.currentPacket = 0;
recordState.pThis=self;
OSStatus status;
status = AudioQueueNewInput(&recordState.dataFormat,
AudioInputCallback,
&recordState,
CFRunLoopGetCurrent(),
kCFRunLoopCommonModes,
0,
&recordState.queue);
if (status == 0)
{
// Prime recording buffers with empty data
for (int i = 0; i < NUM_BUFFERS; i++)
{
AudioQueueAllocateBuffer(recordState.queue, 16000, &recordState.buffers[i]);
AudioQueueEnqueueBuffer (recordState.queue, recordState.buffers[i], 0, NULL);
}
status = AudioFileCreateWithURL(fileURL,
kAudioFileAIFFType,
&recordState.dataFormat,
kAudioFileFlags_EraseFile,
&recordState.audioFile);
gfp = lame_init();
lame_set_num_channels(gfp, 1);
lame_set_in_samplerate(gfp, recordState.dataFormat.mSampleRate);
lame_set_VBR(gfp, vbr_default);
lame_init_params(gfp);
if (status == 0)
{
recordState.recording = true;
status = AudioQueueStart(recordState.queue, NULL);
if (status == 0)
{
mergeData =[[NSMutableData alloc]init];
labelStatus.text = #"Recording";
}
}
}
if (status != 0)
{
[self stopRecording];
labelStatus.text = #"Record Failed";
}
}
- (void)stopRecording
{
recordState.recording = false;
AudioQueueStop(recordState.queue, true);
for(int i = 0; i < NUM_BUFFERS; i++)
{
AudioQueueFreeBuffer(recordState.queue, recordState.buffers[i]);
}
AudioQueueDispose(recordState.queue, true);
AudioFileClose(recordState.audioFile);
labelStatus.text = #"Idle";
}
Then the AudioQueue callback function calls to lame_encode_buffer and then writes the encoded buffer to file:
void AudioInputCallback(void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumberPacketDescriptions,
const AudioStreamPacketDescription * inPacketDescs)
{
RecordState * recordState = (RecordState*)inUserData;
if (!recordState->recording)
{
printf("Not recording, returning\n");
}
printf("Writing buffer %lld\n", recordState->currentPacket);
OSStatus status = AudioFileWritePackets(recordState->audioFile,
false,
inBuffer->mAudioDataByteSize,
inPacketDescs,
recordState->currentPacket,
&inNumberPacketDescriptions,
inBuffer->mAudioData);
if (status == 0)
{
recordState->currentPacket += inNumberPacketDescriptions;
}
AudioRecorderAppDelegate *this = recordState->pThis;
const int MP3_BUFFER_SIZE=inBuffer->mAudioDataByteSize*4;
unsigned char mEncodedBuffer[MP3_BUFFER_SIZE];
int encodedBytes=lame_encode_buffer_interleaved(this->gfp, (short int *)inBuffer->mAudioData , inNumberPacketDescriptions, mEncodedBuffer, MP3_BUFFER_SIZE);
NSData* data = [NSData dataWithBytes:mEncodedBuffer length:encodedBytes];
[this writeData:data];
lame_encode_flush(this->gfp, mEncodedBuffer, MP3_BUFFER_SIZE);
memset(&mEncodedBuffer, 0, sizeof(mEncodedBuffer));
AudioQueueEnqueueBuffer(recordState->queue, inBuffer, 0, NULL);
}
Appending data
- (void) writeData:(NSData *)data
{
[mergeData appendData:data];
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,
NSUserDomainMask, YES);
NSString* docDir = [paths objectAtIndex:0];
NSString* file = [docDir stringByAppendingString:@"/lame.mp3"];
[mergeData writeToFile:file atomically:YES];
NSLog(#"%#",file);
}
Can anybody advise what's wrong here, or post an already-completed sample project?
Try this:
void AQRecorder::MyInputBufferHandler( void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
AQRecorder *aqr = (AQRecorder *)inUserData;
// NSLog(#"%f",inStartTime->mSampleTime);
try
{
if (inNumPackets > 0)
{
AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize, inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData);
aqr->mRecordPacket += inNumPackets;
int MP3_SIZE =inBuffer->mAudioDataByteSize * 4;
unsigned char mp3_buffer[MP3_SIZE];
AppDelegate *delegate =[[UIApplication sharedApplication]delegate];
lame_t lame = lame_init();
lame_set_in_samplerate(lame, 44100);
lame_set_VBR(lame, vbr_default);
lame_init_params(lame);
// int encodedBytes=lame_encode_buffer_interleaved(lame, (short int *)inBuffer->mAudioData , inNumPackets, mp3_buffer, MP3_SIZE);
int encodedBytes = lame_encode_buffer(lame, (short*)inBuffer->mAudioData, (short*)inBuffer->mAudioData, inNumPackets, mp3_buffer, MP3_SIZE);
[delegate.mp3AudioData appendBytes:mp3_buffer length:encodedBytes];
if (inBuffer->mAudioDataByteSize != 0) {
}
else
{
int encode=lame_encode_flush(lame, mp3_buffer, MP3_SIZE);
[delegate.mp3AudioData appendBytes:mp3_buffer length:encode];
}
lame_close(lame);
}
if (aqr->IsRunning())
{
AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}
} catch (CAXException e)
{
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
In my case this logic worked:
int encodedBytes=lame_encode_buffer_interleaved(lame, (short int *)inBuffer->mAudioData , inNumPackets, mp3_buffer, MP3_SIZE);
NSMutableData *data1=[[NSMutableData alloc]initWithBytes:mp3_buffer length:encodedBytes];
[this writeData:data1];
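As a hedged follow-up sketch, once recording stops the MP3 bytes accumulated in delegate.mp3AudioData (from the handler above) can be written out to a file; the file name here is illustrative:
NSString *docDir = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) objectAtIndex:0];
NSString *mp3Path = [docDir stringByAppendingPathComponent:@"recording.mp3"];
[delegate.mp3AudioData writeToFile:mp3Path atomically:YES];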
