I need to play a beep sound when audio recording starts, like the iPhone's native Voice Memos app.
I used the AudioServicesPlaySystemSound(SystemSoundID) function to play a short beep (the sound is a WAV file).
After playing the beep I call AudioQueueNewInput (defined in the AudioToolbox framework) to start recording.
Recording works fine, but the short beep never plays.
This is my code:
typedef struct
{
AudioStreamBasicDescription dataFormat;
AudioQueueRef queue;
AudioQueueBufferRef buffers[NUM_BUFFERS];
AudioFileID audioFile;
SInt64 currentPacket;
bool recording;
} RecordState;
-(void)startRecording
{
CFBundleRef mainBundle = CFBundleGetMainBundle();
CFURLRef fileRef = CFBundleCopyResourceURL(mainBundle, CFSTR("startBeepSound"), CFSTR("wav"), NULL);
AudioServicesCreateSystemSoundID(fileRef, &startRecordingSound);
AudioServicesPlaySystemSound(startRecordingSound);
RecordState recordState; // note: a stack local; for a real recorder this should outlive the method (e.g. an ivar)
recordState.dataFormat.mSampleRate = 12000.0;
recordState.dataFormat.mFormatID = kAudioFormatLinearPCM;
recordState.dataFormat.mFramesPerPacket = 1;
recordState.dataFormat.mChannelsPerFrame = 1;
recordState.dataFormat.mBytesPerFrame = 2;
recordState.dataFormat.mBytesPerPacket = 2;
recordState.dataFormat.mBitsPerChannel = 16;
recordState.dataFormat.mReserved = 0;
recordState.dataFormat.mFormatFlags = kLinearPCMFormatFlagIsBigEndian |
kLinearPCMFormatFlagIsSignedInteger |
kLinearPCMFormatFlagIsPacked;
recordState.currentPacket = 0;
OSStatus status;
status = AudioQueueNewInput(&recordState.dataFormat,
AudioInputCallback,
&recordState,
CFRunLoopGetCurrent(),
kCFRunLoopCommonModes,
0,
&recordState.queue);
if(status == 0)
{
for(int i = 0; i < NUM_BUFFERS; i++)
{
AudioQueueAllocateBuffer(recordState.queue,
16000, &recordState.buffers[i]);
AudioQueueEnqueueBuffer(recordState.queue,
recordState.buffers[i], 0, NULL);
}
// fileURL (the destination URL for the recording) is assumed to be defined elsewhere.
status = AudioFileCreateWithURL(fileURL,
kAudioFileAIFFType,
&recordState.dataFormat,
kAudioFileFlags_EraseFile,
&recordState.audioFile);
if(status == 0)
{
recordState.recording = true;
status = AudioQueueStart(recordState.queue, NULL);
if(status == 0)
{
}
}
}
}
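A possible explanation (not from the original post): creating and starting the input queue reconfigures the audio session for recording while the system sound is still being rendered, which can cut the beep off. A minimal sketch of one workaround, assuming iOS 9+ for AudioServicesPlaySystemSoundWithCompletion; beginRecordingQueue is a hypothetical method wrapping the AudioQueueNewInput code above:
// Use a category that allows playback and recording at the same time.
NSError *error = nil;
[[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
[[AVAudioSession sharedInstance] setActive:YES error:&error];
CFURLRef fileRef = CFBundleCopyResourceURL(CFBundleGetMainBundle(), CFSTR("startBeepSound"), CFSTR("wav"), NULL);
AudioServicesCreateSystemSoundID(fileRef, &startRecordingSound);
CFRelease(fileRef); // the URL is no longer needed once the sound ID exists
AudioServicesPlaySystemSoundWithCompletion(startRecordingSound, ^{
// The beep has finished playing; starting the input queue can no longer cut it off.
[self beginRecordingQueue]; // hypothetical wrapper around the AudioQueueNewInput code above
});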
Related
I'm trying to simply playback an audio file in my bundle with Audio Queue.
I've got this code from a tutorial where audio was first recorded and then played back, which worked fine.
After modifying the sound file path to point at my sound file, there is only white noise on playback. I've tried different formats and played around with my audio format settings, but I'm obviously missing something.
Also, I've been learning iOS with Swift, and I failed to translate this code to Swift, so I bridged it in my Swift app.
There don't seem to be many examples online, and the Apple example project links are dead.
Any advice much appreciated!
#import "ViewController.h"
typedef NS_ENUM(NSUInteger, AudioQueueState) {
AudioQueueState_Idle,
AudioQueueState_Recording,
AudioQueueState_Playing,
};
@import AVFoundation;
@interface ViewController ()
@property AudioQueueState currentState;
@property (strong, nonatomic) NSURL *audioFileURL;
@end
#define NUM_BUFFERS 10
@implementation ViewController
// File-scope state, declared before the callback that uses it.
static SInt64 currentByte;
static AudioStreamBasicDescription audioFormat;
static AudioQueueRef queue;
static AudioQueueBufferRef buffers[NUM_BUFFERS];
static AudioFileID audioFileID;
void AudioOutputCallback(void *inUserData,
AudioQueueRef outAQ, // a reference to the audio queue
AudioQueueBufferRef outBuffer) { // the buffer to fill
ViewController *viewController = (__bridge ViewController*)inUserData;
if (viewController.currentState != AudioQueueState_Playing) {
return;
}
// Read the data out of the audio file in order to fill the buffers with it.
UInt32 numBytes = 16000;
OSStatus status = AudioFileReadBytes(audioFileID, false, currentByte, &numBytes, outBuffer->mAudioData);
if (status != noErr && status != kAudioFileEndOfFileError) {
printf("Error\n");
return;
}
// If data has been read successfully tell the audio queue that the buffer is ready to play.
if (numBytes > 0) {
outBuffer->mAudioDataByteSize = numBytes;
OSStatus statusOfEnqueue = AudioQueueEnqueueBuffer(queue, outBuffer, 0, NULL);
if (statusOfEnqueue != noErr) {
printf("Error\n");
return;
}
currentByte += numBytes;
}
// Check if it's at the end of the file.
if (numBytes == 0 || status == kAudioFileEndOfFileError) {
AudioQueueStop(queue, false);
AudioFileClose(audioFileID);
viewController.currentState = AudioQueueState_Idle;
}
}
- (void)viewDidLoad {
[super viewDidLoad];
// Do any additional setup after loading the view, typically from a nib.
[self setupAudio];
}
- (void) setupAudio {
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerFrame = audioFormat.mChannelsPerFrame * sizeof(SInt16);
audioFormat.mBytesPerPacket = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame;
self.currentState = AudioQueueState_Idle;
}
- (IBAction)playButtonPressed:(id)sender {
// Set up the audio session.
NSError *error = nil;
[[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayback error:&error];
[[AVAudioSession sharedInstance] setActive:YES error:&error];
[self startPlayback];
}
- (void) startPlayback {
NSString *resourcePath = [[NSBundle mainBundle] pathForResource:@"MyAudioFileName" ofType:@"wav"];
NSLog(@"path: %@", resourcePath);
self.audioFileURL = [NSURL fileURLWithPath:resourcePath];
currentByte = 0;
OSStatus status = AudioFileOpenURL((__bridge CFURLRef) (self.audioFileURL), kAudioFileReadPermission, kAudioFileWAVEType, &audioFileID);
status = AudioQueueNewOutput(&audioFormat, AudioOutputCallback, (__bridge void*)self, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &queue);
self.currentState = AudioQueueState_Playing;
for (int i = 0; i < NUM_BUFFERS && self.currentState == AudioQueueState_Playing; i++) {
status = AudioQueueAllocateBuffer(queue, 16000, &buffers[i]);
AudioOutputCallback((__bridge void*)self, queue, buffers[i]);
}
status = AudioQueueStart(queue, NULL);
}
- (void) stopPlayback {
self.currentState = AudioQueueState_Idle;
for (int i = 0; i < NUM_BUFFERS; i++) {
AudioQueueFreeBuffer(queue, buffers[i]);
}
AudioQueueDispose(queue, true);
AudioFileClose(audioFileID);
}
@end
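One way to take the guesswork out of the hand-built playback format above (a sketch, not a verified fix for the white noise): after opening the file, read its actual data format with kAudioFilePropertyDataFormat and hand that to AudioQueueNewOutput instead of a hard-coded AudioStreamBasicDescription:
// Sketch: let the file describe its own format so the queue can't mismatch it.
AudioStreamBasicDescription fileFormat;
UInt32 propSize = sizeof(fileFormat);
OSStatus status = AudioFileOpenURL((__bridge CFURLRef)self.audioFileURL, kAudioFileReadPermission, 0, &audioFileID);
if (status == noErr)
status = AudioFileGetProperty(audioFileID, kAudioFilePropertyDataFormat, &propSize, &fileFormat);
if (status == noErr)
status = AudioQueueNewOutput(&fileFormat, AudioOutputCallback, (__bridge void *)self, CFRunLoopGetCurrent(), kCFRunLoopCommonModes, 0, &queue);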
I'm creating an audio file from the AVAudioEngine output via AudioUnitRender. On iPhone this works fine, but on iPad I get a silent audio file with the right duration. Why can this happen?
Main method
NSTimeInterval duration = CMTimeGetSeconds(asset.duration);
NSUInteger lengthInFrames = (NSUInteger) (duration * audioDescription->mSampleRate);
const NSUInteger kBufferLength = 1024; //3756;
AudioBufferList *bufferList = AEAllocateAndInitAudioBufferList(*audioDescription, kBufferLength);
AudioTimeStamp timeStamp;
memset (&timeStamp, 0, sizeof(timeStamp));
timeStamp.mFlags = kAudioTimeStampSampleTimeValid;
OSStatus status = noErr;
for (NSUInteger i = kBufferLength; i < lengthInFrames; i += kBufferLength) {
status = [self renderToBufferList:bufferList writeToFile:audioFile bufferLength:kBufferLength timeStamp:&timeStamp];
if (status != noErr)
break;
}
if (status == noErr && timeStamp.mSampleTime < lengthInFrames) {
NSUInteger restBufferLength = (NSUInteger) (lengthInFrames - timeStamp.mSampleTime);
AudioBufferList *restBufferList = AEAllocateAndInitAudioBufferList(*audioDescription, (int)restBufferLength);
status = [self renderToBufferList:restBufferList writeToFile:audioFile bufferLength:restBufferLength timeStamp:&timeStamp];
AEFreeAudioBufferList(restBufferList);
}
SInt64 fileLengthInFrames;
UInt32 size = sizeof(SInt64);
ExtAudioFileGetProperty(audioFile, kExtAudioFileProperty_FileLengthFrames, &size, &fileLengthInFrames);
AEFreeAudioBufferList(bufferList);
ExtAudioFileDispose(audioFile);
if (status != noErr)
[self showAlertWithTitle:@"Error" message:@"See logs for details"];
else {
NSLog(@"Finished writing to file at path: %@ \n File size must be %f Mb", path, (tmpData.length/1024.0)/1024.0);
[self showAlertWithTitle:@"Success!" message:@"Now you can play a result file"];
}
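As an aside (not part of the original post): on iOS 11 and later, AVAudioEngine has a manual rendering mode designed for exactly this kind of offline export, which avoids calling AudioUnitRender on the output unit by hand. A minimal sketch, assuming engine is the AVAudioEngine from the post and lengthInFrames is computed as above:
// Sketch: AVAudioEngine manual (offline) rendering, iOS 11+.
NSError *error = nil;
[engine stop];
AVAudioFormat *format = [[AVAudioFormat alloc] initStandardFormatWithSampleRate:44100.0 channels:2];
[engine enableManualRenderingMode:AVAudioEngineManualRenderingModeOffline format:format maximumFrameCount:4096 error:&error];
[engine startAndReturnError:&error];
AVAudioPCMBuffer *buffer = [[AVAudioPCMBuffer alloc] initWithPCMFormat:engine.manualRenderingFormat frameCapacity:engine.manualRenderingMaximumFrameCount];
while (engine.manualRenderingSampleTime < lengthInFrames) {
AVAudioEngineManualRenderingStatus result = [engine renderOffline:buffer.frameCapacity toBuffer:buffer error:&error];
if (result != AVAudioEngineManualRenderingStatusSuccess) break;
// Write `buffer` to disk here, e.g. with AVAudioFile's -writeFromBuffer:error:.
}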
Allocating the buffer
AudioBufferList *AEAllocateAndInitAudioBufferList(AudioStreamBasicDescription audioFormat, int frameCount) {
int numberOfBuffers = audioFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved ? audioFormat.mChannelsPerFrame : 1;
int channelsPerBuffer = audioFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved ? 1 : audioFormat.mChannelsPerFrame;
int bytesPerBuffer = audioFormat.mBytesPerFrame * frameCount;
AudioBufferList *audio = malloc(sizeof(AudioBufferList) + (numberOfBuffers - 1) * sizeof(AudioBuffer));
if (!audio) {
return NULL;
}
audio->mNumberBuffers = numberOfBuffers;
for (int i = 0; i < numberOfBuffers; i++) {
if (bytesPerBuffer > 0) {
audio->mBuffers[i].mData = calloc(bytesPerBuffer, 1);
if (!audio->mBuffers[i].mData) {
for (int j = 0; j < i; j++) free(audio->mBuffers[j].mData);
free(audio);
return NULL;
}
} else {
audio->mBuffers[i].mData = NULL;
}
audio->mBuffers[i].mDataByteSize = bytesPerBuffer;
audio->mBuffers[i].mNumberChannels = channelsPerBuffer;
}
return audio;
}
Rendering method
- (OSStatus)renderToBufferList:(AudioBufferList *)bufferList
writeToFile:(ExtAudioFileRef)audioFile
bufferLength:(NSUInteger)bufferLength
timeStamp:(AudioTimeStamp *)timeStamp {
[self clearBufferList:bufferList];
AudioUnit outputUnit = self.engine.outputNode.audioUnit;
OSStatus status = AudioUnitRender(outputUnit, 0, timeStamp, 0, (UInt32)bufferLength, bufferList);
[tmpData appendBytes:bufferList->mBuffers[0].mData length:bufferLength];
float *data1 = bufferList->mBuffers[0].mData;
float *data2 = bufferList->mBuffers[1].mData;
for(int i = 0; i < bufferLength/4; i++)
{
// On iPad data1[i] == 0 and data2[i] == 0
if(data1[i] != 0 || data2[i] != 0)
NSLog(@"%f - %f", data1[i], data2[i]);
}
if (status != noErr) {
NSLog(@"Cannot render audio unit");
return status;
}
timeStamp->mSampleTime += bufferLength;
status = ExtAudioFileWrite(audioFile, (UInt32)bufferLength, bufferList);
if (status != noErr)
NSLog(@"Cannot write audio to file");
return status;
}
The problem occurs in the rendering method.
I am using this real-time pitch detection program:
https://github.com/fotock/PitchDetectorExample/tree/1c68491f9c9bff2e851f5711c47e1efe4092f4de
For my purposes it works very well: it has a frequency label, and when you sing a pitch the label registers a frequency; when you sing a slightly higher pitch, the reading increases.
The problem is that when I am NOT singing into the microphone, the frequency label still registers a frequency, usually around 70 Hz, but sometimes it jumps up to 200 Hz even though nothing is being sung.
Is there a way to have the microphone only turn on when the volume/dB is loud enough? An event listener that would only trigger when the mic receives a preset amplitude. Basically, I need an audio gate: if the dB is low, just line noise, then the mic is off.
Here is the pitch detection code from the above app, unabridged. I tried, to no avail, to add vDSP code to it to read the amplitude of the incoming signal and turn the mic on and off; a rough sketch of that idea follows, before the listing.
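A minimal sketch of such a gate (my own addition, assuming the SInt16 buffers this app already delivers; the -60 dBFS threshold is an arbitrary starting point), using the Accelerate framework that PitchDetector.m already imports:
// Returns YES when the buffer's RMS level is above the gate threshold.
static BOOL bufferPassesGate(SInt16 *samples, int frames, float thresholdDb) {
float floatSamples[frames];
vDSP_vflt16(samples, 1, floatSamples, 1, frames); // convert SInt16 -> float
float rms = 0;
vDSP_rmsqv(floatSamples, 1, &rms, frames); // root-mean-square of the buffer
float db = 20.0f * log10f(rms / 32768.0f + 1e-12f); // dB full scale; epsilon avoids log(0)
return db > thresholdDb; // e.g. thresholdDb = -60.0f
}
The idea would be to call bufferPassesGate() at the top of addSamples:inNumberFrames: below and simply discard buffers that fail it, so the detector never sees line noise.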
PitchDetector.m
#import "PitchDetector.h"
#import <Accelerate/Accelerate.h>
#define PD_SYSTEM_VERSION_GREATER_THAN_OR_EQUAL_TO(v) ([[[UIDevice currentDevice] systemVersion] compare:v options:NSNumericSearch] != NSOrderedAscending)
@implementation PitchDetector
@synthesize lowBoundFrequency, hiBoundFrequency, sampleRate, delegate, running;
#pragma mark Initialize Methods
-(id) initWithSampleRate: (float) rate andDelegate: (id<PitchDetectorDelegate>) initDelegate {
return [self initWithSampleRate:rate lowBoundFreq:40 hiBoundFreq:4500 andDelegate:initDelegate];
}
-(id) initWithSampleRate: (float) rate lowBoundFreq: (int) low hiBoundFreq: (int) hi andDelegate: (id<PitchDetectorDelegate>) initDelegate {
self.lowBoundFrequency = low;
self.hiBoundFrequency = hi;
self.sampleRate = rate;
self.delegate = initDelegate;
bufferLength = self.sampleRate/self.lowBoundFrequency;
hann = (float*) malloc(sizeof(float)*bufferLength);
vDSP_hann_window(hann, bufferLength, vDSP_HANN_NORM);
sampleBuffer = (SInt16*) malloc(512);
samplesInSampleBuffer = 0;
result = (float*) malloc(sizeof(float)*bufferLength);
return self;
}
#pragma mark Insert Samples
- (void) addSamples:(SInt16 *)samples inNumberFrames:(int)frames {
int newLength = frames;
if(samplesInSampleBuffer>0) {
newLength += samplesInSampleBuffer;
}
SInt16 *newBuffer = (SInt16*) malloc(sizeof(SInt16)*newLength);
memcpy(newBuffer, sampleBuffer, samplesInSampleBuffer*sizeof(SInt16));
memcpy(&newBuffer[samplesInSampleBuffer], samples, frames*sizeof(SInt16));
free(sampleBuffer);
sampleBuffer = newBuffer;
samplesInSampleBuffer = newLength;
if(samplesInSampleBuffer>(self.sampleRate/self.lowBoundFrequency)) {
if(!self.running) {
[self performSelectorInBackground:@selector(performWithNumFrames:) withObject:[NSNumber numberWithInt:newLength]];
self.running = YES;
}
samplesInSampleBuffer = 0;
} else {
//printf("NOT ENOUGH SAMPLES: %d\n", newLength);
}
}
#pragma mark Perform Auto Correlation
-(void) performWithNumFrames: (NSNumber*) numFrames;
{
int n = numFrames.intValue;
float freq = 0;
SInt16 *samples;
if (PD_SYSTEM_VERSION_GREATER_THAN_OR_EQUAL_TO(@"7.1")) {
@synchronized(self) {
samples = malloc(sizeof(SInt16)*numFrames.intValue);
memcpy(samples, sampleBuffer, sizeof(SInt16)*numFrames.intValue); // copy the sample data, not the pointer
}
}
} else {
samples = sampleBuffer;
}
int returnIndex = 0;
float sum;
bool goingUp = false;
float normalize = 0;
for(int i = 0; i<n; i++) {
sum = 0;
for(int j = 0; j<n; j++) {
sum += (samples[j]*samples[j+i])*hann[j];
}
if(i ==0 ) normalize = sum;
result[i] = sum/normalize;
}
for(int i = 0; i<n-8; i++) {
if(result[i]<0) {
i+=2; // no peaks below 0, skip forward at a faster rate
} else {
if(result[i]>result[i-1] && goingUp == false && i >1) {
//local min at i-1
goingUp = true;
} else if(goingUp == true && result[i]<result[i-1]) {
//local max at i-1
if(returnIndex==0 && result[i-1]>result[0]*0.95) {
returnIndex = i-1;
break;
//############### NOTE ##################################
// My implemenation breaks out of this loop when it finds the first peak.
// This is (probably) the greatest source of error, so if you would like to
// improve this algorithm, start here. the next else if() will trigger on
// future local maxima (if you first take out the break; above this paragraph)
//#######################################################
} else if(result[i-1]>result[0]*0.85) {
}
goingUp = false;
}
}
}
freq = self.sampleRate/interp(result[returnIndex-1], result[returnIndex], result[returnIndex+1], returnIndex);
if(freq >= self.lowBoundFrequency && freq <= self.hiBoundFrequency) {
dispatch_async(dispatch_get_main_queue(), ^{
[delegate updatedPitch:freq];
});
}
self.running = NO;
}
float interp(float y1, float y2, float y3, int k);
float interp(float y1, float y2, float y3, int k) {
float d, kp;
d = (y3 - y1) / (2 * (2 * y2 - y1 - y3));
//printf("%f = %d + %f\n", k+d, k, d);
kp = k + d;
return kp;
}
@end
Here is the AudioController.m class that initializes the microphone, unabridged. Looking at other examples of turning the mic on and off according to dB level, I tried many frameworks, including Audio Queue Services and the Accelerate framework, to no avail.
AudioController.m
#import "AudioController.h"
#define kOutputBus 0
#define kInputBus 1
@implementation AudioController
@synthesize rioUnit, audioFormat, delegate;
+ (AudioController *) sharedAudioManager
{
static AudioController *sharedAudioManager;
@synchronized(self)
{
if (!sharedAudioManager) {
sharedAudioManager = [[AudioController alloc] init];
[sharedAudioManager startAudio];
}
return sharedAudioManager;
}
}
void checkStatus(OSStatus status);
void checkStatus(OSStatus status) {
if(status!=0)
printf("Error: %ld\n", status);
}
#pragma mark init
- (id)init
{
OSStatus status;
status = AudioSessionInitialize(NULL, NULL, NULL, (__bridge void*) self);
checkStatus(status);
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &rioUnit);
checkStatus(status);
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(rioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
checkStatus(status);
// Describe format
audioFormat.mSampleRate= 44100.0;
audioFormat.mFormatID= kAudioFormatLinearPCM;
audioFormat.mFormatFlags= kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket= 1;
audioFormat.mChannelsPerFrame= 1;
audioFormat.mBitsPerChannel= 16;
audioFormat.mBytesPerPacket= 2;
audioFormat.mBytesPerFrame= 2;
// Apply format
status = AudioUnitSetProperty(rioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
status = AudioUnitSetProperty(rioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = (__bridge void*)self;
status = AudioUnitSetProperty(rioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status);
// Disable buffer allocation for the recorder
flag = 0;
status = AudioUnitSetProperty(rioUnit, kAudioUnitProperty_ShouldAllocateBuffer, kAudioUnitScope_Global, kInputBus, &flag, sizeof(flag));
// Initialise
UInt32 category = kAudioSessionCategory_PlayAndRecord;
status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
checkStatus(status);
status = 0;
status = AudioSessionSetActive(YES);
checkStatus(status);
status = AudioUnitInitialize(rioUnit);
checkStatus(status);
return self;
}
#pragma mark Recording Callback
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
AudioController *THIS = (__bridge AudioController*) inRefCon;
THIS->bufferList.mNumberBuffers = 1;
THIS->bufferList.mBuffers[0].mDataByteSize = sizeof(SInt16)*inNumberFrames;
THIS->bufferList.mBuffers[0].mNumberChannels = 1;
THIS->bufferList.mBuffers[0].mData = (SInt16*) malloc(sizeof(SInt16)*inNumberFrames); // note: allocated on every callback and never freed as written
OSStatus status;
status = AudioUnitRender(THIS->rioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&(THIS->bufferList));
checkStatus(status);
dispatch_async(dispatch_get_main_queue(), ^{
[THIS.delegate receivedAudioSamples:(SInt16*)THIS->bufferList.mBuffers[0].mData length:inNumberFrames];
});
return noErr;
}
-(void) startAudio
{
OSStatus status = AudioOutputUnitStart(rioUnit);
checkStatus(status);
printf("Audio Initialized - sampleRate: %f\n", audioFormat.mSampleRate);
}
@end
I have referred to this example to play a PCM file using Audio Queues.
The code is as follows:
#import "PlayPCM.h"
AudioFileID audioFile;
SInt64 inStartingPacket = 0;
AudioQueueRef audioQueue;
@implementation PlayPCM
void AudioOutputCallback(
void* inUserData,
AudioQueueRef outAQ,
AudioQueueBufferRef outBuffer)
{
AudioStreamPacketDescription* packetDescs = NULL; // linear PCM is constant bitrate, so no packet descriptions are needed
UInt32 bytesRead;
UInt32 numPackets = 8000;
OSStatus status;
status = AudioFileReadPackets(audioFile,
false,
&bytesRead,
packetDescs,
inStartingPacket,
&numPackets,
outBuffer->mAudioData);
if(numPackets)
{
outBuffer->mAudioDataByteSize = bytesRead;
status = AudioQueueEnqueueBuffer(audioQueue,
outBuffer,
0,
packetDescs);
inStartingPacket += numPackets;
}
else
{
NSLog(#"number of packets = null ") ;
AudioQueueFreeBuffer(audioQueue, outBuffer);
}
}
-(id)init{
if (self = [super init]) {
}
return self;
}
- (void)setupAudioFormat
{
NSLog(#"setting format");
format.mFormatID = kAudioFormatLinearPCM;
format.mSampleRate = 44100;
format.mFramesPerPacket = 1;
format.mChannelsPerFrame = 1;
format.mBytesPerFrame = 2;
format.mBytesPerPacket = 2;
format.mBitsPerChannel = 16;
format.mFormatFlags = kLinearPCMFormatFlagIsBigEndian |
kLinearPCMFormatFlagIsSignedInteger |
kLinearPCMFormatFlagIsPacked;
}
- (void)startPlayback
{
int counter = 0;
[self setupAudioFormat];
OSStatus status;
NSString *documentsDirectory = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES) objectAtIndex:0];
NSString *filePath = [documentsDirectory stringByAppendingPathComponent:@"test1.wav"];
NSLog(@"file path = %@", filePath);
//fUrl = [NSURL URLWithPath:@"file:///Users/Inscripts/Desktop/test1.wav"];
fUrl = [NSURL fileURLWithPath:filePath];
//CFURLRef fileURL = (__bridge CFURLRef)(fUrl);
CFURLRef fileURL = CFURLCreateWithString(NULL, (CFStringRef) filePath, NULL);
status = AudioFileOpenURL(fileURL, kAudioFileReadPermission, 0, &audioFile);
NSLog(@"file opening status = %d", (int)status);
if(status == 0)
{
NSLog(@"file opened");
status = AudioQueueNewOutput(&(format),
AudioOutputCallback,
(__bridge void *)(self),
CFRunLoopGetCurrent(),
kCFRunLoopCommonModes,
0,
&audioQueue);
NSLog(#"audio queue create status = %d",(int)status);
if(status == 0)
{
AudioQueueAllocateBuffer(audioQueue, 1600000, &audioQueueBuffer);
AudioOutputCallback((__bridge void *)(self), audioQueue, audioQueueBuffer);
[self performSelector:@selector(startQueue) withObject:self afterDelay:50];
}
}
if(status != 0)
{
NSLog(#"failed");
// labelStatus.text = #"Play failed";
}
}
-(void)startQueue{
NSLog(#"start queue called");
OSStatus status = AudioQueueStart(audioQueue, NULL);
if(status == 0)
{
NSLog(#"ok");
// labelStatus.text = #"Playing";
}
}
The test1.wav file is PCM-encoded, 16 bits per sample, sampling rate 44100 Hz, stereo.
I can successfully create the audio queue and read the file, but all I can hear is crackling noise.
Can someone tell me what the issue is?
Is the sound really big-endian data? I doubt it with WAVE files.
Check your format flags and change them to use little-endian data, i.e. drop kLinearPCMFormatFlagIsBigEndian.
Also consider using AudioFileOpenURL or related APIs to read the actual WAVE format from the file, so you don't have to rely on your hand-built audio stream description.
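A sketch of what that might look like in setupAudioFormat, assuming the file really is the usual 16-bit little-endian PCM WAV:
// Little-endian 16-bit PCM: simply omit kLinearPCMFormatFlagIsBigEndian.
format.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
// Or, more robustly, ask the opened file for its actual format:
UInt32 size = sizeof(format);
AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat, &size, &format);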
After preparing more audio queue buffers, there is no more crackling noise.
Please refer to Apple's documentation.
...
/* AudioQueueAllocateBuffer(audioQueue, 1600000, &audioQueueBuffer);
AudioOutputCallback((__bridge void *)(self), audioQueue, audioQueueBuffer);*/
/* add more audio queue buffers, ex:3 */
int kNumberOfBuffers = 3;
AudioQueueBufferRef audioQueueBuffer[kNumberOfBuffers];
for (int i = 0; i<kNumberOfBuffers; i++) {
AudioQueueAllocateBuffer(audioQueue, 1600000, &audioQueueBuffer[i]);
AudioOutputCallback((__bridge void *)(self), audioQueue, audioQueueBuffer[i]);
}
[self performSelector:@selector(startQueue) withObject:self afterDelay:50];
...
I am new to iOS development. I am encoding linear PCM to MP3 on iOS, trying to encode the raw PCM data from the microphone to MP3 using the AudioToolbox framework and LAME. Everything seems to run fine if I record to .caf format, but I get only noise and distortion in the encoded stream. I'm not sure that I set up the AudioQueue correctly, or that I process the encoded buffer the right way. My code to set up audio recording:
Sample project: https://github.com/vecter/Audio-Queue-Services-Example
- (void)setupAudioFormat:(AudioStreamBasicDescription*)format
{
format->mSampleRate = 16000;
format->mFormatID = kAudioFormatLinearPCM;
format->mFramesPerPacket = 1;
format->mChannelsPerFrame = 1;
format->mBytesPerFrame = 2;
format->mBytesPerPacket = 2;
format->mBitsPerChannel = 16;
format->mReserved = 0;
format->mFormatFlags = kLinearPCMFormatFlagIsBigEndian |
kLinearPCMFormatFlagIsSignedInteger |
kLinearPCMFormatFlagIsPacked;
}
- (void)recordPressed:(id)sender
{
if (!playState.playing)
{
if (!recordState.recording)
{
printf("Starting recording\n");
self.mergedData =[[NSMutableData alloc] init];
[self startRecording];
}
else
{
printf("Stopping recording\n");
[self stopRecording];
}
}
else
{
printf("Can't start recording, currently playing\n");
}
}
- (void)startRecording
{
[self setupAudioFormat:&recordState.dataFormat];
recordState.currentPacket = 0;
recordState.pThis=self;
OSStatus status;
status = AudioQueueNewInput(&recordState.dataFormat,
AudioInputCallback,
&recordState,
CFRunLoopGetCurrent(),
kCFRunLoopCommonModes,
0,
&recordState.queue);
if (status == 0)
{
// Prime recording buffers with empty data
for (int i = 0; i < NUM_BUFFERS; i++)
{
AudioQueueAllocateBuffer(recordState.queue, 16000, &recordState.buffers[i]);
AudioQueueEnqueueBuffer (recordState.queue, recordState.buffers[i], 0, NULL);
}
status = AudioFileCreateWithURL(fileURL,
kAudioFileAIFFType,
&recordState.dataFormat,
kAudioFileFlags_EraseFile,
&recordState.audioFile);
gfp = lame_init();
lame_set_num_channels(gfp, 1);
lame_set_in_samplerate(gfp, recordState.dataFormat.mSampleRate);
lame_set_VBR(gfp, vbr_default);
lame_init_params(gfp);
if (status == 0)
{
recordState.recording = true;
status = AudioQueueStart(recordState.queue, NULL);
if (status == 0)
{
mergeData = [[NSMutableData alloc] init];
labelStatus.text = @"Recording";
}
}
}
if (status != 0)
{
[self stopRecording];
labelStatus.text = #"Record Failed";
}
}
- (void)stopRecording
{
recordState.recording = false;
AudioQueueStop(recordState.queue, true);
for(int i = 0; i < NUM_BUFFERS; i++)
{
AudioQueueFreeBuffer(recordState.queue, recordState.buffers[i]);
}
AudioQueueDispose(recordState.queue, true);
AudioFileClose(recordState.audioFile);
labelStatus.text = #"Idle";
}
Then the AudioQueue callback function calls to lame_encode_buffer and then writes the encoded buffer to file:
void AudioInputCallback(void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumberPacketDescriptions,
const AudioStreamPacketDescription * inPacketDescs)
{
RecordState * recordState = (RecordState*)inUserData;
if (!recordState->recording)
{
printf("Not recording, returning\n");
return; // without this, the callback keeps writing after recording stops
}
printf("Writing buffer %lld\n", recordState->currentPacket);
OSStatus status = AudioFileWritePackets(recordState->audioFile,
false,
inBuffer->mAudioDataByteSize,
inPacketDescs,
recordState->currentPacket,
&inNumberPacketDescriptions,
inBuffer->mAudioData);
if (status == 0)
{
recordState->currentPacket += inNumberPacketDescriptions;
}
AudioRecorderAppDelegate *this = recordState->pThis;
const int MP3_BUFFER_SIZE=inBuffer->mAudioDataByteSize*4;
unsigned char mEncodedBuffer[MP3_BUFFER_SIZE];
int encodedBytes=lame_encode_buffer_interleaved(this->gfp, (short int *)inBuffer->mAudioData , inNumberPacketDescriptions, mEncodedBuffer, MP3_BUFFER_SIZE);
NSData* data = [NSData dataWithBytes:mEncodedBuffer length:encodedBytes];
[this writeData:data];
lame_encode_flush(this->gfp, mEncodedBuffer, MP3_BUFFER_SIZE);
memset(&mEncodedBuffer, 0, sizeof(mEncodedBuffer));
AudioQueueEnqueueBuffer(recordState->queue, inBuffer, 0, NULL);
}
Appending data
- (void) writeData:(NSData *)data
{
[mergeData appendData:data];
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,
NSUserDomainMask, YES);
NSString* docDir = [paths objectAtIndex:0];
NSString* file = [docDir stringByAppendingString:@"/lame.mp3"];
[mergeData writeToFile:file atomically:YES];
NSLog(#"%#",file);
}
Can anybody advise what's wrong here, or post a complete working sample project?
Try this
void AQRecorder::MyInputBufferHandler( void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
AQRecorder *aqr = (AQRecorder *)inUserData;
// NSLog(#"%f",inStartTime->mSampleTime);
try
{
if (inNumPackets > 0)
{
AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize, inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData);
aqr->mRecordPacket += inNumPackets;
int MP3_SIZE =inBuffer->mAudioDataByteSize * 4;
unsigned char mp3_buffer[MP3_SIZE];
AppDelegate *delegate =[[UIApplication sharedApplication]delegate];
lame_t lame = lame_init();
lame_set_in_samplerate(lame, 44100);
lame_set_VBR(lame, vbr_default);
lame_init_params(lame);
// int encodedBytes=lame_encode_buffer_interleaved(lame, (short int *)inBuffer->mAudioData , inNumPackets, mp3_buffer, MP3_SIZE);
int encodedBytes = lame_encode_buffer(lame, (short*)inBuffer->mAudioData, (short*)inBuffer->mAudioData, inNumPackets, mp3_buffer, MP3_SIZE);
[delegate.mp3AudioData appendBytes:mp3_buffer length:encodedBytes];
if (inBuffer->mAudioDataByteSize != 0) {
}
else
{
int encode=lame_encode_flush(lame, mp3_buffer, MP3_SIZE);
[delegate.mp3AudioData appendBytes:mp3_buffer length:encode];
}
lame_close(lame);
}
if (aqr->IsRunning())
{
AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
}
} catch (CAXException e)
{
char buf[256];
fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
In my case this logic worked:
int encodedBytes=lame_encode_buffer_interleaved(lame, (short int *)inBuffer->mAudioData , inNumPackets, mp3_buffer, MP3_SIZE);
NSMutableData *data1 = [[NSMutableData alloc] initWithBytes:mp3_buffer length:encodedBytes];
[this writeData:data1];
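One caveat worth noting about both LAME snippets above (my observation, not part of the original answers): lame_init()/lame_init_params() allocate and configure a whole encoder, so creating and closing one inside the audio callback for every buffer is expensive and can glitch the stream, and lame_encode_flush() is meant to be called once at the end of the recording, not per buffer. The usual structure is roughly:
// Sketch: one encoder for the whole recording session.
static lame_t gLame;
void setupLame(int sampleRate) {
gLame = lame_init();
lame_set_in_samplerate(gLame, sampleRate);
lame_set_num_channels(gLame, 1);
lame_set_VBR(gLame, vbr_default);
lame_init_params(gLame);
}
// Per input-queue callback: encode just this buffer.
// int n = lame_encode_buffer(gLame, pcm, pcm, frames, mp3buf, mp3bufSize);
void finishLame(unsigned char *mp3buf, int mp3bufSize) {
int n = lame_encode_flush(gLame, mp3buf, mp3bufSize); // drain the encoder once, at stop time
// append the final n bytes to the MP3 data, then tear down:
lame_close(gLame);
}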