I am using this real time pitch detection program:
https://github.com/fotock/PitchDetectorExample/tree/1c68491f9c9bff2e851f5711c47e1efe4092f4de
For my purposes it works very well: it has a frequency label, and when you sing a pitch the label registers a frequency; sing a slightly higher pitch and the displayed frequency increases.
The problem is that when I am NOT singing into the microphone, the frequency label still registers a frequency, usually around 70 Hz, and it sometimes jumps up to 200 Hz even though nothing is being sung into the mic.
Is there a way to have the microphone only "turn on" when the volume / dB level is loud enough - an event listener that triggers only when the mic receives a preset amplitude? Basically, I need an audio gate: if the dB level is low (just line noise), the mic stays off.
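Conceptually, the kind of gate I have in mind looks something like the sketch below. This is only a rough sketch of the idea, not code from the project; the -50 dBFS threshold and the idea of calling it at the top of addSamples:inNumberFrames: are my own assumptions and would need tuning.
#include <math.h>
// Rough sketch of a noise gate for the 16-bit mono samples PitchDetector receives.
static BOOL samplesAboveGate(SInt16 *samples, int frames, float thresholdDb) {
    if (frames <= 0) return NO;
    double sumOfSquares = 0.0;
    for (int i = 0; i < frames; i++) {
        double s = samples[i] / 32768.0;         // normalize to -1.0 ... 1.0
        sumOfSquares += s * s;
    }
    double rms = sqrt(sumOfSquares / frames);    // RMS level of the buffer
    double dbfs = 20.0 * log10(rms + 1e-12);     // tiny offset avoids log10(0)
    return dbfs > thresholdDb;                   // e.g. thresholdDb = -50.0f
}
// Then, at the top of -addSamples:inNumberFrames:, something like:
// if (!samplesAboveGate(samples, frames, -50.0f)) { return; } // treat as silence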
Here is the pitch detection code from the above app, unabridged. I tried, to no avail, to add vDSP code that reads the amplitude of the incoming signal and turns the mic on and off.
PitchDetector.m
#import "PitchDetector.h"
#import <Accelerate/Accelerate.h>
#define PD_SYSTEM_VERSION_GREATER_THAN_OR_EQUAL_TO(v) ([[[UIDevice currentDevice] systemVersion] compare:v options:NSNumericSearch] != NSOrderedAscending)
@implementation PitchDetector
@synthesize lowBoundFrequency, hiBoundFrequency, sampleRate, delegate, running;
#pragma mark Initialize Methods
-(id) initWithSampleRate: (float) rate andDelegate: (id<PitchDetectorDelegate>) initDelegate {
return [self initWithSampleRate:rate lowBoundFreq:40 hiBoundFreq:4500 andDelegate:initDelegate];
}
-(id) initWithSampleRate: (float) rate lowBoundFreq: (int) low hiBoundFreq: (int) hi andDelegate: (id<PitchDetectorDelegate>) initDelegate {
self.lowBoundFrequency = low;
self.hiBoundFrequency = hi;
self.sampleRate = rate;
self.delegate = initDelegate;
bufferLength = self.sampleRate/self.lowBoundFrequency;
hann = (float*) malloc(sizeof(float)*bufferLength);
vDSP_hann_window(hann, bufferLength, vDSP_HANN_NORM);
sampleBuffer = (SInt16*) malloc(512);
samplesInSampleBuffer = 0;
result = (float*) malloc(sizeof(float)*bufferLength);
return self;
}
#pragma mark Insert Samples
- (void) addSamples:(SInt16 *)samples inNumberFrames:(int)frames {
int newLength = frames;
if(samplesInSampleBuffer>0) {
newLength += samplesInSampleBuffer;
}
SInt16 *newBuffer = (SInt16*) malloc(sizeof(SInt16)*newLength);
memcpy(newBuffer, sampleBuffer, samplesInSampleBuffer*sizeof(SInt16));
memcpy(&newBuffer[samplesInSampleBuffer], samples, frames*sizeof(SInt16));
free(sampleBuffer);
sampleBuffer = newBuffer;
samplesInSampleBuffer = newLength;
if(samplesInSampleBuffer>(self.sampleRate/self.lowBoundFrequency)) {
if(!self.running) {
[self performSelectorInBackground:@selector(performWithNumFrames:) withObject:[NSNumber numberWithInt:newLength]];
self.running = YES;
}
samplesInSampleBuffer = 0;
} else {
//printf("NOT ENOUGH SAMPLES: %d\n", newLength);
}
}
#pragma mark Perform Auto Correlation
-(void) performWithNumFrames: (NSNumber*) numFrames;
{
int n = numFrames.intValue;
float freq = 0;
SInt16 *samples;
if (PD_SYSTEM_VERSION_GREATER_THAN_OR_EQUAL_TO(@"7.1")) {
@synchronized(self) {
samples = malloc(sizeof(SInt16)*numFrames.intValue);
memcpy(&samples, &sampleBuffer, sizeof(samples));
}
} else {
samples = sampleBuffer;
}
int returnIndex = 0;
float sum;
bool goingUp = false;
float normalize = 0;
for(int i = 0; i<n; i++) {
sum = 0;
for(int j = 0; j<n; j++) {
sum += (samples[j]*samples[j+i])*hann[j];
}
if(i ==0 ) normalize = sum;
result[i] = sum/normalize;
}
for(int i = 0; i<n-8; i++) {
if(result[i]<0) {
i+=2; // no peaks below 0, skip forward at a faster rate
} else {
if(result[i]>result[i-1] && goingUp == false && i >1) {
//local min at i-1
goingUp = true;
} else if(goingUp == true && result[i]<result[i-1]) {
//local max at i-1
if(returnIndex==0 && result[i-1]>result[0]*0.95) {
returnIndex = i-1;
break;
//############### NOTE ##################################
// My implementation breaks out of this loop when it finds the first peak.
// This is (probably) the greatest source of error, so if you would like to
// improve this algorithm, start here. the next else if() will trigger on
// future local maxima (if you first take out the break; above this paragraph)
//#######################################################
} else if(result[i-1]>result[0]*0.85) {
}
goingUp = false;
}
}
}
freq =self.sampleRate/interp(result[returnIndex-1], result[returnIndex], result[returnIndex+1], returnIndex);
if(freq >= self.lowBoundFrequency && freq <= self.hiBoundFrequency) {
dispatch_async(dispatch_get_main_queue(), ^{
[delegate updatedPitch:freq];
});
}
self.running = NO;
}
float interp(float y1, float y2, float y3, int k);
float interp(float y1, float y2, float y3, int k) {
float d, kp;
d = (y3 - y1) / (2 * (2 * y2 - y1 - y3));
//printf("%f = %d + %f\n", k+d, k, d);
kp = k + d;
return kp;
}
@end
Here is the AudioController.m class that initializes the microphone - unabridged. Looking at other examples of turning the mic on and off according to dB, I tried several approaches, including Audio Queue Services and the Accelerate framework, to no avail; the Accelerate-based check I was attempting is sketched after this listing.
AudioController.m
#import "AudioController.h"
#define kOutputBus 0
#define kInputBus 1
@implementation AudioController
@synthesize rioUnit, audioFormat, delegate;
+ (AudioController *) sharedAudioManager
{
static AudioController *sharedAudioManager;
@synchronized(self)
{
if (!sharedAudioManager) {
sharedAudioManager = [[AudioController alloc] init];
[sharedAudioManager startAudio];
}
return sharedAudioManager;
}
}
void checkStatus(OSStatus status);
void checkStatus(OSStatus status) {
if(status!=0)
printf("Error: %ld\n", status);
}
#pragma mark init
- (id)init
{
OSStatus status;
status = AudioSessionInitialize(NULL, NULL, NULL, (__bridge void*) self);
checkStatus(status);
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &rioUnit);
checkStatus(status);
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(rioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
checkStatus(status);
// Describe format
audioFormat.mSampleRate= 44100.0;
audioFormat.mFormatID= kAudioFormatLinearPCM;
audioFormat.mFormatFlags= kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket= 1;
audioFormat.mChannelsPerFrame= 1;
audioFormat.mBitsPerChannel= 16;
audioFormat.mBytesPerPacket= 2;
audioFormat.mBytesPerFrame= 2;
// Apply format
status = AudioUnitSetProperty(rioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
status = AudioUnitSetProperty(rioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
checkStatus(status);
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = (__bridge void*)self;
status = AudioUnitSetProperty(rioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
checkStatus(status);
// Disable buffer allocation for the recorder
flag = 0;
status = AudioUnitSetProperty(rioUnit, kAudioUnitProperty_ShouldAllocateBuffer, kAudioUnitScope_Global, kInputBus, &flag, sizeof(flag));
// Initialise
UInt32 category = kAudioSessionCategory_PlayAndRecord;
status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
checkStatus(status);
status = 0;
status = AudioSessionSetActive(YES);
checkStatus(status);
status = AudioUnitInitialize(rioUnit);
checkStatus(status);
return self;
}
#pragma mark Recording Callback
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
AudioController *THIS = (__bridge AudioController*) inRefCon;
THIS->bufferList.mNumberBuffers = 1;
THIS->bufferList.mBuffers[0].mDataByteSize = sizeof(SInt16)*inNumberFrames;
THIS->bufferList.mBuffers[0].mNumberChannels = 1;
THIS->bufferList.mBuffers[0].mData = (SInt16*) malloc(sizeof(SInt16)*inNumberFrames);
OSStatus status;
status = AudioUnitRender(THIS->rioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&(THIS->bufferList));
checkStatus(status);
dispatch_async(dispatch_get_main_queue(), ^{
[THIS.delegate receivedAudioSamples:(SInt16*)THIS->bufferList.mBuffers[0].mData length:inNumberFrames];
});
return noErr;
}
-(void) startAudio
{
OSStatus status = AudioOutputUnitStart(rioUnit);
checkStatus(status);
printf("Audio Initialized - sampleRate: %f\n", audioFormat.mSampleRate);
}
@end
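For reference, the Accelerate-based level check I have been trying to bolt onto recordingCallback looks roughly like this. It is only a sketch of the idea (the helper name and the -50 dBFS threshold are mine, and allocating inside a render callback is admittedly not ideal):
#import <Accelerate/Accelerate.h>
// Sketch: measure one render buffer of SInt16 samples with vDSP and decide
// whether it is loud enough to be worth passing on to the pitch detector.
static BOOL bufferIsLoudEnough(SInt16 *samples, UInt32 frames, float thresholdDb) {
    if (samples == NULL || frames == 0) return NO;
    float *floats = (float *)malloc(sizeof(float) * frames);
    vDSP_vflt16(samples, 1, floats, 1, frames);          // SInt16 -> float
    float scale = 1.0f / 32768.0f;
    vDSP_vsmul(floats, 1, &scale, floats, 1, frames);    // normalize to -1 ... 1
    float rms = 0.0f;
    vDSP_rmsqv(floats, 1, &rms, frames);                 // root-mean-square level
    free(floats);
    float dbfs = 20.0f * log10f(rms + 1e-9f);
    return dbfs > thresholdDb;                           // e.g. -50.0f
}
// In recordingCallback, before dispatching to the delegate, one could then do:
// if (!bufferIsLoudEnough(THIS->bufferList.mBuffers[0].mData, inNumberFrames, -50.0f)) {
//     free(THIS->bufferList.mBuffers[0].mData);         // drop the quiet buffer
//     return noErr;
// }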
Related
First of all, we used an AudioUnit (SubType_VoiceProcessingIO) to record the sound captured by the device's microphone (iPhone or iPad). The problem is that when we spoke loudly and got too close to the microphone, the built-in AGC made the volume of the sound drop very quickly. If we turned off the built-in AGC, the sound would crackle. We just want the volume to be normal and stable, but we have not found any way to change the parameters of the built-in AGC. This has been troubling us for a long time, because there is no such phenomenon on Android devices.
Then we discovered that we could use AUGraph for audio effect processing and maybe solve this problem, so we tried that route. Because of the specific format required by DynamicsProcessor (an AudioUnit subtype belonging to the main type kAudioUnitType_Effect), we use an AudioUnit (SubType_VoiceProcessingIO) for sound recording, two AudioUnits (SubType_AUConverter) for format conversion, and an AudioUnit (SubType_DynamicsProcessor) for volume control. Finally, we use AUGraph to connect all these AudioUnits.
The data flows like this:
AudioUnit instance1(VoiceProcessingIO) -> AudioUnit instance2(AUConverter) -> AudioUnit instance3(DynamicsProcessor) -> AudioUnit instance4(AUConverter).
But the result is that we could not get the data processed by AudioUnit instance4 (AUConverter) through its callback function; the callback was never called.
Here are some questions:
1. Are there any easier ways to solve this problem?
2. How can we get the data processed by the last AudioUnit, instance4 (AUConverter)? We need to write the data into a local file for analysis.
3. Are there any implicit things that we should know about kAudioUnitSubType_AUConverter or kAudioUnitType_FormatConverter?
Latest Code
#import "ViewController.h"
#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>
#define OUTPUT_ELEMENT 0
#define INPUT_ELEMENT 1
FILE *g_recordFileHandle1;
FILE *g_resultFileHandle1;
static inline void CheckError(OSStatus status, NSString *functionDescription, NSString *errorMessage);
static OSStatus ConvertUnitRenderCallBack(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData);
@interface ViewController ()
@property (nonatomic, strong) UIButton *beginButton;
@property (nonatomic, strong) UIButton *endButton;
@end
@implementation ViewController
{
AUGraph m_pAudioGraph;
AUNode m_nRecordNode;
@public AudioUnit m_pRecordUnit;
@public AudioBufferList *m_pRecordBufferList;
AudioStreamBasicDescription m_stRecordFormat;
AUNode m_nConvertNode1;
@public AudioUnit m_pConvertUnit1;
AUNode m_nDPNode;
@public AudioUnit m_pDPUnit;
AudioStreamBasicDescription m_stDPInputFormat;
AudioStreamBasicDescription m_stDPOutputFormat;
AUNode m_nConvertNode2;
@public AudioUnit m_pConvertUnit2;
}
#pragma mark - Life Cycle
- (void)dealloc {
if (g_recordFileHandle1) {
fclose(g_recordFileHandle1);
g_recordFileHandle1 = NULL;
}
if (g_resultFileHandle1) {
fclose(g_resultFileHandle1);
g_resultFileHandle1 = NULL;
}
}
- (void)viewDidLoad {
[super viewDidLoad];
self.title = @"方案1";
self.view.backgroundColor = [UIColor whiteColor];
[self.view addSubview:self.beginButton];
[self.view addSubview:self.endButton];
NSString *recordFilePath = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES).firstObject stringByAppendingPathComponent:@"recordData1.pcm"];
g_recordFileHandle1 = fopen([recordFilePath UTF8String], "wb");
NSString *resultFilePath = [NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES).firstObject stringByAppendingPathComponent:@"resultData1.pcm"];
g_resultFileHandle1 = fopen([resultFilePath UTF8String], "wb");
[self configureAudioGraph];
}
#pragma mark - Private Methods
- (void)configureAudioGraph {
memset(&m_stRecordFormat, 0, sizeof(AudioStreamBasicDescription));
m_stRecordFormat.mSampleRate = 16000.0f;
m_stRecordFormat.mFormatID = kAudioFormatLinearPCM;
m_stRecordFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved;
m_stRecordFormat.mBitsPerChannel = 16;
m_stRecordFormat.mChannelsPerFrame = 1;
m_stRecordFormat.mBytesPerFrame = m_stRecordFormat.mBitsPerChannel * m_stRecordFormat.mChannelsPerFrame / 8;
m_stRecordFormat.mFramesPerPacket = 1;
m_stRecordFormat.mBytesPerPacket = m_stRecordFormat.mBytesPerFrame * m_stRecordFormat.mFramesPerPacket;
m_pAudioGraph = NULL;
CheckError(NewAUGraph(&m_pAudioGraph), @"NewAUGraph", @"fail");
// RecordNode
AudioComponentDescription recordDescription;
recordDescription.componentType = kAudioUnitType_Output;
recordDescription.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
recordDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
recordDescription.componentFlags = 0;
recordDescription.componentFlagsMask = 0;
CheckError(AUGraphAddNode(m_pAudioGraph, &recordDescription, &m_nRecordNode), @"AUGraphAddNode recordNode", @"fail");
// ConvertNode1
AudioComponentDescription convertDescription1;
convertDescription1.componentType = kAudioUnitType_FormatConverter;
convertDescription1.componentSubType = kAudioUnitSubType_AUConverter;
convertDescription1.componentManufacturer = kAudioUnitManufacturer_Apple;
convertDescription1.componentFlags = 0;
convertDescription1.componentFlagsMask = 0;
CheckError(AUGraphAddNode(m_pAudioGraph, &convertDescription1, &m_nConvertNode1), @"AUGraphAddNode convertNode1", @"fail");
// DynamicsProcessor
AudioComponentDescription dpDescription;
dpDescription.componentType = kAudioUnitType_Effect;
dpDescription.componentSubType = kAudioUnitSubType_DynamicsProcessor;
dpDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
dpDescription.componentFlags = 0;
dpDescription.componentFlagsMask = 0;
CheckError(AUGraphAddNode(m_pAudioGraph, &dpDescription, &m_nDPNode), @"AUGraphAddNode dpNode", @"fail");
// ConvertNode2
AudioComponentDescription convertDescription2;
convertDescription2.componentType = kAudioUnitType_FormatConverter;
convertDescription2.componentSubType = kAudioUnitSubType_AUConverter;
convertDescription2.componentManufacturer = kAudioUnitManufacturer_Apple;
convertDescription2.componentFlags = 0;
convertDescription2.componentFlagsMask = 0;
CheckError(AUGraphAddNode(m_pAudioGraph, &convertDescription2, &m_nConvertNode2), @"AUGraphAddNode convertNode2", @"fail");
CheckError(AUGraphOpen(m_pAudioGraph), @"AUGraphOpen", @"fail");
[self setupRecordUnit];
[self setupDynamicsProcessorUnit];
[self setupConvertUnit1];
[self setupConvertUnit2];
CheckError(AUGraphConnectNodeInput(m_pAudioGraph, m_nRecordNode, INPUT_ELEMENT, m_nConvertNode1, OUTPUT_ELEMENT), @"AUGraphConnectNodeInput m_nRecordNode->m_nConvertNode1", @"fail");
CheckError(AUGraphConnectNodeInput(m_pAudioGraph, m_nConvertNode1, OUTPUT_ELEMENT, m_nDPNode, OUTPUT_ELEMENT), @"AUGraphConnectNodeInput m_nConvertNode1->m_nDPNode", @"fail");
CheckError(AUGraphConnectNodeInput(m_pAudioGraph, m_nDPNode, OUTPUT_ELEMENT, m_nConvertNode2, OUTPUT_ELEMENT), @"AUGraphConnectNodeInput m_nDPNode->m_nConvertNode2", @"fail");
CheckError(AUGraphInitialize(m_pAudioGraph), @"AUGraphInitialize", @"fail");
CheckError(AUGraphUpdate(m_pAudioGraph, NULL), @"AUGraphUpdate", @"fail");
CAShow(m_pAudioGraph);
}
- (void)setupRecordUnit {
NSLog(@"");
NSLog(@"---------------setupRecordUnit begin---------------");
m_pRecordUnit = NULL;
CheckError(AUGraphNodeInfo(m_pAudioGraph, m_nRecordNode, NULL, &m_pRecordUnit), @"AUGraphNodeInfo recordNode", @"fail");
UInt32 inputFlag = 1;
CheckError(AudioUnitSetProperty(m_pRecordUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
INPUT_ELEMENT,
&inputFlag,
sizeof(inputFlag)),
@"AudioUnitSetProperty m_pRecordUnit kAudioOutputUnitProperty_EnableIO INPUT_ELEMENT kAudioUnitScope_Input",
@"fail");
UInt32 enableAGC = 1;
CheckError(AudioUnitSetProperty(m_pRecordUnit,
kAUVoiceIOProperty_VoiceProcessingEnableAGC,
kAudioUnitScope_Global,
INPUT_ELEMENT,
&enableAGC,
sizeof(enableAGC)),
@"AudioUnitSetProperty m_pRecordUnit kAUVoiceIOProperty_VoiceProcessingEnableAGC INPUT_ELEMENT kAudioUnitScope_Global",
@"fail");
CheckError(AudioUnitSetProperty(m_pRecordUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
INPUT_ELEMENT,
&m_stRecordFormat,
sizeof(m_stRecordFormat)),
@"AudioUnitSetProperty m_pRecordUnit kAudioUnitProperty_StreamFormat INPUT_ELEMENT kAudioUnitScope_Output",
@"fail");
NSLog(@"---------------setupRecordUnit end---------------");
NSLog(@"");
}
- (void)setupDynamicsProcessorUnit {
NSLog(@"");
NSLog(@"---------------setupDynamicsProcessorUnit begin---------------");
m_pDPUnit = NULL;
CheckError(AUGraphNodeInfo(m_pAudioGraph, m_nDPNode, NULL, &m_pDPUnit), @"AUGraphNodeInfo dpNode", @"fail");
UInt32 inputFormatSize = sizeof(m_stDPInputFormat);
CheckError(AudioUnitGetProperty(m_pDPUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
OUTPUT_ELEMENT,
&m_stDPInputFormat,
&inputFormatSize),
@"AudioUnitGetProperty m_pDPUnit kAudioUnitProperty_StreamFormat OUTPUT_ELEMENT kAudioUnitScope_Input",
@"fail");
UInt32 outputFormatSize = sizeof(m_stDPOutputFormat);
CheckError(AudioUnitGetProperty(m_pDPUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
OUTPUT_ELEMENT,
&m_stDPOutputFormat,
&outputFormatSize),
@"AudioUnitGetProperty m_pDPUnit kAudioUnitProperty_StreamFormat OUTPUT_ELEMENT kAudioUnitScope_Output",
@"fail");
NSLog(@"---------------setupDynamicsProcessorUnit end---------------");
NSLog(@"");
}
- (void)setupConvertUnit1 {
NSLog(@"");
NSLog(@"---------------setupConvertUnit1 begin---------------");
m_pConvertUnit1 = NULL;
CheckError(AUGraphNodeInfo(m_pAudioGraph, m_nConvertNode1, NULL, &m_pConvertUnit1), @"AUGraphNodeInfo convertNode1", @"fail");
CheckError(AudioUnitSetProperty(m_pConvertUnit1,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
OUTPUT_ELEMENT,
&m_stRecordFormat,
sizeof(m_stRecordFormat)),
@"AudioUnitSetProperty m_pConvertUnit1 kAudioUnitProperty_StreamFormat OUTPUT_ELEMENT kAudioUnitScope_Input",
@"fail");
CheckError(AudioUnitSetProperty(m_pConvertUnit1,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
OUTPUT_ELEMENT,
&m_stDPInputFormat,
sizeof(m_stDPInputFormat)),
@"AudioUnitSetProperty m_pConvertUnit1 kAudioUnitProperty_StreamFormat OUTPUT_ELEMENT kAudioUnitScope_Output",
@"fail");
NSLog(@"---------------setupConvertUnit1 end---------------");
NSLog(@"");
}
- (void)setupConvertUnit2 {
NSLog(@"");
NSLog(@"---------------setupConvertUnit2 begin---------------");
m_pConvertUnit2 = NULL;
CheckError(AUGraphNodeInfo(m_pAudioGraph, m_nConvertNode2, NULL, &m_pConvertUnit2), @"AUGraphNodeInfo convertNode2", @"fail");
CheckError(AudioUnitSetProperty(m_pConvertUnit2,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
OUTPUT_ELEMENT,
&m_stDPOutputFormat,
sizeof(m_stDPOutputFormat)),
@"AudioUnitSetProperty m_pConvertUnit2 kAudioUnitProperty_StreamFormat OUTPUT_ELEMENT kAudioUnitScope_Input",
@"fail");
CheckError(AudioUnitSetProperty(m_pConvertUnit2,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
OUTPUT_ELEMENT,
&m_stRecordFormat,
sizeof(m_stRecordFormat)),
@"AudioUnitSetProperty m_pConvertUnit2 kAudioUnitProperty_StreamFormat OUTPUT_ELEMENT kAudioUnitScope_Output",
@"fail");
CheckError(AudioUnitAddRenderNotify(m_pConvertUnit2,
ConvertUnitRenderCallBack,
(__bridge void *)self),
@"AudioUnitAddRenderNotify m_pConvertUnit2",
@"fail");
NSLog(@"---------------setupConvertUnit2 end---------------");
NSLog(@"");
}
#pragma mark - Action Methods
- (void)beginAudioCapture {
CheckError(AUGraphStart(m_pAudioGraph), @"AUGraphStart", @"fail");
}
- (void)endAudioCapture {
Boolean isRunning = false;
OSStatus status = AUGraphIsRunning(m_pAudioGraph, &isRunning);
if (isRunning) {
status = AUGraphStop(m_pAudioGraph);
CheckError(status, @"AUGraphStop", @"Could not stop AUGraph");
}
}
#pragma mark - Getter
- (UIButton *)beginButton {
if (!_beginButton) {
CGFloat screenWidth = [UIScreen mainScreen].bounds.size.width;
CGFloat screenHeight = [UIScreen mainScreen].bounds.size.height;
_beginButton = [[UIButton alloc] initWithFrame:CGRectMake((screenWidth - 80)/2, screenHeight - 300, 80, 50)];
[_beginButton setTitle:@"开始录制" forState:UIControlStateNormal];
[_beginButton setTitleColor:[UIColor blackColor] forState:UIControlStateNormal];
[_beginButton setTitleColor:[UIColor lightGrayColor] forState:UIControlStateHighlighted];
[_beginButton addTarget:self action:@selector(beginAudioCapture) forControlEvents:UIControlEventTouchUpInside];
}
return _beginButton;
}
- (UIButton *)endButton {
if (!_endButton) {
CGFloat screenWidth = [UIScreen mainScreen].bounds.size.width;
CGFloat screenHeight = [UIScreen mainScreen].bounds.size.height;
_endButton = [[UIButton alloc] initWithFrame:CGRectMake((screenWidth - 80)/2, screenHeight - 200, 80, 50)];
[_endButton setTitle:@"结束录制" forState:UIControlStateNormal];
[_endButton setTitleColor:[UIColor blackColor] forState:UIControlStateNormal];
[_endButton setTitleColor:[UIColor lightGrayColor] forState:UIControlStateHighlighted];
[_endButton addTarget:self action:@selector(endAudioCapture) forControlEvents:UIControlEventTouchUpInside];
}
return _endButton;
}
@end
#pragma mark - Functions
static void CheckError(OSStatus status, NSString *functionDescription, NSString *errorMessage)
{
if (status != noErr)
{
char fourCC[16];
*(UInt32 *)fourCC = CFSwapInt32HostToBig(status);
fourCC[4] = '\0';
if (isprint(fourCC[0]) && isprint(fourCC[1]) && isprint(fourCC[2]) && isprint(fourCC[3]))
{
NSLog(@"%@ - %@: %s", functionDescription, errorMessage, fourCC);
}
else
{
NSLog(@"%@ - %@: %d", functionDescription, errorMessage, (int)status);
}
}
else
{
NSLog(@"%@ succeed", functionDescription);
}
}
static OSStatus ConvertUnitRenderCallBack(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
if (*ioActionFlags == kAudioUnitRenderAction_PreRender)
{
NSLog(@"ConvertUnitRenderCallBack flag = kAudioUnitRenderAction_PreRender");
}
if (*ioActionFlags == (kAudioUnitRenderAction_PostRender | kAudioUnitRenderAction_OutputIsSilence | kAudioUnitRenderAction_PostRenderError))
{
NSLog(@"ConvertUnitRenderCallBack flag = kAudioUnitRenderAction_PostRenderError");
}
if (*ioActionFlags == kAudioUnitRenderAction_PostRender)
{
NSLog(@"ConvertUnitRenderCallBack flag = kAudioUnitRenderAction_PostRender");
NSLog(@"ConvertUnitRenderCallBack dataLength = %lu", (unsigned long)ioData->mBuffers[0].mDataByteSize);
if (ioData->mBuffers[0].mData)
{
if (g_resultFileHandle1)
{
fwrite(ioData->mBuffers[0].mData, 1, ioData->mBuffers[0].mDataByteSize, g_resultFileHandle1);
}
}
}
if (ioData->mBuffers[0].mData)
{
memset(ioData->mBuffers[0].mData, 0, ioData->mBuffers[0].mDataByteSize);
}
return noErr;
}
Using the code below, the server starts recording and streams to the connected clients, and it is working well.
AudioServer.h
#import <UIKit/UIKit.h>
#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import <AudioToolbox/AudioToolbox.h>
@interface AudioServer : NSObject<GCDAsyncSocketDelegate>
@property (nonatomic, strong)GCDAsyncSocket * serverSocket;
@property (nonatomic, strong)NSMutableArray *connectedClients;
@property (nonatomic) AudioComponentInstance audioUnit;
-(void) start;
-(void) stop;
-(void) writeDataToClients:(NSData*)data;
@end
AudioServer.m
#import "AudioServer.h"
#define kOutputBus 0
#define kInputBus 1
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// TODO: Use inRefCon to access our interface object to do stuff
// Then, use inNumberFrames to figure out how much data is available, and make
// that much space available in buffers in an AudioBufferList.
AudioServer *server = (__bridge AudioServer*)inRefCon;
AudioBufferList bufferList;
SInt16 samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
memset (&samples, 0, sizeof (samples));
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mData = samples;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mDataByteSize = inNumberFrames*sizeof(SInt16);
// Then:
// Obtain recorded samples
OSStatus status;
status = AudioUnitRender(server.audioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&bufferList);
NSData *dataToSend = [NSData dataWithBytes:bufferList.mBuffers[0].mData length:bufferList.mBuffers[0].mDataByteSize];
[server writeDataToClients:dataToSend];
return noErr;
}
@implementation AudioServer
-(id) init
{
return [super init];
}
-(void) start
{
[UIApplication sharedApplication].idleTimerDisabled = YES;
// Create a new instance of AURemoteIO
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp = AudioComponentFindNext(NULL, &desc);
AudioComponentInstanceNew(comp, &_audioUnit);
// Enable input and output on AURemoteIO
// Input is enabled on the input scope of the input element
// Output is enabled on the output scope of the output element
UInt32 one = 1;
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one));
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &one, sizeof(one));
// Explicitly set the input and output client formats
// sample rate = 44100, num channels = 1, format = 32 bit floating point
AudioStreamBasicDescription audioFormat = [self getAudioDescription];
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &audioFormat, sizeof(audioFormat));
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &audioFormat, sizeof(audioFormat));
// Set the MaximumFramesPerSlice property. This property is used to describe to an audio unit the maximum number
// of samples it will be asked to produce on any single given call to AudioUnitRender
UInt32 maxFramesPerSlice = 4096;
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
// Get the property value back from AURemoteIO. We are going to use this value to allocate buffers accordingly
UInt32 propSize = sizeof(UInt32);
AudioUnitGetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, &propSize);
AURenderCallbackStruct renderCallback;
renderCallback.inputProc = recordingCallback;
renderCallback.inputProcRefCon = (__bridge void *)(self);
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &renderCallback, sizeof(renderCallback));
// Initialize the AURemoteIO instance
AudioUnitInitialize(_audioUnit);
AudioOutputUnitStart(_audioUnit);
_connectedClients = [[NSMutableArray alloc] init];
_serverSocket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue:dispatch_get_main_queue()];
[self startAcceptingConnections];
}
- (AudioStreamBasicDescription)getAudioDescription {
AudioStreamBasicDescription audioDescription = {0};
audioDescription.mFormatID = kAudioFormatLinearPCM;
audioDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
audioDescription.mChannelsPerFrame = 1;
audioDescription.mBytesPerPacket = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mFramesPerPacket = 1;
audioDescription.mBytesPerFrame = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mBitsPerChannel = 8 * sizeof(SInt16);
audioDescription.mSampleRate = 44100.0;
return audioDescription;
}
-(void) startAcceptingConnections
{
NSError *error = nil;
if(_serverSocket)
//[_serverSocket acceptOnPort:<#(uint16_t)#> error:<#(NSError *__autoreleasing *)#>]
[_serverSocket acceptOnPort:2030 error:&error];
//TODO:- Change Here Port numbers
}
-(void)socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
{
if(_connectedClients)
[_connectedClients removeObject:sock];
}
- (void)socket:(GCDAsyncSocket *)socket didAcceptNewSocket:(GCDAsyncSocket *)newSocket {
NSLog(@"Accepted New Socket from %@:%hu", [newSocket connectedHost], [newSocket connectedPort]);
@synchronized(_connectedClients)
{
dispatch_async(dispatch_get_main_queue(), ^{
if(_connectedClients)
[_connectedClients addObject:newSocket];
});
}
NSError *error = nil;
if(_serverSocket)
//[_serverSocket acceptOnPort:[SM_Utils serverPort] error:&error];
[_serverSocket acceptOnPort:2030 error:&error];
//TODO:- Change Here Port numbers
}
-(void) writeDataToClients:(NSData *)data
{
if(_connectedClients)
{
for (GCDAsyncSocket *socket in _connectedClients) {
if([socket isConnected])
{
[socket writeData:data withTimeout:-1 tag:0];
}
else{
if([_connectedClients containsObject:socket])
[_connectedClients removeObject:socket];
}
}
}
}
-(void) stop
{
if(_serverSocket)
{
_serverSocket = nil;
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
AudioOutputUnitStop(_audioUnit);
}
-(void) dealloc
{
if(_serverSocket)
{
_serverSocket = nil;
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
AudioOutputUnitStop(_audioUnit);
}
@end
Here is the client-side code:
AudioClient.h
#import <UIKit/UIKit.h>
#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import <AudioToolbox/AudioToolbox.h>
#import "TPCircularBuffer.h"
@protocol AudioClientDelegate <NSObject>
-(void) connected;
-(void) animateSoundIndicator:(float) rms;
@end
@interface AudioClient : NSObject<GCDAsyncSocketDelegate>
{
NSString *ipAddress;
BOOL stopped;
}
@property (nonatomic) TPCircularBuffer circularBuffer;
@property (nonatomic) AudioComponentInstance audioUnit;
@property (nonatomic, strong) GCDAsyncSocket *socket;
@property (nonatomic, strong) id<AudioClientDelegate> delegate;
-(id) initWithDelegate:(id)delegate;
-(void) start:(NSString *)ip;
-(void) stop;
-(TPCircularBuffer *) outputShouldUseCircularBuffer;
@end
AudioClient.m
#define kOutputBus 0
#define kInputBus 1
#import "AudioClient.h"
@implementation AudioClient
static OSStatus OutputRenderCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData){
AudioClient *output = (__bridge AudioClient*)inRefCon;
TPCircularBuffer *circularBuffer = [output outputShouldUseCircularBuffer];
if( !circularBuffer ){
AudioUnitSampleType *left = (AudioUnitSampleType*)ioData->mBuffers[0].mData;
for(int i = 0; i < inNumberFrames; i++ ){
left[ i ] = 0.0f;
}
return noErr;
};
int32_t bytesToCopy = ioData->mBuffers[0].mDataByteSize;
SInt16* outputBuffer = ioData->mBuffers[0].mData;
int32_t availableBytes;
SInt16 *sourceBuffer = TPCircularBufferTail(circularBuffer, &availableBytes);
int32_t amount = MIN(bytesToCopy,availableBytes);
memcpy(outputBuffer, sourceBuffer, amount);
TPCircularBufferConsume(circularBuffer,amount);
NSLog(@"Bufferiiii");
return noErr;
}
-(id) initWithDelegate:(id)delegate
{
if(!self)
{
self = [super init];
}
[self circularBuffer:&_circularBuffer withSize:24576*5];
_delegate = delegate;
stopped = NO;
return self;
}
-(void) start:(NSString *)ip
{
_socket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue: dispatch_get_main_queue()];
NSError *err;
ipAddress = ip;
[UIApplication sharedApplication].idleTimerDisabled = YES;
//if(![_socket connectToHost:ipAddress onPort:[SM_Utils serverPort] error:&err])
if(![_socket connectToHost:ipAddress onPort:2030 error:&err])
{
}
[self setupAudioUnit];
}
-(void) setupAudioUnit
{
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp = AudioComponentFindNext(NULL, &desc);
OSStatus status;
status = AudioComponentInstanceNew(comp, &_audioUnit);
if(status != noErr)
{
NSLog(@"Error creating AudioUnit instance");
}
// Enable input and output on AURemoteIO
// Input is enabled on the input scope of the input element
// Output is enabled on the output scope of the output element
UInt32 one = 1;
status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kOutputBus, &one, sizeof(one));
if(status != noErr)
{
NSLog(@"Error enabling AudioUnit output bus");
}
// Explicitly set the input and output client formats
// sample rate = 44100, num channels = 1, format = 16 bit int point
AudioStreamBasicDescription audioFormat = [self getAudioDescription];
status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &audioFormat, sizeof(audioFormat));
if(status != noErr)
{
NSLog(@"Error setting audio format");
}
AURenderCallbackStruct renderCallback;
renderCallback.inputProc = OutputRenderCallback;
renderCallback.inputProcRefCon = (__bridge void *)(self);
status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, kOutputBus, &renderCallback, sizeof(renderCallback));
if(status != noErr)
{
NSLog(@"Error setting rendering callback");
}
// Initialize the AURemoteIO instance
status = AudioUnitInitialize(_audioUnit);
if(status != noErr)
{
NSLog(@"Error initializing audio unit");
}
}
- (AudioStreamBasicDescription)getAudioDescription {
AudioStreamBasicDescription audioDescription = {0};
audioDescription.mFormatID = kAudioFormatLinearPCM;
audioDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
audioDescription.mChannelsPerFrame = 1;
audioDescription.mBytesPerPacket = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mFramesPerPacket = 1;
audioDescription.mBytesPerFrame = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mBitsPerChannel = 8 * sizeof(SInt16);
audioDescription.mSampleRate = 44100.0;
return audioDescription;
}
-(void) socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
{
if(!stopped)
//if(![_socket connectToHost:ipAddress onPort:[SM_Utils serverPort] error:&err])
if(![_socket connectToHost:ipAddress onPort:2030 error:&err])
{
}
}
-(void) socket:(GCDAsyncSocket *)socket didReadData:(NSData *)data withTag:(long)tag
{
if(data.length > 0)
{
unsigned long len = [data length];
SInt16* byteData = (SInt16*)malloc(len);
memcpy(byteData, [data bytes], len);
double sum = 0.0;
for(int i = 0; i < len/2; i++) {
sum += byteData[i] * byteData[i];
}
double average = sum / len;
double rms = sqrt(average);
[_delegate animateSoundIndicator:rms];
Byte* soundData = (Byte*)malloc(len);
memcpy(soundData, [data bytes], len);
if(soundData)
{
AudioBufferList *theDataBuffer = (AudioBufferList*) malloc(sizeof(AudioBufferList) *1);
theDataBuffer->mNumberBuffers = 1;
theDataBuffer->mBuffers[0].mDataByteSize = (UInt32)len;
theDataBuffer->mBuffers[0].mNumberChannels = 1;
theDataBuffer->mBuffers[0].mData = (SInt16*)soundData;
[self appendDataToCircularBuffer:&_circularBuffer fromAudioBufferList:theDataBuffer];
}
}
[socket readDataToLength:18432 withTimeout:-1 tag:0];
}
-(void)circularBuffer:(TPCircularBuffer *)circularBuffer withSize:(int)size {
TPCircularBufferInit(circularBuffer,size);
}
-(void)appendDataToCircularBuffer:(TPCircularBuffer*)circularBuffer
fromAudioBufferList:(AudioBufferList*)audioBufferList {
TPCircularBufferProduceBytes(circularBuffer,
audioBufferList->mBuffers[0].mData,
audioBufferList->mBuffers[0].mDataByteSize);
}
-(void)freeCircularBuffer:(TPCircularBuffer *)circularBuffer {
TPCircularBufferClear(circularBuffer);
TPCircularBufferCleanup(circularBuffer);
}
-(void) socket:(GCDAsyncSocket *)socket didConnectToHost:(NSString *)host port:(uint16_t)port
{
OSStatus status = AudioOutputUnitStart(_audioUnit);
if(status != noErr)
{
NSLog(@"Error starting audio unit");
}
[socket readDataToLength:18432 withTimeout:-1 tag:0];
[_delegate connected];
}
-(TPCircularBuffer *) outputShouldUseCircularBuffer
{
return &_circularBuffer;
}
-(void) stop
{
OSStatus status = AudioOutputUnitStop(_audioUnit);
if(status != noErr)
{
NSLog(@"Error stopping audio unit");
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
TPCircularBufferClear(&_circularBuffer);
_audioUnit = nil;
stopped = YES;
}
-(void) dealloc {
OSStatus status = AudioOutputUnitStop(_audioUnit);
if(status != noErr)
{
NSLog(@"Error stopping audio unit");
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
TPCircularBufferClear(&_circularBuffer);
_audioUnit = nil;
stopped = YES;
}
@end
I want to stream songs from the Apple Music library in the same way as above, so I use this code:
Output.h
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>
@class Output;
@protocol OutputDataSource <NSObject>
- (void)readFrames:(UInt32)frames
audioBufferList:(AudioBufferList *)audioBufferList
bufferSize:(UInt32 *)bufferSize songData:(NSData *)songData;
@end
@interface Output : NSObject
@property (strong, nonatomic) id outputDataSource;
- (void)startOutputUnit;
- (void)stopOutputUnit;
@end
Output.m
#import "Output.h"
#import "Utilities.m"
#import "AudioServer.h"
static OSStatus OutputRenderCallback (void *inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inOutputBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData)
{
AudioBufferList bufferList;
SInt16 samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
memset (&samples, 0, sizeof (samples));
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mData = samples;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mDataByteSize = inNumberFrames*sizeof(SInt16);
NSData *dataToSend = [NSData dataWithBytes:bufferList.mBuffers[0].mData length:bufferList.mBuffers[0].mDataByteSize];
Output *self = (__bridge Output*)inRefCon;
if (self.outputDataSource)
{
if ([self.outputDataSource respondsToSelector:@selector(readFrames:audioBufferList:bufferSize:songData:)])
{
@autoreleasepool
{
UInt32 bufferSize;
//[self.outputDataSource readFrames:inNumberFrames audioBufferList:ioData bufferSize:&bufferSize];
[self.outputDataSource readFrames:inNumberFrames audioBufferList:ioData bufferSize:&bufferSize songData:dataToSend];
}
}
}
return noErr;
}
@interface Output()
@property (nonatomic) AudioUnit audioUnit;
@end
@implementation Output
- (id)init
{
self = [super init];
if (!self) {
return nil;
}
[self createAudioUnit];
return self;
}
#pragma mark - Audio Unit Setup
- (void)createAudioUnit
{
// create a component description
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
// use the description to find the component we're looking for
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &desc);
// create an instance of the component and have our _audioUnit property point to it
CheckError(AudioComponentInstanceNew(defaultOutput, &_audioUnit),
"AudioComponentInstanceNew Failed");
// describe the output audio format... here we're using LPCM 32 bit floating point samples
AudioStreamBasicDescription outputFormat;
outputFormat.mFormatID = kAudioFormatLinearPCM;
outputFormat.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsFloat;
outputFormat.mSampleRate = 44100;
outputFormat.mChannelsPerFrame = 2;
outputFormat.mBitsPerChannel = 32;
outputFormat.mBytesPerPacket = (outputFormat.mBitsPerChannel / 8) * outputFormat.mChannelsPerFrame;
outputFormat.mFramesPerPacket = 1;
outputFormat.mBytesPerFrame = outputFormat.mBytesPerPacket;
// set the audio format on the input scope (kAudioUnitScope_Input) of the output bus (0) of the output unit - got that?
CheckError(AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &outputFormat, sizeof(outputFormat)),
"AudioUnitSetProperty StreamFormat Failed");
// set up a render callback struct consisting of our output render callback (above) and a reference to self (so we can access our outputDataSource reference from within the callback)
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = OutputRenderCallback;
callbackStruct.inputProcRefCon = (__bridge void*)self;
// add the callback struct to the output unit (again, that's to the input scope of the output bus)
CheckError(AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, 0, &callbackStruct, sizeof(callbackStruct)),
"AudioUnitSetProperty SetRenderCallback Failed");
// initialize the unit
CheckError(AudioUnitInitialize(_audioUnit),
"AudioUnitInitializeFailed");
}
#pragma mark - Start/Stop
- (void)startOutputUnit
{
CheckError(AudioOutputUnitStart(_audioUnit), "Audio Output Unit Failed To Start");
}
- (void)stopOutputUnit
{
CheckError(AudioOutputUnitStop(_audioUnit), "Audio Output Unit Failed To Stop");
}
@end
The problem is that the client receives the song data but does not play it, and I don't know what I'm doing wrong.
Please correct me, I'm stuck here.
Hey there, I'm definitely out of my depth here, but unfortunately it's too late to turn back, as the project was given to me by a lecturer.
I'm trying to disable the system-supplied signal processing applied to my input by using AVAudioSessionModeMeasurement within my project.
However, I'm struggling to find any sources on how to do this.
My desired outcome is that by enabling this mode I will be able to take more accurate readings within my application.
Here is the code:
#import "ViewController.h"
@import AudioToolbox;
@import AVFoundation;
#define kOutputBus 0
#define kInputBus 1
@interface ViewController ()
@property (nonatomic, weak) IBOutlet UILabel *dBSPLView2;
@end
@implementation ViewController
static AudioComponentInstance audioUnit;
- (void)viewDidLoad {
[super viewDidLoad];
// Do any additional setup after loading the view, typically from a nib.
[self setupAudio];
}
- (void) setupAudio {
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp = AudioComponentFindNext(NULL, &desc);
OSStatus status = AudioComponentInstanceNew(comp, &audioUnit);
if (status != noErr) {
NSAssert(status == noErr,@"Error");
}
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate = 96000.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerFrame = audioFormat.mChannelsPerFrame * sizeof(SInt16);
audioFormat.mBytesPerPacket = audioFormat.mFramesPerPacket * audioFormat.mBytesPerFrame;
UInt32 flag = 1;
status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag));
if (status != noErr) {
NSAssert(status == noErr,@"Error");
}
flag = 0;
status = AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &flag, sizeof(flag));
if (status != noErr) {
NSAssert(status == noErr,@"Error");
}
status = AudioUnitSetProperty(audioUnit , kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &audioFormat, sizeof(audioFormat));
if (status != noErr) {
NSAssert(status == noErr,@"Error");
}
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = (__bridge void*)self;
status = AudioUnitSetProperty(audioUnit , kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, kInputBus, &callbackStruct, sizeof(callbackStruct));
if (status != noErr) {
NSAssert(status == noErr,@"Error");
}
status = AudioUnitInitialize(audioUnit);
if (status != noErr) {
NSAssert(status == noErr,@"Error");
}
}
static OSStatus recordingCallback(
void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData
) {
AudioBuffer buffer;
buffer.mNumberChannels = 1;
buffer.mDataByteSize = inNumberFrames * sizeof(SInt16);
buffer.mData = malloc(buffer.mDataByteSize);
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0] = buffer;
OSStatus status = AudioUnitRender(audioUnit , ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &bufferList);
if (status != noErr) {
printf("Error\n");
return -1;
}
SInt16 *frameBuffer = buffer.mData;
double totalAmplitude = 0;
for (int i = 0; i < inNumberFrames; i++) {
// printf("%i\n",frameBuffer[i]);
totalAmplitude += frameBuffer[i] * frameBuffer[i];
}
totalAmplitude /= inNumberFrames;
totalAmplitude = sqrt(totalAmplitude);
//Creates a negative number that goes no higher than zero
//float SPLFloat = totalAmplitude / (float)SHRT_MAX * 2;
float dBFloat = (20 * log10(totalAmplitude)) + 11;
dispatch_async(dispatch_get_main_queue(), ^{
ViewController *viewController = (__bridge ViewController*)inRefCon;
viewController.dBSPLView2.text = [NSString stringWithFormat:@"%.f", dBFloat];
});
return noErr;
}
- (IBAction)recordButtonPressed:(id)sender {
NSError *error;
[[AVAudioSession sharedInstance] setActive:YES error:&error];
if (error != nil) {
NSAssert(error == nil, @"Error");
}
[[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryRecord error:&error];
if (error != nil) {
NSAssert(error == nil, @"Error");
}
[[AVAudioSession sharedInstance] requestRecordPermission:^(BOOL granted) {
if (granted) {
OSStatus status = AudioOutputUnitStart(audioUnit);
if (status != noErr) {
NSAssert(status == noErr,@"Error");
}
} else {
NSAssert(NO, @"Error");
}
}];
}
- (IBAction)stopButtonPressed:(id)sender {
OSStatus status = AudioOutputUnitStop(audioUnit);
if (status != noErr) {
NSAssert(status == noErr,@"Error");
}
NSError *error;
[[AVAudioSession sharedInstance] setActive:NO error:&error];
if (error != nil) {
NSAssert(error == nil, @"Error");
}
}
- (void)didReceiveMemoryWarning {
[super didReceiveMemoryWarning];
// Dispose of any resources that can be recreated.
}
- (void) dealloc {
OSStatus status = AudioComponentInstanceDispose(audioUnit);
if (status != noErr) {
NSAssert(status == noErr,@"Error");
}
}
@end
After you configure the category for the session:
[[AVAudioSession sharedInstance] setMode:AVAudioSessionModeMeasurement error:&error];
if (error != nil) {
NSAssert(error == nil, @"Error");
}
Also, your error handling doesn't follow the established pattern. You should check the return value of setMode:error:. The error in/out parameter is only guaranteed to be valid when the return value from the method is NO. (In practice, checking that error is nil probably works fine in most cases, but it's not documented to work that way - so you shouldn't rely on it.)
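For example, the check could look like this (just a sketch of that pattern, using AVAudioSession's setMode:error:):
NSError *error = nil;
BOOL ok = [[AVAudioSession sharedInstance] setMode:AVAudioSessionModeMeasurement error:&error];
if (!ok) {
    // Only inspect `error` once the method has actually reported failure.
    NSLog(@"Could not set measurement mode: %@", error);
}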
I am trying to play PCM data from an NSInputStream. Can anyone show me the right approach or code to do so?
I receive the audio in the stream's has-data event with the following code.
uint8_t bytes[self.audioStreamReadMaxLength];
UInt32 length = [audioStream readData:bytes maxLength:self.audioStreamReadMaxLength];
Now, how can I play this audio byte data on the iPhone?
I worked on a similar problem, and in the end I solved it.
Here is the basic idea of what I did. I am using a library (GCDAsyncSocket) for the sockets.
The class below is responsible for capturing the audio and making it available to connected clients.
#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import <AudioToolbox/AudioToolbox.h>
@interface AudioServer : NSObject <GCDAsyncSocketDelegate>
@property (nonatomic, strong)GCDAsyncSocket * serverSocket;
@property (nonatomic, strong)NSMutableArray *connectedClients;
@property (nonatomic) AudioComponentInstance audioUnit;
-(void) start;
-(void) stop;
-(void) writeDataToClients:(NSData*)data;
@end
#define kOutputBus 0
#define kInputBus 1
#import "AudioServer.h"
#import "SM_Utils.h"
static OSStatus recordingCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData) {
// TODO: Use inRefCon to access our interface object to do stuff
// Then, use inNumberFrames to figure out how much data is available, and make
// that much space available in buffers in an AudioBufferList.
AudioServer *server = (__bridge AudioServer*)inRefCon;
AudioBufferList bufferList;
SInt16 samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
memset (&samples, 0, sizeof (samples));
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mData = samples;
bufferList.mBuffers[0].mNumberChannels = 1;
bufferList.mBuffers[0].mDataByteSize = inNumberFrames*sizeof(SInt16);
// Then:
// Obtain recorded samples
OSStatus status;
status = AudioUnitRender(server.audioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
&bufferList);
NSData *dataToSend = [NSData dataWithBytes:bufferList.mBuffers[0].mData length:bufferList.mBuffers[0].mDataByteSize];
[server writeDataToClients:dataToSend];
return noErr;
}
@implementation AudioServer
-(id) init
{
return [super init];
}
-(void) start
{
[UIApplication sharedApplication].idleTimerDisabled = YES;
// Create a new instance of AURemoteIO
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp = AudioComponentFindNext(NULL, &desc);
AudioComponentInstanceNew(comp, &_audioUnit);
// Enable input and output on AURemoteIO
// Input is enabled on the input scope of the input element
// Output is enabled on the output scope of the output element
UInt32 one = 1;
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &one, sizeof(one));
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &one, sizeof(one));
// Explicitly set the input and output client formats
// sample rate = 44100, num channels = 1, format = 32 bit floating point
AudioStreamBasicDescription audioFormat = [self getAudioDescription];
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &audioFormat, sizeof(audioFormat));
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &audioFormat, sizeof(audioFormat));
// Set the MaximumFramesPerSlice property. This property is used to describe to an audio unit the maximum number
// of samples it will be asked to produce on any single given call to AudioUnitRender
UInt32 maxFramesPerSlice = 4096;
AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(UInt32));
// Get the property value back from AURemoteIO. We are going to use this value to allocate buffers accordingly
UInt32 propSize = sizeof(UInt32);
AudioUnitGetProperty(_audioUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, &propSize);
AURenderCallbackStruct renderCallback;
renderCallback.inputProc = recordingCallback;
renderCallback.inputProcRefCon = (__bridge void *)(self);
AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &renderCallback, sizeof(renderCallback));
// Initialize the AURemoteIO instance
AudioUnitInitialize(_audioUnit);
AudioOutputUnitStart(_audioUnit);
_connectedClients = [[NSMutableArray alloc] init];
_serverSocket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue:dispatch_get_main_queue()];
[self startAcceptingConnections];
}
- (AudioStreamBasicDescription)getAudioDescription {
AudioStreamBasicDescription audioDescription = {0};
audioDescription.mFormatID = kAudioFormatLinearPCM;
audioDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
audioDescription.mChannelsPerFrame = 1;
audioDescription.mBytesPerPacket = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mFramesPerPacket = 1;
audioDescription.mBytesPerFrame = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mBitsPerChannel = 8 * sizeof(SInt16);
audioDescription.mSampleRate = 44100.0;
return audioDescription;
}
-(void) startAcceptingConnections
{
NSError *error = nil;
if(_serverSocket)
[_serverSocket acceptOnPort:[SM_Utils serverPort] error:&error];
}
-(void)socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
{
if(_connectedClients)
[_connectedClients removeObject:sock];
}
- (void)socket:(GCDAsyncSocket *)socket didAcceptNewSocket:(GCDAsyncSocket *)newSocket {
NSLog(@"Accepted New Socket from %@:%hu", [newSocket connectedHost], [newSocket connectedPort]);
@synchronized(_connectedClients)
{
dispatch_async(dispatch_get_main_queue(), ^{
if(_connectedClients)
[_connectedClients addObject:newSocket];
});
}
NSError *error = nil;
if(_serverSocket)
[_serverSocket acceptOnPort:[SM_Utils serverPort] error:&error];
}
-(void) writeDataToClients:(NSData *)data
{
if(_connectedClients)
{
for (GCDAsyncSocket *socket in _connectedClients) {
if([socket isConnected])
{
[socket writeData:data withTimeout:-1 tag:0];
}
else{
if([_connectedClients containsObject:socket])
[_connectedClients removeObject:socket];
}
}
}
}
-(void) stop
{
if(_serverSocket)
{
_serverSocket = nil;
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
AudioOutputUnitStop(_audioUnit);
}
-(void) dealloc
{
if(_serverSocket)
{
_serverSocket = nil;
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
AudioOutputUnitStop(_audioUnit);
}
@end
The following class is then responsible for retrieving the audio from the server and playing it:
#import <Foundation/Foundation.h>
#import "GCDAsyncSocket.h"
#import <AudioToolbox/AudioToolbox.h>
#import "TPCircularBuffer.h"
@protocol AudioClientDelegate <NSObject>
-(void) connected;
-(void) animateSoundIndicator:(float) rms;
@end
@interface AudioClient : NSObject<GCDAsyncSocketDelegate>
{
NSString *ipAddress;
BOOL stopped;
}
@property (nonatomic) TPCircularBuffer circularBuffer;
@property (nonatomic) AudioComponentInstance audioUnit;
@property (nonatomic, strong) GCDAsyncSocket *socket;
@property (nonatomic, strong) id<AudioClientDelegate> delegate;
-(id) initWithDelegate:(id)delegate;
-(void) start:(NSString *)ip;
-(void) stop;
-(TPCircularBuffer *) outputShouldUseCircularBuffer;
@end
static OSStatus OutputRenderCallback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData){
AudioClient *output = (__bridge AudioClient*)inRefCon;
TPCircularBuffer *circularBuffer = [output outputShouldUseCircularBuffer];
if( !circularBuffer ){
AudioUnitSampleType *left = (AudioUnitSampleType*)ioData->mBuffers[0].mData;
for(int i = 0; i < inNumberFrames; i++ ){
left[ i ] = 0.0f;
}
return noErr;
};
int32_t bytesToCopy = ioData->mBuffers[0].mDataByteSize;
SInt16* outputBuffer = ioData->mBuffers[0].mData;
int32_t availableBytes;
SInt16 *sourceBuffer = TPCircularBufferTail(circularBuffer, &availableBytes);
int32_t amount = MIN(bytesToCopy,availableBytes);
memcpy(outputBuffer, sourceBuffer, amount);
TPCircularBufferConsume(circularBuffer,amount);
return noErr;
}
-(id) initWithDelegate:(id)delegate
{
if(!self)
{
self = [super init];
}
[self circularBuffer:&_circularBuffer withSize:24576*5];
_delegate = delegate;
stopped = NO;
return self;
}
-(void) start:(NSString *)ip
{
_socket = [[GCDAsyncSocket alloc] initWithDelegate:self delegateQueue: dispatch_get_main_queue()];
NSError *err;
ipAddress = ip;
[UIApplication sharedApplication].idleTimerDisabled = YES;
if(![_socket connectToHost:ipAddress onPort:[SM_Utils serverPort] error:&err])
{
}
[self setupAudioUnit];
}
-(void) setupAudioUnit
{
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp = AudioComponentFindNext(NULL, &desc);
OSStatus status;
status = AudioComponentInstanceNew(comp, &_audioUnit);
if(status != noErr)
{
NSLog(#"Error creating AudioUnit instance");
}
// Enable input and output on AURemoteIO
// Input is enabled on the input scope of the input element
// Output is enabled on the output scope of the output element
UInt32 one = 1;
status = AudioUnitSetProperty(_audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &one, sizeof(one));
if(status != noErr)
{
NSLog(#"Error enableling AudioUnit output bus");
}
// Explicitly set the input and output client formats
// sample rate = 44100, num channels = 1, format = 16 bit int point
AudioStreamBasicDescription audioFormat = [self getAudioDescription];
status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &audioFormat, sizeof(audioFormat));
if(status != noErr)
{
NSLog(#"Error setting audio format");
}
AURenderCallbackStruct renderCallback;
renderCallback.inputProc = OutputRenderCallback;
renderCallback.inputProcRefCon = (__bridge void *)(self);
status = AudioUnitSetProperty(_audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, kOutputBus, &renderCallback, sizeof(renderCallback));
if(status != noErr)
{
NSLog(#"Error setting rendering callback");
}
// Initialize the AURemoteIO instance
status = AudioUnitInitialize(_audioUnit);
if(status != noErr)
{
NSLog(#"Error initializing audio unit");
}
}
- (AudioStreamBasicDescription)getAudioDescription {
AudioStreamBasicDescription audioDescription = {0};
audioDescription.mFormatID = kAudioFormatLinearPCM;
audioDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked | kAudioFormatFlagsNativeEndian;
audioDescription.mChannelsPerFrame = 1;
audioDescription.mBytesPerPacket = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mFramesPerPacket = 1;
audioDescription.mBytesPerFrame = sizeof(SInt16)*audioDescription.mChannelsPerFrame;
audioDescription.mBitsPerChannel = 8 * sizeof(SInt16);
audioDescription.mSampleRate = 44100.0;
return audioDescription;
}
-(void) socketDidDisconnect:(GCDAsyncSocket *)sock withError:(NSError *)err
{
if(!stopped)
if(![_socket connectToHost:ipAddress onPort:[SM_Utils serverPort] error:&err])
{
}
}
-(void) socket:(GCDAsyncSocket *)socket didReadData:(NSData *)data withTag:(long)tag
{
if(data.length > 0)
{
unsigned long len = [data length];
SInt16* byteData = (SInt16*)malloc(len);
memcpy(byteData, [data bytes], len);
// len is in bytes, so there are len/2 16-bit samples.
unsigned long sampleCount = len / 2;
double sum = 0.0;
for(unsigned long i = 0; i < sampleCount; i++) {
sum += byteData[i] * byteData[i];
}
double average = sum / sampleCount;
double rms = sqrt(average);
[_delegate animateSoundIndicator:rms];
free(byteData);
Byte* soundData = (Byte*)malloc(len);
memcpy(soundData, [data bytes], len);
if(soundData)
{
AudioBufferList *theDataBuffer = (AudioBufferList*) malloc(sizeof(AudioBufferList) * 1);
theDataBuffer->mNumberBuffers = 1;
theDataBuffer->mBuffers[0].mDataByteSize = (UInt32)len;
theDataBuffer->mBuffers[0].mNumberChannels = 1;
theDataBuffer->mBuffers[0].mData = (SInt16*)soundData;
[self appendDataToCircularBuffer:&_circularBuffer fromAudioBufferList:theDataBuffer];
// TPCircularBufferProduceBytes copies the bytes, so the temporaries can be freed here.
free(theDataBuffer);
free(soundData);
}
}
[socket readDataToLength:18432 withTimeout:-1 tag:0];
}
-(void)circularBuffer:(TPCircularBuffer *)circularBuffer withSize:(int)size {
TPCircularBufferInit(circularBuffer,size);
}
-(void)appendDataToCircularBuffer:(TPCircularBuffer*)circularBuffer
fromAudioBufferList:(AudioBufferList*)audioBufferList {
TPCircularBufferProduceBytes(circularBuffer,
audioBufferList->mBuffers[0].mData,
audioBufferList->mBuffers[0].mDataByteSize);
}
-(void)freeCircularBuffer:(TPCircularBuffer *)circularBuffer {
TPCircularBufferClear(circularBuffer);
TPCircularBufferCleanup(circularBuffer);
}
-(void) socket:(GCDAsyncSocket *)socket didConnectToHost:(NSString *)host port:(uint16_t)port
{
OSStatus status = AudioOutputUnitStart(_audioUnit);
if(status != noErr)
{
NSLog(#"Error starting audio unit");
}
[socket readDataToLength:18432 withTimeout:-1 tag:0];
[_delegate connected];
}
-(TPCircularBuffer *) outputShouldUseCircularBuffer
{
return &_circularBuffer;
}
-(void) stop
{
OSStatus status = AudioOutputUnitStop(_audioUnit);
if(status != noErr)
{
NSLog(#"Error stopping audio unit");
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
TPCircularBufferClear(&_circularBuffer);
_audioUnit = nil;
stopped = YES;
}
-(void) dealloc {
OSStatus status = AudioOutputUnitStop(_audioUnit);
if(status != noErr)
{
NSLog(#"Error stopping audio unit");
}
[UIApplication sharedApplication].idleTimerDisabled = NO;
TPCircularBufferClear(&_circularBuffer);
_audioUnit = nil;
stopped = YES;
}
@end
Some of the code is specific to my requirements, but most of it can simply be reused. I hope this helps.
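For orientation, here is a minimal sketch of how the AudioClient class above might be driven from a view controller. PlayerViewController, the header name AudioClient.h, and the IP address are all hypothetical placeholders; the delegate callbacks come from the AudioClientDelegate protocol declared earlier.

#import <UIKit/UIKit.h>
#import "AudioClient.h"   // assumed header name for the class above

@interface PlayerViewController : UIViewController <AudioClientDelegate>
@property (nonatomic, strong) AudioClient *audioClient;
@end

@implementation PlayerViewController

- (void)viewDidAppear:(BOOL)animated
{
    [super viewDidAppear:animated];
    self.audioClient = [[AudioClient alloc] initWithDelegate:self];
    [self.audioClient start:@"192.168.1.10"];   // placeholder server address
}

- (void)connected
{
    NSLog(@"Connected to the audio server");
}

- (void)animateSoundIndicator:(float)rms
{
    // Drive a level meter (or a simple RMS gate) from the value the client reports.
}

@end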
Apple has an example that does the same kind of thing:
void* MyGetOpenALAudioData(CFURLRef inFileURL, ALsizei *outDataSize, ALenum *outDataFormat, ALsizei* outSampleRate)
{
OSStatus err = noErr;
SInt64 theFileLengthInFrames = 0;
AudioStreamBasicDescription theFileFormat;
UInt32 thePropertySize = sizeof(theFileFormat);
ExtAudioFileRef extRef = NULL;
void* theData = NULL;
AudioStreamBasicDescription theOutputFormat;
// Open a file with ExtAudioFileOpen()
err = ExtAudioFileOpenURL(inFileURL, &extRef);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileOpenURL FAILED, Error = %ld\n", err); goto Exit; }
// Get the audio data format
err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileDataFormat, &thePropertySize, &theFileFormat);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileDataFormat) FAILED, Error = %ld\n", err); goto Exit; }
if (theFileFormat.mChannelsPerFrame > 2) { printf("MyGetOpenALAudioData - Unsupported Format, channel count is greater than stereo\n"); goto Exit;}
// Set the client format to 16 bit signed integer (native-endian) data
// Maintain the channel count and sample rate of the original source format
theOutputFormat.mSampleRate = theFileFormat.mSampleRate;
theOutputFormat.mChannelsPerFrame = theFileFormat.mChannelsPerFrame;
theOutputFormat.mFormatID = kAudioFormatLinearPCM;
theOutputFormat.mBytesPerPacket = 2 * theOutputFormat.mChannelsPerFrame;
theOutputFormat.mFramesPerPacket = 1;
theOutputFormat.mBytesPerFrame = 2 * theOutputFormat.mChannelsPerFrame;
theOutputFormat.mBitsPerChannel = 16;
theOutputFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
// Set the desired client (output) data format
err = ExtAudioFileSetProperty(extRef, kExtAudioFileProperty_ClientDataFormat, sizeof(theOutputFormat), &theOutputFormat);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileSetProperty(kExtAudioFileProperty_ClientDataFormat) FAILED, Error = %ld\n", err); goto Exit; }
// Get the total frame count
thePropertySize = sizeof(theFileLengthInFrames);
err = ExtAudioFileGetProperty(extRef, kExtAudioFileProperty_FileLengthFrames, &thePropertySize, &theFileLengthInFrames);
if(err) { printf("MyGetOpenALAudioData: ExtAudioFileGetProperty(kExtAudioFileProperty_FileLengthFrames) FAILED, Error = %ld\n", err); goto Exit; }
// Read all the data into memory
UInt32 theFramesToRead = (UInt32)theFileLengthInFrames;
UInt32 dataSize = theFramesToRead * theOutputFormat.mBytesPerFrame;
theData = malloc(dataSize);
if (theData)
{
AudioBufferList theDataBuffer;
theDataBuffer.mNumberBuffers = 1;
theDataBuffer.mBuffers[0].mDataByteSize = dataSize;
theDataBuffer.mBuffers[0].mNumberChannels = theOutputFormat.mChannelsPerFrame;
theDataBuffer.mBuffers[0].mData = theData;
// Read the data into an AudioBufferList
err = ExtAudioFileRead(extRef, &theFramesToRead, &theDataBuffer);
if(err == noErr)
{
// success
*outDataSize = (ALsizei)dataSize;
*outDataFormat = (theOutputFormat.mChannelsPerFrame > 1) ? AL_FORMAT_STEREO16 : AL_FORMAT_MONO16;
*outSampleRate = (ALsizei)theOutputFormat.mSampleRate;
}
else
{
// failure
free (theData);
theData = NULL; // make sure to return NULL
printf("MyGetOpenALAudioData: ExtAudioFileRead FAILED, Error = %ld\n", err); goto Exit;
}
}
Exit:
// Dispose the ExtAudioFileRef, it is no longer needed
if (extRef) ExtAudioFileDispose(extRef);
return theData;
}
You can find the sample code here. Hope this helps.
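For context, a hypothetical call site for the helper above could look like the following. The file name "sound.caf" is only a placeholder; alBufferData copies the PCM bytes, so the malloc'd block returned by MyGetOpenALAudioData can be freed afterwards.

#import <OpenAL/al.h>
#import <Foundation/Foundation.h>

// Hypothetical usage of MyGetOpenALAudioData: load a bundled file into an OpenAL buffer.
static ALuint LoadBundledSound(void)
{
    ALsizei dataSize = 0, sampleRate = 0;
    ALenum format = 0;
    NSURL *fileURL = [[NSBundle mainBundle] URLForResource:@"sound" withExtension:@"caf"];

    void *pcmData = MyGetOpenALAudioData((__bridge CFURLRef)fileURL, &dataSize, &format, &sampleRate);
    if (!pcmData) return 0;

    ALuint bufferID = 0;
    alGenBuffers(1, &bufferID);
    alBufferData(bufferID, format, pcmData, dataSize, sampleRate);  // OpenAL copies the bytes,
    free(pcmData);                                                  // so the block can be released
    return bufferID;
}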
I would like to generate a tone with the wave pattern shown below (the last mid-point should be at the bottom, though, as in the previous set).
After changing the frequency from 44.44 Hz to 45.89 Hz, it becomes:
Even though I have changed the buffer length from 1024 to 960, it still shows that the buffer length is 1024. As a result, some of the remaining tail samples end up at the mid-point level instead of at the bottom.
Below is my code:
OSStatus RenderTone(
void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList *ioData)
{
// Fixed amplitude is good enough for our purposes
const double amplitude = 2.7171;
// Get the tone parameters out of the view controller
ToneGeneratorViewController *viewController =
(ToneGeneratorViewController *)inRefCon;
double theta = viewController->theta; // 992 for 44.44 Hz, 959 for 45.89 Hz
double theta_increment = viewController->sampleRate / viewController->frequency;
int increment = ceil(theta_increment);
NSLog(@"increment = %i", increment);
const int channel = 0;
Float32 *buffer = (Float32 *)ioData->mBuffers[channel].mData;
int squareIndex = 0;
for (UInt32 frame = 0; frame < 401; frame++)
{
buffer[frame] = amplitude;
}
for (UInt32 frame = 401; frame < 419; frame++)
{
buffer[frame] = -amplitude;
}
for (UInt32 frame = 419; frame < 468; frame++)
{
buffer[frame] = amplitude;
}
for (UInt32 frame = 468; frame < 487; frame++)
{
buffer[frame] = -amplitude;
}
for (UInt32 frame = 487; frame < 536; frame++)
{
buffer[frame] = amplitude;
}
for (UInt32 frame = 536; frame < 555; frame++)
{
buffer[frame] = -amplitude;
}
for (UInt32 frame = 555; frame < 604; frame++)
{
buffer[frame] = amplitude;
}
for (UInt32 frame = 604; frame < 622; frame++)
{
buffer[frame] = -amplitude;
}
for (UInt32 frame = 622; frame < 671; frame++)
{
buffer[frame] = amplitude;
}
for (UInt32 frame = 671; frame < 690; frame++)
{
buffer[frame] = -amplitude;
}
for (UInt32 frame = 690; frame < 739; frame++)
{
buffer[frame] = amplitude;
}
for (UInt32 frame = 739; frame < 757; frame++)
{
buffer[frame] = -amplitude;
}
for (UInt32 frame = 757; frame < 806; frame++)
{
buffer[frame] = amplitude;
}
for (UInt32 frame = 806; frame < 825; frame++)
{
buffer[frame] = -amplitude;
}
for (UInt32 frame = 825; frame < 874; frame++)
{
buffer[frame] = amplitude;
}
for (UInt32 frame = 874; frame < 892; frame++)
{
buffer[frame] = -amplitude;
}
for (UInt32 frame = 892; frame < 941; frame++)
{
buffer[frame] = amplitude;
}
for (UInt32 frame = 941; frame < increment; frame++)
{
buffer[frame] = -amplitude;
}
squareIndex += 1;
if(squareIndex >= theta_increment) squareIndex -= theta_increment;
viewController->theta = theta;
return noErr;
}
void ToneInterruptionListener(void *inClientData, UInt32 inInterruptionState)
{
ToneGeneratorViewController *viewController =
(ToneGeneratorViewController *)inClientData;
[viewController stop];
}
@implementation ToneGeneratorViewController
@synthesize frequencySlider;
@synthesize playButton;
@synthesize frequencyLabel;
- (IBAction)sliderChanged:(UISlider *)slider
{
frequency = 45.9;
frequencyLabel.text = [NSString stringWithFormat:@"%4.1f Hz", frequency];
}
- (void)createToneUnit
{
// Configure the search parameters to find the default playback output unit
// (called the kAudioUnitSubType_RemoteIO on iOS but
// kAudioUnitSubType_DefaultOutput on Mac OS X)
AudioComponentDescription defaultOutputDescription;
defaultOutputDescription.componentType = kAudioUnitType_Output;
defaultOutputDescription.componentSubType = kAudioUnitSubType_RemoteIO;
defaultOutputDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
defaultOutputDescription.componentFlags = 0;
defaultOutputDescription.componentFlagsMask = 0;
// Get the default playback output unit
AudioComponent defaultOutput = AudioComponentFindNext(NULL, &defaultOutputDescription);
NSAssert(defaultOutput, @"Can't find default output");
// Create a new unit based on this that we'll use for output
OSErr err = AudioComponentInstanceNew(defaultOutput, &toneUnit);
NSAssert1(toneUnit, @"Error creating unit: %ld", err);
// Set our tone rendering function on the unit
AURenderCallbackStruct input;
input.inputProc = RenderTone;
input.inputProcRefCon = self;
err = AudioUnitSetProperty(toneUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input,
0,
&input,
sizeof(input));
NSAssert1(err == noErr, #"Error setting callback: %ld", err);
// Set the format to 32 bit, single channel, floating point, linear PCM
const int four_bytes_per_float = 4;
const int eight_bits_per_byte = 8;
AudioStreamBasicDescription streamFormat;
streamFormat.mSampleRate = sampleRate;
streamFormat.mFormatID = kAudioFormatLinearPCM;
streamFormat.mFormatFlags =
kAudioFormatFlagsNativeFloatPacked | kAudioFormatFlagIsNonInterleaved;
streamFormat.mBytesPerPacket = four_bytes_per_float;
streamFormat.mFramesPerPacket = 1;
streamFormat.mBytesPerFrame = four_bytes_per_float;
streamFormat.mChannelsPerFrame = 1;
streamFormat.mBitsPerChannel = four_bytes_per_float * eight_bits_per_byte;
err = AudioUnitSetProperty (toneUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
0,
&streamFormat,
sizeof(AudioStreamBasicDescription));
NSAssert1(err == noErr, #"Error setting stream format: %ld", err);
}
- (IBAction)togglePlay:(UIButton *)selectedButton
{
if (toneUnit)
{
AudioOutputUnitStop(toneUnit);
AudioUnitUninitialize(toneUnit);
AudioComponentInstanceDispose(toneUnit);
toneUnit = nil;
[selectedButton setTitle:NSLocalizedString(#"Play", nil) forState:0];
}
else
{
[self createToneUnit];
// Stop changing parameters on the unit
OSErr err = AudioUnitInitialize(toneUnit);
NSAssert1(err == noErr, #"Error initializing unit: %ld", err);
// Start playback
err = AudioOutputUnitStart(toneUnit);
NSAssert1(err == noErr, #"Error starting unit: %ld", err);
[selectedButton setTitle:NSLocalizedString(#"Stop", nil) forState:0];
}
}
- (void)stop
{
if (toneUnit)
{
[self togglePlay:playButton];
}
}
- (void)viewDidLoad {
[super viewDidLoad];
[self sliderChanged:frequencySlider];
sampleRate = 44100;
OSStatus result = AudioSessionInitialize(NULL, NULL, ToneInterruptionListener, self);
if (result == kAudioSessionNoError)
{
UInt32 sessionCategory = kAudioSessionCategory_MediaPlayback;
AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(sessionCategory), &sessionCategory);
}
AudioSessionSetActive(true);
}
- (void)viewDidUnload {
self.frequencyLabel = nil;
self.playButton = nil;
self.frequencySlider = nil;
AudioSessionSetActive(false);
}
When Core Audio calls your RenderTone callback it wants you to provide a particular number of audio frames per buffer. The 'inNumberFrames' parameter tells you what this number is.
(Core Audio does allow some adjustment of the hardware buffer size but this value may be altered to suit Core Audio e.g. by being rounded up to the next power of 2.)
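To illustrate that point, you can request a preferred buffer duration but must still treat inNumberFrames as authoritative. The sketch below uses AVAudioSession (rather than the deprecated C AudioSession API in the question); the 960-frame target is just the value from your example.

#import <AVFoundation/AVFoundation.h>

static void RequestPreferredBufferSize(void)
{
    AVAudioSession *session = [AVAudioSession sharedInstance];
    NSError *error = nil;

    // Ask for roughly 960 frames at 44.1 kHz.
    [session setPreferredIOBufferDuration:(960.0 / 44100.0) error:&error];
    [session setActive:YES error:&error];

    // What you actually get may correspond to 1024 frames (or some other value).
    NSLog(@"Actual IO buffer duration: %f s (~%.0f frames at 44.1 kHz)",
          session.IOBufferDuration, session.IOBufferDuration * 44100.0);
}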
So you can't adjust the callback buffer size to fit exactly one cycle of the waveform you want to generate. Instead, keep track of where you currently are in the waveform, generate as much or as little of it as the callback asks for, and continue from that point in the next callback.
In your example, if inNumberFrames is 1024, then in the first callback you would supply one complete 960-sample cycle plus the first 64 samples of the next cycle. In the second callback you would supply the remaining 896 samples of that second cycle plus the first 128 samples of your third cycle, and so on.
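Here is a minimal sketch of that bookkeeping. It is not your exact waveform: it generates a plain 50/50 square wave whose cycle position is carried across callbacks in a hypothetical ToneState struct passed via inRefCon. Your asymmetric pulse pattern would replace the simple threshold test inside the loop, but the wrap-around logic stays the same.

#import <AudioToolbox/AudioToolbox.h>

typedef struct {
    double sampleRate;   // e.g. 44100.0
    double frequency;    // e.g. 45.89
    double phase;        // samples elapsed in the current cycle, preserved between callbacks
} ToneState;

static OSStatus RenderToneSketch(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData)
{
    ToneState *state = (ToneState *)inRefCon;
    const double amplitude = 0.25;
    const double samplesPerCycle = state->sampleRate / state->frequency; // ~961 for 45.89 Hz

    Float32 *buffer = (Float32 *)ioData->mBuffers[0].mData;
    for (UInt32 frame = 0; frame < inNumberFrames; frame++) {
        // Fill whatever part of the cycle this buffer happens to cover;
        // the cycle boundary can land anywhere inside it, or span several buffers.
        buffer[frame] = (state->phase < samplesPerCycle * 0.5) ? amplitude : -amplitude;

        state->phase += 1.0;
        if (state->phase >= samplesPerCycle) {
            state->phase -= samplesPerCycle;   // wrap, keeping the fractional remainder
        }
    }
    return noErr;
}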