UI processing causes audio glitches - should audio play on a background thread? - ios
When the device is rotated, my app runs quite a bit of logic to lay the view out differently for portrait and landscape - e.g. removing views, changing frame sizes, redrawing views, switching images, etc.
This works flawlessly on newer devices, but on older devices the audio glitches considerably if this happens while audio is playing.
Is this most likely because the audio code is being blocked by the UI work? Should my audio play on a background thread? Even on an iPhone 4 the CPU is only at about 20% during this transition, so I don't think it's a CPU issue.
I am using the loadPresetDemo example of AUSampler to play audio, and my timer thread looks like this:
-(void)start
{
playing = YES;
if([NSThread isMainThread]) {
NSThread *thread = [[NSThread alloc] initWithTarget:self selector:@selector(timerStart) object:nil];
[thread start];
return;
}
[[NSThread currentThread] setThreadPriority:1.0];
}
-(void)timerStart
{
NSRunLoop *timerRunLoop = [NSRunLoop currentRunLoop];
timer = [NSTimer scheduledTimerWithTimeInterval:intervalInMs / 1000.0 // floating-point divisor in case intervalInMs is an integer type
target:self
selector:@selector(beat)
userInfo:nil
repeats:YES];
[timerRunLoop run];
}
- (void)beat
{
if(playing) {
[audioPlayer beat];
// UI updates must go back to the main thread
dispatch_async(dispatch_get_main_queue(), ^{
[mView setBeat:audioPlayer.currentBeat];
});
}
}
AUSampler:
#import "MainViewController.h"
#import <AssertMacros.h>
// some MIDI constants:
enum {
kMIDIMessage_NoteOn = 0x9,
kMIDIMessage_NoteOff = 0x8,
};
#define kLowNote 48
#define kHighNote 72
#define kMidNote 60
// private class extension
@interface MainViewController ()
@property (readwrite) Float64 graphSampleRate;
@property (readwrite) AUGraph processingGraph;
@property (readwrite) AudioUnit samplerUnit;
@property (readwrite) AudioUnit ioUnit;
- (OSStatus) loadSynthFromPresetURL:(NSURL *) presetURL;
- (void) registerForUIApplicationNotifications;
- (BOOL) createAUGraph;
- (void) configureAndStartAudioProcessingGraph: (AUGraph) graph;
- (void) stopAudioProcessingGraph;
- (void) restartAudioProcessingGraph;
@end
@implementation MainViewController
@synthesize graphSampleRate = _graphSampleRate;
@synthesize currentPresetLabel = _currentPresetLabel;
@synthesize presetOneButton = _presetOneButton;
@synthesize presetTwoButton = _presetTwoButton;
@synthesize lowNoteButton = _lowNoteButton;
@synthesize midNoteButton = _midNoteButton;
@synthesize highNoteButton = _highNoteButton;
@synthesize samplerUnit = _samplerUnit;
@synthesize ioUnit = _ioUnit;
@synthesize processingGraph = _processingGraph;
#pragma mark -
#pragma mark Audio setup
// Create an audio processing graph.
- (BOOL) createAUGraph {
OSStatus result = noErr;
AUNode samplerNode, ioNode;
// Specify the common portion of an audio unit's identify, used for both audio units
// in the graph.
AudioComponentDescription cd = {};
cd.componentManufacturer = kAudioUnitManufacturer_Apple;
cd.componentFlags = 0;
cd.componentFlagsMask = 0;
// Instantiate an audio processing graph
result = NewAUGraph (&_processingGraph);
NSCAssert (result == noErr, @"Unable to create an AUGraph object. Error code: %d '%.4s'", (int) result, (const char *)&result);
//Specify the Sampler unit, to be used as the first node of the graph
cd.componentType = kAudioUnitType_MusicDevice;
cd.componentSubType = kAudioUnitSubType_Sampler;
// Add the Sampler unit node to the graph
result = AUGraphAddNode (self.processingGraph, &cd, &samplerNode);
NSCAssert (result == noErr, @"Unable to add the Sampler unit to the audio processing graph. Error code: %d '%.4s'", (int) result, (const char *)&result);
// Specify the Output unit, to be used as the second and final node of the graph
cd.componentType = kAudioUnitType_Output;
cd.componentSubType = kAudioUnitSubType_RemoteIO;
// Add the Output unit node to the graph
result = AUGraphAddNode (self.processingGraph, &cd, &ioNode);
NSCAssert (result == noErr, @"Unable to add the Output unit to the audio processing graph. Error code: %d '%.4s'", (int) result, (const char *)&result);
// Open the graph
result = AUGraphOpen (self.processingGraph);
NSCAssert (result == noErr, @"Unable to open the audio processing graph. Error code: %d '%.4s'", (int) result, (const char *)&result);
// Connect the Sampler unit to the output unit
result = AUGraphConnectNodeInput (self.processingGraph, samplerNode, 0, ioNode, 0);
NSCAssert (result == noErr, @"Unable to interconnect the nodes in the audio processing graph. Error code: %d '%.4s'", (int) result, (const char *)&result);
// Obtain a reference to the Sampler unit from its node
result = AUGraphNodeInfo (self.processingGraph, samplerNode, 0, &_samplerUnit);
NSCAssert (result == noErr, @"Unable to obtain a reference to the Sampler unit. Error code: %d '%.4s'", (int) result, (const char *)&result);
// Obtain a reference to the I/O unit from its node
result = AUGraphNodeInfo (self.processingGraph, ioNode, 0, &_ioUnit);
NSCAssert (result == noErr, @"Unable to obtain a reference to the I/O unit. Error code: %d '%.4s'", (int) result, (const char *)&result);
return YES;
}
// Starting with instantiated audio processing graph, configure its
// audio units, initialize it, and start it.
- (void) configureAndStartAudioProcessingGraph: (AUGraph) graph {
OSStatus result = noErr;
UInt32 framesPerSlice = 0;
UInt32 framesPerSlicePropertySize = sizeof (framesPerSlice);
UInt32 sampleRatePropertySize = sizeof (self.graphSampleRate);
result = AudioUnitInitialize (self.ioUnit);
NSCAssert (result == noErr, @"Unable to initialize the I/O unit. Error code: %d '%.4s'", (int) result, (const char *)&result);
// Set the I/O unit's output sample rate.
result = AudioUnitSetProperty (
self.ioUnit,
kAudioUnitProperty_SampleRate,
kAudioUnitScope_Output,
0,
&_graphSampleRate,
sampleRatePropertySize
);
NSAssert (result == noErr, @"AudioUnitSetProperty (set I/O unit output stream sample rate). Error code: %d '%.4s'", (int) result, (const char *)&result);
// Obtain the value of the maximum-frames-per-slice from the I/O unit.
result = AudioUnitGetProperty (
self.ioUnit,
kAudioUnitProperty_MaximumFramesPerSlice,
kAudioUnitScope_Global,
0,
&framesPerSlice,
&framesPerSlicePropertySize
);
NSCAssert (result == noErr, @"Unable to retrieve the maximum frames per slice property from the I/O unit. Error code: %d '%.4s'", (int) result, (const char *)&result);
// Set the Sampler unit's output sample rate.
result = AudioUnitSetProperty (
self.samplerUnit,
kAudioUnitProperty_SampleRate,
kAudioUnitScope_Output,
0,
&_graphSampleRate,
sampleRatePropertySize
);
NSAssert (result == noErr, @"AudioUnitSetProperty (set Sampler unit output stream sample rate). Error code: %d '%.4s'", (int) result, (const char *)&result);
// Set the Sampler unit's maximum frames-per-slice.
result = AudioUnitSetProperty (
self.samplerUnit,
kAudioUnitProperty_MaximumFramesPerSlice,
kAudioUnitScope_Global,
0,
&framesPerSlice,
framesPerSlicePropertySize
);
NSAssert (result == noErr, @"AudioUnitSetProperty (set Sampler unit maximum frames per slice). Error code: %d '%.4s'", (int) result, (const char *)&result);
if (graph) {
// Initialize the audio processing graph.
result = AUGraphInitialize (graph);
NSAssert (result == noErr, @"Unable to initialize AUGraph object. Error code: %d '%.4s'", (int) result, (const char *)&result);
// Start the graph
result = AUGraphStart (graph);
NSAssert (result == noErr, @"Unable to start audio processing graph. Error code: %d '%.4s'", (int) result, (const char *)&result);
// Print out the graph to the console
CAShow (graph);
}
}
// Load the Trombone preset
- (IBAction)loadPresetOne:(id)sender {
NSURL *presetURL = [[NSURL alloc] initFileURLWithPath:[[NSBundle mainBundle] pathForResource:@"Trombone" ofType:@"aupreset"]];
if (presetURL) {
NSLog(@"Attempting to load preset '%@'\n", [presetURL description]);
self.currentPresetLabel.text = @"Trombone";
}
else {
NSLog(@"COULD NOT GET PRESET PATH!");
}
[self loadSynthFromPresetURL: presetURL];
}
// Load the Vibraphone preset
- (IBAction)loadPresetTwo:(id)sender {
NSURL *presetURL = [[NSURL alloc] initFileURLWithPath:[[NSBundle mainBundle] pathForResource:@"Vibraphone" ofType:@"aupreset"]];
if (presetURL) {
NSLog(@"Attempting to load preset '%@'\n", [presetURL description]);
self.currentPresetLabel.text = @"Vibraphone";
}
else {
NSLog(@"COULD NOT GET PRESET PATH!");
}
[self loadSynthFromPresetURL: presetURL];
}
// Load a synthesizer preset file and apply it to the Sampler unit
- (OSStatus) loadSynthFromPresetURL: (NSURL *) presetURL {
CFDataRef propertyResourceData = 0;
Boolean status;
SInt32 errorCode = 0;
OSStatus result = noErr;
// Read from the URL and convert into a CFData chunk
status = CFURLCreateDataAndPropertiesFromResource (
kCFAllocatorDefault,
(__bridge CFURLRef) presetURL,
&propertyResourceData,
NULL,
NULL,
&errorCode
);
NSAssert (status == YES && propertyResourceData != 0, @"Unable to create data and properties from a preset. Error code: %d '%.4s'", (int) errorCode, (const char *)&errorCode);
// Convert the data object into a property list
CFPropertyListRef presetPropertyList = 0;
CFPropertyListFormat dataFormat = 0;
CFErrorRef errorRef = 0;
presetPropertyList = CFPropertyListCreateWithData (
kCFAllocatorDefault,
propertyResourceData,
kCFPropertyListImmutable,
&dataFormat,
&errorRef
);
// Set the class info property for the Sampler unit using the property list as the value.
if (presetPropertyList != 0) {
result = AudioUnitSetProperty(
self.samplerUnit,
kAudioUnitProperty_ClassInfo,
kAudioUnitScope_Global,
0,
&presetPropertyList,
sizeof(CFPropertyListRef)
);
CFRelease(presetPropertyList);
}
if (errorRef) CFRelease(errorRef);
CFRelease (propertyResourceData);
return result;
}
// Set up the audio session for this app.
- (BOOL) setupAudioSession {
AVAudioSession *mySession = [AVAudioSession sharedInstance];
// Specify that this object is the delegate of the audio session, so that
// this object's endInterruption method will be invoked when needed.
[mySession setDelegate: self];
// Assign the Playback category to the audio session. This category supports
// audio output with the Ring/Silent switch in the Silent position.
NSError *audioSessionError = nil;
[mySession setCategory: AVAudioSessionCategoryPlayback error: &audioSessionError];
if (audioSessionError != nil) {NSLog (@"Error setting audio session category."); return NO;}
// Request a desired hardware sample rate.
self.graphSampleRate = 44100.0; // Hertz
[mySession setPreferredHardwareSampleRate: self.graphSampleRate error: &audioSessionError];
if (audioSessionError != nil) {NSLog (@"Error setting preferred hardware sample rate."); return NO;}
// Activate the audio session
[mySession setActive: YES error: &audioSessionError];
if (audioSessionError != nil) {NSLog (@"Error activating the audio session."); return NO;}
// Obtain the actual hardware sample rate and store it for later use in the audio processing graph.
self.graphSampleRate = [mySession currentHardwareSampleRate];
return YES;
}
#pragma mark -
#pragma mark Audio control
// Play the low note
- (IBAction) startPlayLowNote:(id)sender {
UInt32 noteNum = kLowNote;
UInt32 onVelocity = 127;
UInt32 noteCommand = kMIDIMessage_NoteOn << 4 | 0;
OSStatus result = noErr;
require_noerr (result = MusicDeviceMIDIEvent (self.samplerUnit, noteCommand, noteNum, onVelocity, 0), logTheError);
logTheError:
if (result != noErr) NSLog (@"Unable to start playing the low note. Error code: %d '%.4s'\n", (int) result, (const char *)&result);
}
// Stop the low note
- (IBAction) stopPlayLowNote:(id)sender {
UInt32 noteNum = kLowNote;
UInt32 noteCommand = kMIDIMessage_NoteOff << 4 | 0;
OSStatus result = noErr;
require_noerr (result = MusicDeviceMIDIEvent (self.samplerUnit, noteCommand, noteNum, 0, 0), logTheError);
logTheError:
if (result != noErr) NSLog (@"Unable to stop playing the low note. Error code: %d '%.4s'\n", (int) result, (const char *)&result);
}
// Play the mid note
- (IBAction) startPlayMidNote:(id)sender {
UInt32 noteNum = kMidNote;
UInt32 onVelocity = 127;
UInt32 noteCommand = kMIDIMessage_NoteOn << 4 | 0;
OSStatus result = noErr;
require_noerr (result = MusicDeviceMIDIEvent(self.samplerUnit, noteCommand, noteNum, onVelocity, 0), logTheError);
logTheError:
if (result != noErr) NSLog (@"Unable to start playing the mid note. Error code: %d '%.4s'\n", (int) result, (const char *)&result);
}
// Stop the mid note
- (IBAction) stopPlayMidNote:(id)sender {
UInt32 noteNum = kMidNote;
UInt32 noteCommand = kMIDIMessage_NoteOff << 4 | 0;
OSStatus result = noErr;
require_noerr (result = MusicDeviceMIDIEvent(self.samplerUnit, noteCommand, noteNum, 0, 0), logTheError);
logTheError:
if (result != noErr) NSLog (@"Unable to stop playing the mid note. Error code: %d '%.4s'\n", (int) result, (const char *)&result);
}
// Play the high note
- (IBAction) startPlayHighNote:(id)sender {
UInt32 noteNum = kHighNote;
UInt32 onVelocity = 127;
UInt32 noteCommand = kMIDIMessage_NoteOn << 4 | 0;
OSStatus result = noErr;
require_noerr (result = MusicDeviceMIDIEvent(self.samplerUnit, noteCommand, noteNum, onVelocity, 0), logTheError);
logTheError:
if (result != noErr) NSLog (@"Unable to start playing the high note. Error code: %d '%.4s'\n", (int) result, (const char *)&result);
}
// Stop the high note
- (IBAction)stopPlayHighNote:(id)sender {
UInt32 noteNum = kHighNote;
UInt32 noteCommand = kMIDIMessage_NoteOff << 4 | 0;
OSStatus result = noErr;
require_noerr (result = MusicDeviceMIDIEvent(self.samplerUnit, noteCommand, noteNum, 0, 0), logTheError);
logTheError:
if (result != noErr) NSLog (@"Unable to stop playing the high note. Error code: %d '%.4s'", (int) result, (const char *)&result);
}
// Stop the audio processing graph
- (void) stopAudioProcessingGraph {
OSStatus result = noErr;
if (self.processingGraph) result = AUGraphStop(self.processingGraph);
NSAssert (result == noErr, @"Unable to stop the audio processing graph. Error code: %d '%.4s'", (int) result, (const char *)&result);
}
// Restart the audio processing graph
- (void) restartAudioProcessingGraph {
OSStatus result = noErr;
if (self.processingGraph) result = AUGraphStart (self.processingGraph);
NSAssert (result == noErr, @"Unable to restart the audio processing graph. Error code: %d '%.4s'", (int) result, (const char *)&result);
}
#pragma mark -
#pragma mark Audio session delegate methods
// Respond to an audio interruption, such as a phone call or a Clock alarm.
- (void) beginInterruption {
// Stop any notes that are currently playing.
[self stopPlayLowNote: self];
[self stopPlayMidNote: self];
[self stopPlayHighNote: self];
// Interruptions do not put an AUGraph object into a "stopped" state, so
// do that here.
[self stopAudioProcessingGraph];
}
// Respond to the ending of an audio interruption.
- (void) endInterruptionWithFlags: (NSUInteger) flags {
NSError *endInterruptionError = nil;
[[AVAudioSession sharedInstance] setActive: YES
error: &endInterruptionError];
if (endInterruptionError != nil) {
NSLog (#"Unable to reactivate the audio session.");
return;
}
if (flags & AVAudioSessionInterruptionFlags_ShouldResume) {
/*
In a shipping application, check here to see if the hardware sample rate changed from
its previous value by comparing it to graphSampleRate. If it did change, reconfigure
the ioInputStreamFormat struct to use the new sample rate, and set the new stream
format on the two audio units. (On the mixer, you just need to change the sample rate).
Then call AUGraphUpdate on the graph before starting it.
*/
[self restartAudioProcessingGraph];
}
}
#pragma mark - Application state management
// The audio processing graph should not run when the screen is locked or when the app has
// transitioned to the background, because there can be no user interaction in those states.
// (Leaving the graph running with the screen locked wastes a significant amount of energy.)
//
// Responding to these UIApplication notifications allows this class to stop and restart the
// graph as appropriate.
- (void) registerForUIApplicationNotifications {
NSNotificationCenter *notificationCenter = [NSNotificationCenter defaultCenter];
[notificationCenter addObserver: self
selector: @selector (handleResigningActive:)
name: UIApplicationWillResignActiveNotification
object: [UIApplication sharedApplication]];
[notificationCenter addObserver: self
selector: @selector (handleBecomingActive:)
name: UIApplicationDidBecomeActiveNotification
object: [UIApplication sharedApplication]];
}
- (void) handleResigningActive: (id) notification {
[self stopPlayLowNote: self];
[self stopPlayMidNote: self];
[self stopPlayHighNote: self];
[self stopAudioProcessingGraph];
}
- (void) handleBecomingActive: (id) notification {
[self restartAudioProcessingGraph];
}
- (id) initWithNibName: (NSString *) nibNameOrNil bundle: (NSBundle *) nibBundleOrNil {
self = [super initWithNibName: nibNameOrNil bundle: nibBundleOrNil];
// If object initialization fails, return immediately.
if (!self) {
return nil;
}
// Set up the audio session for this app, in the process obtaining the
// hardware sample rate for use in the audio processing graph.
BOOL audioSessionActivated = [self setupAudioSession];
NSAssert (audioSessionActivated == YES, @"Unable to set up audio session.");
// Create the audio processing graph; place references to the graph and to the Sampler unit
// into the processingGraph and samplerUnit instance variables.
[self createAUGraph];
[self configureAndStartAudioProcessingGraph: self.processingGraph];
return self;
}
- (void) viewDidLoad {
[super viewDidLoad];
// Load the Trombone preset so the app is ready to play upon launch.
[self loadPresetOne: self];
[self registerForUIApplicationNotifications];
}
- (void) viewDidUnload {
self.currentPresetLabel = nil;
self.presetOneButton = nil;
self.presetTwoButton = nil;
self.lowNoteButton = nil;
self.midNoteButton = nil;
self.highNoteButton = nil;
[super viewDidUnload];
}
- (BOOL) shouldAutorotateToInterfaceOrientation:(UIInterfaceOrientation)interfaceOrientation {
// Return YES for supported orientations
return (interfaceOrientation == UIInterfaceOrientationPortrait);
}
- (void) didReceiveMemoryWarning {
// Releases the view if it doesn't have a superview.
[super didReceiveMemoryWarning];
// Release any cached data, images, etc that aren't in use.
}
@end
An NSTimer, on any thread, is unsuitable for precise musical timing, or for anything where the app needs timing accuracy better than roughly 50 milliseconds.
It's likely that your high-priority timer thread is interfering with the Core Audio thread on these older devices. To test this theory, try lowering the thread's priority and firing the timer less often. Strictly speaking this would be a bug in Apple's code: the audio thread is the highest-priority thread on iOS, and the OS is supposed to ensure that no user code preempts it long enough to cause stutters, but it does happen. That said, you probably shouldn't be using a timer like this to trigger audio in most kinds of musical applications; instead, use the timestamps Core Audio provides in the render callback (see RemoteIO) to handle timing. Here's a pretty good discussion:
http://atastypixel.com/blog/experiments-with-precise-timing-in-ios/
"Also note that there are often ways to eliminate the need for precise timing of this nature, by architecting code appropriately — when it comes to audio, for example, CoreAudio provides a very accurate time base in render callbacks. For things like metronomes or audio synthesizers, it’s always better to establish a starting time, and use the difference between the current time and the starting time in order to determine state, rather than using a timer to advance the state."
Related
Why does this audio session fail to recognise an interruption?
My app synthesises audio from a lookup table. It plays audio successfully but crashes the moment I try to stop playing. Audio playback only needs to exit without restarting so the requirements for handling the interruption are basic. I reread Apple’s Audio Session Programming Guide including the section Responding to Interruptions. However the method handleAudioSessionInterruption does not seem to register an interrupt so I’m obviously missing something. EDIT See my answer. When I began work on this I knew next to nothing about NSNotificationCenter so I welcome any suggestion for improvement. Two methods set up the audio session to play in the foreground. - (void)setUpAudio { if (_playQueue == NULL) { if ([self setUpAudioSession] == TRUE) { [self setUpPlayQueue]; [self setUpPlayQueueBuffers]; } } } - (BOOL)setUpAudioSession { BOOL success = NO; NSError *audioSessionError = nil; AVAudioSession *session = [AVAudioSession sharedInstance]; // Set up notifications [[NSNotificationCenter defaultCenter] addObserver:self selector:#selector(handleAudioSessionInterruption:) name:AVAudioSessionInterruptionNotification object:session]; // Set category success = [session setCategory:AVAudioSessionCategoryPlayback error:&audioSessionError]; if (!success) { NSLog(#"%# Error setting category: %#", NSStringFromSelector(_cmd), [audioSessionError localizedDescription]); // Exit early return success; } // Set mode success = [session setMode:AVAudioSessionModeDefault error:&audioSessionError]; if (!success) { NSLog(#"%# Error setting mode: %#", NSStringFromSelector(_cmd), [audioSessionError localizedDescription]); // Exit early return success; } // Set some preferred values NSTimeInterval bufferDuration = .005; // I would prefer a 5ms buffer duration success = [session setPreferredIOBufferDuration:bufferDuration error:&audioSessionError]; if (audioSessionError) { NSLog(#"Error %ld, %# %i", (long)audioSessionError.code, audioSessionError.localizedDescription, success); } double sampleRate = _audioFormat.mSampleRate; // I would prefer a sample rate of 44.1kHz success = [session setPreferredSampleRate:sampleRate error:&audioSessionError]; if (audioSessionError) { NSLog(#"Error %ld, %# %i", (long)audioSessionError.code, audioSessionError.localizedDescription, success); } success = [session setActive:YES error:&audioSessionError]; if (!success) { NSLog(#"%# Error activating %#", NSStringFromSelector(_cmd), [audioSessionError localizedDescription]); } // Get current values sampleRate = session.sampleRate; bufferDuration = session.IOBufferDuration; NSLog(#"Sample Rate:%0.0fHz I/O Buffer Duration:%f", sampleRate, bufferDuration); return success; } And here is the method that handles the interruption when I press the stop button. However it does not respond. EDIT The correct method needs block, not selector. See my answer. 
- (void)handleAudioSessionInterruption:(NSNotification*)notification { if (_playQueue) { NSNumber *interruptionType = [[notification userInfo] objectForKey:AVAudioSessionInterruptionTypeKey]; NSNumber *interruptionOption = [[notification userInfo] objectForKey:AVAudioSessionInterruptionOptionKey]; NSLog(#"in-app Audio playback will be stopped by %# %lu", notification.name, (unsigned long)interruptionType.unsignedIntegerValue); switch (interruptionType.unsignedIntegerValue) { case AVAudioSessionInterruptionTypeBegan: { if (interruptionOption.unsignedIntegerValue == AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation) { NSLog(#"notify other apps that audio is now available"); } } break; default: break; } } }
Answer My method to handle AudioSessionInterruption did not subscribe the observer correctly with NSNotificationCentre. This has been fixed by adding observer using block, not selector. The solution replaces deprecated AVAudioSession delegate methods in AudioBufferPlayer, an extremely fit for purpose audio player initially developed for direct audio synthesis by Matthias Hollejmans. Several deprecated functions including InterruptionListenerCallback were later upgraded by Mario Diana. The solution (below) uses NSNotification allowing users to exit AVAudioSession gracefully by pressing a button. Here is the relevant code. PlayViewController.m UIButton action performs an orderly shutdown of synth, invalidates the timer and posts the notification that will exit AVAudioSession - (void)fromEscButton:(UIButton*)button { [self stopConcertClock]; ... // code for Exit PlayViewController not shown } - (void)stopConcertClock { [_synthLock lock]; [_synth stopAllNotes]; [_synthLock unlock]; [timer invalidate]; timer = nil; [self postAVAudioSessionInterruptionNotification]; NSLog(#"Esc button pressed or sequence ended. Exit PlayViewController "); } - (void) postAVAudioSessionInterruptionNotification { [[NSNotificationCenter defaultCenter] postNotificationName:#"AVAudioSessionInterruptionNotification" object:self]; } Initialising the AVAudioSession includes subscribing for a single interruption notification before starting startAudioPlayer in AudioBufferPlayer - (id)init { if (self = [super init]) { NSLog(#"PlayViewController starts MotionListener and AudioSession"); [self startAudioSession]; } return self; } - (void)startAudioSession { // Synth and the AudioBufferPlayer must use the same sample rate. _synthLock = [[NSLock alloc] init]; float sampleRate = 44100.0f; // Initialise synth to fill the audio buffer with audio samples. _synth = [[Synth alloc] initWithSampleRate:sampleRate]; // Initialise the audio buffer. _player = [[AudioBufferPlayer alloc] initWithSampleRate:sampleRate channels:1 bitsPerChannel:16 packetsPerBuffer:1024]; _player.gain = 0.9f; __block __weak PlayViewController *weakSelf = self; _player.block = ^(AudioQueueBufferRef buffer, AudioStreamBasicDescription audioFormat) { PlayViewController *blockSelf = weakSelf; if (blockSelf != nil) { // Lock access to the synth. This callback runs on an internal Audio Queue thread and we don't // want another thread to change the Synth's state while we're still filling up the audio buffer. [blockSelf -> _synthLock lock]; // Calculate how many packets fit into this buffer. Remember that a packet equals one frame // because we are dealing with uncompressed audio; a frame is a set of left+right samples // for stereo sound, or a single sample for mono sound. Each sample consists of one or more // bytes. So for 16-bit mono sound, each packet is 2 bytes. For stereo it would be 4 bytes. int packetsPerBuffer = buffer -> mAudioDataBytesCapacity / audioFormat.mBytesPerPacket; // Let the Synth write into the buffer. The Synth just knows how to fill up buffers // in a particular format and does not care where they come from. int packetsWritten = [blockSelf -> _synth fillBuffer:buffer->mAudioData frames:packetsPerBuffer]; // We have to tell the buffer how many bytes we wrote into it. 
buffer -> mAudioDataByteSize = packetsWritten * audioFormat.mBytesPerPacket; [blockSelf -> _synthLock unlock]; } }; // Set up notifications [self subscribeForBlockNotification]; [_player startAudioPlayer]; } - (void)subscribeForBlockNotification { NSNotificationCenter * __weak center = [NSNotificationCenter defaultCenter]; id __block token = [center addObserverForName:#"AVAudioSessionInterruptionNotification" object:nil queue:[NSOperationQueue mainQueue] usingBlock:^(NSNotification *note) { NSLog(#"Received the notification!"); [_player stopAudioPlayer]; [center removeObserver:token]; }]; } PlayViewController.h These are relevant interface settings #interface PlayViewController : UIViewController <EscButtonDelegate> { ... // Initialisation of audio player and synth AudioBufferPlayer* player; Synth* synth; NSLock* synthLock; } ... - (AudioBufferPlayer*)player; - (Synth*)synth; #end AudioBufferPlayer.m - (void)stopAudioPlayer { [self stopPlayQueue]; [self tearDownPlayQueue]; [self tearDownAudioSession]; } - (void)stopPlayQueue { if (_audioPlaybackQueue != NULL) { AudioQueuePause(_audioPlaybackQueue); AudioQueueReset(_audioPlaybackQueue); _playing = NO; } } - (void)tearDownPlayQueue { AudioQueueDispose(_audioPlaybackQueue, NO); _audioPlaybackQueue = NULL; } - (BOOL)tearDownAudioSession { NSError *deactivationError = nil; BOOL success = [[AVAudioSession sharedInstance] setActive:NO withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation error:nil]; if (!success) { NSLog(#"%s AVAudioSession Error: %#", __FUNCTION__, deactivationError); } return success; }
iOS 9: How to Detect Silent Mode?
As AudioSessionInitialize and AudioSessionGetProperty are deprecated, I am getting the wrong return values:
CFStringRef state = nil;
UInt32 propertySize = sizeof(CFStringRef);
AudioSessionInitialize(NULL, NULL, NULL, NULL);
OSStatus status = AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &propertySize, &state);
[[AVAudioSession sharedInstance] setActive:YES error:nil];
if (status == kAudioSessionNoError) {
return CFStringGetLength(state) == 0; // YES = silent
}
return NO;
From this code (which I found here), I get the same incorrect result no matter what state the device is actually in. How can I detect whether silent mode is currently ON on the device?
The API is no longer available. But the work around is simple: Play a short audio and detect time that it finishes playing If the time that it finishes playing is shorter than the actual length of the audio, than the device is muted Hoishing posted a helper class MuteChecker on his blog. Use it as the following: self.muteChecker = [[MuteChecker alloc] initWithCompletionBlk:^(NSTimeInterval lapse, BOOL muted) { NSLog(#"muted: %d", muted); }]; [self.muteChecker check]; This is the complete code for the class, you can simple copy past to your project: MuteChecker.h #import <Foundation/Foundation.h> #import <AudioToolbox/AudioToolbox.h> typedef void (^MuteCheckCompletionHandler)(NSTimeInterval lapse, BOOL muted); // this class must use with a MuteChecker.caf (a 0.2 sec mute sound) in Bundle #interface MuteChecker : NSObject -(instancetype)initWithCompletionBlk:(MuteCheckCompletionHandler)completionBlk; -(void)check; #end MuteChecker.cpp #import "MuteChecker.h" void MuteCheckCompletionProc(SystemSoundID ssID, void* clientData); #interface MuteChecker () #property (nonatomic,assign) SystemSoundID soundId; #property (strong) MuteCheckCompletionHandler completionBlk; #property (nonatomic, strong)NSDate *startTime; -(void)completed; #end void MuteCheckCompletionProc(SystemSoundID ssID, void* clientData){ MuteChecker *obj = (__bridge MuteChecker *)clientData; [obj completed]; } #implementation MuteChecker -(void)playMuteSound { self.startTime = [NSDate date]; AudioServicesPlaySystemSound(self.soundId); } -(void)completed { NSDate *now = [NSDate date]; NSTimeInterval t = [now timeIntervalSinceDate:self.startTime]; BOOL muted = (t > 0.1)? NO : YES; self.completionBlk(t, muted); } -(void)check { if (self.startTime == nil) { [self playMuteSound]; } else { NSDate *now = [NSDate date]; NSTimeInterval lastCheck = [now timeIntervalSinceDate:self.startTime]; if (lastCheck > 1) { //prevent checking interval shorter then the sound length [self playMuteSound]; } } } - (instancetype)initWithCompletionBlk:(MuteCheckCompletionHandler)completionBlk { self = [self init]; if (self) { NSURL* url = [[NSBundle mainBundle] URLForResource:#"MuteChecker" withExtension:#"caf"]; if (AudioServicesCreateSystemSoundID((__bridge CFURLRef)url, &_soundId) == kAudioServicesNoError){ AudioServicesAddSystemSoundCompletion(self.soundId, CFRunLoopGetMain(), kCFRunLoopDefaultMode, MuteCheckCompletionProc,(__bridge void *)(self)); UInt32 yes = 1; AudioServicesSetProperty(kAudioServicesPropertyIsUISound, sizeof(_soundId),&_soundId,sizeof(yes), &yes); self.completionBlk = completionBlk; } else { NSLog(#"error setting up Sound ID"); } } return self; } - (void)dealloc { if (self.soundId != -1){ AudioServicesRemoveSystemSoundCompletion(self.soundId); AudioServicesDisposeSystemSoundID(self.soundId); } } #end Important note: you will also have to provide a short audio MuteChecker.caf for the code to work. You could download one from his blog directly or generate one yourself.
Why can't I record from RemoteIOUnit after changing AudioSession category from SoloAmbient to PlayAndRecord?
My app has both audio play and record features, and I want to only set the audio session's category to PlayAndRecord when the user initiates recording, so the standard audio playback will be muted by the mute switch, etc. I'm having a problem though, where my call to AudioUnitRender to record audio input is failing with errParam (-50) after I change the audio session category to PlayAndRecord. If I start my app using the PlayAndRecord category, then recording works correctly. #implementation MyAudioSession - (instancetype)init { NSError *error = nil; AVAudioSession *session = [AVAudioSession sharedInstance]; [session setCategory:AVAudioSessionCategorySoloAmbient]; [session setActive:YES error:&error]; } - (void)enableRecording { void (^setCategory)(void) = ^{ NSError *error; AVAudioSession *session = [AVAudioSession sharedInstance]; [session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error]; }; // Do I need to set the category from the main thread? if ([NSThread isMainThread]) { setCategory(); } else { dispatch_sync(dispatch_get_main_queue(), ^{ setCategory(); }); } } #end #interface MyRecorder { AudioUnit ioUnit_; AudioBufferList *tmpRecordListPtr_ #end #implementation MyRecorder - (instancetype)init { // Sets up AUGraph with just a RemoteIOUnit node, recording enabled, callback, etc. // Set up audio buffers tmpRecordListPtr_ = malloc(sizeof(AudioBufferList) + 64 * sizeof(AudioBuffer)); } - (OSStatus)doRecordCallback:(AudioUnitRenderActionFlags *)ioActionFlags timeStamp:(AudioTimeStamp *)inTimeStamp busNumber:(UInt32)busNumber numFrames:(UInt32)numFrames bufferOut:(AudioBufferList *)ioData { // Set up buffers... All this works fine if I initialize the audio session to // PlayAndRecord in -[MyAudioSession init] OSStatus status = AudioUnitRender(ioUnit_, ioActionFlags, inTimeStamp, busNumber, numFrames, tmpRecordListPtr_); // This fails with errParam, but only if I start my app in SoloAmbient and then // later change it to PlayAndRecord } #end OSStatus MyRecorderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, AudioTimeStamp *inTimestamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData) { MyRecorder *recorder = (MyRecorder *)inRefCon; return [recorder doRecordCallback:ioActionFlags timeStamp:inTimestamp busNumber:inBusNumber numFrames:inNumberFrames bufferOut:ioData]; } I'm testing on an iPod touch (5th gen) running iOS 7.1.2. Has anybody else encountered this issue? Any suggestions for fixes or more info I can post? EDIT: Object lifecycle is similar to: - (void)startRecording { [mySession enableRecording]; [myRecorder release]; myRecorder = [[MyRecorder alloc] init]; [myRecorder start]; // starts the AUGraph }
Without looking at your code it is difficult to comment. But I am doing something similar in my app, and I found that it is important to pay careful attention to what audio session settings can be changed only when the audio session is inactive. // Get the app's audioSession singleton object AVAudioSession* session = [AVAudioSession sharedInstance]; //error handling NSError* audioSessionError = nil; SDR_DEBUGPRINT(("Setting session not active!\n")); [session setActive:NO error:&audioSessionError]; // shut down the audio session if it is active It is important to setActive to "NO" prior to changing the session category, for instance. Failure to do so might allow render callbacks to occur while the session is being configured. Looking at the lifecycle flow, I'm trying to see where you stop the AUGraph prior to setting up the audio session for recording. The code I use for stopping the AUGraph follows. I call it prior to any attempts to reconfigure the audio session. - (void)stopAUGraph { if(mAudioGraph != nil) { Boolean isRunning = FALSE; OSStatus result = AUGraphIsRunning(mAudioGraph, &isRunning); if(result) { DEBUGPRINT(("AUGraphIsRunning result %d %08X %4.4s\n", (int)result, (int)result, (char*)&result)); return; } if(isRunning) { result = AUGraphStop(mAudioGraph); if(result) { DEBUGPRINT(("AUGraphStop result %d %08X %4.4s\n", (int)result, (int)result, (char*)&result)); return; } else { DEBUGPRINT(("mAudioGraph has been stopped!\n")); } } else { DEBUGPRINT(("mAudioGraph is already stopped!\n")); } }
You need to make sure the RemoteIO Audio Unit (the audio graph) is stopped before deactivating and/or changing the audio session type. Then (re)initialize the RemoteIO Audio Unit after setting the new session type and before (re)starting the graph, as the new session type or options may change some of the allowable settings. Also, it helps to check all the prior audio unit and audio session call error codes before any graph (re)start.
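For illustration, here is a minimal sketch of that sequence, assuming the graph is held in a processingGraph property (a placeholder name) and omitting the error checks you would want in real code.
- (void)switchSessionToPlayAndRecord
{
    // 1. Stop and uninitialize the graph before touching the session.
    Boolean isRunning = false;
    AUGraphIsRunning(self.processingGraph, &isRunning);
    if (isRunning) AUGraphStop(self.processingGraph);
    AUGraphUninitialize(self.processingGraph);

    // 2. Deactivate, change the category, then reactivate the session.
    AVAudioSession *session = [AVAudioSession sharedInstance];
    [session setActive:NO error:nil];
    [session setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
    [session setActive:YES error:nil];

    // 3. Reinitialize and restart the graph under the new session settings,
    //    checking each OSStatus result in real code.
    AUGraphInitialize(self.processingGraph);
    AUGraphStart(self.processingGraph);
}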
Stuck in loop from CFRunLoop
ViewController.h #interface ViewController : UIViewController{ CFSocketRef s; int connectFlag; } ViewController.m void receiveDataCilent(CFSocketRef cs, CFSocketCallBackType type, CFDataRef address, const void *data, void *info) { CFDataRef df = (CFDataRef) data; int len = CFDataGetLength(df); if(len <= 0) return; CFRange range = CFRangeMake(0,len); UInt8 buffer[len]; NSLog(#"Received %d bytes from socket %d\n", len, CFSocketGetNative(cs)); CFDataGetBytes(df, range, buffer); NSLog(#"Client received: %s\n", buffer); NSLog(#"As UInt8 coding: %#", df); NSLog(#"len value: %d", len); } -(void) clientConnect:(int)sender;{ s = CFSocketCreate(NULL, PF_INET, SOCK_STREAM, IPPROTO_TCP, kCFSocketDataCallBack, receiveDataCilent, NULL); struct sockaddr_in sin; struct hostent *host; memset(&sin, 0, sizeof(sin)); host = gethostbyname("localhost"); memcpy(&(sin.sin_addr), host->h_addr,host->h_length); sin.sin_family = AF_INET; sin.sin_port = htons(6666); CFDataRef address; CFRunLoopSourceRef source; address = CFDataCreate(NULL, (UInt8 *)&sin, sizeof(sin)); CFSocketConnectToAddress(s, address, 0); CFRelease(address); source = CFSocketCreateRunLoopSource(NULL, s, 0); CFRunLoopAddSource(CFRunLoopGetCurrent(), source, kCFRunLoopDefaultMode); CFRelease(source); CFRunLoopRun(); } - (void)viewDidLoad { [super viewDidLoad]; } - (void)viewDidAppear:(BOOL)animated { [super viewDidAppear:animated]; if(connectFlag == 0 ){ [self clientConnect:1]; } } First I'm sorry for messing the concept of xcode programming and threading programming I'm very new to xcode and I need to finish my game project soon This code is suppose to connect to server and waiting for message from server. I try and test it and it work fine. In view I have IBAction that can press and change view. But after I implement it to my main project. It can run but I cannot press any button or do anything. After log it out. It seem it stuck around CFRunLoopRun(); I think it stuck in loop but why when I test it. it not stuck ? In my project this code is run from another view not from ViewController like I just test. I don't know why it stuck in my project. Any help would be appreciated.
Don't run the run loop yourself on the main thread. The main event loop will run it. Edit: I should say, "within the context of a GUI app". In a command-line tool, you do need to run the run loop yourself.
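As a hedged sketch of what that means for the question's clientConnect: method (the socket setup stays exactly as posted; only the tail changes):
-(void) clientConnect:(int)sender
{
    // ... create the CFSocketRef s and call CFSocketConnectToAddress as before ...

    CFRunLoopSourceRef source = CFSocketCreateRunLoopSource(NULL, s, 0);
    CFRunLoopAddSource(CFRunLoopGetMain(), source, kCFRunLoopDefaultMode);
    CFRelease(source);
    // No CFRunLoopRun() here: in a GUI app, UIApplicationMain already runs the
    // main run loop, so the data callback fires without blocking the buttons.
}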
How to programmatically sense the iPhone mute switch?
I can't seem to find in the SDK how to programmatically sense the mute button/switch on the iPhone. When my app plays background music, it responds properly to the volume buttons without me having any code to follow that, but when I use the mute switch, it just keeps playing away. How do I test the position of the mute switch? (NOTE: My program has its own mute switch, but I'd like the physical switch to override that.)
Thanks, JPM. Indeed, the link you provide leads to the correct answer (eventually ;) ). For completeness (because S.O. should be a source of QUICK answers!):
// "Ambient" makes it respect the mute switch
// Must call this once to init session
if (!gAudioSessionInited) {
AudioSessionInterruptionListener inInterruptionListener = NULL;
OSStatus error;
if ((error = AudioSessionInitialize (NULL, NULL, inInterruptionListener, NULL))) {
NSLog(@"*** Error *** error in AudioSessionInitialize: %d.", error);
} else {
gAudioSessionInited = YES;
}
}
SInt32 ambient = kAudioSessionCategory_AmbientSound;
if (AudioSessionSetProperty (kAudioSessionProperty_AudioCategory, sizeof (ambient), &ambient)) {
NSLog(@"*** Error *** could not set Session property to ambient.");
}
I answered a similar question here (link). The relevant code: -(BOOL)silenced { #if TARGET_IPHONE_SIMULATOR // return NO in simulator. Code causes crashes for some reason. return NO; #endif CFStringRef state; UInt32 propertySize = sizeof(CFStringRef); AudioSessionInitialize(NULL, NULL, NULL, NULL); AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &propertySize, &state); if(CFStringGetLength(state) > 0) return NO; else return YES; }
Some of the code in other answers (including the accepted answer) may not work if you aren't in the ambient mode, where the mute switch is respected. I wrote the routine below to switch to ambient, read the switch, and then return to the settings I need in my app. -(BOOL)muteSwitchEnabled { #if TARGET_IPHONE_SIMULATOR // set to NO in simulator. Code causes crashes for some reason. return NO; #endif // go back to Ambient to detect the switch AVAudioSession* sharedSession = [AVAudioSession sharedInstance]; [sharedSession setCategory:AVAudioSessionCategoryAmbient error:nil]; CFStringRef state; UInt32 propertySize = sizeof(CFStringRef); AudioSessionInitialize(NULL, NULL, NULL, NULL); AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &propertySize, &state); BOOL muteSwitch = (CFStringGetLength(state) <= 0); NSLog(#"Mute switch: %d",muteSwitch); // code below here is just restoring my own audio state, YMMV _hasMicrophone = [sharedSession inputIsAvailable]; NSError* setCategoryError = nil; if (_hasMicrophone) { [sharedSession setCategory: AVAudioSessionCategoryPlayAndRecord error: &setCategoryError]; // By default PlayAndRecord plays out over the internal speaker. We want the external speakers, thanks. UInt32 ASRoute = kAudioSessionOverrideAudioRoute_Speaker; AudioSessionSetProperty (kAudioSessionProperty_OverrideAudioRoute, sizeof (ASRoute), &ASRoute ); } else // Devices with no mike don't support PlayAndRecord - we don't get playback, so use just playback as we don't have a microphone anyway [sharedSession setCategory: AVAudioSessionCategoryPlayback error: &setCategoryError]; if (setCategoryError) NSLog(#"Error setting audio category! %#", setCategoryError); return muteSwitch; }
To find out the state of the mute switch and the volume control I wrote these two functions. These are ideal if you wish to warn the user before they try creating audio output. -(NSString*)audioRoute { CFStringRef state; UInt32 propertySize = sizeof(CFStringRef); OSStatus n = AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &propertySize, &state); if( n ) { // TODO: Throw an exception NSLog( #"AudioSessionGetProperty: %#", osString( n ) ); } NSString *result = (NSString*)state; [result autorelease]; return result; } -(Float32)audioVolume { Float32 state; UInt32 propertySize = sizeof(CFStringRef); OSStatus n = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareOutputVolume, &propertySize, &state); if( n ) { // TODO: Throw an exception NSLog( #"AudioSessionGetProperty: %#", osString( n ) ); } return state; }
-(BOOL)isDeviceMuted
{
CFStringRef state;
UInt32 propertySize = sizeof(CFStringRef);
AudioSessionInitialize(NULL, NULL, NULL, NULL);
AudioSessionGetProperty(kAudioSessionProperty_AudioRoute, &propertySize, &state);
return (CFStringGetLength(state) > 0 ? NO : YES);
}
I followed the general theory here and got this to work: http://inforceapps.wordpress.com/2009/07/08/detect-mute-switch-state-on-iphone/
Here is a recap: play a short silent sound and time how long it takes to play. If the mute switch is on, the sound will finish much sooner than its actual length. I used a 500 ms sound; if it played in less than that time, the mute switch was on. I use Audio Services to play the silent sound (which always honors the mute switch). The article says you can use AVAudioPlayer to play this sound instead; if you do, I assume you'll need to set up your AVAudioSession's category to honor the mute switch, but I have not tried it.
Using Ambient mode for playing a video and PlayAndRecord mode for recording a video on the camera screen resolved the issue in our case.
The code in application:didFinishLaunchingWithOptions::
NSError *error = nil;
[[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryAmbient error:&error];
[[AVAudioSession sharedInstance] setMode:AVAudioSessionModeVideoRecording error:&error];
[[AVAudioSession sharedInstance] setActive:YES error:&error];
The code in viewWillAppear on cameraController, if you have to use the camera or recording in your app:
[[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
The code in viewWillDisappear on cameraController:
[[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryAmbient error:nil];
With these lines our application records and plays video, and the mute switch works perfectly under both iOS 8 and iOS 7.
For Swift Below framework works perfectly in device https://github.com/akramhussein/Mute Just install using pod or download from Git pod 'Mute' and use like below code import UIKit import Mute class ViewController: UIViewController { #IBOutlet weak var label: UILabel! { didSet { self.label.text = "" } } override func viewDidLoad() { super.viewDidLoad() // Notify every 2 seconds Mute.shared.checkInterval = 2.0 // Always notify on interval Mute.shared.alwaysNotify = true // Update label when notification received Mute.shared.notify = { m in self.label.text = m ? "Muted" : "Not Muted" } // Stop after 5 seconds DispatchQueue.main.asyncAfter(deadline: .now() + 5.0) { Mute.shared.isPaused = true } // Re-start after 10 seconds DispatchQueue.main.asyncAfter(deadline: .now() + 10.0) { Mute.shared.isPaused = false } } }