I'm trying to set my AVAudioSession to inactive to get back to the normal state.
My speech class:
import AVFoundation

class SSpeech: NSObject, AVSpeechSynthesizerDelegate {
    var group = DispatchGroup()
    var queue = DispatchQueue(label: "co.xxxx.speech", attributes: [])

    class var sharedInstance: SSpeech {
        struct Static {
            static var instance: SSpeech?
        }
        if Static.instance == nil {
            Static.instance = SSpeech()
        }
        return Static.instance!
    }

    required override init() {
        super.init()
        self.speechsynt.delegate = self
    }

    deinit {
        print("deinit SSpeech")
    }

    let audioSession = AVAudioSession.sharedInstance()
    var speechsynt = AVSpeechSynthesizer()
    var queueTalks = SQueue<String>()

    func pause() {
        speechsynt.pauseSpeaking(at: .word)
    }

    func talk(_ sentence: String, languageCode code: String = SUtils.selectedLanguage.code, withEndPausing: Bool = false) {
        if SUser.sharedInstance.currentUser.value!.speechOn != 1 {
            return
        }
        queue.async {
            self.queueTalks.enQueue(sentence)
            do {
                let category = AVAudioSessionCategoryPlayback
                var categoryOptions = AVAudioSessionCategoryOptions.duckOthers
                if #available(iOS 9.0, *) {
                    categoryOptions.formUnion(.interruptSpokenAudioAndMixWithOthers)
                }
                try self.audioSession.setCategory(category, with: categoryOptions)
                try self.audioSession.setActive(true)
            } catch {
                return
            }
            self.utteranceTalk(sentence, initSentence: false, speechsynt: self.speechsynt, languageCode: code, withEndPausing: withEndPausing)
            do {
                try self.audioSession.setCategory(AVAudioSessionCategoryPlayback, with: .mixWithOthers)
            } catch {
                return
            }
        }
    }

    func utteranceTalk(_ sentence: String, initSentence: Bool, speechsynt: AVSpeechSynthesizer, languageCode: String = "en-US", withEndPausing: Bool = false) {
        if SUser.sharedInstance.currentUser.value!.speechOn != 1 {
            return
        }
        let nextSpeech = AVSpeechUtterance(string: sentence)
        nextSpeech.voice = AVSpeechSynthesisVoice(language: languageCode)
        if !initSentence {
            nextSpeech.rate = 0.4
        }
        if withEndPausing {
            nextSpeech.postUtteranceDelay = 0.2
        }
        speechsynt.speak(nextSpeech)
    }

    func speechSynthesizer(_ synthesizer: AVSpeechSynthesizer, didFinish utterance: AVSpeechUtterance) {
        print("Speaker has finished talking")
        queue.async {
            do {
                try self.audioSession.setActive(false, with: .notifyOthersOnDeactivation)
            } catch {} // deactivation can fail while audio objects are still running
        }
    }
}
My method is correctly called, but my audio session is still active when the utterance has finished. I've tried a lot of things, but nothing works.
I would suggest using an AVAudioPlayer. It has very easy start and stop commands.
First, declare the audio player as a variable:
var soundEffect: AVAudioPlayer!
Then select the file you need and start playing:
let path = Bundle.main.path(forResource: "Untitled2.wav", ofType: nil)!
let url = URL(fileURLWithPath: path)
do {
    let sound = try AVAudioPlayer(contentsOf: url) // `try` needs a do/catch (or try!) here
    soundEffect = sound
    sound.numberOfLoops = -1
    sound.play()
} catch {
    print("Could not load the sound file: \(error)")
}
And to stop the audio player:
if soundEffect != nil {
    soundEffect.stop()
    soundEffect = nil
}
You cannot stop or deactivate the AudioSession; your app gets it upon launching. From the documentation:
An audio session is the intermediary between your app and iOS used to configure your app’s audio behavior. Upon launch, your app automatically gets a singleton audio session.
So the -setActive: method does not make your AudioSession "active"; it just puts its category and mode configuration into action. To get back to the "normal state", you can restore the default settings or just call setActive(false, with: .notifyOthersOnDeactivation); that will be enough.
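For example, a minimal deactivation call looks like this (wrapped in do/catch because setActive can throw):
do {
    try AVAudioSession.sharedInstance().setActive(false, with: .notifyOthersOnDeactivation)
} catch {
    print("Failed to deactivate the audio session: \(error)")
}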
An excerpt from the documentation of AVAudioSession:
Discussion
If another active audio session has higher priority than yours (for example, a phone call), and neither audio session allows mixing, attempting to activate your audio session fails. Deactivating your session will fail if any associated audio objects (such as queues, converters, players, or recorders) are currently running.
My guess is that the failure to deactivate the session is caused by the running process(es) of your queue, as highlighted in the documentation quote above.
You should probably make the deactivation synchronous instead of asynchronous, or make sure that all the running actions on your queue have been processed.
Give this a try:
func speechSynthesizer(_ synthesizer: AVSpeechSynthesizer, didFinish utterance: AVSpeechUtterance) {
    print("Speaker has finished talking")
    queue.sync { // <---- `async` changed to `sync`
        do {
            try self.audioSession.setActive(false, with: .notifyOthersOnDeactivation)
        } catch {}
    }
}
The environment for this is iOS 13.6 and Swift 5. I have a very simple app that successfully plays an MP3 file in the foreground or background. I added MPRemoteCommandCenter play and pause command handlers to it. I play the sound file in the foreground and then pause it.
When I tap the play button from the lock screen, my code calls audioPlayer.play(), which returns true. I hear the sound start playing again, but the currentTime of the player does not advance. After that, the play and pause buttons on the lock screen do nothing. When I foreground the app again, the play button plays from where it was before I went to the lock screen.
Here is my AudioPlayer class:
import AVFoundation
import MediaPlayer
class AudioPlayer: NSObject, RemoteAudioCommandDelegate { // NSObject is required for the #selector-based Timer target below
var audioPlayer = AVAudioPlayer()
let remoteCommandHandler = RemoteAudioCommandHandler()
var timer:Timer!
func play(title: String) {
let path = Bundle.main.path(forResource: title, ofType: "mp3")!
let url = URL(fileURLWithPath: path)
do {
try AVAudioSession.sharedInstance().setCategory(AVAudioSession.Category.playback)
try AVAudioSession.sharedInstance().setActive(true)
audioPlayer = try AVAudioPlayer(contentsOf: url)
remoteCommandHandler.delegate = self
remoteCommandHandler.enableDisableRemoteCommands(true)
timer = Timer.scheduledTimer(timeInterval: 1.0, target: self, selector: #selector(updateNowPlayingInfo), userInfo: nil, repeats: true)
} catch let error as NSError {
print("error = \(error)")
}
}
func play() {
print ("audioPlayer.play() returned \(audioPlayer.play())")
}
func pause() {
audioPlayer.pause()
}
func stop() {
audioPlayer.stop()
}
func currentTime() -> TimeInterval {
return audioPlayer.currentTime
}
func setCurrentTime(_ time:TimeInterval) {
audioPlayer.currentTime = time
}
@objc func updateNowPlayingInfo() {
// Hard-code the nowPlayingInfo since this is a simple test app
var nowPlayingDict =
[MPMediaItemPropertyTitle: "Tin Man",
MPMediaItemPropertyAlbumTitle: "The Complete Greatest Hits",
MPMediaItemPropertyAlbumTrackNumber: NSNumber(value: UInt(10) as UInt),
MPMediaItemPropertyArtist: "America",
MPMediaItemPropertyPlaybackDuration: 208,
MPNowPlayingInfoPropertyPlaybackRate: NSNumber(value: 1.0 as Float)] as [String : Any]
nowPlayingDict[MPNowPlayingInfoPropertyElapsedPlaybackTime] = NSNumber(value: audioPlayer.currentTime as Double)
MPNowPlayingInfoCenter.default().nowPlayingInfo = nowPlayingDict
}
}
Here is my RemoteCommandHandler class:
import Foundation
import MediaPlayer
protocol RemoteAudioCommandDelegate: class {
func play()
func pause()
}
class RemoteAudioCommandHandler: NSObject {
weak var delegate: RemoteAudioCommandDelegate?
var remoteCommandCenter = MPRemoteCommandCenter.shared()
var playTarget: Any? = nil
var pauseTarget: Any? = nil
func enableDisableRemoteCommands(_ enabled: Bool) {
print("Called with enabled = \(enabled)")
remoteCommandCenter.playCommand.isEnabled = enabled
remoteCommandCenter.pauseCommand.isEnabled = enabled
if enabled {
addRemoteCommandHandlers()
} else {
removeRemoteCommandHandlers()
}
}
fileprivate func addRemoteCommandHandlers() {
print( "Entered")
if playTarget == nil {
print( "Adding playTarget")
playTarget = remoteCommandCenter.playCommand.addTarget { (event) -> MPRemoteCommandHandlerStatus in
print("addRemoteCommandHandlers calling delegate play")
self.delegate?.play()
return .success
}
}
if pauseTarget == nil {
print( "Adding pauseTarget")
pauseTarget = remoteCommandCenter.pauseCommand.addTarget { (event) -> MPRemoteCommandHandlerStatus in
print("addRemoteCommandHandlers calling delegate pause")
self.delegate?.pause()
return .success
}
}
}
fileprivate func removeRemoteCommandHandlers() {
print( "Entered")
if playTarget != nil {
print( "Removing playTarget")
remoteCommandCenter.playCommand.removeTarget(playTarget)
playTarget = nil
}
if pauseTarget != nil {
print( "Removing pauseTarget")
remoteCommandCenter.pauseCommand.removeTarget(pauseTarget)
pauseTarget = nil
}
}
}
I will gladly supply further info as required, because I'm baffled as to why this relatively straightforward (in my mind) code doesn't work.
Assistance is much appreciated!
After some more debugging, I found that the AVAudioPlayer started to play the sound from the lock screen, but stopped again shortly after.
I mitigated the problem by adding a Timer. The timer checks whether the last command issued by the user was play while the sound is not actually playing, and restarts playback if so. I also update that status when the user selects pause or the song reaches its natural end.
I am still at a loss for an actual fix for this problem.
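A sketch of that watchdog (hypothetical: the lastCommand property and the restart logic are my reconstruction, not the poster's actual code):
// Inside AudioPlayer: track the last user command and restart playback
// if the player has silently stopped after a lock-screen tap.
enum LastCommand { case play, pause }
var lastCommand: LastCommand = .pause
var watchdog: Timer?

func startWatchdog() {
    watchdog = Timer.scheduledTimer(withTimeInterval: 1.0, repeats: true) { [weak self] _ in
        guard let self = self else { return }
        if self.lastCommand == .play && !self.audioPlayer.isPlaying {
            self.audioPlayer.play()
        }
    }
}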
I am working on a camera that is wrapped up in a base viewController, with a delegate style interface for callbacks, so all I have to do as a client is subclass the camera view controller, implement the delegate methods, and add the UI buttons.
My question is about recording video. Video recording is started on a unique background task to ensure that the recording can be written to a temporary file. This is done on my session queue:
private let sessionQueue = DispatchQueue(label: "com.andrewferrarone.sessionQueue")
public func startRecording()
{
guard let movieFileOutput = self.movieFileOutput else { return }
//get video preview layer's video orientation on main queue
guard let videoPreviewLayerOrientation = self.previewView.videoPreviewLayer?.connection.videoOrientation else {
print("handleRecord: videoPreviewLayer is nil")
return
}
self.sessionQueue.async {
if !movieFileOutput.isRecording {
if UIDevice.current.isMultitaskingSupported {
self.backgroundRecordingID = UIApplication.shared.beginBackgroundTask(expirationHandler: nil)
}
//update orientation on the movie file output video connection before recording
let movieFileOutputConnection = self.movieFileOutput?.connection(withMediaType: AVMediaTypeVideo)
movieFileOutputConnection?.videoOrientation = videoPreviewLayerOrientation
//start recording to a temporary file:
let outputFileName = UUID().uuidString
let outputFilePath = (NSTemporaryDirectory() as NSString).appendingPathComponent((outputFileName as NSString).appendingPathExtension("mov")!)
movieFileOutput.startRecording(toOutputFileURL: URL(fileURLWithPath: outputFilePath), recordingDelegate: self)
}
}
}
so the recording is set up as a background task dispatched to self.sessionQueue. When I stop recording, I receive an AVCaptureFileOutputRecordingDelegate callback. In this method, I want to call back my delegate with the file path and then clean up. How do I ensure that the delegate can persist the recording from the temporary file path before cleanup happens and the temporary file is removed?
public func capture(_ captureOutput: AVCaptureFileOutput!, didFinishRecordingToOutputFileAt outputFileURL: URL!, fromConnections connections: [Any]!, error: Error!)
{
//cleanup func for later
func cleanup()
{
let path = outputFileURL.path
if FileManager.default.fileExists(atPath: path) {
do {
try FileManager.default.removeItem(atPath: path)
}
catch let error {
print("Could not remove file at url: \(outputFileURL), error: \(error.localizedDescription)")
}
}
if let currentBackgroundRecordingID = self.backgroundRecordingID {
self.backgroundRecordingID = UIBackgroundTaskInvalid
if currentBackgroundRecordingID != UIBackgroundTaskInvalid {
UIApplication.shared.endBackgroundTask(currentBackgroundRecordingID)
}
}
}
var success = true
if error != nil {
print("Movie file finishing error: \(error)")
success = ((error as NSError).userInfo[AVErrorRecordingSuccessfullyFinishedKey] as AnyObject).boolValue
}
DispatchQueue.main.async {
self.delegate?.camera(self, didFinishRecordingToOutputFileAt: outputFileURL, success: success)
}
cleanup()
}
So I call my delegate back on the main queue with the results, but then I need to call cleanup(). Should I do this on the main queue right after calling back my delegate? Is that safe? If I leave it the way it is now, we are on self.sessionQueue, and I am unsure whether cleanup() will happen before the delegate method implementation has had time to persist the recording. If anyone can give me some insight into what is going on and what would be the safest thing to do here, that would be great. According to Apple's documentation, you should not assume that the AVCaptureFileOutputRecordingDelegate didFinishRecordingToOutputFileAt method is called on a specific thread. Thanks for your time and help!
How about:
DispatchQueue.main.async {
self.delegate?.camera(self, didFinishRecordingToOutputFileAt: outputFileURL, success: success)
self.sessionQueue.async {
cleanup()
}
}
I think that would be the standard way of handling this situation. When the delegate method finishes, you assume that the delegate is done with the file (or has copied it somewhere else).
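For that contract to hold, the delegate must persist the file synchronously before returning. A minimal sketch of a client implementation (the signature mirrors the question's callback; CameraController is a placeholder for the poster's base view controller type, and the Documents-directory move is my assumption about how a subclass would persist the recording):
func camera(_ camera: CameraController, didFinishRecordingToOutputFileAt outputFileURL: URL, success: Bool) {
    guard success else { return }
    // Move the temp file somewhere permanent before returning,
    // because cleanup() runs after this method finishes.
    let documents = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    let destination = documents.appendingPathComponent(outputFileURL.lastPathComponent)
    do {
        try FileManager.default.moveItem(at: outputFileURL, to: destination)
    } catch {
        print("Could not persist recording: \(error)")
    }
}
If the file is moved rather than copied, cleanup() simply finds nothing left to delete, which its fileExists check already handles.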
I have been trying for a while to properly mix the SKTTSPlayer with the AVAudioSession, and I have many problems that I can't resolve.
I want the SKTTSPlayer to duck the other audio when it is playing and unduck the other audio when it has finished playing.
Normally, with the AVSpeechSynthesizer, it is pretty easy to duck and unduck the other audio. Here is a block of code that does the job:
override func viewDidLoad() {
super.viewDidLoad()
var audioSession = AVAudioSession.sharedInstance()
do {
try audioSession.setCategory(AVAudioSessionCategoryPlayback, withOptions: .DuckOthers)
} catch {
// TODO: Handle error
}
synth.delegate = self
speak("I really want to unduck this session")
}
func setAudioSessionActive(beActive: Bool) {
do {
try AVAudioSession.sharedInstance().setActive(beActive)
print("Setting AVAudiosession active: ", beActive)
} catch let error as NSError {
print("Setting AVAudiosession state failed: \(error.description)")
}
}
func speak(string: String) {
let utterance = AVSpeechUtterance(string: string)
utterance.rate = AVSpeechUtteranceDefaultSpeechRate / 2
utterance.voice = AVSpeechSynthesisVoice(language: language)
synth.speakUtterance(utterance)
}
func speechSynthesizer(synthesizer: AVSpeechSynthesizer, didFinishSpeechUtterance utterance: AVSpeechUtterance) {
if (synthesizer.speaking) {
return
}
setAudioSessionActive(false)
}
As you can see, this does the job pretty easily: you activate the session once, and then every time an utterance finishes, you deactivate the session.
It is not so simple with the SKTTSPlayer. Even though the SKTTSPlayer appears to be an AVSpeechSynthesizer object modified by Skobbler, you cannot unduck a session by just deactivating it. If you do that, the session will unduck and then duck again.
Here is a block of code that reproduces the error:
override func viewDidLoad() {
super.viewDidLoad()
var audioSession = AVAudioSession.sharedInstance()
do {
try audioSession.setCategory(AVAudioSessionCategoryPlayback, withOptions: .DuckOthers)
} catch {
// TODO: Handle error
}
let advisorSettings = SKAdvisorSettings()
advisorSettings.language = SKAdvisorLanguage.EN_US
advisorSettings.advisorType = SKAdvisorType.TextToSpeech
SKRoutingService.sharedInstance().advisorConfigurationSettings = advisorSettings
let ttsSettings = SKAdvisorTTSSettings()
ttsSettings.rate = 0.08
ttsSettings.pitchMultiplier = 0.8
ttsSettings.volume = 1
ttsSettings.preUtteranceDelay = 0.1
ttsSettings.postUtteranceDelay = 0.1
SKTTSPlayer.sharedInstance().textToSpeechConfig = ttsSettings
SKTTSPlayer.sharedInstance().delegate = self
speak("I really want to unduck this session")
}
func setAudioSessionActive(beActive: Bool) {
do {
try AVAudioSession.sharedInstance().setActive(beActive)
print("Setting AVAudiosession active: ", beActive)
} catch let error as NSError {
print("Setting AVAudiosession state failed: \(error.description)")
}
}
func speak(string: String) {
SKTTSPlayer.sharedInstance().playString(string, forLanguage: SKAdvisorLanguage.EN_US)
}
func speechSynthesizer(synthesizer: AVSpeechSynthesizer, didFinishSpeechUtterance utterance: AVSpeechUtterance) {
setAudioSessionActive(false)
}
func TTSPlayer(TTSPlayer: SKTTSPlayer!, willPlayUtterance utterance: AVSpeechUtterance!) {
NSLog("will play")
}
The only workaround I found for this problem is to play a blank sound with an AVAudioPlayer. This sound is played every time an utterance finishes, and the session is unducked when the blank sound ends.
override func viewDidLoad() {
super.viewDidLoad()
var audioSession = AVAudioSession.sharedInstance()
do {
try audioSession.setCategory(AVAudioSessionCategoryPlayback, withOptions: .DuckOthers)
} catch {
// TODO: Handle error
}
let advisorSettings = SKAdvisorSettings()
advisorSettings.language = SKAdvisorLanguage.EN_US
advisorSettings.advisorType = SKAdvisorType.TextToSpeech
SKRoutingService.sharedInstance().advisorConfigurationSettings = advisorSettings
let ttsSettings = SKAdvisorTTSSettings()
ttsSettings.rate = 0.08
ttsSettings.pitchMultiplier = 0.8
ttsSettings.volume = 1
ttsSettings.preUtteranceDelay = 0.1
ttsSettings.postUtteranceDelay = 0.1
SKTTSPlayer.sharedInstance().textToSpeechConfig = ttsSettings
SKTTSPlayer.sharedInstance().delegate = self
speak("I really want to unduck this session")
}
func setAudioSessionActive(beActive: Bool) {
do {
try AVAudioSession.sharedInstance().setActive(beActive)
print("Setting AVAudiosession active: ", beActive)
} catch let error as NSError {
print("Setting AVAudiosession state failed: \(error.description)")
}
}
func speak(string: String) {
SKTTSPlayer.sharedInstance().playString(string, forLanguage: SKAdvisorLanguage.EN_US)
}
func speechSynthesizer(synthesizer: AVSpeechSynthesizer, didFinishSpeechUtterance utterance: AVSpeechUtterance) {
initializeAudioPlayer()
}
func TTSPlayer(TTSPlayer: SKTTSPlayer!, willPlayUtterance utterance: AVSpeechUtterance!) {
NSLog("will play")
}
func initializeAudioPlayer() {
    guard let blankSoundURL = NSBundle.mainBundle().pathForResource("blankSound", ofType: "mp3") else {
        return
    }
    guard NSFileManager.defaultManager().fileExistsAtPath(blankSoundURL) else {
        return
    }
    audioPlayer = try? AVAudioPlayer(contentsOfURL: NSURL(fileURLWithPath: blankSoundURL), fileTypeHint: nil)
    audioPlayer.delegate = self
    audioPlayer.prepareToPlay()
    audioPlayer.play()
}
func audioPlayerDidFinishPlaying(player: AVAudioPlayer, successfully flag: Bool) {
setAudioSessionActive(false)
}
This is, I think, needlessly complicated, and it makes the AVAudioSession hard to work with.
Do you have any ideas why I can't unduck a session with the SKTTSPlayer in the normal way?
Thank you for reading. I know this is a pretty long post, but I think it might help someone.
Using the TTS option:
1. Set our audio engine to TTS (this way you'll receive the text that needs to be played).
2. Implement the didUpdateFilteredAudioInstruction callback; this is where the text instruction is received.
3. At this point you have to play the instruction using your custom TTS engine; this part is specific to your third-party engine.
4. Return false (this will stop the default TTS engine from playing the instruction again).
The end code will look something like this:
- (BOOL)routingService:(SKRoutingService *)routingService didUpdateFilteredAudioInstruction:(NSString *)instruction forLanguage:(SKAdvisorLanguage)language {
    // your code goes here - you need to play the "instruction" in a certain "language"
    return NO;
}
Using your own AVSpeechSynthesizer:
If it's a TTS engine issue, then the only way of addressing this would be to change the TTS engine you are using (we have limited control over it; see http://developer.skobbler.com/docs/ios/2.5.1/Classes/SKAdvisorTTSSettings.html). See also the iOS API documentation: https://developer.apple.com/library/prerelease/ios/documentation/AVFoundation/Reference/AVSpeechSynthesizer_Ref/index.html
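For the first approach, a rough Swift sketch of driving your own AVSpeechSynthesizer from that callback (Swift 2 style to match the question; the bridged delegate signature, the retained synth property, and the hard-coded voice are assumptions):
func routingService(routingService: SKRoutingService!, didUpdateFilteredAudioInstruction instruction: String!, forLanguage language: SKAdvisorLanguage) -> Bool {
    let utterance = AVSpeechUtterance(string: instruction)
    utterance.voice = AVSpeechSynthesisVoice(language: "en-US") // map `language` to a BCP 47 code as needed
    synth.speakUtterance(utterance) // `synth` must be retained, e.g. as a property
    return false // stop the default TTS engine from playing the instruction again
}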
I'm trying to get multiple sound files to play on an AVAudioPlayer instance; however, when one sound plays, the other stops. I can't get more than one sound to play at a time. Here is my code:
import AVFoundation
class GSAudio {
static var instance: GSAudio!
var soundFileNameURL: NSURL = NSURL()
var soundFileName = ""
var soundPlay = AVAudioPlayer()
func playSound (soundFile: String){
GSAudio.instance = self
soundFileName = soundFile
soundFileNameURL = NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource(soundFileName, ofType: "aif", inDirectory:"Sounds")!)
do{
try soundPlay = AVAudioPlayer(contentsOfURL: soundFileNameURL)
} catch {
print("Could not play sound file!")
}
soundPlay.prepareToPlay()
soundPlay.play()
}
}
Can anyone please help me by telling me how to get more than one sound file to play at a time? Any help is much appreciated.
Many thanks,
Kai
The reason the audio stops is that you only have one AVAudioPlayer set up, so when you ask the class to play another sound you replace the old instance with a new instance of AVAudioPlayer. You are basically overwriting it.
You can either create two instances of the GSAudio class, and then call playSound on each of them, or make the class a generic audio manager that uses a dictionary of audioPlayers.
I much prefer the latter option, as it allows for cleaner code and is also more efficient. You can check to see if you have already made a player for the sound before, rather than making a new player for example.
Anyway, I re-made your class for you so that it will play multiple sounds at once. It can also play the same sound over itself (it doesn't replace the previous instance of the sound). Hope it helps!
The class is a singleton, so to access the class use:
GSAudio.sharedInstance
for example, to play a sound you would call:
GSAudio.sharedInstance.playSound("AudioFileName")
and to play a number of sounds at once:
GSAudio.sharedInstance.playSounds("AudioFileName1", "AudioFileName2")
or you could load up the sounds in an array somewhere and call the playSounds function that accepts an array:
let sounds = ["AudioFileName1", "AudioFileName2"]
GSAudio.sharedInstance.playSounds(sounds)
I also added a playSounds function that allows you to delay each sound being played in a cascade kind of format. So:
let soundFileNames = ["SoundFileName1", "SoundFileName2", "SoundFileName3"]
GSAudio.sharedInstance.playSounds(soundFileNames, withDelay: 1.0)
would play sound2 a second after sound1, then sound3 would play a second after sound2 etc.
Here is the class:
class GSAudio: NSObject, AVAudioPlayerDelegate {
static let sharedInstance = GSAudio()
private override init() {}
var players = [NSURL:AVAudioPlayer]()
var duplicatePlayers = [AVAudioPlayer]()
func playSound (soundFileName: String){
let soundFileNameURL = NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource(soundFileName, ofType: "aif", inDirectory:"Sounds")!)
if let player = players[soundFileNameURL] { //player for sound has been found
if player.playing == false { //player is not in use, so use that one
player.prepareToPlay()
player.play()
} else { // player is in use, create a new, duplicate, player and use that instead
let duplicatePlayer = try! AVAudioPlayer(contentsOfURL: soundFileNameURL)
//use 'try!' because we know the URL worked before.
duplicatePlayer.delegate = self
//assign delegate for duplicatePlayer so delegate can remove the duplicate once it's stopped playing
duplicatePlayers.append(duplicatePlayer)
//add duplicate to array so it doesn't get removed from memory before finishing
duplicatePlayer.prepareToPlay()
duplicatePlayer.play()
}
} else { //player has not been found, create a new player with the URL if possible
do{
let player = try AVAudioPlayer(contentsOfURL: soundFileNameURL)
players[soundFileNameURL] = player
player.prepareToPlay()
player.play()
} catch {
print("Could not play sound file!")
}
}
}
func playSounds(soundFileNames: [String]){
for soundFileName in soundFileNames {
playSound(soundFileName)
}
}
func playSounds(soundFileNames: String...){
for soundFileName in soundFileNames {
playSound(soundFileName)
}
}
func playSounds(soundFileNames: [String], withDelay: Double) { //withDelay is in seconds
for (index, soundFileName) in soundFileNames.enumerate() {
let delay = withDelay*Double(index)
let _ = NSTimer.scheduledTimerWithTimeInterval(delay, target: self, selector: #selector(playSoundNotification(_:)), userInfo: ["fileName":soundFileName], repeats: false)
}
}
func playSoundNotification(timer: NSTimer) {
    // The timer passes itself to the selector; the file name travels in its userInfo
    if let userInfo = timer.userInfo as? [String: String], soundFileName = userInfo["fileName"] {
        playSound(soundFileName)
    }
}
}
func audioPlayerDidFinishPlaying(player: AVAudioPlayer, successfully flag: Bool) {
    // Remove the duplicate player once it is done, without force-unwrapping
    if let index = duplicatePlayers.indexOf(player) {
        duplicatePlayers.removeAtIndex(index)
    }
}
}
Here's a Swift 4 version of @Oliver Wilkinson's code with some safety checks and improved code formatting:
import Foundation
import AVFoundation
class GSAudio: NSObject, AVAudioPlayerDelegate {
static let sharedInstance = GSAudio()
private override init() { }
var players: [URL: AVAudioPlayer] = [:]
var duplicatePlayers: [AVAudioPlayer] = []
func playSound(soundFileName: String) {
guard let bundle = Bundle.main.path(forResource: soundFileName, ofType: "aac") else { return }
let soundFileNameURL = URL(fileURLWithPath: bundle)
if let player = players[soundFileNameURL] { //player for sound has been found
if !player.isPlaying { //player is not in use, so use that one
player.prepareToPlay()
player.play()
} else { // player is in use, create a new, duplicate, player and use that instead
do {
let duplicatePlayer = try AVAudioPlayer(contentsOf: soundFileNameURL)
duplicatePlayer.delegate = self
//assign delegate for duplicatePlayer so delegate can remove the duplicate once it's stopped playing
duplicatePlayers.append(duplicatePlayer)
//add duplicate to array so it doesn't get removed from memory before finishing
duplicatePlayer.prepareToPlay()
duplicatePlayer.play()
} catch let error {
print(error.localizedDescription)
}
}
} else { //player has not been found, create a new player with the URL if possible
do {
let player = try AVAudioPlayer(contentsOf: soundFileNameURL)
players[soundFileNameURL] = player
player.prepareToPlay()
player.play()
} catch let error {
print(error.localizedDescription)
}
}
}
func playSounds(soundFileNames: [String]) {
for soundFileName in soundFileNames {
playSound(soundFileName: soundFileName)
}
}
func playSounds(soundFileNames: String...) {
for soundFileName in soundFileNames {
playSound(soundFileName: soundFileName)
}
}
func playSounds(soundFileNames: [String], withDelay: Double) { //withDelay is in seconds
for (index, soundFileName) in soundFileNames.enumerated() {
let delay = withDelay * Double(index)
let _ = Timer.scheduledTimer(timeInterval: delay, target: self, selector: #selector(playSoundNotification(_:)), userInfo: ["fileName": soundFileName], repeats: false)
}
}
@objc func playSoundNotification(_ timer: Timer) {
    // The timer passes itself to the selector; read the file name from its userInfo
    if let userInfo = timer.userInfo as? [String: String], let soundFileName = userInfo["fileName"] {
        playSound(soundFileName: soundFileName)
    }
}
func audioPlayerDidFinishPlaying(_ player: AVAudioPlayer, successfully flag: Bool) {
if let index = duplicatePlayers.index(of: player) {
duplicatePlayers.remove(at: index)
}
}
}
I have created a helper library that simplifies playing sounds in Swift. It creates multiple instances of AVAudioPlayer to allow playing the same sound multiple times concurrently. You can download it from Github or import with Cocoapods.
Here is the link: SwiftySound
The usage is as simple as it can be:
Sound.play(file: "sound.mp3")
All answers are posting pages of code; it doesn't need to be that complicated.
// Keep strong references so players are not deallocated while playing
var soundPlayers: [AVAudioPlayer] = []

// Create a new player for the sound; it doesn't matter which sound file this is
let soundPlayer = try! AVAudioPlayer(contentsOf: url) // assumes `url` points at a valid sound file
soundPlayer.numberOfLoops = 0
soundPlayer.volume = 1
soundPlayer.play()
soundPlayers.append(soundPlayer)

// In a timer-based loop or other callback such as a display link,
// prune out players that are done, thus deallocating them
checkSfx: for player in soundPlayers {
    if player.isPlaying { continue } else {
        if let index = soundPlayers.index(of: player) {
            soundPlayers.remove(at: index)
            break checkSfx
        }
    }
}
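A sketch of driving that pruning from a repeating Timer (the one-second interval is arbitrary, and soundPlayers is assumed to be a property of the owning class):
// Prune finished players once per second until the timer is invalidated.
pruneTimer = Timer.scheduledTimer(withTimeInterval: 1.0, repeats: true) { [weak self] _ in
    guard let self = self else { return }
    self.soundPlayers = self.soundPlayers.filter { $0.isPlaying }
}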
Swift 5+
Compiling some of the previous answers, with improved code style and reusability.
I usually avoid loose strings throughout my projects and instead use custom protocols for objects that will hold those string properties.
I prefer this to the enum approach simply because enumerations tend to couple your project together quite quickly. Every time you add a new case, you must edit the same file containing the enumeration, somewhat breaking the open-closed principle from SOLID and increasing the chance of error.
In this particular case, you could have a protocol that defines sounds:
protocol Sound {
func getFileName() -> String
func getFileExtension() -> String
func getVolume() -> Float
func isLoop() -> Bool
}
extension Sound {
func getVolume() -> Float { 1 }
func isLoop() -> Bool { false }
}
When you need a new sound, you can simply create a new structure or class that implements this protocol (it will even be suggested by autocomplete if your IDE, like Xcode, supports it), giving you benefits similar to those of an enumeration; it also works much better in medium to large multi-framework projects.
(Usually I leave volume and other configuration with default implementations, as they are less frequently customized.)
For instance, you could have a coin drop sound:
struct CoinDropSound: Sound {
func getFileName() -> String { "coin_drop" }
func getFileExtension() -> String { "wav" }
}
Then, you could use a singleton SoundManager that would take care of managing playing audio files
import AVFAudio
final class SoundManager: NSObject, AVAudioPlayerDelegate {
static let shared = SoundManager()
private var audioPlayers: [URL: AVAudioPlayer] = [:]
private var duplicateAudioPlayers: [AVAudioPlayer] = []
private override init() {}
func play(sound: Sound) {
let fileName = sound.getFileName()
let fileExtension = sound.getFileExtension()
guard let url = Bundle.main.url(forResource: fileName, withExtension: fileExtension),
let player = getAudioPlayer(for: url) else { return }
player.volume = sound.getVolume()
player.numberOfLoops = sound.isLoop() ? -1 : 0 // -1 loops indefinitely
player.prepareToPlay()
player.play()
}
private func getAudioPlayer(for url: URL) -> AVAudioPlayer? {
guard let player = audioPlayers[url] else {
let player = try? AVAudioPlayer(contentsOf: url)
audioPlayers[url] = player
return player
}
guard player.isPlaying else { return player }
guard let duplicatePlayer = try? AVAudioPlayer(contentsOf: url) else { return nil }
duplicatePlayer.delegate = self
duplicateAudioPlayers.append(duplicatePlayer)
return duplicatePlayer
}
func audioPlayerDidFinishPlaying(_ player: AVAudioPlayer, successfully flag: Bool) {
duplicateAudioPlayers.removeAll { $0 == player }
}
}
Here I created a helper, getAudioPlayer, to be able to return early from code execution and make use of guard let.
Using guard let more often and preferring less nested code can greatly improve readability.
To use this SoundManager from anywhere in your project, simply access its shared instance and pass an object that conforms to Sound.
For example, given the previous CoinDropSound:
SoundManager.shared.play(sound: CoinDropSound())
You could also omit the sound parameter label, as it may improve readability:
class SoundManager {
// ...
func play(_ sound: Sound) {
// ...
}
// ...
}
And then:
SoundManager.shared.play(CoinDropSound())
I have background music in my game. I tried to make a method to play and pause the music.
My app crashes when I press the play/pause music button.
I don't understand why it's not working.
The method in the Main Scene: (Edited)
var SoundOnOff = SKSpriteNode(imageNamed: "MusicOn.png")
if (SoundOnOff.containsPoint(location)) {
if BackgroundMusic.sharedHelper.isMuted() {
//BackgroundMusic.sharedHelper.mute()
BackgroundMusic.sharedHelper.pause()
self.SoundOnOff.texture = SKTexture(imageNamed:"MusicOff.png")
print("Music Off!")
}
else {
//BackgroundMusic.sharedHelper.unmute()
BackgroundMusic.sharedHelper.resume()
self.SoundOnOff.texture = SKTexture(imageNamed:"MusicOn.png")
print("Music On!")
}
}
BackgroundMusic Class (Edited)
import AVFoundation
class BackgroundMusic: NSObject {
internal let localDefaults = NSUserDefaults.standardUserDefaults()
static let sharedHelper = BackgroundMusic()
var BgMusic: AVAudioPlayer?
/// Keys
internal struct Key {
static let muted = "MusicMuteState"
}
override init() {
super.init()
print("Music helper init")
playBackgroundMusic()
if isMuted() {
mute()
}
}
func playBackgroundMusic() {
let aSound = NSURL(fileURLWithPath: NSBundle.mainBundle().pathForResource("Secrets of the Schoolyard", ofType: "mp3")!)
do {
BgMusic = try AVAudioPlayer(contentsOfURL:aSound)
BgMusic!.numberOfLoops = -1
BgMusic!.prepareToPlay()
BgMusic!.play()
} catch {
print("Cannot play the file")
}
}
func mute() {
BgMusic!.volume = 0
localDefaults.setBool(true, forKey: Key.muted)
}
/// Unmute
func unmute() {
BgMusic!.volume = 1
localDefaults.setBool(false, forKey: Key.muted)
}
// Check mute state
func isMuted() -> Bool {
if localDefaults.boolForKey(Key.muted) {
return true
} else {
return false
}
}
}
Your class is a subclass of AVAudioPlayer, but you're not actually using your own instance to play the music. When you use BackgroundMusic.sharedHelper.playing, you're referencing an instance of your class, not the BgMusic player that is actually playing. Since your class hasn't been initialized with any sound file, I presume it cannot properly process .playing.
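As a minimal sketch, pause and resume should go through the player instance itself, something like this inside BackgroundMusic (these methods are my assumption of what the pause()/resume() calls in the question should do; they are not shown in the posted class):
// Sketch: operate on the AVAudioPlayer that is actually playing,
// not on the BackgroundMusic helper object itself.
func pause() {
    BgMusic?.pause()
}

func resume() {
    BgMusic?.play()
}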
I recently answered a similar question. I am also using my own helper for music, which is similar to yours. There are a few things that you should do differently in your helper.
On and off music/sfx for sprite kit
GitHub: https://github.com/crashoverride777/Swift2-SpriteKit-Music-Helper