I'm using the AVFoundation framework. Whenever the player plays the buffer, my background music gets stopped, so I used the code below to let it continue playing regardless of the AVFoundation player.
try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord, with: [.mixWithOthers,.allowBluetooth])
try audioSession.setMode(AVAudioSessionModeDefault)
try audioSession.setActive(true)
It does work, but the problem is that the quality of the background music gets dramatically affected. The music doesn't have its bass anymore whenever the AVPlayer plays the buffer.
I want the background music to stay uninterrupted while using AVPlayer. Is it possible?
Update: I added the full code in case anyone wants to check. You can hear the difference in the background iTunes music as soon as the app is opened or the session is activated when using this code.
class ViewController: UIViewController {
var engine = AVAudioEngine()
let audioSession = AVAudioSession.sharedInstance()
let player = AVAudioPlayerNode()
let mixer = AVAudioMixerNode()
override func viewDidLoad() {
super.viewDidLoad()
do {
try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord, with: [.mixWithOthers,.allowBluetooth])
try audioSession.setMode(AVAudioSessionModeDefault)
try audioSession.setActive(true)
} catch {
}
let input = engine.inputNode
let bus = 0
let inputFormat = input.outputFormat(forBus: bus)
let recordingFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 11025.0, channels: 1, interleaved: false)
engine.attach(player)
engine.attach(mixer)
engine.connect(input, to: mixer, format: input.outputFormat(forBus: 0))
engine.connect(player, to: engine.mainMixerNode, format: recordingFormat)
mixer.installTap(onBus: bus, bufferSize: AVAudioFrameCount(inputFormat.sampleRate * 0.4), format: inputFormat, block: { (buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
let converter = AVAudioConverter(from: inputFormat, to: recordingFormat!)!
let newbuffer = AVAudioPCMBuffer(pcmFormat: recordingFormat!,frameCapacity: AVAudioFrameCount((recordingFormat?.sampleRate)! * 0.4))
let inputBlock : AVAudioConverterInputBlock = { (inNumPackets, outStatus) -> AVAudioBuffer? in
outStatus.pointee = AVAudioConverterInputStatus.haveData
let audioBuffer : AVAudioBuffer = buffer
return audioBuffer
}
var error : NSError?
converter.convert(to: newbuffer!, error: &error, withInputFrom: inputBlock)
self.player.scheduleBuffer(newbuffer!)
})
do {
try engine.start()
player.play()
} catch {
print(error)
}
}
}
Unless this is some weird mixing quirk, the quality change you report may just be that recording categories change the default audio output device to the tiny, tinny receiver (because telephones, don't ask). Override this behaviour by adding .defaultToSpeaker to your setCategory() call:
try audioSession.setCategory(AVAudioSessionCategoryPlayAndRecord, with: [.mixWithOthers,.allowBluetooth, .defaultToSpeaker])
I think you need this one:
try audioSession.setCategory(AVAudioSessionCategoryAmbient)
Documentation:
https://developer.apple.com/documentation/avfoundation/avaudiosessioncategoryambient
When you use this category, audio from other apps mixes with your audio.
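A minimal sketch of that approach (using the same shared session object as in the question; note the ambient category is playback-only, so it only fits if you can drop the microphone input):
let audioSession = AVAudioSession.sharedInstance()
do {
    // Ambient audio mixes with other apps' audio by default and never interrupts it.
    try audioSession.setCategory(AVAudioSessionCategoryAmbient)
    try audioSession.setActive(true)
} catch {
    print(error)
}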
Related
I need to play YouTube and recognize my voice at the same time. I could recognize my voice when the system speaker was selected for YouTube playback. However, voice recognition failed when a Bluetooth hands-free device such as the AirPods Pro was selected for YouTube playback. If I play sound using AVPlayer or AVAudioPlayer and recognize my voice at the same time, recognition succeeds via the Bluetooth hands-free device. Please advise how to pass my voice from the BLE hands-free device as the sound input when specifying the allowBluetooth option on the AVAudioSession.
Before playing via YouTubePlayerKit, the audioSession was set as follows. The AVAudioSession option .allowBluetooth worked fine for BLE and system sound input with AVPlayer and AVAudioPlayer, but not with YouTubePlayerKit.
let configuration = YouTubePlayer.Configuration(
isUserInteractionEnabled: false,
autoPlay: false,
showControls: false,
enableJavaScriptAPI: true,
loopEnabled: false,
playInline: true
)
do {
audioSession = AVAudioSession.sharedInstance()
try audioSession.setCategory(.playAndRecord, mode: .measurement, options: .allowBluetooth)
// try audioSession.setCategory(.playAndRecord, mode: .measurement, options: .duckOthers)
try AVAudioSession.sharedInstance().setActive(true, options: .notifyOthersOnDeactivation)
} catch _ {
handleError(withMessage:"play youtube audio session error")// failed to record
}
youTubeID = URL(string: movieURL)!.lastPathComponent
let videoID = URL(string: movieURL)?.deletingLastPathComponent()
print("youTubeID \(String(describing: youTubeID)) videoID \(String(describing: videoID))")
if (videoID)!.absoluteString.contains("https://youtu.be/"){
playYoutube = true
DispatchQueue.main.async{ [self] in
youTubePlayer = YouTubePlayer(
source: .video(id: youTubeID),
configuration: configuration)
youTubePlayerView = YouTubePlayerHostingView(player: youTubePlayer)
youTubePlayer.pause()
youTubePlayerView.frame = self.topView.bounds
self.topView.addSubview(youTubePlayerView)
self.checkYoutubeDuration(player:youTubePlayer)
}
}
And the audioEngine is set up as follows when doing voice recognition.
guard let recognizer = SFSpeechRecognizer(locale: Locale.init(identifier: "en-US")), recognizer.isAvailable else {
handleError(withMessage: "voice recobnition error")
return
}
audioEngine = AVAudioEngine()
inputNode = audioEngine.inputNode
inputNode.removeTap(onBus: 0)
guard let commonFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 2, interleaved: false) else { return }
let recordingFormat = inputNode.outputFormat(forBus: 0)
inputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
self.recognitionRequest?.append(buffer)
}
self.audioEngine.prepare()
do {
try self.audioEngine.start()
print("audioEngine.start()")
}catch{
print("audioEngine.start failed")
}
// Create a speech recognition request.
recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
recognitionRequest!.shouldReportPartialResults = true
I am building an app that needs to analyze the audio it receives from the microphone in real time. The app also needs to play a beep sound and start recording audio at the same time; in other words, I can't play the beep and then start recording. This introduces the problem of hearing the beep in my recording (probably because I am playing the beep through the speaker, but unfortunately I cannot compromise there either).

Since the beep is just a tone of about 2350 Hz, I was wondering how I could exclude that range of frequencies (say from 2300 Hz to 2400 Hz) from my recordings and prevent it from influencing my audio samples. After some googling I came up with what I think might be the solution: a band-stop filter. According to Wikipedia, "a band-stop filter or band-rejection filter is a filter that passes most frequencies unaltered, but attenuates those in a specific range to very low levels". That seems like what I need to exclude frequencies from 2300 Hz to 2400 Hz in my recordings (or at least for the first second of the recording, while the beep is playing).

My question is: how would I implement this with AVAudioEngine? And is there a way to turn the filter off after the first second of the recording, once the beep is done playing, without stopping the recording?
Since I am new to working with audio via AVAudioEngine (I've always stuck to the higher levels of AVFoundation), I followed this tutorial to help me create a class to handle all the messy stuff. This is what my code looks like:
class Recorder {
enum RecordingState {
case recording, paused, stopped
}
private var engine: AVAudioEngine!
private var mixerNode: AVAudioMixerNode!
private var state: RecordingState = .stopped
private var audioPlayer = AVAudioPlayerNode()
init() {
setupSession()
setupEngine()
}
fileprivate func setupSession() {
let session = AVAudioSession.sharedInstance()
//The original tutorial sets the category to .record
//try? session.setCategory(.record)
try? session.setCategory(.playAndRecord, options: [.mixWithOthers, .defaultToSpeaker])
try? session.setActive(true, options: .notifyOthersOnDeactivation)
}
fileprivate func setupEngine() {
engine = AVAudioEngine()
mixerNode = AVAudioMixerNode()
// Set volume to 0 to avoid audio feedback while recording.
mixerNode.volume = 0
engine.attach(mixerNode)
//Attach the audio player node
engine.attach(audioPlayer)
makeConnections()
// Prepare the engine in advance, in order for the system to allocate the necessary resources.
engine.prepare()
}
fileprivate func makeConnections() {
let inputNode = engine.inputNode
let inputFormat = inputNode.outputFormat(forBus: 0)
engine.connect(inputNode, to: mixerNode, format: inputFormat)
let mainMixerNode = engine.mainMixerNode
let mixerFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: inputFormat.sampleRate, channels: 1, interleaved: false)
engine.connect(mixerNode, to: mainMixerNode, format: mixerFormat)
//AudioPlayer Connection
let path = Bundle.main.path(forResource: "beep.mp3", ofType:nil)!
let url = URL(fileURLWithPath: path)
let file = try! AVAudioFile(forReading: url)
engine.connect(audioPlayer, to: mainMixerNode, format: nil)
audioPlayer.scheduleFile(file, at: nil)
}
//MARK: Start Recording Function
func startRecording() throws {
print("Start Recording!")
let tapNode: AVAudioNode = mixerNode
let format = tapNode.outputFormat(forBus: 0)
let documentURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
// AVAudioFile uses the Core Audio Format (CAF) to write to disk.
// So we're using the caf file extension.
let file = try AVAudioFile(forWriting: documentURL.appendingPathComponent("recording.caf"), settings: format.settings)
tapNode.installTap(onBus: 0, bufferSize: 4096, format: format, block: {
(buffer, time) in
try? file.write(from: buffer)
print(buffer.description)
print(buffer.stride)
let floatArray = Array(UnsafeBufferPointer(start: buffer.floatChannelData![0], count:Int(buffer.frameLength)))
})
try engine.start()
audioPlayer.play()
state = .recording
}
//MARK: Other recording functions
func resumeRecording() throws {
try engine.start()
state = .recording
}
func pauseRecording() {
engine.pause()
state = .paused
}
func stopRecording() {
// Remove existing taps on nodes
mixerNode.removeTap(onBus: 0)
engine.stop()
state = .stopped
}
}
AVAudioUnitEQ supports a band-stop filter.
Perhaps something like:
// Create an instance of AVAudioUnitEQ and connect it to the engine's main mixer
let eq = AVAudioUnitEQ(numberOfBands: 1)
engine.attach(eq)
engine.connect(eq, to: engine.mainMixerNode, format: nil)
engine.connect(player, to: eq, format: nil)
eq.bands[0].frequency = 2350
eq.bands[0].filterType = .bandStop
eq.bands[0].bypass = false
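For the second part of the question, switching the filter off after the beep finishes without stopping the recording, one option (just a sketch reusing the eq instance above, with a hypothetical one-second delay) is to flip the band's bypass flag while the engine and the tap keep running:
// Assumed timing: the beep lasts about one second from the start of recording.
DispatchQueue.main.asyncAfter(deadline: .now() + 1.0) {
    eq.bands[0].bypass = true   // the band becomes a pass-through; recording continues untouched
}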
A slightly more complete answer, linked to an IBAction; in this example, I use .parametric for the filter type, with more bands than required, to give a broader insight on how to use it:
#IBAction func PlayWithEQ(_ sender: Any) {
self.engine.stop()
self.engine = AVAudioEngine()
let player = AVAudioPlayerNode()
let url = Bundle.main.url(forResource:"yoursong", withExtension: "m4a")!
let f = try! AVAudioFile(forReading: url)
self.engine.attach(player)
// adding eq effect node
let effect = AVAudioUnitEQ(numberOfBands: 4)
let bands = effect.bands
let freq = [125, 250, 2350, 8000]
for i in 0...(bands.count - 1) {
bands[i].frequency = Float(freq[i])
}
bands[0].gain = 0.0
bands[0].filterType = .parametric
bands[0].bandwidth = 1
bands[1].gain = 0.0
bands[1].filterType = .parametric
bands[1].bandwidth = 0.5
// filter of interest, rejecting 2350 Hz (adjust bandwidth as needed)
bands[2].gain = -60.0
bands[2].filterType = .parametric
bands[2].bandwidth = 1
bands[3].gain = 0.0
bands[3].filterType = .parametric
bands[3].bandwidth = 1
self.engine.attach(effect)
self.engine.connect(player, to: effect, format: f.processingFormat)
let mixer = self.engine.mainMixerNode
self.engine.connect(effect, to: mixer, format: f.processingFormat)
player.scheduleFile(f, at: nil) {
delay(0.05) {
if self.engine.isRunning {
self.engine.stop()
}
}
}
self.engine.prepare()
try! self.engine.start()
player.play()
}
I used AVAudioEngine to gather PCM data from the microphone on iOS and it worked fine; however, when I moved the project to watchOS, I get feedback while recording. How can I stop playback from the speakers while recording?
var audioEngine = AVAudioEngine()
try AVAudioSession.sharedInstance().setCategory(.playAndRecord, mode: .default)
try AVAudioSession.sharedInstance().setActive(true)
let input = audioEngine.inputNode
let format = input.inputFormat(forBus: 0)
audioEngine.connect(input, to: audioEngine.mainMixerNode, format: format)
try! audioEngine.start()
let mixer = audioEngine.mainMixerNode
let format = mixer.outputFormat(forBus: 0)
let sampleRate = format.sampleRate
let fft_size = 2048
mixer.installTap(onBus: 0, bufferSize: UInt32(fft_size), format: format,
block: {(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
// Processing
})
For anyone else who runs into this, I fixed it by removing the connection from the inputNode to the mainMixerNode and installing the tap straight on the inputNode. The way I was doing it before, I guess, creates a feedback loop where it plays back what it's recording. Not sure why this only happens on watchOS and not on iPhone... perhaps it was playing back from the ear speaker rather than the one next to the mic. Fixed code:
var audioEngine = AVAudioEngine()
try AVAudioSession.sharedInstance().setCategory(.playAndRecord, mode: .default)
try AVAudioSession.sharedInstance().setActive(true)
try! audioEngine.start()
let input = audioEngine.inputNode
let format = input.outputFormat(forBus: 0)
let sampleRate = format.sampleRate
let fft_size = 2048
input.installTap(onBus: 0, bufferSize: UInt32(fft_size), format: format,
block: {(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
// Processing
})
I use AVAudioMixerNode to change the audio format. This entry helped me a lot. The code below gives me the data I want, but I hear my own voice on the phone's speaker. How can I prevent that?
func startAudioEngine()
{
engine = AVAudioEngine()
guard let engine = engine, let input = engine.inputNode else {
// #TODO: error out
return
}
let downMixer = AVAudioMixerNode()
//I think the engine's I/O nodes are already attached to it by default, so we attach only the downMixer here:
engine.attach(downMixer)
//You can tap the downMixer to intercept the audio and do something with it:
downMixer.installTap(onBus: 0, bufferSize: 2048, format: downMixer.outputFormat(forBus: 0), block: //originally 1024
{ (buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
//i get audio data here
}
)
//let's get the input audio format right as it is
let format = input.inputFormat(forBus: 0)
//I initialize the lower-sample-rate mono format I need (11025 Hz in this code):
let format16KHzMono = AVAudioFormat.init(commonFormat: AVAudioCommonFormat.pcmFormatInt16, sampleRate: 11025.0, channels: 1, interleaved: true)
//connect the nodes inside the engine:
//INPUT NODE --format--> downMixer --format16KHzMono--> OUTPUT NODE
//as you can see I'm downsampling the default 44.1 kHz we get from the input to the lower rate I want
engine.connect(input, to: downMixer, format: format)//use default input format
engine.connect(downMixer, to: engine.outputNode, format: format16KHzMono)//use new audio format
engine.prepare()
do {
try engine.start()
} catch {
// #TODO: error out
}
}
You can hear your microphone recording through your speakers because your microphone is connected to downMixer, which is connected to engine.outputNode. You could probably just mute the output for the downMixer if you aren't using it with other inputs:
downMixer.outputVolume = 0.0
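In the setup above that line would sit right after the mixer is attached, roughly like this (a sketch reusing the names from the question's code):
let downMixer = AVAudioMixerNode()
engine.attach(downMixer)
downMixer.outputVolume = 0.0   // mute the mixer's output so the mic isn't played back through the speaker
engine.connect(input, to: downMixer, format: format)
engine.connect(downMixer, to: engine.outputNode, format: format16KHzMono)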
I did it like this to change the sample rate to 48000 Hz / 16 bits per sample / 2 channels, and save it to a wave file:
let outputAudioFileFormat = [AVFormatIDKey: Int(kAudioFormatLinearPCM), AVSampleRateKey: 48000, AVNumberOfChannelsKey: 2, AVEncoderAudioQualityKey: AVAudioQuality.high.rawValue]
let audioRecordingFormat : AVAudioFormat = AVAudioFormat.init(commonFormat: AVAudioCommonFormat.pcmFormatInt16, sampleRate: 48000, channels: 2, interleaved: true)!
do{
try file = AVAudioFile(forWriting: url, settings: outputAudioFileFormat, commonFormat: .pcmFormatInt16, interleaved: true)
let recordingSession = AVAudioSession.sharedInstance()
try recordingSession.setPreferredInput(input)
try recordingSession.setPreferredSampleRate(audioRecordingFormat.sampleRate)
engine.inputNode.installTap(onBus: 0, bufferSize: 1024, format: audioRecordingFormat, block: self.bufferAvailable)
engine.connect(engine.inputNode, to: engine.outputNode, format: audioRecordingFormat) //configure graph
}
catch
{
debugPrint("Could not initialize the audio file: \(error)")
}
And here is the buffer callback function:
func bufferAvailable(buffer: AVAudioPCMBuffer, time: AVAudioTime)
{
do
{
try self.file?.write(from: buffer)
if self.onBufferAvailable != nil {
DispatchQueue.main.async {
self.onBufferAvailable!(buffer) // outside function used for analyzing and displaying a wave meter
}
}
}
catch{
self.stopEngine()
DispatchQueue.main.async {
self.onRecordEnd(false)
}
}
}
The stopEngine function is shown below; you should also call it when you want to stop the recording:
private func stopEngine()
{
self.engine.inputNode.removeTap(onBus: 0)
self.engine.stop()
}
I'm really excited about the new AVAudioEngine. It seems like a good API wrapper around Audio Units. Unfortunately the documentation is so far nonexistent, and I'm having problems getting a simple graph to work.
Using the following simple code to set up an audio engine graph, the tap block is never called. It mimics some of the sample code floating around the web, though those also did not work.
let inputNode = audioEngine.inputNode
var error: NSError?
let bus = 0
inputNode.installTapOnBus(bus, bufferSize: 2048, format: inputNode.inputFormatForBus(bus)) {
(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
println("sfdljk")
}
audioEngine.prepare()
if audioEngine.startAndReturnError(&error) {
println("started audio")
} else {
if let engineStartError = error {
println("error starting audio: \(engineStartError.localizedDescription)")
}
}
All I'm looking for is the raw pcm buffer for analysis. I don't need any effects or output. According to the WWDC talk "502 Audio Engine in Practice", this setup should work.
Now if you want to capture data from the input node, you can install a node tap and we've talked about that.
But what's interesting about this particular example is, if I wanted to work with just the input node, say just capture data from the microphone and maybe examine it, analyze it in real time or maybe write it out to file, I can directly install a tap on the input node.
And the tap will do the work of pulling the input node for data, stuffing it in buffers and then returning that back to the application.
Once you have that data you can do whatever you need to do with it.
Here are some links I tried:
http://hondrouthoughts.blogspot.com/2014/09/avfoundation-audio-monitoring.html
http://jamiebullock.com/post/89243252529/live-coding-audio-with-swift-playgrounds (SIGABRT in playground on startAndReturnError)
Edit: This is the implementation based on Thorsten Karrer's suggestion. It unfortunately does not work.
class AudioProcessor {
let audioEngine = AVAudioEngine()
init(){
let inputNode = audioEngine.inputNode
let bus = 0
var error: NSError?
inputNode.installTapOnBus(bus, bufferSize: 2048, format:inputNode.inputFormatForBus(bus)) {
(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
println("sfdljk")
}
audioEngine.prepare()
audioEngine.startAndReturnError(nil)
println("started audio")
}
}
It might be the case that your AVAudioEngine is going out of scope and is released by ARC ("If you liked it then you should have put retain on it...").
The following code (engine is moved to an ivar and thus sticks around) fires the tap:
class AppDelegate: NSObject, NSApplicationDelegate {
let audioEngine = AVAudioEngine()
func applicationDidFinishLaunching(aNotification: NSNotification) {
let inputNode = audioEngine.inputNode
let bus = 0
inputNode.installTapOnBus(bus, bufferSize: 2048, format: inputNode.inputFormatForBus(bus)) {
(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
println("sfdljk")
}
audioEngine.prepare()
audioEngine.startAndReturnError(nil)
}
}
(I removed the error handling for brevity)
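On current SDKs the same idea holds: keep the engine in a stored property of a long-lived object so ARC doesn't release it. A rough modern-Swift sketch of the equivalent (not the original answer's code):
import AVFoundation

final class AudioTapHolder {
    // Stored property keeps the engine alive for as long as this object lives.
    let audioEngine = AVAudioEngine()

    func start() throws {
        let inputNode = audioEngine.inputNode
        inputNode.installTap(onBus: 0, bufferSize: 2048,
                             format: inputNode.inputFormat(forBus: 0)) { buffer, _ in
            print("got \(buffer.frameLength) frames")
        }
        audioEngine.prepare()
        try audioEngine.start()
    }
}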
UPDATED: I have implemented a complete working example of recording mic input, applying some effects (reverb, delay, distortion) at runtime, and saving all of these effects to an output file.
var engine = AVAudioEngine()
var distortion = AVAudioUnitDistortion()
var reverb = AVAudioUnitReverb()
var audioBuffer = AVAudioPCMBuffer()
var outputFile = AVAudioFile()
var delay = AVAudioUnitDelay()
//Initialize the audio engine
func initializeAudioEngine() {
engine.stop()
engine.reset()
engine = AVAudioEngine()
isRealTime = true
do {
try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayAndRecord)
let ioBufferDuration = 128.0 / 44100.0
try AVAudioSession.sharedInstance().setPreferredIOBufferDuration(ioBufferDuration)
} catch {
assertionFailure("AVAudioSession setup error: \(error)")
}
let fileUrl = URLFor("/NewRecording.caf")
print(fileUrl)
do {
try outputFile = AVAudioFile(forWriting: fileUrl!, settings: engine.mainMixerNode.outputFormatForBus(0).settings)
}
catch {
}
let input = engine.inputNode!
let format = input.inputFormatForBus(0)
//settings for reverb
reverb.loadFactoryPreset(.MediumChamber)
reverb.wetDryMix = 40 //0-100 range
engine.attachNode(reverb)
delay.delayTime = 0.2 // 0-2 range
engine.attachNode(delay)
//settings for distortion
distortion.loadFactoryPreset(.DrumsBitBrush)
distortion.wetDryMix = 20 //0-100 range
engine.attachNode(distortion)
engine.connect(input, to: reverb, format: format)
engine.connect(reverb, to: distortion, format: format)
engine.connect(distortion, to: delay, format: format)
engine.connect(delay, to: engine.mainMixerNode, format: format)
assert(engine.inputNode != nil)
isReverbOn = false
try! engine.start()
}
//Now the recording function:
func startRecording() {
let mixer = engine.mainMixerNode
let format = mixer.outputFormatForBus(0)
mixer.installTapOnBus(0, bufferSize: 1024, format: format, block:
{ (buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
print(NSString(string: "writing"))
do{
try self.outputFile.writeFromBuffer(buffer)
}
catch {
print(NSString(string: "Write failed"));
}
})
}
func stopRecording() {
engine.mainMixerNode.removeTapOnBus(0)
engine.stop()
}
I hope this might help you. Thanks!
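One note: URLFor(...) in the snippet above is the answer's own helper and isn't shown. A plausible sketch of it in the same Swift 2-era style (an assumption, not the original) could be:
func URLFor(filePath: String) -> NSURL? {
    // Builds a file URL inside the app's Documents directory.
    guard let documentsDirectory = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true).first else {
        return nil
    }
    return NSURL(fileURLWithPath: documentsDirectory + filePath)
}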
The above answer didn't work for me but the following did. I'm installing a tap on a mixer node.
mMixerNode?.installTapOnBus(0, bufferSize: 4096, format: mMixerNode?.outputFormatForBus(0), block:
{
(buffer: AVAudioPCMBuffer!, time:AVAudioTime!) -> Void in
NSLog("tapped")
}
)
Nice topic.
Hi brodney, in your topic I found my solution. Here is a similar topic: Generate AVAudioPCMBuffer with AVAudioRecorder.
See the WWDC 2014 lecture 502 - AVAudioEngine in Practice: capturing the microphone is covered at around 20 min, and creating a buffer with the tap code at around 21:50.
Here is Swift 3 code:
#IBAction func button01Pressed(_ sender: Any) {
let inputNode = audioEngine.inputNode
let bus = 0
inputNode?.installTap(onBus: bus, bufferSize: 2048, format: inputNode?.inputFormat(forBus: bus)) {
(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
var theLength = Int(buffer.frameLength)
print("theLength = \(theLength)")
var samplesAsDoubles:[Double] = []
for i in 0 ..< Int(buffer.frameLength)
{
var theSample = Double((buffer.floatChannelData?.pointee[i])!)
samplesAsDoubles.append( theSample )
}
print("samplesAsDoubles.count = \(samplesAsDoubles.count)")
}
audioEngine.prepare()
try! audioEngine.start()
}
To stop the audio:
func stopAudio()
{
let inputNode = audioEngine.inputNode
let bus = 0
inputNode?.removeTap(onBus: bus)
self.audioEngine.stop()
}