I'm trying to install a tap on the output audio that my app plays. I have no issue capturing buffers from the microphone input, but when it comes to capturing the sound that goes through the speaker, the earpiece, or whatever the output device is, I don't succeed. Am I missing something?
In my example I'm trying to capture the audio buffers from an audio file that an AVPlayer is playing. But let's pretend I don't have direct access to the AVPlayer instance.
The goal is to perform Speech Recognition on an audio stream.
func catchAudioBuffers() throws {
let audioSession = AVAudioSession.sharedInstance()
try audioSession.setCategory(.playAndRecord, mode: .voiceChat, options: .allowBluetooth)
try audioSession.setActive(true)
let outputNode = audioEngine.outputNode
let recordingFormat = outputNode.outputFormat(forBus: 0)
outputNode.installTap(onBus: 0, bufferSize: 1024, format: recordingFormat) { (buffer: AVAudioPCMBuffer, when: AVAudioTime) in
// PROCESS AUDIO BUFFER
}
audioEngine.prepare()
try audioEngine.start()
// For example I am playing an audio conversation with an AVPlayer and a local file.
player.playSound()
}
This code results in a:
AVAEInternal.h:76 required condition is false: [AVAudioIONodeImpl.mm:1057:SetOutputFormat: (_isInput)]
*** Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: _isInput'
I was facing the same problem, and after two days of brainstorming I found the following.
Apple says that for AVAudioOutputNode, the tap format must be specified as nil. I'm not sure how important that is, but in my case what finally worked was a nil format.
You need to start recording, and don't forget to stop it.
Removing the tap is really important; otherwise you will end up with a file you can't open.
Try to save the file with the same audio settings that were used in the source file.
Here's my code that finally worked. It was partly taken from this question Saving Audio After Effect in iOS.
func playSound() {
let rate: Float? = effect.speed
let pitch: Float? = effect.pitch
let echo: Bool? = effect.echo
let reverb: Bool? = effect.reverb
// initialize audio engine components
audioEngine = AVAudioEngine()
// node for playing audio
audioPlayerNode = AVAudioPlayerNode()
audioEngine.attach(audioPlayerNode)
// node for adjusting rate/pitch
let changeRatePitchNode = AVAudioUnitTimePitch()
if let pitch = pitch {
changeRatePitchNode.pitch = pitch
}
if let rate = rate {
changeRatePitchNode.rate = rate
}
audioEngine.attach(changeRatePitchNode)
// node for echo
let echoNode = AVAudioUnitDistortion()
echoNode.loadFactoryPreset(.multiEcho1)
audioEngine.attach(echoNode)
// node for reverb
let reverbNode = AVAudioUnitReverb()
reverbNode.loadFactoryPreset(.cathedral)
reverbNode.wetDryMix = 50
audioEngine.attach(reverbNode)
// connect nodes
if echo == true && reverb == true {
connectAudioNodes(audioPlayerNode, changeRatePitchNode, echoNode, reverbNode, audioEngine.mainMixerNode, audioEngine.outputNode)
} else if echo == true {
connectAudioNodes(audioPlayerNode, changeRatePitchNode, echoNode, audioEngine.mainMixerNode, audioEngine.outputNode)
} else if reverb == true {
connectAudioNodes(audioPlayerNode, changeRatePitchNode, reverbNode, audioEngine.mainMixerNode, audioEngine.outputNode)
} else {
connectAudioNodes(audioPlayerNode, changeRatePitchNode, audioEngine.mainMixerNode, audioEngine.outputNode)
}
// schedule to play and start the engine!
audioPlayerNode.stop()
audioPlayerNode.scheduleFile(audioFile, at: nil) {
var delayInSeconds: Double = 0
if let lastRenderTime = self.audioPlayerNode.lastRenderTime, let playerTime = self.audioPlayerNode.playerTime(forNodeTime: lastRenderTime) {
if let rate = rate {
delayInSeconds = Double(self.audioFile.length - playerTime.sampleTime) / Double(self.audioFile.processingFormat.sampleRate) / Double(rate)
} else {
delayInSeconds = Double(self.audioFile.length - playerTime.sampleTime) / Double(self.audioFile.processingFormat.sampleRate)
}
}
// schedule a stop timer for when audio finishes playing
self.stopTimer = Timer(timeInterval: delayInSeconds, target: self, selector: #selector(EditViewController.stopAudio), userInfo: nil, repeats: false)
RunLoop.main.add(self.stopTimer!, forMode: RunLoop.Mode.default)
}
do {
try audioEngine.start()
} catch {
showAlert(Alerts.AudioEngineError, message: String(describing: error))
return
}
//Try to save
let dirPaths: String = (NSSearchPathForDirectoriesInDomains(.libraryDirectory, .userDomainMask, true)[0]) + "/sounds/"
let tmpFileUrl = URL(fileURLWithPath: dirPaths + "effected.caf")
//Save the tmpFileUrl into a global variable so we don't lose it (not important if you want to do something else)
filteredOutputURL = tmpFileUrl
do{
print(dirPaths)
let settings = [AVSampleRateKey : NSNumber(value: Float(44100.0)),
AVFormatIDKey : NSNumber(value: Int32(kAudioFormatMPEG4AAC)),
AVNumberOfChannelsKey : NSNumber(value: 1),
AVEncoderAudioQualityKey : NSNumber(value: Int32(AVAudioQuality.medium.rawValue))]
self.newAudio = try! AVAudioFile(forWriting: tmpFileUrl as URL, settings: settings)
let length = self.audioFile.length
audioEngine.mainMixerNode.installTap(onBus: 0, bufferSize: 4096, format: nil) {
(buffer: AVAudioPCMBuffer?, time: AVAudioTime!) -> Void in
//Let us know when to stop saving the file, otherwise saving infinitely
if (self.newAudio.length) <= length {
do{
try self.newAudio.write(from: buffer!)
} catch _{
print("Problem Writing Buffer")
}
} else {
//if we don't remove it, it will keep tapping infinitely
self.audioEngine.mainMixerNode.removeTap(onBus: 0)
}
}
}
// play the recording!
audioPlayerNode.play()
}
@objc func stopAudio() {
if let audioPlayerNode = audioPlayerNode {
let engine = audioEngine
audioPlayerNode.stop()
engine?.mainMixerNode.removeTap(onBus: 0)
}
if let stopTimer = stopTimer {
stopTimer.invalidate()
}
configureUI(.notPlaying)
if let audioEngine = audioEngine {
audioEngine.stop()
audioEngine.reset()
}
isPlaying = false
}
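Since the original goal was to run speech recognition on the tapped stream, here is a rough sketch of feeding the same buffers into SFSpeechRecognizer. The node, buffer size, and task handling are assumptions; only one tap can be installed per bus, so the append call would either replace or live inside the file-writing tap above, and speech-recognition authorization still has to be requested separately.
import Speech
import AVFoundation

// Sketch only: pipe mixer-tap buffers into a speech recognition request.
let recognizer = SFSpeechRecognizer()
let request = SFSpeechAudioBufferRecognitionRequest()
request.shouldReportPartialResults = true

audioEngine.mainMixerNode.installTap(onBus: 0, bufferSize: 1024, format: nil) { buffer, _ in
    request.append(buffer) // hand each tapped buffer to the recognizer
}

let task = recognizer?.recognitionTask(with: request) { result, _ in
    if let result = result {
        print(result.bestTranscription.formattedString)
    }
}
// When playback finishes: call request.endAudio(), then task?.finish() or task?.cancel().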
Related
I am building an app that needs to analyze the audio it receives from the microphone in real time. In my app, I also need to play a beep sound and start recording audio at the same time; in other words, I can't play the beep sound and then start recording. This introduces the problem of hearing the beep sound in my recording (this might be because I am playing the beep sound through the speaker, but unfortunately I cannot compromise in this regard either).
Since the beep sound is just a tone of about 2350 Hz, I was wondering how I could exclude that range of frequencies (say from 2300 Hz to 2400 Hz) from my recordings and prevent it from influencing my audio samples. After doing some googling I came up with what I think might be the solution: a band-stop filter. According to Wikipedia, "a band-stop filter or band-rejection filter is a filter that passes most frequencies unaltered, but attenuates those in a specific range to very low levels". This seems like what I need to exclude frequencies from 2300 Hz to 2400 Hz in my recordings (or at least for the first second of the recording, while the beep sound is playing).
My question is: how would I implement this with AVAudioEngine? Is there a way to turn the filter off after the first second of the recording, when the beep sound is done playing, without stopping the recording?
Since I am new to working with audio with AVAudioEngine (I've always just stuck to the higher levels of AVFoundation) I followed this tutorial to help me create a class to handle all the messy stuff. This is what my code looks like:
class Recorder {
enum RecordingState {
case recording, paused, stopped
}
private var engine: AVAudioEngine!
private var mixerNode: AVAudioMixerNode!
private var state: RecordingState = .stopped
private var audioPlayer = AVAudioPlayerNode()
init() {
setupSession()
setupEngine()
}
fileprivate func setupSession() {
let session = AVAudioSession.sharedInstance()
//The original tutorial sets the category to .record
//try? session.setCategory(.record)
try? session.setCategory(.playAndRecord, options: [.mixWithOthers, .defaultToSpeaker])
try? session.setActive(true, options: .notifyOthersOnDeactivation)
}
fileprivate func setupEngine() {
engine = AVAudioEngine()
mixerNode = AVAudioMixerNode()
// Set volume to 0 to avoid audio feedback while recording.
mixerNode.volume = 0
engine.attach(mixerNode)
//Attach the audio player node
engine.attach(audioPlayer)
makeConnections()
// Prepare the engine in advance, in order for the system to allocate the necessary resources.
engine.prepare()
}
fileprivate func makeConnections() {
let inputNode = engine.inputNode
let inputFormat = inputNode.outputFormat(forBus: 0)
engine.connect(inputNode, to: mixerNode, format: inputFormat)
let mainMixerNode = engine.mainMixerNode
let mixerFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: inputFormat.sampleRate, channels: 1, interleaved: false)
engine.connect(mixerNode, to: mainMixerNode, format: mixerFormat)
//AudioPlayer Connection
let path = Bundle.main.path(forResource: "beep.mp3", ofType:nil)!
let url = URL(fileURLWithPath: path)
let file = try! AVAudioFile(forReading: url)
engine.connect(audioPlayer, to: mainMixerNode, format: nil)
audioPlayer.scheduleFile(file, at: nil)
}
//MARK: Start Recording Function
func startRecording() throws {
print("Start Recording!")
let tapNode: AVAudioNode = mixerNode
let format = tapNode.outputFormat(forBus: 0)
let documentURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
// AVAudioFile uses the Core Audio Format (CAF) to write to disk.
// So we're using the caf file extension.
let file = try AVAudioFile(forWriting: documentURL.appendingPathComponent("recording.caf"), settings: format.settings)
tapNode.installTap(onBus: 0, bufferSize: 4096, format: format, block: {
(buffer, time) in
try? file.write(from: buffer)
print(buffer.description)
print(buffer.stride)
let floatArray = Array(UnsafeBufferPointer(start: buffer.floatChannelData![0], count:Int(buffer.frameLength)))
})
try engine.start()
audioPlayer.play()
state = .recording
}
//MARK: Other recording functions
func resumeRecording() throws {
try engine.start()
state = .recording
}
func pauseRecording() {
engine.pause()
state = .paused
}
func stopRecording() {
// Remove existing taps on nodes
mixerNode.removeTap(onBus: 0)
engine.stop()
state = .stopped
}
}
AVAudioUnitEQ supports a band-stop filter.
Perhaps something like:
// Create an instance of AVAudioUnitEQ and connect it to the engine's main mixer
let eq = AVAudioUnitEQ(numberOfBands: 1)
engine.attach(eq)
engine.connect(eq, to: engine.mainMixerNode, format: nil)
engine.connect(player, to: eq, format: nil)
eq.bands[0].frequency = 2350
eq.bands[0].filterType = .bandStop
eq.bands[0].bypass = false
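The question also asked how to switch the filter off after the first second without stopping the recording. One way (a sketch only, not part of the answer above; inputNode, inputFormat, and mixerNode are the names from the question's makeConnections(), and the one-second delay is an assumption) is to place the EQ before the tapped mixer and toggle the band's bypass property while the engine keeps running:
// Insert the EQ between the input node and the mixer that is tapped for recording,
// so the band-stop is applied before the buffers reach the tap.
// (This replaces the direct inputNode -> mixerNode connection from makeConnections().)
let eq = AVAudioUnitEQ(numberOfBands: 1)
eq.bands[0].filterType = .bandStop
eq.bands[0].frequency = 2350        // centre frequency of the beep, in Hz
eq.bands[0].bandwidth = 1           // bandwidth in octaves; adjust as needed
eq.bands[0].bypass = false

engine.attach(eq)
engine.connect(inputNode, to: eq, format: inputFormat)
engine.connect(eq, to: mixerNode, format: inputFormat)

// Roughly one second after recording starts, disable the band without
// stopping the engine, the tap, or the file being written.
DispatchQueue.main.asyncAfter(deadline: .now() + 1.0) {
    eq.bands[0].bypass = true
}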
A slightly more complete answer, hooked up to an IBAction. In this example I use .parametric for the filter type, with more bands than required, to give a broader idea of how to use it:
@IBAction func PlayWithEQ(_ sender: Any) {
self.engine.stop()
self.engine = AVAudioEngine()
let player = AVAudioPlayerNode()
let url = Bundle.main.url(forResource:"yoursong", withExtension: "m4a")!
let f = try! AVAudioFile(forReading: url)
self.engine.attach(player)
// adding eq effect node
let effect = AVAudioUnitEQ(numberOfBands: 4)
let bands = effect.bands
let freq = [125, 250, 2350, 8000]
for i in 0...(bands.count - 1) {
bands[i].frequency = Float(freq[i])
}
bands[0].gain = 0.0
bands[0].filterType = .parametric
bands[0].bandwidth = 1
bands[1].gain = 0.0
bands[1].filterType = .parametric
bands[1].bandwidth = 0.5
// filter of interest, rejecting 2350 Hz (adjust bandwidth as needed)
bands[2].gain = -60.0
bands[2].filterType = .parametric
bands[2].bandwidth = 1
bands[3].gain = 0.0
bands[3].filterType = .parametric
bands[3].bandwidth = 1
self.engine.attach(effect)
self.engine.connect(player, to: effect, format: f.processingFormat)
let mixer = self.engine.mainMixerNode
self.engine.connect(effect, to: mixer, format: f.processingFormat)
player.scheduleFile(f, at: nil) {
delay(0.05) {
if self.engine.isRunning {
self.engine.stop()
}
}
}
self.engine.prepare()
try! self.engine.start()
player.play()
}
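The delay(_:) call in the completion handler above is not an AVFoundation API; it is presumably a small GCD helper along these lines (an assumed implementation):
// Assumed implementation of the delay helper used in the completion handler.
func delay(_ seconds: Double, closure: @escaping () -> Void) {
    DispatchQueue.main.asyncAfter(deadline: .now() + seconds, execute: closure)
}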
I'm writing a first-in-first-out recording app that buffers up to 2.5 minutes of audio using AudioQueue. I've got most of it figured out, but I'm at a roadblock trying to crop the audio data.
I've seen people do it with AVAssetExportSession but it seems like it wouldn't be performant to export a new track every time the AudioQueueInputCallback is called.
I'm not married to using AVAssetExportSession by any means if anyone has a better idea.
Here's where I'm doing my write and was hoping to execute the crop.
var beforeSeconds = TimeInterval() // find the current estimated duration (not reliable)
var propertySize = UInt32(MemoryLayout.size(ofValue: beforeSeconds))
var osStatus = AudioFileGetProperty(audioRecorder.recordFile!, kAudioFilePropertyEstimatedDuration, &propertySize, &beforeSeconds)
if numPackets > 0 {
AudioFileWritePackets(audioRecorder.recordFile!, // write to disk
false,
buffer.mAudioDataByteSize,
packetDescriptions,
audioRecorder.recordPacket,
&numPackets,
buffer.mAudioData)
audioRecorder.recordPacket += Int64(numPackets) // up the packet index
var afterSeconds = TimeInterval() // find the after write estimated duration (not reliable)
var propertySize = UInt32(MemoryLayout.size(ofValue: afterSeconds))
var osStatus = AudioFileGetProperty(audioRecorder.recordFile!, kAudioFilePropertyEstimatedDuration, &propertySize, &afterSeconds)
assert(osStatus == noErr, "couldn't get record time")
if afterSeconds >= 150.0 {
print("hit max buffer!")
audioRecorder.onBufferMax?(afterSeconds - beforeSeconds)
}
}
Here's where the callback is executed
func onBufferMax(_ difference: Double){
let asset = AVAsset(url: tempFilePath)
let duration = CMTimeGetSeconds(asset.duration)
guard duration >= 150.0 else { return }
guard let exporter = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetAppleM4A) else {
print("exporter init failed")
return }
exporter.outputURL = getDocumentsDirectory().appendingPathComponent("buffered.caf") // helper function that calls the FileManager
exporter.outputFileType = AVFileTypeAppleM4A
let startTime = CMTimeMake(Int64(difference), 1)
let endTime = CMTimeMake(Int64(WYNDRConstants.maxTimeInterval + difference), 1)
exporter.timeRange = CMTimeRangeFromTimeToTime(startTime, endTime)
exporter.exportAsynchronously(completionHandler: {
switch exporter.status {
case .failed:
print("failed to export")
case .cancelled:
print("canceled export")
default:
print("export successful")
}
})
}
A ring buffer is a useful structure for storing, either in memory or on disk, the most recent n seconds of audio. Here is a simple solution that stores the audio in memory, presented in the traditional UIViewController format.
N.B. 2.5 minutes of 44.1 kHz audio stored as floats (150 s × 44,100 samples/s × 4 bytes) requires about 26 MB of RAM, which is on the heavy side for a mobile device.
import AVFoundation
class ViewController: UIViewController {
let engine = AVAudioEngine()
var requiredSamples: AVAudioFrameCount = 0
var ringBuffer: [AVAudioPCMBuffer] = []
var ringBufferSizeInSamples: AVAudioFrameCount = 0
func startRecording() {
let input = engine.inputNode!
let bus = 0
let inputFormat = input.inputFormat(forBus: bus)
requiredSamples = AVAudioFrameCount(inputFormat.sampleRate * 2.5 * 60)
input.installTap(onBus: bus, bufferSize: 512, format: inputFormat) { (buffer, time) -> Void in
self.appendAudioBuffer(buffer)
}
try! engine.start()
}
func appendAudioBuffer(_ buffer: AVAudioPCMBuffer) {
ringBuffer.append(buffer)
ringBufferSizeInSamples += buffer.frameLength
// throw away old buffers if ring buffer gets too large
if let firstBuffer = ringBuffer.first {
if ringBufferSizeInSamples - firstBuffer.frameLength >= requiredSamples {
ringBuffer.remove(at: 0)
ringBufferSizeInSamples -= firstBuffer.frameLength
}
}
}
func stopRecording() {
engine.stop()
let url = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first!.appendingPathComponent("foo.m4a")
let settings: [String : Any] = [AVFormatIDKey: Int(kAudioFormatMPEG4AAC)]
// write ring buffer to file.
let file = try! AVAudioFile(forWriting: url, settings: settings)
for buffer in ringBuffer {
try! file.write(from: buffer)
}
}
override func viewDidLoad() {
super.viewDidLoad()
// example usage
startRecording()
DispatchQueue.main.asyncAfter(deadline: .now() + 4*60) {
print("stopping")
self.stopRecording()
}
}
}
I am working with Swift's AVFoundation to emit an ultrasonic sine wave, and my approach is to play a .wav file. I wonder if there is a way to play the sound continuously instead of using an extra .wav file.
Here is my code, but I don't think the new code will look much like it:
let myThread = Thread(target: self,
selector: #selector(ZouViewController.play()),
object: nil)
myThread.start()
[...]
func play(){
//rewrite soon
let fileName = Bundle.main.path(forResource: "19kHz", ofType: "wav")
let url = URL(fileURLWithPath: fileName!)
soundPlayer = try? AVAudioPlayer(contentsOf: url)
while true{
soundPlayer?.play()
}
}
The file 19kHz.wav contains an ultrasonic sine wave at 19 kHz, but its duration is finite, so there is a sudden discontinuity in the signal at the start of every loop when it is played again. I want to abandon that approach and play the data continuously from a buffer instead. Is there any way to play a sound signal from a buffer?
I addressed it with the following code:
import Foundation
import AVFoundation
class PlaySineWave{
var audioEngine = AVAudioEngine()
var audioFormat : AVAudioFormat
let FL: AVAudioFrameCount = 44100
let freq : Float = 19000 //19kHz
var pcmBuffer : AVAudioPCMBuffer
init() {
self.audioFormat = AVAudioFormat(standardFormatWithSampleRate: 44100.0, channels: 1)
self.pcmBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat,
frameCapacity:AVAudioFrameCount(FL))
self.pcmBuffer.frameLength = AVAudioFrameCount(FL)
}
func play(){
let floatData = self.pcmBuffer.floatChannelData!.pointee
let step = 2 * Float.pi/Float(FL)
for i in 0 ..< Int(FL) {
floatData[i] = 0.3 * sinf(freq * Float(i) * step)
}
let playerNode = AVAudioPlayerNode()
self.audioEngine.attach(playerNode)
audioEngine.connect(playerNode, to: audioEngine.mainMixerNode,format: pcmBuffer.format)
do {
try audioEngine.start()
} catch let err as NSError {
print("Oh, no! \(err.code) \(err.domain)")
}
playerNode.play()
playerNode.scheduleBuffer(pcmBuffer, at:nil, options: [.loops]) { }
//audioEngine.stop()
}
}
After defining the class, I called it in the ViewController like this:
override func viewDidLoad() {
[...]
let myThread =
Thread(target:self,selector:#selector(SpectralViewController.play),
object:nil)
myThread.start()
}
[...]
func play(){
var Player = PlaySineWave()
Player.play()
}
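As an aside, on iOS 13 and later an AVAudioSourceNode can synthesize the sine wave on the fly, so no pre-filled buffer or loop is needed at all. A minimal sketch (the sample rate, amplitude, and mono output are assumptions):
import AVFoundation

// Sketch: generate a continuous 19 kHz sine with AVAudioSourceNode (iOS 13+).
let engine = AVAudioEngine()
let sampleRate: Float = 44100
let frequency: Float = 19000
var phase: Float = 0
let phaseIncrement = 2 * Float.pi * frequency / sampleRate

let sourceNode = AVAudioSourceNode { (_, _, frameCount, audioBufferList) -> OSStatus in
    let buffers = UnsafeMutableAudioBufferListPointer(audioBufferList)
    for frame in 0..<Int(frameCount) {
        let sample = 0.3 * sin(phase)                 // continuous waveform, no loop seam
        phase += phaseIncrement
        if phase > 2 * Float.pi { phase -= 2 * Float.pi }
        for buffer in buffers {
            let channel = UnsafeMutableBufferPointer<Float>(buffer)
            channel[frame] = sample
        }
    }
    return noErr
}

let format = AVAudioFormat(standardFormatWithSampleRate: Double(sampleRate), channels: 1)
engine.attach(sourceNode)
engine.connect(sourceNode, to: engine.mainMixerNode, format: format)
try? engine.start()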
I am developing an application so that people can record their voices, change them through the app, and share them. I've done a lot of it already, and now it's time to ask for your help. Here is my play function, which plays the recorded audio file and adds effects to it.
private func playAudio(pitch : Float, rate: Float, reverb: Float, echo: Float) {
// Initialize variables
audioEngine = AVAudioEngine()
audioPlayerNode = AVAudioPlayerNode()
audioEngine.attachNode(audioPlayerNode)
// Setting the pitch
let pitchEffect = AVAudioUnitTimePitch()
pitchEffect.pitch = pitch
audioEngine.attachNode(pitchEffect)
// Setting the playback-rate
let playbackRateEffect = AVAudioUnitVarispeed()
playbackRateEffect.rate = rate
audioEngine.attachNode(playbackRateEffect)
// Setting the reverb effect
let reverbEffect = AVAudioUnitReverb()
reverbEffect.loadFactoryPreset(AVAudioUnitReverbPreset.Cathedral)
reverbEffect.wetDryMix = reverb
audioEngine.attachNode(reverbEffect)
// Setting the echo effect on a specific interval
let echoEffect = AVAudioUnitDelay()
echoEffect.delayTime = NSTimeInterval(echo)
audioEngine.attachNode(echoEffect)
// Chain all these up, ending with the output
audioEngine.connect(audioPlayerNode, to: playbackRateEffect, format: nil)
audioEngine.connect(playbackRateEffect, to: pitchEffect, format: nil)
audioEngine.connect(pitchEffect, to: reverbEffect, format: nil)
audioEngine.connect(reverbEffect, to: echoEffect, format: nil)
audioEngine.connect(echoEffect, to: audioEngine.outputNode, format: nil)
audioPlayerNode.stop()
let length = 4000
let buffer = AVAudioPCMBuffer(PCMFormat: audioPlayerNode.outputFormatForBus(0),frameCapacity:AVAudioFrameCount(length))
buffer.frameLength = AVAudioFrameCount(length)
try! audioEngine.start()
let dirPaths: AnyObject = NSSearchPathForDirectoriesInDomains( NSSearchPathDirectory.DocumentDirectory, NSSearchPathDomainMask.UserDomainMask, true)[0]
let tmpFileUrl: NSURL = NSURL.fileURLWithPath(dirPaths.stringByAppendingPathComponent("effectedSound.m4a"))
do{
print(dirPaths)
let settings = [AVFormatIDKey: NSNumber(unsignedInt: kAudioFormatMPEG4AAC), AVSampleRateKey: NSNumber(integer: 44100), AVNumberOfChannelsKey: NSNumber(integer: 2)]
self.newAudio = try AVAudioFile(forWriting: tmpFileUrl, settings: settings)
audioEngine.outputNode.installTapOnBus(0, bufferSize: (AVAudioFrameCount(self.player!.duration)), format: self.audioPlayerNode.outputFormatForBus(0)){
(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) in
print(self.newAudio.length)
print("=====================")
print(self.audioFile.length)
print("**************************")
if (self.newAudio.length) < (self.audioFile.length){
do{
//print(buffer)
try self.newAudio.writeFromBuffer(buffer)
}catch _{
print("Problem Writing Buffer")
}
}else{
self.audioPlayerNode.removeTapOnBus(0)
}
}
}catch _{
print("Problem")
}
audioPlayerNode.play()
}
I guess the problem is that I call installTapOnBus on audioPlayerNode, but the effected audio is on audioEngine.outputNode. However, when I tried to installTapOnBus on audioEngine.outputNode it gave me an error. I've also tried connecting the effects to the engine's mixer node, but that wasn't a solution either. Do you have any experience with saving an effected audio file? How can I get this effected audio?
Any help is appreciated
Thank you
Here is my solution to the question:
func playAndRecord(pitch : Float, rate: Float, reverb: Float, echo: Float) {
// Initialize variables
// These are global variables; if you want, you can just declare them locally here instead (let audioEngine = ..., etc.)
audioEngine = AVAudioEngine()
audioPlayerNode = AVAudioPlayerNode()
audioEngine.attachNode(audioPlayerNode)
playerB = AVAudioPlayerNode()
audioEngine.attachNode(playerB)
// Setting the pitch
let pitchEffect = AVAudioUnitTimePitch()
pitchEffect.pitch = pitch
audioEngine.attachNode(pitchEffect)
// Setting the playback-rate
let playbackRateEffect = AVAudioUnitVarispeed()
playbackRateEffect.rate = rate
audioEngine.attachNode(playbackRateEffect)
// Setting the reverb effect
let reverbEffect = AVAudioUnitReverb()
reverbEffect.loadFactoryPreset(AVAudioUnitReverbPreset.Cathedral)
reverbEffect.wetDryMix = reverb
audioEngine.attachNode(reverbEffect)
// Setting the echo effect on a specific interval
let echoEffect = AVAudioUnitDelay()
echoEffect.delayTime = NSTimeInterval(echo)
audioEngine.attachNode(echoEffect)
// Chain all these up, ending with the output
audioEngine.connect(audioPlayerNode, to: playbackRateEffect, format: nil)
audioEngine.connect(playbackRateEffect, to: pitchEffect, format: nil)
audioEngine.connect(pitchEffect, to: reverbEffect, format: nil)
audioEngine.connect(reverbEffect, to: echoEffect, format: nil)
audioEngine.connect(echoEffect, to: audioEngine.mainMixerNode, format: nil)
// Good practice to stop before starting
audioPlayerNode.stop()
// Play the audio file
// this player is also a global AVAudioPlayer variable
if(player != nil){
player?.stop()
}
// audioFile here is our original audio
audioPlayerNode.scheduleFile(audioFile, atTime: nil, completionHandler: {
print("Complete")
})
try! audioEngine.start()
let dirPaths: AnyObject = NSSearchPathForDirectoriesInDomains( NSSearchPathDirectory.DocumentDirectory, NSSearchPathDomainMask.UserDomainMask, true)[0]
let tmpFileUrl: NSURL = NSURL.fileURLWithPath(dirPaths.stringByAppendingPathComponent("effectedSound2.m4a"))
//Save the tmpFileUrl into a global variable so we don't lose it (not important if you want to do something else)
filteredOutputURL = tmpFileUrl
do{
print(dirPaths)
self.newAudio = try! AVAudioFile(forWriting: tmpFileUrl, settings: [
AVFormatIDKey: NSNumber(unsignedInt:kAudioFormatAppleLossless),
AVEncoderAudioQualityKey : AVAudioQuality.Low.rawValue,
AVEncoderBitRateKey : 320000,
AVNumberOfChannelsKey: 2,
AVSampleRateKey : 44100.0
])
let length = self.audioFile.length
audioEngine.mainMixerNode.installTapOnBus(0, bufferSize: 1024, format: self.audioEngine.mainMixerNode.inputFormatForBus(0)) {
(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
print(self.newAudio.length)
print("=====================")
print(length)
print("**************************")
if (self.newAudio.length) < length {//Let us know when to stop saving the file, otherwise saving infinitely
do{
//print(buffer)
try self.newAudio.writeFromBuffer(buffer)
}catch _{
print("Problem Writing Buffer")
}
}else{
self.audioEngine.mainMixerNode.removeTapOnBus(0)//if we don't remove it, it will keep tapping infinitely
//DO WHAT YOU WANT TO DO HERE WITH EFFECTED AUDIO
}
}
}catch _{
print("Problem")
}
audioPlayerNode.play()
}
This doesn't seem to be hooked up correctly. I'm just learning all this myself, but I found that the effects are correctly added when you connect them to a mixer node. Also, you'll want to tap the mixer, not the engine output node. I've just copied your code and made a few modifications to take this into account.
private func playAudio(pitch : Float, rate: Float, reverb: Float, echo: Float) {
// Initialize variables
audioEngine = AVAudioEngine()
audioPlayerNode = AVAudioPlayerNode()
audioEngine.attachNode(audioPlayerNode)
// Setting the pitch
let pitchEffect = AVAudioUnitTimePitch()
pitchEffect.pitch = pitch
audioEngine.attachNode(pitchEffect)
// Setting the playback-rate
let playbackRateEffect = AVAudioUnitVarispeed()
playbackRateEffect.rate = rate
audioEngine.attachNode(playbackRateEffect)
// Setting the reverb effect
let reverbEffect = AVAudioUnitReverb()
reverbEffect.loadFactoryPreset(AVAudioUnitReverbPreset.Cathedral)
reverbEffect.wetDryMix = reverb
audioEngine.attachNode(reverbEffect)
// Setting the echo effect on a specific interval
let echoEffect = AVAudioUnitDelay()
echoEffect.delayTime = NSTimeInterval(echo)
audioEngine.attachNode(echoEffect)
// Set up a mixer node
let audioMixer = AVAudioMixerNode()
audioEngine.attachNode(audioMixer)
// Chain all these up, ending with the output
audioEngine.connect(audioPlayerNode, to: playbackRateEffect, format: nil)
audioEngine.connect(playbackRateEffect, to: pitchEffect, format: nil)
audioEngine.connect(pitchEffect, to: reverbEffect, format: nil)
audioEngine.connect(reverbEffect, to: echoEffect, format: nil)
audioEngine.connect(echoEffect, to: audioMixer, format: nil)
audioEngine.connect(audioMixer, to: audioEngine.outputNode, format: nil)
audioPlayerNode.stop()
let length = 4000
let buffer = AVAudioPCMBuffer(PCMFormat: audioPlayerNode.outputFormatForBus(0),frameCapacity:AVAudioFrameCount(length))
buffer.frameLength = AVAudioFrameCount(length)
try! audioEngine.start()
let dirPaths: AnyObject = NSSearchPathForDirectoriesInDomains( NSSearchPathDirectory.DocumentDirectory, NSSearchPathDomainMask.UserDomainMask, true)[0]
let tmpFileUrl: NSURL = NSURL.fileURLWithPath(dirPaths.stringByAppendingPathComponent("effectedSound.m4a"))
do{
print(dirPaths)
let settings = [AVFormatIDKey: NSNumber(unsignedInt: kAudioFormatMPEG4AAC), AVSampleRateKey: NSNumber(integer: 44100), AVNumberOfChannelsKey: NSNumber(integer: 2)]
self.newAudio = try AVAudioFile(forWriting: tmpFileUrl, settings: settings)
audioMixer.installTapOnBus(0, bufferSize: (AVAudioFrameCount(self.player!.duration)), format: audioMixer.outputFormatForBus(0)){
(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) in
print(self.newAudio.length)
print("=====================")
print(self.audioFile.length)
print("**************************")
if (self.newAudio.length) < (self.audioFile.length){
do{
//print(buffer)
try self.newAudio.writeFromBuffer(buffer)
}catch _{
print("Problem Writing Buffer")
}
}else{
audioMixer.removeTapOnBus(0)
}
}
}catch _{
print("Problem")
}
audioPlayerNode.play()
}
I also had trouble getting the file formatted properly. I finally got it working when I changed the extension of the output file from .m4a to .caf. One other suggestion is to not pass nil for the format parameter; I used audioFile.processingFormat. I hope this helps. My audio effects/mixing is functional, although I did not chain my effects, so feel free to ask questions.
Just change the format parameter from kAudioFormatMPEG4AAC to kAudioFormatLinearPCM and also change the file type to .caf; it will surely help, my friend.
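In current Swift syntax, a sketch of that change against the settings dictionary used above (documentsPath stands in for whatever directory is used above):
// Same settings as before, but with Linear PCM and a .caf extension.
let settings: [String: Any] = [
    AVFormatIDKey: NSNumber(value: kAudioFormatLinearPCM),   // was kAudioFormatMPEG4AAC
    AVSampleRateKey: NSNumber(value: 44100),
    AVNumberOfChannelsKey: NSNumber(value: 2)
]
let tmpFileUrl = URL(fileURLWithPath: documentsPath + "/effectedSound.caf")  // was .m4a
let newAudio = try! AVAudioFile(forWriting: tmpFileUrl, settings: settings)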
For anyone who has the problem of having to play the audio file TWICE in order to save it, I just added the following line in the appropriate place and it solved my problem.
It might help someone in the future.
P.S.: I used the EXACT same code as the accepted answer above, just added this one line and it solved my problem.
//Do what you want to do here with effected Audio
self.newAudio = try! AVAudioFile(forReading: tmpFileUrl)
You can use this approach to adjust the voice in various ways, such as alien, man, old person, robot, or child, and it also has a playback counter:
var delayInSecond: Double = 0
if let lastRenderTime = self.audioPlayerNode.lastRenderTime, let playerTime = self.audioPlayerNode.playerTime(forNodeTime: lastRenderTime)
{
if let rate = rate {
delayInSecond = Double(self.audioFile.length - playerTime.sampleTime) / Double(self.audioFile.processingFormat.sampleRate) / Double(rate)
}else{
delayInSecond = Double(self.audioFile.length - playerTime.sampleTime) / Double(self.audioFile.processingFormat.sampleRate)
}
//schedule a stop timer for when audio finishes playing
self.stopTimer = Timer(timeInterval: delayInSecond, target: self, selector: #selector(stopPlay), userInfo: nil, repeats: false)
RunLoop.main.add(self.stopTimer, forMode: .default)
}
I got this error after I added
self.newAudio = try! AVAudioFile(forReading: tmpFileUrl)
It returned the following error:
Error Domain=com.apple.coreaudio.avfaudio Code=1685348671 "(null)" UserInfo={failed call=ExtAudioFileOpenURL((CFURLRef)fileURL, &_extAudioFile)}
I want to write a simple app that 'does something' when the sound level at the mic reaches a certain level, showing the audio input levels for extra credit.
I can't find any examples in Swift that get at this -- I don't want to record, just monitor.
I have been checking out the docs on the AVFoundation classes but can't get off the ground.
Thanks
You can use the code below:
func initalizeRecorder ()
{
do {
try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayAndRecord)
try AVAudioSession.sharedInstance().setActive(true)
}catch{
print(error);
}
let stringDir:NSString = self.getDocumentsDirectory();
let audioFilename = stringDir.stringByAppendingPathComponent("recording.m4a")
let audioURL = NSURL(fileURLWithPath: audioFilename)
print("File Path : \(audioFilename)");
// make a dictionary to hold the recording settings so we can instantiate our AVAudioRecorder
let settings = [
AVFormatIDKey: Int(kAudioFormatMPEG4AAC),
AVSampleRateKey: 12000.0,
AVNumberOfChannelsKey: 1 as NSNumber,
AVEncoderBitRateKey:12800 as NSNumber,
AVLinearPCMBitDepthKey:16 as NSNumber,
AVEncoderAudioQualityKey: AVAudioQuality.High.rawValue
]
do {
if audioRecorder == nil
{
audioRecorder = try AVAudioRecorder(URL: audioURL, settings: settings )
audioRecorder!.delegate = self
audioRecorder!.prepareToRecord();
audioRecorder!.meteringEnabled = true;
}
audioRecorder!.recordForDuration(NSTimeInterval(5.0));
} catch {
print("Error")
}
}
//GET DOCUMENT DIR PATH
func getDocumentsDirectory() -> String {
let paths = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)
let documentsDirectory = paths[0]
return documentsDirectory
}
////START RECORDING
@IBAction func btnStartPress(sender: AnyObject) {
recordingSession = AVAudioSession.sharedInstance()
do {
recordingSession.requestRecordPermission() { [unowned self] (allowed: Bool) -> Void in
dispatch_async(dispatch_get_main_queue()) {
if allowed {
print("Allowd Permission Record!!")
self.initalizeRecorder ()
self.audioRecorder!.record()
//instantiate a timer to be called with whatever frequency we want to grab metering values
self.levelTimer = NSTimer.scheduledTimerWithTimeInterval(0.02, target: self, selector: Selector("levelTimerCallback"), userInfo: nil, repeats: true)
} else {
// failed to record!
self.showPermissionAlert();
print("Failed Permission Record!!")
}
}
}
} catch {
// failed to record!
print("Failed Permission Record!!")
}
}
//This selector/function is called every time our timer (levelTimer) fires
func levelTimerCallback() {
//we have to update meters before we can get the metering values
if audioRecorder != nil
{
audioRecorder!.updateMeters()
let ALPHA : Double = 0.05;
let peakPowerForChannel : Double = pow(Double(10.0), (0.05) * Double(audioRecorder!.peakPowerForChannel(0)));
lowPassResults = ALPHA * peakPowerForChannel + Double((1.0) - ALPHA) * lowPassResults;
print("low pass res = \(lowPassResults)");
if (lowPassResults > 0.7 ){
print("Mic blow detected");
}
}
}
//STOP RECORDING
@IBAction func btnStopPress(sender: AnyObject) {
if audioRecorder != nil
{
audioRecorder!.stop()
self.levelTimer.invalidate()
}
}
With AVAudioRecorder you can "record" audio (you don't have to save it) and set meteringEnabled so that you can use the function peakPowerForChannel(_:), which:
Returns the peak power for a given channel, in decibels, for the sound being recorded.
This link may provide sample code.
Let me know if it helps you.
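If you really only want to monitor, not record, an AVAudioEngine input tap is another option: compute an RMS level per buffer and compare it against a threshold. A rough sketch (the buffer size, threshold value, and use of Accelerate are assumptions; microphone permission still has to be granted):
import AVFoundation
import Accelerate

// Sketch only: monitor the input level without writing any file.
let session = AVAudioSession.sharedInstance()
try? session.setCategory(.record, mode: .measurement, options: [])
try? session.setActive(true)

let engine = AVAudioEngine()
let input = engine.inputNode
let format = input.outputFormat(forBus: 0)

input.installTap(onBus: 0, bufferSize: 1024, format: format) { buffer, _ in
    guard let samples = buffer.floatChannelData?[0] else { return }
    var rms: Float = 0
    vDSP_rmsqv(samples, 1, &rms, vDSP_Length(buffer.frameLength))   // root-mean-square of the buffer
    let level = 20 * log10(max(rms, .leastNonzeroMagnitude))        // convert to dBFS (0 = full scale)
    if level > -20 {                                                 // threshold is an arbitrary assumption
        print("Sound level reached: \(level) dBFS")
    }
}

try? engine.start()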