AVAudioSession.sharedInstance().setCategory(.playAndRecord, mode: .default)
I can record using the code above. However, during playback only the earpiece at the top of the iPhone produces sound, so I would like to add defaultToSpeaker to the category options. But with that option added, recording no longer works.
Is there a solution? Please help me.
Here is the code I wrote.
final class RecordEngine {
private var engine: AVAudioEngine!
private var mixer: AVAudioMixerNode!
private var player: AVAudioPlayerNode!
private var outputFile: AVAudioFile!
let session = AVAudioSession.sharedInstance()
init() {
prepareAVAudioSession()
prepareNodes()
prepare()
}
func start() {
try! engine.start()
}
func stop() {
engine.pause()
engine.reset()
}
private func prepareAVAudioSession() {
do {
// try session.setCategory(.playAndRecord, mode: .default, options: [.allowBluetooth, .allowBluetoothA2DP])
try session.setCategory(.playAndRecord, mode: .default, options: .defaultToSpeaker)
try session.setActive(true)
} catch {
}
}
private func prepareNodes() {
engine = AVAudioEngine()
mixer = AVAudioMixerNode()
player = AVAudioPlayerNode()
engine.attach(mixer)
engine.attach(player)
}
private func prepare() {
let input = engine.inputNode
let mainMixer = engine.mainMixerNode
let format = input.outputFormat(forBus: 0)
engine.connect(player, to: mainMixer, format: format)
engine.connect(input, to: mixer, format: format)
engine.prepare()
}
func startRecord() {
start()
let format = mixer.outputFormat(forBus: 0)
let outputFileURL = URL(string: NSTemporaryDirectory() + "temp.caf")!
do {
outputFile = try AVAudioFile(forWriting: outputFileURL, settings: format.settings)
} catch {
print(error)
}
mixer.installTap(onBus: 0, bufferSize: 1024, format: format) { [weak self] buffer, when in
do {
try self?.outputFile.write(from: buffer)
print(buffer)
} catch {
print(error)
}
}
}
func stopRecord() {
mixer.removeTap(onBus: 0)
print(outputFile)
stop()
}
func startPlaying() {
start()
player.scheduleFile(outputFile, at: nil) {
print("complete")
}
player.play()
}
func pausePlaying() {
player.pause()
}
}
The problem isn't defaultToSpeaker but rather the mixer setup. You've got a mixer (the one you install the tap on) with no output connection, plus the mainMixerNode.
Do you need both mixers? If so, you could connect mixer to mainMixer:
engine.connect(mixer, to: mainMixer, format: format)
or remove mixer and use mainMixer everywhere, or some 3rd option that doesn't produce feedback.
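For illustration, here is a minimal sketch of the first option, prepare() with the extra connection added; the mixer.volume line is one way to keep the mic from being monitored live (which would otherwise feed back on the built-in speaker):
private func prepare() {
    let input = engine.inputNode
    let mainMixer = engine.mainMixerNode
    let format = input.outputFormat(forBus: 0)

    engine.connect(player, to: mainMixer, format: format)
    engine.connect(input, to: mixer, format: format)
    // New: give the tapped mixer a downstream connection so the engine actually
    // pulls audio through it and the tap fires.
    engine.connect(mixer, to: mainMixer, format: format)
    // Mute the mixer's contribution to the main mix; the tap still receives the
    // full-level mic signal, it just isn't played back live.
    mixer.volume = 0

    engine.prepare()
}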
Related
I am using the shazamkit package to recognize sound in Flutter. The Android version works perfectly, but in the iOS version I get this error when I start detection:
ERROR: [0x190bf000] >avae> AVAudioNode.mm:568: CreateRecordingTap: required condition is false: IsFormatSampleRateAndChannelCountValid(format)
*** Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: IsFormatSampleRateAndChannelCountValid(format)'
This is my Swift code:
import Flutter
import UIKit
import ShazamKit
import AudioToolbox
public class SwiftFlutterShazamKitPlugin: NSObject, FlutterPlugin {
private var session: SHSession?
private let audioEngine = AVAudioEngine()
private let playerNode = AVAudioPlayerNode()
private let mixerNode = AVAudioMixerNode()
private var callbackChannel: FlutterMethodChannel?
private var sampleRate = 44800
public static func register(with registrar: FlutterPluginRegistrar) {
let channel = FlutterMethodChannel(name: "flutter_shazam_kit", binaryMessenger: registrar.messenger())
let instance = SwiftFlutterShazamKitPlugin(callbackChannel: FlutterMethodChannel(name: "flutter_shazam_kit_callback", binaryMessenger: registrar.messenger()))
registrar.addMethodCallDelegate(instance, channel: channel)
}
init(callbackChannel: FlutterMethodChannel? = nil) {
self.callbackChannel = callbackChannel
}
public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) {
switch call.method {
case "configureShazamKitSession":
let args = call.arguments as! Dictionary<String, Any>
configureShazamKitSession(
customCatalogPath: args["customCatalogPath"] as? String,
sampleRate: args["sampleRate"] as! Int
)
result(nil)
case "startDetectionWithMicrophone":
do{
configureAudio()
try startListening(result: result)
}catch{
callbackChannel?.invokeMethod("didHasError", arguments: error.localizedDescription)
}
case "endDetectionWithMicrophone":
stopListening()
result(nil)
case "endSession":
session = nil
result(nil)
default:
result(nil)
}
}
}
//MARK: ShazamKit session delegation here
//MARK: Methods for AVAudio
extension SwiftFlutterShazamKitPlugin {
func configureShazamKitSession(customCatalogPath: String?, sampleRate: Int) {
self.sampleRate = sampleRate
do {
if session == nil {
if (customCatalogPath == nil) {
session = SHSession()
} else {
let documentsUrl = FileManager.default.urls(
for: .documentDirectory,
in: .userDomainMask
).first!
let catalog = SHCustomCatalog()
try catalog.add(from: URL(fileURLWithPath: customCatalogPath!))
session = SHSession(catalog: catalog)
}
session?.delegate = self
}
} catch let error {
callbackChannel?.invokeMethod("didHasError",
arguments: "configureShazamKitSession() failed")
}
}
func addAudio(buffer: AVAudioPCMBuffer, audioTime: AVAudioTime) {
// Add the audio to the current match request
session?.matchStreamingBuffer(buffer, at: audioTime)
}
func configureAudio() {
playerNode.stop()
audioEngine.stop()
let inputFormat = audioEngine.inputNode.inputFormat(forBus: 0)
// Set an output format compatible with ShazamKit.
let outputFormat = AVAudioFormat(standardFormatWithSampleRate: Double(sampleRate), channels: 1)
// Create a mixer node to convert the input.
audioEngine.attach(mixerNode)
// Attach the mixer to the microphone input and the output of the audio engine.
audioEngine.connect(audioEngine.inputNode, to: mixerNode, format: inputFormat)
// audioEngine.connect(mixerNode, to: audioEngine.outputNode, format: outputFormat)
// Install a tap on the mixer node to capture the microphone audio.
mixerNode.installTap(onBus: 0,
bufferSize: 8192,
format: outputFormat) { buffer, audioTime in
// Add captured audio to the buffer used for making a match.
self.addAudio(buffer: buffer, audioTime: audioTime)
}
}
func startListening(result: FlutterResult) throws {
guard session != nil else{
callbackChannel?.invokeMethod("didHasError", arguments: "ShazamSession not found, please call configureShazamKitSession() first to initialize it.")
result(nil)
return
}
callbackChannel?.invokeMethod("detectStateChanged", arguments: 1)
// Throw an error if the audio engine is already running.
guard !audioEngine.isRunning else {
callbackChannel?.invokeMethod("didHasError", arguments: "Audio engine is currently running, please stop the audio engine first and then try again")
return
}
let audioSession = AVAudioSession.sharedInstance()
do {
try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
} catch {
print("audioSession properties weren't set because of an error.")
return
}
// Ask the user for permission to use the mic if required then start the engine.
try audioSession.setCategory(.playAndRecord)
audioSession.requestRecordPermission { [weak self] success in
guard success else {
self?.callbackChannel?.invokeMethod("didHasError", arguments: "Recording permission not found, please allow permission first and then try again")
return
}
do{
self?.audioEngine.prepare()
try self?.audioEngine.start()
}catch{
self?.callbackChannel?.invokeMethod("didHasError", arguments: "Can't start the audio engine")
}
}
result(nil)
}
func stopListening() {
callbackChannel?.invokeMethod("detectStateChanged", arguments: 0)
// Check if the audio engine is already recording.
mixerNode.removeTap(onBus: 0)
audioEngine.stop()
}
}
//MARK: Delegate methods for SHSession
extension SwiftFlutterShazamKitPlugin: SHSessionDelegate{
public func session(_ session: SHSession, didFind match: SHMatch) {
var mediaItems: [[String: Any]] = []
match.mediaItems.forEach { rawItem in
var item: [String : Any] = [:]
var count: UInt32 = 0
let properties = class_copyPropertyList(class_getSuperclass(rawItem.classForCoder), &count)
for i in 0..<count {
guard let property = properties?[Int(i)] else { continue }
let name = String(cString: property_getName(property))
if (name == "properties") {
let props = rawItem.value(forKey: name) as! NSDictionary
for property in props.allKeys {
let prop = property as! String
var val = props.value(forKey: prop)!
if (String(describing: type(of: val)) == "__NSTaggedDate") {
let dateFormatter = DateFormatter()
dateFormatter.dateFormat = "yyyy-MM-dd'T'HH:mm:ss.SSSZ"
val = dateFormatter.string(from: val as! Date)
}
item[prop] = val
}
}
}
mediaItems.append(item)
free(properties)
}
do {
let jsonData = try JSONSerialization.data(withJSONObject: mediaItems)
let jsonString = String(data: jsonData, encoding: .utf8)
self.callbackChannel?.invokeMethod("matchFound", arguments: jsonString)
} catch {
callbackChannel?.invokeMethod("didHasError", arguments: "Error when trying to format data, please try again")
}
}
public func session(_ session: SHSession, didNotFindMatchFor signature: SHSignature, error: Error?) {
callbackChannel?.invokeMethod("notFound", arguments: nil)
callbackChannel?.invokeMethod("didHasError", arguments: error?.localizedDescription)
}
}
Your help is greatly appreciated
https://pub.dev/packages/flutter_shazam_kit
I am working on an audio streaming application with recording functionality for the receiver.
I am stuck at the point where the user wants to record the audio stream on the receiver side.
Below is my code.
Initialisation
var engine = AVAudioEngine()
var recordingFile: AVAudioFile?
var audioPlayer: AVAudioPlayer?
let player = AVAudioPlayerNode()
var isRecording: Bool = false
Initialise AudioEngine
func initializeAudioEngine() {
let input = self.engine.inputNode
let format = input.inputFormat(forBus: 0)
self.engine.attach(self.player)
let mainMixerNode = self.engine.mainMixerNode
self.engine.connect(input, to:mainMixerNode, format: format)
self.engine.prepare()
do {
try self.engine.start()
self.startRecording()
} catch (let error) {
print("START FAILED", error)
}
}
Start Recording
func startRecording() {
self.createRecordingFile()
self.engine.mainMixerNode.installTap(onBus: 0,
bufferSize: 1024,
format: self.engine.mainMixerNode.outputFormat(forBus: 0)) { (buffer, time) -> Void in
do {
self.isRecording = true
try self.recordingFile?.write(from: buffer)
} catch (let error) {
print("RECORD ERROR", error);
}
return
}
}
Create Buffer
private func createBuffer(forFileNamed fileName: String) -> AVAudioPCMBuffer? {
var res: AVAudioPCMBuffer?
if let fileURL = Bundle.main.url(forResource: fileName, withExtension: "caf") {
do {
let file = try AVAudioFile(forReading: fileURL)
res = AVAudioPCMBuffer(pcmFormat: file.processingFormat, frameCapacity:AVAudioFrameCount(file.length))
if let _ = res {
do {
try file.read(into: res!)
} catch (let error) {
print("ERROR read file", error)
}
}
} catch (let error) {
print("ERROR file creation", error)
}
}
return res
}
Stop Recording
func stopRecording() {
self.engine.mainMixerNode.removeTap(onBus: 0)
}
I am trying to record using earphones, but it's not working.
It will work, because once you set up
let audiosession = AVAudioSession.sharedInstance()
with the AVAudioSessionCategoryPlayAndRecord category and call
try audiosession.setActive(true)
it will capture whatever audio is routed to the device.
WebRTC does not have any internal API to start or stop recording, so we can try using AVAudioSession instead.
First, set up the audio session:
func setUPAudioSession() -> Bool {
let audiosession = AVAudioSession.sharedInstance()
do {
try audiosession.setCategory(AVAudioSessionCategoryPlayAndRecord)
} catch(let error) {
print("--> \(error.localizedDescription)")
}
do {
try audiosession.setActive(true)
} catch (let error) {
print("--> \(error.localizedDescription)")
}
return audiosession.isInputAvailable;
}
After setting up the audio session, start recording as below:
func startRecording() -> Bool {
var settings: [String: Any] = [String: String]()
settings[AVFormatIDKey] = kAudioFormatLinearPCM
settings[AVSampleRateKey] = 8000.0
settings[AVNumberOfChannelsKey] = 1
settings[AVLinearPCMBitDepthKey] = 16
settings[AVLinearPCMIsBigEndianKey] = false
settings[AVLinearPCMIsFloatKey] = false
settings[AVEncoderAudioQualityKey] = AVAudioQuality.max.rawValue
// Save the recorded file into the app's Documents directory
let searchPaths: [String] = NSSearchPathForDirectoriesInDomains(.documentDirectory, .allDomainsMask, true)
let documentPath = searchPaths.first!
// dateString is assumed to be a timestamped file name defined elsewhere
let pathToSave = "\(documentPath)/\(dateString)"
let url: URL = URL(fileURLWithPath: pathToSave)
recorder = try? AVAudioRecorder(url: url, settings: settings)
// Initialize delegate, metering, etc.
recorder?.delegate = self
recorder?.isMeteringEnabled = true
recorder?.prepareToRecord()
if let recordIs = recorder {
return recordIs.record()
}
return false
}
Play recorded file
func playRecordingFile() {
// Get the path of the recorded file saved in the previous method
let searchPaths: [String] = NSSearchPathForDirectoriesInDomains(.documentDirectory, .allDomainsMask, true)
guard let documentPath = searchPaths.first else { return }
let fileManager = FileManager.default
var recordedSounds: [String] = []
if fileManager.fileExists(atPath: documentPath) {
recordedSounds = (try? fileManager.contentsOfDirectory(atPath: documentPath)) ?? []
}
guard let firstSound = recordedSounds.first else { return }
let url = URL(fileURLWithPath: "\(documentPath)/\(firstSound)")
// Note: keep a strong reference to the player (e.g. a property) in real code,
// otherwise it is deallocated before playback finishes.
let player = try? AVAudioPlayer(contentsOf: url)
player?.delegate = self
try? AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback)
player?.prepareToPlay()
player?.play()
}
Stop recording
func stopRecording() {
recorder?.stop()
}
pauseRecording
func pauseRecording() {
recorder?.pause()
}
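A rough usage sketch of the pieces above, assuming they live in one class that declares the recorder property and conforms to AVAudioRecorderDelegate and AVAudioPlayerDelegate:
// Hypothetical call order, tying the snippets together.
if setUPAudioSession() {        // configure .playAndRecord and activate the session
    _ = startRecording()        // starts writing a PCM file into Documents
    // ... later ...
    stopRecording()             // finalizes the file on disk
    playRecordingFile()         // plays back the most recently saved file
}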
I'm trying to record audio, then save offline with AudioKit.renderToFile, then use AKPlayer to play the original recorded audio file.
import UIKit
import AudioKit
class ViewController: UIViewController {
private var recordUrl:URL!
private var isRecording:Bool = false
public var player:AKPlayer!
private let format = AVAudioFormat(commonFormat: .pcmFormatFloat64, sampleRate: 44100, channels: 2, interleaved: true)!
private var amplitudeTracker:AKAmplitudeTracker!
private var boostedMic:AKBooster!
private var mic:AKMicrophone!
private var micMixer:AKMixer!
private var silence:AKBooster!
public var recorder: AKNodeRecorder!
@IBOutlet weak var recordButton: UIButton!
override func viewDidLoad() {
super.viewDidLoad()
//self.recordUrl = Bundle.main.url(forResource: "sound", withExtension: "caf")
//self.startAudioPlayback(url: self.recordUrl!)
self.recordUrl = self.urlForDocument("record.caf")
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
func requestMic(completion: @escaping () -> Void) {
AVAudioSession.sharedInstance().requestRecordPermission({ (granted: Bool) in
if granted { completion()}
})
}
public func switchToMicrophone() {
stopEngine()
do {
try AKSettings.setSession(category: .playAndRecord, with: .allowBluetoothA2DP)
} catch {
AKLog("Could not set session category.")
}
mic = AKMicrophone()
micMixer = AKMixer(mic)
boostedMic = AKBooster(micMixer, gain: 5)
amplitudeTracker = AKAmplitudeTracker(boostedMic)
silence = AKBooster(amplitudeTracker, gain: 0)
AudioKit.output = silence
startEngine()
}
@IBAction func startStopRecording(_ sender: Any) {
self.isRecording = !self.isRecording
if self.isRecording {
self.startRecording()
self.recordButton.setTitle("Stop Recording", for: .normal)
} else {
self.stopRecording()
self.recordButton.setTitle("Start Recording", for: .normal)
}
}
func startRecording() {
self.requestMic() {
self.switchToMicrophone()
if let url = self.recordUrl {
do {
let audioFile = try AKAudioFile(forWriting: url, settings: self.format.settings, commonFormat: .pcmFormatFloat64, interleaved: true)
self.recorder = try AKNodeRecorder(node: self.micMixer, file: audioFile)
try self.recorder.reset()
try self.recorder.record()
} catch {
print("error setting up recording", error)
}
}
}
}
func stopRecording() {
recorder.stop()
startAudioPlayback(url: self.recordUrl)
}
@IBAction func saveToDisk(_ sender: Any) {
if let source = self.player, let saveUrl = self.urlForDocument("pitchAudio.caf") {
do {
source.stop()
let audioFile = try AKAudioFile(forWriting: saveUrl, settings: self.format.settings, commonFormat: .pcmFormatFloat64, interleaved: true)
try AudioKit.renderToFile(audioFile, duration: source.duration, prerender: {
source.play()
})
print("audio file rendered")
} catch {
print("error rendering", error)
}
// PROBLEM STARTS HERE //
self.startAudioPlayback(url: self.recordUrl)
}
}
public func startAudioPlayback(url:URL) {
print("loading playback audio", url)
self.stopEngine()
do {
try AKSettings.setSession(category: .playback)
player = AKPlayer.init()
try player.load(url: url)
}
catch {
print("error setting up audio playback", error)
return
}
player.prepare()
player.isLooping = true
self.setPitch(pitch: self.getPitch(), saveValue: false)
AudioKit.output = player
startEngine()
startPlayer()
}
public func startPlayer() {
if AudioKit.engine.isRunning { self.player.play() }
else { print("audio engine not running, can't play") }
}
public func startEngine() {
if !AudioKit.engine.isRunning {
print("starting engine")
do { try AudioKit.start() }
catch {
print("error starting audio", error)
}
}
}
public func stopEngine(){
if AudioKit.engine.isRunning {
print("stopping engine")
do {
try AudioKit.stop()
}
catch {
print("error stopping audio", error)
}
}
//playback doesn't work without this?
mic = nil
}
@IBAction func changePitch(_ sender: UISlider) {
self.setPitch(pitch:Double(sender.value))
}
public func getPitch() -> Double {
return UserDefaults.standard.double(forKey: "pitchFactor")
}
public func setPitch(pitch:Double, saveValue:Bool = true) {
player.pitch = pitch * 1000.0
if saveValue {
UserDefaults.standard.set(pitch, forKey: "pitchFactor")
UserDefaults.standard.synchronize()
}
}
func urlForDocument(_ named:String) -> URL? {
let path = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0] as String
let url = NSURL(fileURLWithPath: path)
if let pathComponent = url.appendingPathComponent(named) {
return pathComponent
}
return nil
}
}
The order of calls is switchToMicrophone, startRecording, stopRecording, startAudioPlayback, saveToDisk, and again startAudioPlayback.
Please see the GitHub repo for the full code in ViewController.swift.
After the renderToFile function, when restarting AudioKit for the player, the following errors occur:
[mcmx] 338: input bus 0 sample rate is 0
[avae] AVAEInternal.h:103:_AVAE_CheckNoErr: [AVAudioEngineGraph.mm:1265:Initialize: (err = AUGraphParser::InitializeActiveNodesInOutputChain(ThisGraph, kOutputChainOptimizedTraversal, *GetOutputNode(), isOutputChainActive)): error -10875
[avae] AVAudioEngine.mm:149:-[AVAudioEngine prepare]: Engine#0x1c4008ae0: could not initialize, error = -10875
[mcmx] 338: input bus 0 sample rate is 0
[avae] AVAEInternal.h:103:_AVAE_CheckNoErr: [AVAudioEngineGraph.mm:1265:Initialize: (err = AUGraphParser::InitializeActiveNodesInOutputChain(ThisGraph, kOutputChainOptimizedTraversal, *GetOutputNode(), isOutputChainActive)): error -10875
error starting audio Error Domain=com.apple.coreaudio.avfaudio Code=-10875 "(null)" UserInfo={failed call=err = AUGraphParser::InitializeActiveNodesInOutputChain(ThisGraph, kOutputChainOptimizedTraversal, *GetOutputNode(), isOutputChainActive)} ***
This all works flawlessly if I take the recording piece out, or the offline render out, but not with both included.
It might be that the issue is with your order of execution. Try swapping startAudioPlayback and saveToDisk, so that it first does saveToDisk and then reads the file back and plays it with startAudioPlayback.
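For example, a rough sketch of that reordered flow, using the method names from the question (note that saveToDisk as written relies on self.player, so the player still has to be set up before the offline render runs):
// Hypothetical call order: render offline first, then load the file for playback.
switchToMicrophone()
startRecording()
// ... user taps stop ...
stopRecording()                       // stop the recorder only; don't start playback here
saveToDisk(self)                      // offline render while the engine isn't being torn down
startAudioPlayback(url: recordUrl)    // now read the recorded file back and play it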
EDIT: So far, by playing around with it, I believe I have identified the issue. Once you save the file, the temp file that held the recording disappears for some reason; why that happens still needs to be narrowed down.
Or perhaps experiment with sending the whole saveToDisk method to a background thread so it doesn't interrupt the currently playing file.
In my spare time I'll try to tweak it a little more and let you know.
EDIT 2:
Check this: https://stackoverflow.com/a/48133092/9497657
If you get nowhere, try posting your issue here:
https://github.com/audiokit/AudioKit/issues/
Also check out this tutorial:
https://www.raywenderlich.com/145770/audiokit-tutorial-getting-started
It might also be useful to message Aurelius Prochazka, as he is a developer of AudioKit who could help.
I was able to get it working by combining the recording and playback into a single pipeline:
mixer = AKMixer(mic)
boostedMic = AKBooster(mixer, gain: 5)
amplitudeTracker = AKAmplitudeTracker(boostedMic)
micBooster = AKBooster(amplitudeTracker, gain: 0)
player = AKPlayer()
try? player.load(url: self.recordUrl)
player.prepare()
player.gain = 2.0
outputMixer = AKMixer(micBooster, player)
AudioKit.output = outputMixer
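For reference, here is a rough sketch (not part of the original answer) of how the AKNodeRecorder from the question might hook into this combined chain, assuming the same recordUrl and format properties:
// Hypothetical: record the mic branch of the combined graph while the player
// node stays attached, so no engine or session switch is needed between passes.
do {
    let audioFile = try AKAudioFile(forWriting: recordUrl,
                                    settings: format.settings,
                                    commonFormat: .pcmFormatFloat64,
                                    interleaved: true)
    recorder = try AKNodeRecorder(node: mixer, file: audioFile)
    try recorder.record()
} catch {
    print("error setting up recording", error)
}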
I am working on getting audio data from the iPhone mic and sending it to a socket. I have already tried AVAudioEngine to get the audio buffer, but somehow it's not working, so can you please suggest a better way to capture the recording buffer data live?
override func viewDidLoad() {
super.viewDidLoad()
// initialize engine
engine = AVAudioEngine()
guard nil != engine?.inputNode else {
// #TODO: error out
return
}
SocketIOManager.sharedInstance.socket.on("listen") {data, ack in
let BuffurData:Data = data[0] as! Data
// let playData = self?.audioBufferToNSData(PCMBuffer: BuffurData as! AVAudioPCMBuffer)
do {
// let data = NSData(bytes: &BuffurData, length: BuffurData.count)
let player = try AVAudioPlayer(data:BuffurData)
player.play()
} catch let error as NSError {
print(error.description)
}
print("socket connected \(data)")
}
}
func installTap() {
engine = AVAudioEngine()
guard let engine = engine, let input = engine.inputNode else {
// #TODO: error out
return
}
let format = input.inputFormat(forBus: 0)
input.installTap(onBus: 0, bufferSize:4096, format:format, block: { [weak self] buffer, when in
guard let this = self else {
return
}
// writing to file: for testing purposes only
do {
try this.file!.write(from: buffer)
} catch {
}
if let channel1Buffer = buffer.floatChannelData?[0] {
let test = self?.copyAudioBufferBytes(buffer)
let stram = self?.toNSData(PCMBuffer: buffer)
SocketIOManager.sharedInstance.socket.emit("talk",stram!);
// socket.on("listen", function (data)
/*! #property floatChannelData
#abstract Access the buffer's float audio samples.
#discussion
floatChannelData returns pointers to the buffer's audio samples if the buffer's format is
32-bit float, or nil if it is another format.
The returned pointer is to format.channelCount pointers to float. Each of these pointers
is to "frameLength" valid samples, which are spaced by "stride" samples.
If format.interleaved is false (as with the standard deinterleaved float format), then
the pointers will be to separate chunks of memory. "stride" is 1.
If format.interleaved is true, then the pointers will refer into the same chunk of interleaved
samples, each offset by 1 frame. "stride" is the number of interleaved channels.
*/
// #TODO: send data, better to pass into separate queue for processing
}
})
engine.prepare()
do {
try engine.start()
} catch {
// #TODO: error out
}
}
Try this code:
var audioPlayerQueue = DispatchQueue(label: "audioPlayerQueue", qos: DispatchQoS.userInteractive)
var peerAudioPlayer: AVAudioPlayerNode = AVAudioPlayerNode()
var peerInputFormat: AVAudioFormat?
override func viewDidLoad() {
super.viewDidLoad()
// initialize engine
engine = AVAudioEngine()
guard nil != engine?.inputNode else {
// #TODO: error out
return
}
engine.attach(self.peerAudioPlayer)
self.peerInputFormat = AVAudioFormat.init(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false)
self.engine.connect(self.peerAudioPlayer, to: self.engine.mainMixerNode, format: self.peerInputFormat)
do {
engine.prepare()
try engine.start()
} catch let error {
print(error.localizedDescription)
}
SocketIOManager.sharedInstance.socket.on("listen") { data, ack in
let pcmBuffer = self.toPCMBuffer(data: data[0] as! NSData)
self.audioPlayerQueue.async {
self.peerAudioPlayer.scheduleBuffer(pcmBuffer, completionHandler: nil)
if self.engine.isRunning {
self.peerAudioPlayer.play()
} else {
do {
try self.engine.start()
} catch {
print(error.localizedDescription)
}
}
}
print("socket connected \(data)")
}
}
func toPCMBuffer(data: NSData) -> AVAudioPCMBuffer {
let audioFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false) // given NSData audio format
let PCMBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: UInt32(data.length) / audioFormat.streamDescription.pointee.mBytesPerFrame)
PCMBuffer.frameLength = PCMBuffer.frameCapacity
let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData, count: Int(PCMBuffer.format.channelCount))
data.getBytes(UnsafeMutableRawPointer(channels[0]) , length: data.length)
return PCMBuffer
}
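// Note: the toNSData(PCMBuffer:) helper used below is not shown in the original
// snippets. A minimal sketch of the inverse of toPCMBuffer(data:) above, assuming
// mono float32 buffers as configured in this answer, might look like this:
func toNSData(PCMBuffer: AVAudioPCMBuffer) -> NSData {
    let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData,
                                       count: Int(PCMBuffer.format.channelCount))
    let length = Int(PCMBuffer.frameLength * PCMBuffer.format.streamDescription.pointee.mBytesPerFrame)
    return NSData(bytes: channels[0], length: length)
}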
func installTap() {
engine = AVAudioEngine()
guard let engine = engine, let input = engine.inputNode else {
// #TODO: error out
return
}
let format = input.inputFormat(forBus: 0)
input.installTap(onBus: 0, bufferSize: 4410, format: format, block: { [weak self] (buffer: AVAudioPCMBuffer, _: AVAudioTime) in
guard let this = self else {
return
}
let stream = this.toNSData(PCMBuffer: buffer)
SocketIOManager.sharedInstance.socket.emit("talk", stream)
})
do {
engine.prepare()
try engine.start()
} catch {
// #TODO: error out
}
}
// Edit: to enable the loudspeaker
func speakerEnabled(_ enabled:Bool) -> Bool {
let session = AVAudioSession.sharedInstance()
var options = session.categoryOptions
if (enabled) {
options.insert(.defaultToSpeaker)
} else {
options.remove(.defaultToSpeaker)
}
try! session.setCategory(AVAudioSessionCategoryPlayAndRecord, with: options)
return true
}
I'm pretty new to Swift/iOS; I just started learning a couple of days ago. I'm using Swift 3 and want to develop two iPhone apps that can send an audio stream from the microphone to other iPhone devices using Multipeer Connectivity. The first app would be the speaker's app and the other would be the listener's app.
Previously, I learned how to advertise, browse, and invite peers from this useful tutorial,
and I learned how to get audio data from the microphone and convert it to bytes from this answer and this answer. Thanks a lot to Rhythmic Fistman.
So, my code is a combination of what those articles include.
This is the ViewController of the listener app:
import UIKit
import MultipeerConnectivity
import AVFoundation
class ColorSwitchViewController: UIViewController {
@IBOutlet weak var connectionsLabel: UILabel!
let colorService = ColorServiceManager()
var engine = AVAudioEngine()
let player = AVAudioPlayerNode()
// Somewhere, schedule the stream in the mainRunLoop, set the delegate and open it. Choose the peer that you want to connect
var inputStream = InputStream()
var inputStreamIsSet: Bool!
var outputStreamIsSet: Bool!
public let peerID = MCPeerID(displayName: UIDevice.current.name)
//MARK: Private Functions
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
func copyAudioBufferBytes(_ audioBuffer: AVAudioPCMBuffer) -> [UInt8] {
let srcLeft = audioBuffer.floatChannelData![0]
let bytesPerFrame = audioBuffer.format.streamDescription.pointee.mBytesPerFrame
let numBytes = Int(bytesPerFrame * audioBuffer.frameLength)
// initialize bytes to 0 (how to avoid?)
var audioByteArray = [UInt8] (repeating: 0, count: numBytes)
// copy data from buffer
srcLeft.withMemoryRebound(to: UInt8.self, capacity: numBytes) { srcByteData in
audioByteArray.withUnsafeMutableBufferPointer {
$0.baseAddress!.initialize(from: srcByteData, count: numBytes)
}
}
return audioByteArray
}
func bytesToAudioBuffer(_ buf: [UInt8]) -> AVAudioPCMBuffer {
let fmt = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: true)
let frameLength = UInt32(buf.count) / fmt.streamDescription.pointee.mBytesPerFrame
let audioBuffer = AVAudioPCMBuffer(pcmFormat: fmt, frameCapacity: frameLength)
audioBuffer.frameLength = frameLength
let dstLeft = audioBuffer.floatChannelData![0]
buf.withUnsafeBufferPointer {
let src = UnsafeRawPointer($0.baseAddress!).bindMemory(to: Float.self, capacity: Int(frameLength))
dstLeft.initialize(from: src, count: Int(frameLength))
}
return audioBuffer
}
override func viewDidLoad() {
super.viewDidLoad()
colorService.delegate = self
}
@IBAction func redTapped() {
self.change(color: .red)
colorService.send(colorName: "red")
}
@IBAction func yellowTapped() {
self.change(color: .yellow)
colorService.send(colorName: "yellow")
}
func change(color : UIColor) {
UIView.animate(withDuration: 0.2) {
self.view.backgroundColor = color
}
}
}
extension ColorSwitchViewController : ColorServiceManagerDelegate {
func connectedDevicesChanged(manager: ColorServiceManager, connectedDevices: [String]) {
OperationQueue.main.addOperation {
self.connectionsLabel.text = "Connections: \(connectedDevices)"
}
}
func colorChanged(manager: ColorServiceManager, colorString: String) {
OperationQueue.main.addOperation {
switch colorString {
case "red":
self.change(color: .red)
case "yellow":
self.change(color: .yellow)
default:
NSLog("%#", "Unknown color value received: \(colorString)")
}
}
}
func streamReceived(manager: ColorServiceManager, stream: InputStream, streamName: String, fromPeer: MCPeerID) {
NSLog("%#", "name " + fromPeer.displayName)
if streamName == "stream" && fromPeer != peerID {
NSLog("%#", "voice received")
stream.schedule(in: RunLoop.current, forMode: .defaultRunLoopMode)
stream.open()
var bytes = [UInt8](repeating: 0, count: 17640)
if (stream.hasBytesAvailable == true) {
NSLog("%#", "has bytes available...")
} else {
NSLog("%#", "has NO byte ...")
}
let result = stream.read(&bytes, maxLength: bytes.count)
if result == 0 {
print("Stream at capacity")
} else if result == -1 {
print("Operation failed: \(String(describing: stream.streamError))")
} else {
print("The number of bytes read is \(result)")
}
let audioBuffer = self.bytesToAudioBuffer(bytes) //Here is where the app crashes
engine.attach(player)
let outputFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: true)
engine.connect(player, to: engine.mainMixerNode, format: outputFormat)
do {
try engine.start()
player.scheduleBuffer(audioBuffer, completionHandler: nil)
player.play()
} catch let error {
print(error.localizedDescription)
}
}
}
}
And the ViewController of the speaker app is similar, except that it contains code for sending the stream and doesn't contain code for receiving:
// ....
override func viewDidLoad() {
super.viewDidLoad()
colorService.delegate = self
}
func startStream() {
let input = engine.inputNode!
engine.attach(player)
let bus = 0
let inputFormat = input.inputFormat(forBus: bus)
engine.connect(player, to: engine.mainMixerNode, format: inputFormat)
NSLog("%#", "sendStream: to \(self.colorService.session.connectedPeers.count) peers")
if self.colorService.session.connectedPeers.count > 0 {
do {
let outputStream = try self.colorService.session.startStream(withName: "stream", toPeer: self.colorService.session.connectedPeers.first!)
outputStream.schedule(in: RunLoop.main, forMode:RunLoopMode.defaultRunLoopMode)
outputStream.open()
let bus = 0
let inputFormat = input.inputFormat(forBus: bus)
input.installTap(onBus: bus, bufferSize: 2048, format: inputFormat, block: {
(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
self.player.scheduleBuffer(buffer)
let audioBuffer = self.copyAudioBufferBytes(buffer)
// NSLog("%#", "speaking...")
let result = outputStream.write(audioBuffer, maxLength: audioBuffer.count)
if result == 0 {
print("Stream at capacity")
} else if result == -1 {
print("Operation failed: \(String(describing: outputStream.streamError))")
} else {
print("The number of bytes written is \(result)")
}
})
try! engine.start()
player.play()
}
catch let error {
NSLog("%#", "Error for sending: \(error)")
}
}
}
func stopStream() {
engine.inputNode?.removeTap(onBus: 0)
player.stop()
}
@IBAction func redTapped() {
self.change(color: .red)
colorService.send(colorName: "red")
self.startStream()
}
@IBAction func yellowTapped() {
self.change(color: .yellow)
colorService.send(colorName: "yellow")
self.stopStream()
}
// ...
Unfortunately, on the listener side, the app receives the stream with no bytes available; NSLog("%@", "has NO byte ...") is what gets called. I wonder whether the listener app really receives the audio stream at all.
So, what's my mistake here? Any help would be appreciated. Thank you in advance.