MCSessionDelegate - Getting No Bytes When Receiving Stream - iOS

I'm pretty new to Swift/iOS; I only started learning a couple of days ago. I'm using Swift 3 and want to develop two iPhone apps that can send an audio stream from the microphone to other iPhone devices using Multipeer Connectivity. The first app is the speaker's app and the other is the listener's app.
Previously, I learned how to advertise, browse, and invite peers from this useful tutorial,
and I learned how to get audio data from the microphone and convert it to bytes from this answer and this answer. Thanks a lot to Rhythmic Fistman.
So my code is a combination of what those articles cover.
This is the ViewController of the listener app:
import UIKit
import MultipeerConnectivity
import AVFoundation
class ColorSwitchViewController: UIViewController {
@IBOutlet weak var connectionsLabel: UILabel!
let colorService = ColorServiceManager()
var engine = AVAudioEngine()
let player = AVAudioPlayerNode()
// Somewhere, schedule the stream in the mainRunLoop, set the delegate and open it. Choose the peer that you want to connect
var inputStream = InputStream()
var inputStreamIsSet: Bool!
var outputStreamIsSet: Bool!
public let peerID = MCPeerID(displayName: UIDevice.current.name)
//MARK: Private Functions
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
func copyAudioBufferBytes(_ audioBuffer: AVAudioPCMBuffer) -> [UInt8] {
let srcLeft = audioBuffer.floatChannelData![0]
let bytesPerFrame = audioBuffer.format.streamDescription.pointee.mBytesPerFrame
let numBytes = Int(bytesPerFrame * audioBuffer.frameLength)
// initialize bytes to 0 (how to avoid?)
var audioByteArray = [UInt8] (repeating: 0, count: numBytes)
// copy data from buffer
srcLeft.withMemoryRebound(to: UInt8.self, capacity: numBytes) { srcByteData in
audioByteArray.withUnsafeMutableBufferPointer {
$0.baseAddress!.initialize(from: srcByteData, count: numBytes)
}
}
return audioByteArray
}
func bytesToAudioBuffer(_ buf: [UInt8]) -> AVAudioPCMBuffer {
let fmt = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: true)
let frameLength = UInt32(buf.count) / fmt.streamDescription.pointee.mBytesPerFrame
let audioBuffer = AVAudioPCMBuffer(pcmFormat: fmt, frameCapacity: frameLength)
audioBuffer.frameLength = frameLength
let dstLeft = audioBuffer.floatChannelData![0]
buf.withUnsafeBufferPointer {
let src = UnsafeRawPointer($0.baseAddress!).bindMemory(to: Float.self, capacity: Int(frameLength))
dstLeft.initialize(from: src, count: Int(frameLength))
}
return audioBuffer
}
override func viewDidLoad() {
super.viewDidLoad()
colorService.delegate = self
}
@IBAction func redTapped() {
self.change(color: .red)
colorService.send(colorName: "red")
}
@IBAction func yellowTapped() {
self.change(color: .yellow)
colorService.send(colorName: "yellow")
}
func change(color : UIColor) {
UIView.animate(withDuration: 0.2) {
self.view.backgroundColor = color
}
}
}
extension ColorSwitchViewController : ColorServiceManagerDelegate {
func connectedDevicesChanged(manager: ColorServiceManager, connectedDevices: [String]) {
OperationQueue.main.addOperation {
self.connectionsLabel.text = "Connections: \(connectedDevices)"
}
}
func colorChanged(manager: ColorServiceManager, colorString: String) {
OperationQueue.main.addOperation {
switch colorString {
case "red":
self.change(color: .red)
case "yellow":
self.change(color: .yellow)
default:
NSLog("%#", "Unknown color value received: \(colorString)")
}
}
}
func streamReceived(manager: ColorServiceManager, stream: InputStream, streamName: String, fromPeer: MCPeerID) {
NSLog("%#", "name " + fromPeer.displayName)
if streamName == "stream" && fromPeer != peerID {
NSLog("%#", "voice received")
stream.schedule(in: RunLoop.current, forMode: .defaultRunLoopMode)
stream.open()
var bytes = [UInt8](repeating: 0, count: 17640)
if (stream.hasBytesAvailable == true) {
NSLog("%#", "has bytes available...")
} else {
NSLog("%#", "has NO byte ...")
}
let result = stream.read(&bytes, maxLength: bytes.count)
if result == 0 {
print("Stream at capacity")
} else if result == -1 {
print("Operation failed: \(String(describing: stream.streamError))")
} else {
print("The number of bytes read is \(result)")
}
let audioBuffer = self.bytesToAudioBuffer(bytes) //Here is where the app crashes
engine.attach(player)
let outputFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: true)
engine.connect(player, to: engine.mainMixerNode, format: outputFormat)
do {
try engine.start()
player.scheduleBuffer(audioBuffer, completionHandler: nil)
player.play()
} catch let error {
print(error.localizedDescription)
}
}
}
}
The ViewController of the speaker app is similar, except that it contains code for sending the stream and no code for receiving it:
// ....
override func viewDidLoad() {
super.viewDidLoad()
colorService.delegate = self
}
func startStream() {
let input = engine.inputNode!
engine.attach(player)
let bus = 0
let inputFormat = input.inputFormat(forBus: bus)
engine.connect(player, to: engine.mainMixerNode, format: inputFormat)
NSLog("%#", "sendStream: to \(self.colorService.session.connectedPeers.count) peers")
if self.colorService.session.connectedPeers.count > 0 {
do {
let outputStream = try self.colorService.session.startStream(withName: "stream", toPeer: self.colorService.session.connectedPeers.first!)
outputStream.schedule(in: RunLoop.main, forMode:RunLoopMode.defaultRunLoopMode)
outputStream.open()
let bus = 0
let inputFormat = input.inputFormat(forBus: bus)
input.installTap(onBus: bus, bufferSize: 2048, format: inputFormat, block: {
(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
self.player.scheduleBuffer(buffer)
let audioBuffer = self.copyAudioBufferBytes(buffer)
// NSLog("%#", "speaking...")
let result = outputStream.write(audioBuffer, maxLength: audioBuffer.count)
if result == 0 {
print("Stream at capacity")
} else if result == -1 {
print("Operation failed: \(String(describing: outputStream.streamError))")
} else {
print("The number of bytes written is \(result)")
}
})
try! engine.start()
player.play()
}
catch let error {
NSLog("%#", "Error for sending: \(error)")
}
}
}
func stopStream() {
engine.inputNode?.removeTap(onBus: 0)
player.stop()
}
@IBAction func redTapped() {
self.change(color: .red)
colorService.send(colorName: "red")
self.startStream()
}
@IBAction func yellowTapped() {
self.change(color: .yellow)
colorService.send(colorName: "yellow")
self.stopStream()
}
// ...
Unfortunately, on the listener side the app receives the stream but gets no bytes: NSLog("%@", "has NO byte ...") is what gets called. I wonder whether the listener app really receives the audio stream or not.
So, what's my mistake here? Any help would be appreciated. Thank you in advance.
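For reference, one common way to address the "no bytes" pattern is to stop reading the stream right after open() and instead read from a StreamDelegate callback once .hasBytesAvailable actually fires, keeping the engine/player setup out of the per-chunk path. This is only a sketch under those assumptions (the AudioStreamReceiver type and its onBytes callback are invented names for illustration; the 17640-byte chunk size mirrors the buffer used above):
import Foundation

// Sketch: delegate-driven reading of the incoming MultipeerConnectivity stream.
class AudioStreamReceiver: NSObject, StreamDelegate {
    private let chunkSize = 17640                 // must match what the sender writes
    var onBytes: (([UInt8], Int) -> Void)?        // hand the bytes to bytesToAudioBuffer(_:) here

    func start(_ stream: InputStream) {
        stream.delegate = self
        // Schedule on a run loop that is actually running; the queue that calls the
        // MCSession delegate usually has none, so the main run loop is used in this sketch.
        stream.schedule(in: RunLoop.main, forMode: .defaultRunLoopMode)
        stream.open()
    }

    func stream(_ aStream: Stream, handle eventCode: Stream.Event) {
        guard eventCode == .hasBytesAvailable, let input = aStream as? InputStream else { return }
        var bytes = [UInt8](repeating: 0, count: chunkSize)
        let count = input.read(&bytes, maxLength: bytes.count)
        if count > 0 {
            onBytes?(bytes, count)                // convert and schedule on the player from here
        }
    }
}
With something like this, streamReceived(manager:stream:streamName:fromPeer:) would only create the receiver and call start(_:), while attaching the player and starting the engine would happen once, e.g. in viewDidLoad().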

Related

App crashes on iOS (Flutter) when I use the shazamkit package

I am using the shazamkit package to recognize sound in Flutter. The Android version works perfectly, but in the iOS version I get this error as soon as I start detection:
ERROR: [0x190bf000] >avae> AVAudioNode.mm:568: CreateRecordingTap: required condition is false: IsFormatSampleRateAndChannelCountValid(format)
*** Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: IsFormatSampleRateAndChannelCountValid(format)'
This is my Swift code:
import Flutter
import UIKit
import ShazamKit
import AudioToolbox
public class SwiftFlutterShazamKitPlugin: NSObject, FlutterPlugin {
private var session: SHSession?
private let audioEngine = AVAudioEngine()
private let playerNode = AVAudioPlayerNode()
private let mixerNode = AVAudioMixerNode()
private var callbackChannel: FlutterMethodChannel?
private var sampleRate = 44800
public static func register(with registrar: FlutterPluginRegistrar) {
let channel = FlutterMethodChannel(name: "flutter_shazam_kit", binaryMessenger: registrar.messenger())
let instance = SwiftFlutterShazamKitPlugin(callbackChannel: FlutterMethodChannel(name: "flutter_shazam_kit_callback", binaryMessenger: registrar.messenger()))
registrar.addMethodCallDelegate(instance, channel: channel)
}
init(callbackChannel: FlutterMethodChannel? = nil) {
self.callbackChannel = callbackChannel
}
public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) {
switch call.method {
case "configureShazamKitSession":
let args = call.arguments as! Dictionary<String, Any>
configureShazamKitSession(
customCatalogPath: args["customCatalogPath"] as? String,
sampleRate: args["sampleRate"] as! Int
)
result(nil)
case "startDetectionWithMicrophone":
do{
configureAudio()
try startListening(result: result)
}catch{
callbackChannel?.invokeMethod("didHasError", arguments: error.localizedDescription)
}
case "endDetectionWithMicrophone":
stopListening()
result(nil)
case "endSession":
session = nil
result(nil)
default:
result(nil)
}
}
}
//MARK: ShazamKit session delegation here
//MARK: Methods for AVAudio
extension SwiftFlutterShazamKitPlugin {
func configureShazamKitSession(customCatalogPath: String?, sampleRate: Int) {
self.sampleRate = sampleRate
do {
if session == nil {
if (customCatalogPath == nil) {
session = SHSession()
} else {
let documentsUrl = FileManager.default.urls(
for: .documentDirectory,
in: .userDomainMask
).first!
let catalog = SHCustomCatalog()
try catalog.add(from: URL(fileURLWithPath: customCatalogPath!))
session = SHSession(catalog: catalog)
}
session?.delegate = self
}
} catch let error {
callbackChannel?.invokeMethod("didHasError",
arguments: "configureShazamKitSession() failed")
}
}
func addAudio(buffer: AVAudioPCMBuffer, audioTime: AVAudioTime) {
// Add the audio to the current match request
session?.matchStreamingBuffer(buffer, at: audioTime)
}
func configureAudio() {
playerNode.stop()
audioEngine.stop()
let inputFormat = audioEngine.inputNode.inputFormat(forBus: 0)
// Set an output format compatible with ShazamKit.
let outputFormat = AVAudioFormat(standardFormatWithSampleRate: Double(sampleRate), channels: 1)
// Create a mixer node to convert the input.
audioEngine.attach(mixerNode)
// Attach the mixer to the microphone input and the output of the audio engine.
audioEngine.connect(audioEngine.inputNode, to: mixerNode, format: inputFormat)
// audioEngine.connect(mixerNode, to: audioEngine.outputNode, format: outputFormat)
// Install a tap on the mixer node to capture the microphone audio.
mixerNode.installTap(onBus: 0,
bufferSize: 8192,
format: outputFormat) { buffer, audioTime in
// Add captured audio to the buffer used for making a match.
self.addAudio(buffer: buffer, audioTime: audioTime)
}
}
func startListening(result: FlutterResult) throws {
guard session != nil else{
callbackChannel?.invokeMethod("didHasError", arguments: "ShazamSession not found, please call configureShazamKitSession() first to initialize it.")
result(nil)
return
}
callbackChannel?.invokeMethod("detectStateChanged", arguments: 1)
// Throw an error if the audio engine is already running.
guard !audioEngine.isRunning else {
callbackChannel?.invokeMethod("didHasError", arguments: "Audio engine is currently running, please stop the audio engine first and then try again")
return
}
let audioSession = AVAudioSession.sharedInstance()
do {
try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
} catch {
print("audioSession properties weren't set because of an error.")
delegate?.showFeedbackError(title: "Sorry", message: "Mic is busy")
return false
}
// Ask the user for permission to use the mic if required then start the engine.
try audioSession.setCategory(.playAndRecord)
audioSession.requestRecordPermission { [weak self] success in
guard success else {
self?.callbackChannel?.invokeMethod("didHasError", arguments: "Recording permission not found, please allow permission first and then try again")
return
}
do{
self?.audioEngine.prepare()
try self?.audioEngine.start()
}catch{
self?.callbackChannel?.invokeMethod("didHasError", arguments: "Can't start the audio engine")
}
}
result(nil)
}
func stopListening() {
callbackChannel?.invokeMethod("detectStateChanged", arguments: 0)
// Check if the audio engine is already recording.
mixerNode.removeTap(onBus: 0)
audioEngine.stop()
}
}
//MARK: Delegate methods for SHSession
extension SwiftFlutterShazamKitPlugin: SHSessionDelegate{
public func session(_ session: SHSession, didFind match: SHMatch) {
var mediaItems: [[String: Any]] = []
match.mediaItems.forEach { rawItem in
var item: [String : Any] = [:]
var count: UInt32 = 0
let properties = class_copyPropertyList(class_getSuperclass(rawItem.classForCoder), &count)
for i in 0..<count {
guard let property = properties?[Int(i)] else { continue }
let name = String(cString: property_getName(property))
if (name == "properties") {
let props = rawItem.value(forKey: name) as! NSDictionary
for property in props.allKeys {
let prop = property as! String
var val = props.value(forKey: prop)!
if (String(describing: type(of: val)) == "__NSTaggedDate") {
let dateFormatter = DateFormatter()
dateFormatter.dateFormat = "yyyy-MM-dd'T'HH:mm:ss.SSSZ"
val = dateFormatter.string(from: val as! Date)
}
item[prop] = val
}
}
}
mediaItems.append(item)
free(properties)
}
do {
let jsonData = try JSONSerialization.data(withJSONObject: mediaItems)
let jsonString = String(data: jsonData, encoding: .utf8)
self.callbackChannel?.invokeMethod("matchFound", arguments: jsonString)
} catch {
callbackChannel?.invokeMethod("didHasError", arguments: "Error when trying to format data, please try again")
}
}
public func session(_ session: SHSession, didNotFindMatchFor signature: SHSignature, error: Error?) {
callbackChannel?.invokeMethod("notFound", arguments: nil)
callbackChannel?.invokeMethod("didHasError", arguments: error?.localizedDescription)
}
}
Your help is greatly appreciated
https://pub.dev/packages/flutter_shazam_kit
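For what it's worth, IsFormatSampleRateAndChannelCountValid(format) usually fires when installTap (or connect) is handed a format that doesn't match the node being tapped, or when the input node reports 0 Hz / 0 channels because the audio session or microphone permission isn't configured yet. A minimal sketch of tapping with the node's own hardware format (installMicTap is an invented helper name, not part of the plugin above):
import AVFoundation

// Sketch: install the tap using the input node's own format so the
// format-validity assertion cannot fire; convert afterwards if a specific
// sample rate is required for matching.
func installMicTap(on engine: AVAudioEngine,
                   handler: @escaping (AVAudioPCMBuffer, AVAudioTime) -> Void) {
    let inputNode = engine.inputNode
    let tapFormat = inputNode.outputFormat(forBus: 0)   // e.g. 48 kHz, 1 channel on device
    inputNode.removeTap(onBus: 0)
    inputNode.installTap(onBus: 0, bufferSize: 8192, format: tapFormat) { buffer, audioTime in
        handler(buffer, audioTime)                       // e.g. session?.matchStreamingBuffer(buffer, at: audioTime)
    }
}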

mAudioEngine.start() crash in iOS

Terminating app due to uncaught exception
'com.apple.coreaudio.avfaudio', reason: 'required condition is false:
_engine->IsRunning()'
Getting a crash on an iPhone 13 mini:
public class SwiftSoundStreamPlugin: NSObject, FlutterPlugin {
private var channel: FlutterMethodChannel
private var registrar: FlutterPluginRegistrar
private var hasPermission: Bool = false
private var debugLogging: Bool = false
//========= Recorder's vars
private let mAudioEngine = AVAudioEngine()
private let mRecordBus = 0
private var mInputNode: AVAudioInputNode
private var mRecordSampleRate: Double = 16000 // 16Khz
private var mRecordBufferSize: AVAudioFrameCount = 8192
private var mRecordChannel = 0
private var mRecordSettings: [String:Int]!
private var mRecordFormat: AVAudioFormat!
//========= Player's vars
private let PLAYER_OUTPUT_SAMPLE_RATE: Double = 44100 // 44.1 kHz
private let mPlayerBus = 0
private let mPlayerNode = AVAudioPlayerNode()
private var mPlayerSampleRate: Double = 44100 // 44.1 kHz
private var mPlayerBufferSize: AVAudioFrameCount = 127000
private var mPlayerOutputFormat: AVAudioFormat!
private var mPlayerInputFormat: AVAudioFormat!
/** ======== Basic Plugin initialization ======== **/
public static func register(with registrar: FlutterPluginRegistrar) {
let channel = FlutterMethodChannel(name: "vn.casperpas.sound_stream:methods", binaryMessenger: registrar.messenger())
let instance = SwiftSoundStreamPlugin( channel, registrar: registrar)
registrar.addMethodCallDelegate(instance, channel: channel)
}
init( _ channel: FlutterMethodChannel, registrar: FlutterPluginRegistrar ) {
self.channel = channel
self.registrar = registrar
self.mInputNode = mAudioEngine.inputNode
super.init()
self.attachPlayer()
mAudioEngine.prepare()
}
public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) {
switch call.method {
case "hasPermission":
hasPermission(result)
case "initializeRecorder":
initializeRecorder(call, result)
case "startRecording":
startRecording(result)
case "stopRecording":
stopRecording(result)
case "setBufferSize":
setBufferSize(call, result)
case "initializePlayer":
initializePlayer(call, result)
case "startPlayer":
startPlayer(result)
case "stopPlayer":
stopPlayer(result)
case "writeChunk":
writeChunk(call, result)
default:
print("Unrecognized method: \(call.method)")
sendResult(result, FlutterMethodNotImplemented)
}
}
private func sendResult(_ result: @escaping FlutterResult, _ arguments: Any?) {
DispatchQueue.main.async {
result( arguments )
}
}
private func invokeFlutter( _ method: String, _ arguments: Any? ) {
DispatchQueue.main.async {
self.channel.invokeMethod( method, arguments: arguments )
}
}
/** ======== Plugin methods ======== **/
private func checkAndRequestPermission(completion callback: @escaping ((Bool) -> Void)) {
if (hasPermission) {
callback(hasPermission)
return
}
var permission: AVAudioSession.RecordPermission
#if swift(>=4.2)
permission = AVAudioSession.sharedInstance().recordPermission
#else
permission = AVAudioSession.sharedInstance().recordPermission()
#endif
switch permission {
case .granted:
print("granted")
hasPermission = true
callback(hasPermission)
break
case .denied:
print("denied")
hasPermission = false
callback(hasPermission)
break
case .undetermined:
print("undetermined")
AVAudioSession.sharedInstance().requestRecordPermission() { [unowned self] allowed in
if allowed {
self.hasPermission = true
print("undetermined true")
callback(self.hasPermission)
} else {
self.hasPermission = false
print("undetermined false")
callback(self.hasPermission)
}
}
break
default:
callback(hasPermission)
break
}
}
private func hasPermission( _ result: @escaping FlutterResult) {
checkAndRequestPermission { value in
self.sendResult(result, value)
}
}
private func startEngine() {
guard !mAudioEngine.isRunning else {
return
}
if mAudioEngine.outputNode.outputFormat(forBus: mPlayerBus).channelCount == 0 {
// if count is 0 then it throws a exception or crash
mAudioEngine.reset()
/*
or try changing object
mAudioEngine = AVAudioEngine()
*/
}
mAudioEngine.reset()
try? mAudioEngine.start()
}
private func stopEngine() {
mAudioEngine.stop()
mAudioEngine.reset()
}
private func sendEventMethod(_ name: String, _ data: Any) {
var eventData: [String: Any] = [:]
eventData["name"] = name
eventData["data"] = data
invokeFlutter("platformEvent", eventData)
}
private func initializeRecorder(_ call: FlutterMethodCall, _ result: @escaping FlutterResult) {
guard let argsArr = call.arguments as? Dictionary<String,AnyObject>
else {
sendResult(result, FlutterError( code: SoundStreamErrors.Unknown.rawValue,
message:"Incorrect parameters",
details: nil ))
return
}
mRecordSampleRate = argsArr["sampleRate"] as? Double ?? mRecordSampleRate
debugLogging = argsArr["showLogs"] as? Bool ?? debugLogging
mRecordFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatInt16, sampleRate: mRecordSampleRate, channels: 2, interleaved: true)
checkAndRequestPermission { isGranted in
if isGranted {
self.sendRecorderStatus(SoundStreamStatus.Initialized)
self.sendResult(result, true)
} else {
self.sendResult(result, FlutterError( code: SoundStreamErrors.Unknown.rawValue,
message:"Incorrect parameters",
details: nil ))
}
}
}
private func resetEngineForRecord() {
mAudioEngine.inputNode.removeTap(onBus: mRecordBus)
let input = mAudioEngine.inputNode
let inputFormat = input.outputFormat(forBus: mRecordBus)
let converter = AVAudioConverter(from: inputFormat, to: mRecordFormat!)!
let ratio: Float = Float(inputFormat.sampleRate)/Float(mRecordFormat.sampleRate)
input.installTap(onBus: mRecordBus, bufferSize: mRecordBufferSize, format: inputFormat) { (buffer, time) -> Void in
let inputCallback: AVAudioConverterInputBlock = { inNumPackets, outStatus in
outStatus.pointee = .haveData
return buffer
}
let convertedBuffer = AVAudioPCMBuffer(pcmFormat: self.mRecordFormat!, frameCapacity: UInt32(Float(buffer.frameCapacity) / ratio))!
var error: NSError?
let status = converter.convert(to: convertedBuffer, error: &error, withInputFrom: inputCallback)
assert(status != .error)
if (self.mRecordFormat?.commonFormat == AVAudioCommonFormat.pcmFormatInt16) {
let values = self.audioBufferToBytes(convertedBuffer)
self.sendMicData(values)
}
}
}
private func startRecording(_ result: @escaping FlutterResult) {
resetEngineForRecord()
startEngine()
sendRecorderStatus(SoundStreamStatus.Playing)
result(true)
}
private func stopRecording(_ result: @escaping FlutterResult) {
mAudioEngine.inputNode.removeTap(onBus: mRecordBus)
sendRecorderStatus(SoundStreamStatus.Stopped)
result(true)
}
private func sendMicData(_ data: [UInt8]) {
let channelData = FlutterStandardTypedData(bytes: NSData(bytes: data, length: data.count) as Data)
sendEventMethod("dataPeriod", channelData)
}
private func sendRecorderStatus(_ status: SoundStreamStatus) {
sendEventMethod("recorderStatus", status.rawValue)
}
private func setBufferSize(_ call: FlutterMethodCall, _ result: @escaping FlutterResult) {
guard let argsArr = call.arguments as? Dictionary<String,AnyObject>
else {
sendResult(result, FlutterError( code: SoundStreamErrors.Unknown.rawValue,
message:"Incorrect parameters",
details: nil ))
return
}
mPlayerBufferSize = argsArr["bufferSize"] as? AVAudioFrameCount ?? mPlayerBufferSize
// result(true)
}
private func initializePlayer(_ call: FlutterMethodCall, _ result: @escaping FlutterResult) {
guard let argsArr = call.arguments as? Dictionary<String,AnyObject>
else {
sendResult(result, FlutterError( code: SoundStreamErrors.Unknown.rawValue,
message:"Incorrect parameters",
details: nil ))
return
}
mPlayerSampleRate = argsArr["sampleRate"] as? Double ?? mPlayerSampleRate
debugLogging = argsArr["showLogs"] as? Bool ?? debugLogging
mPlayerInputFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatInt16, sampleRate: mPlayerSampleRate, channels: 1, interleaved: true)
sendPlayerStatus(SoundStreamStatus.Initialized)
}
private func attachPlayer() {
mPlayerOutputFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatFloat32, sampleRate: PLAYER_OUTPUT_SAMPLE_RATE, channels: 1, interleaved: true)
mAudioEngine.attach(mPlayerNode)
mAudioEngine.connect(mPlayerNode, to: mAudioEngine.outputNode, format: mPlayerOutputFormat)
// mAudioEngine.connect(mPlayerNode, to: mAudioEngine.mainMixerNode, format: mPlayerOutputFormat)
}
private func startPlayer(_ result: @escaping FlutterResult) {
startEngine()
if !mPlayerNode.isPlaying {
mPlayerNode.play()
}
sendPlayerStatus(SoundStreamStatus.Playing)
result(true)
}
private func stopPlayer(_ result: @escaping FlutterResult) {
if mPlayerNode.isPlaying {
mPlayerNode.stop()
}
sendPlayerStatus(SoundStreamStatus.Stopped)
result(true)
}
private func sendPlayerStatus(_ status: SoundStreamStatus) {
sendEventMethod("playerStatus", status.rawValue)
}
private func writeChunk(_ call: FlutterMethodCall, _ result: @escaping FlutterResult) {
guard let argsArr = call.arguments as? Dictionary<String,AnyObject>,
let data = argsArr["data"] as? FlutterStandardTypedData
else {
sendResult(result, FlutterError( code: SoundStreamErrors.FailedToWriteBuffer.rawValue,
message:"Failed to write Player buffer",
details: nil ))
return
}
let byteData = [UInt8](data.data)
pushPlayerChunk(byteData, result)
}
private func pushPlayerChunk(_ chunk: [UInt8], _ result: @escaping FlutterResult) {
let buffer = bytesToAudioBuffer(chunk)
mPlayerNode.scheduleBuffer(convertBufferFormat(
buffer,
from: mPlayerInputFormat,
to: mPlayerOutputFormat
));
result(true)
}
private func convertBufferFormat(_ buffer: AVAudioPCMBuffer, from: AVAudioFormat, to: AVAudioFormat) -> AVAudioPCMBuffer {
let formatConverter = AVAudioConverter(from: from, to: to)
let ratio: Float = Float(from.sampleRate)/Float(to.sampleRate)
let pcmBuffer = AVAudioPCMBuffer(pcmFormat: to, frameCapacity: UInt32(Float(buffer.frameCapacity) / ratio))!
var error: NSError? = nil
let inputBlock: AVAudioConverterInputBlock = {inNumPackets, outStatus in
outStatus.pointee = .haveData
return buffer
}
formatConverter?.convert(to: pcmBuffer, error: &error, withInputFrom: inputBlock)
return pcmBuffer
}
private func audioBufferToBytes(_ audioBuffer: AVAudioPCMBuffer) -> [UInt8] {
let srcLeft = audioBuffer.int16ChannelData![0]
let bytesPerFrame = audioBuffer.format.streamDescription.pointee.mBytesPerFrame
let numBytes = Int(bytesPerFrame * audioBuffer.frameLength)
// initialize bytes by 0
var audioByteArray = [UInt8](repeating: 0, count: numBytes)
srcLeft.withMemoryRebound(to: UInt8.self, capacity: numBytes) { srcByteData in
audioByteArray.withUnsafeMutableBufferPointer {
$0.baseAddress!.initialize(from: srcByteData, count: numBytes)
}
}
return audioByteArray
}
private func bytesToAudioBuffer(_ buf: [UInt8]) -> AVAudioPCMBuffer {
let frameLength = UInt32(buf.count) / mPlayerInputFormat.streamDescription.pointee.mBytesPerFrame
let audioBuffer = AVAudioPCMBuffer(pcmFormat: mPlayerInputFormat, frameCapacity: frameLength)!
audioBuffer.frameLength = frameLength
let dstLeft = audioBuffer.int16ChannelData![0]
buf.withUnsafeBufferPointer {
let src = UnsafeRawPointer($0.baseAddress!).bindMemory(to: Int16.self, capacity: Int(frameLength))
dstLeft.initialize(from: src, count: Int(frameLength))
}
return audioBuffer
}
}
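For what it's worth, "required condition is false: _engine->IsRunning()" is typically raised when an AVAudioPlayerNode is asked to play (or is scheduled) while its engine is not actually running, e.g. because the try? mAudioEngine.start() above failed silently or the engine was stopped by a route change. A hedged sketch of guarding the player calls (the function name safelyStartPlayer is invented; the node names mirror the plugin above):
import AVFoundation

// Sketch: never call play() on a player node whose engine is not running.
func safelyStartPlayer(engine: AVAudioEngine, player: AVAudioPlayerNode) {
    if !engine.isRunning {
        engine.prepare()
        do {
            try engine.start()                 // surfaces the error instead of try?
        } catch {
            print("engine.start() failed: \(error)")
            return                             // bail out; play() here would trip the assertion
        }
    }
    if !player.isPlaying {
        player.play()
    }
}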

I can't record using AVAudioEngine

AVAudioSession.sharedInstance().setCategory(.playAndRecord, mode: .default)
I can record using the above code.
However, during playback, only the speaker at the top of the iPhone (the earpiece) produces sound.
So I would like to add defaultToSpeaker to the options.
However, with that option added, recording no longer works.
Is there a solution?
Please help me.
Here is the code I wrote.
final class RecordEngine {
private var engine: AVAudioEngine!
private var mixer: AVAudioMixerNode!
private var player: AVAudioPlayerNode!
private var outputFile: AVAudioFile!
let session = AVAudioSession.sharedInstance()
init() {
prepareAVAudioSession()
prepareNodes()
prepare()
}
func start() {
try! engine.start()
}
func stop() {
engine.pause()
engine.reset()
}
private func prepareAVAudioSession() {
do {
// try session.setCategory(.playAndRecord, mode: .default, options: [.allowBluetooth, .allowBluetoothA2DP])
try session.setCategory(.playAndRecord, mode: .default, options: .defaultToSpeaker)
try session.setActive(true)
} catch {
}
}
private func prepareNodes() {
engine = AVAudioEngine()
mixer = AVAudioMixerNode()
player = AVAudioPlayerNode()
engine.attach(mixer)
engine.attach(player)
}
private func prepare() {
let input = engine.inputNode
let mainMixer = engine.mainMixerNode
let format = input.outputFormat(forBus: 0)
engine.connect(player, to: mainMixer, format: format)
engine.connect(input, to: mixer, format: format)
engine.prepare()
}
func startRecord() {
start()
let format = mixer.outputFormat(forBus: 0)
let outputFileURL = URL(string: NSTemporaryDirectory() + "temp.caf")!
do {
outputFile = try AVAudioFile(forWriting: outputFileURL, settings: format.settings)
} catch {
print(error)
}
mixer.installTap(onBus: 0, bufferSize: 1024, format: format) { [weak self] buffer, when in
do {
try self?.outputFile.write(from: buffer)
print(buffer)
} catch {
print(error)
}
}
}
func stopRecord() {
mixer.removeTap(onBus: 0)
print(outputFile)
stop()
}
func startPlaying() {
start()
player.scheduleFile(outputFile, at: nil) {
print("complete")
}
player.play()
}
func pausePlaying() {
player.pause()
}
}
The problem isn't defaultToSpeaker but rather the mixer setup. You've got a mixer (the one you install the tap on) with no output connection, in addition to the mainMixerNode.
Do you need both mixers? If so, you could connect mixer to mainMixer:
engine.connect(mixer, to: mainMixer, format: format)
or remove mixer and use mainMixer everywhere, or some third option that doesn't produce feedback.
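A fuller sketch of the revised prepare(), keeping the node names from RecordEngine above and muting the tapped mixer so the mic isn't monitored out loud (the outputVolume = 0 line is just one way to avoid feedback, not the only one):
private func prepare() {
    let input = engine.inputNode
    let mainMixer = engine.mainMixerNode
    let format = input.outputFormat(forBus: 0)
    engine.connect(player, to: mainMixer, format: format)
    engine.connect(input, to: mixer, format: format)
    engine.connect(mixer, to: mainMixer, format: format)  // give the tapped mixer an output
    mixer.outputVolume = 0                                 // the tap still receives audio, nothing is heard
    engine.prepare()
}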

How to get Audio recording buffer data live?

I am working on getting audio data from the iPhone mic and sending it to a socket. I have already tried AVAudioEngine to get the audio buffer, but somehow it's not working, so can you please suggest a better way to get the recording buffer data live?
override func viewDidLoad() {
super.viewDidLoad()
// initialize engine
engine = AVAudioEngine()
guard nil != engine?.inputNode else {
// #TODO: error out
return
}
SocketIOManager.sharedInstance.socket.on("listen") {data, ack in
let BuffurData:Data = data[0] as! Data
// let playData = self?.audioBufferToNSData(PCMBuffer: BuffurData as! AVAudioPCMBuffer)
do {
// let data = NSData(bytes: &BuffurData, length: BuffurData.count)
let player = try AVAudioPlayer(data:BuffurData)
player.play()
} catch let error as NSError {
print(error.description)
}
print("socket connected \(data)")
}
}
func installTap() {
engine = AVAudioEngine()
guard let engine = engine, let input = engine.inputNode else {
// #TODO: error out
return
}
let format = input.inputFormat(forBus: 0)
input.installTap(onBus: 0, bufferSize:4096, format:format, block: { [weak self] buffer, when in
guard let this = self else {
return
}
// writing to file: for testing purposes only
do {
try this.file!.write(from: buffer)
} catch {
}
if let channel1Buffer = buffer.floatChannelData?[0] {
let test = self?.copyAudioBufferBytes(buffer)
let stram = self?.toNSData(PCMBuffer: buffer)
SocketIOManager.sharedInstance.socket.emit("talk",stram!);
// socket.on("listen", function (data)
/*! @property floatChannelData
@abstract Access the buffer's float audio samples.
@discussion
floatChannelData returns pointers to the buffer's audio samples if the buffer's format is
32-bit float, or nil if it is another format.
The returned pointer is to format.channelCount pointers to float. Each of these pointers
is to "frameLength" valid samples, which are spaced by "stride" samples.
If format.interleaved is false (as with the standard deinterleaved float format), then
the pointers will be to separate chunks of memory. "stride" is 1.
If format.interleaved is true, then the pointers will refer into the same chunk of interleaved
samples, each offset by 1 frame. "stride" is the number of interleaved channels.
*/
// #TODO: send data, better to pass into separate queue for processing
}
})
engine.prepare()
do {
try engine.start()
} catch {
// #TODO: error out
}
}
Try this code:
var audioPlayerQueue = DispatchQueue(label: "audioPlayerQueue", qos: DispatchQoS.userInteractive)
var peerAudioEngine = AVAudioEngine() // engine used to play back the received audio
var peerAudioPlayer: AVAudioPlayerNode = AVAudioPlayerNode()
var peerInputFormat: AVAudioFormat?
override func viewDidLoad() {
super.viewDidLoad()
// initialize engine
engine = AVAudioEngine()
guard nil != engine?.inputNode else {
// #TODO: error out
return
}
self.peerAudioEngine.attach(self.peerAudioPlayer)
self.peerInputFormat = AVAudioFormat.init(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false)
self.peerAudioEngine.connect(peerAudioPlayer, to: self.peerAudioEngine.mainMixerNode, format: self.peerInputFormat)
do {
peerAudioEngine.prepare()
try peerAudioEngine.start()
} catch let error {
print(error.localizedDescription)
}
SocketIOManager.sharedInstance.socket.on("listen") { data, ack in
let pcmBuffer = self.toPCMBuffer(data: data[0] as! NSData)
self.audioPlayerQueue.async {
self.peerAudioPlayer.scheduleBuffer(pcmBuffer, completionHandler: nil)
if self.peerAudioEngine.isRunning {
self.peerAudioPlayer.play()
} else {
do {
try self.peerAudioEngine.start()
} catch {
print(error.localizedDescription)
}
}
}
print("socket connected \(data)")
}
}
func toPCMBuffer(data: NSData) -> AVAudioPCMBuffer {
let audioFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false) // given NSData audio format
let PCMBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: UInt32(data.length) / audioFormat.streamDescription.pointee.mBytesPerFrame)
PCMBuffer.frameLength = PCMBuffer.frameCapacity
let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData, count: Int(PCMBuffer.format.channelCount))
data.getBytes(UnsafeMutableRawPointer(channels[0]) , length: data.length)
return PCMBuffer
}
func installTap() {
engine = AVAudioEngine()
guard let engine = engine, let input = engine.inputNode else {
// #TODO: error out
return
}
let format = input.inputFormat(forBus: 0)
input.installTap(onBus: 0, bufferSize: 4410, format: format, block: { [weak self] (buffer: AVAudioPCMBuffer, _: AVAudioTime) in
guard let this = self else {
return
}
let stram = this.toNSData(PCMBuffer: buffer)
SocketIOManager.sharedInstance.socket.emit("talk", stram)
})
do {
engine.prepare()
try engine.start()
} catch {
// #TODO: error out
}
}
// Edit: to enable the loudspeaker
func speakerEnabled(_ enabled:Bool) -> Bool {
let session = AVAudioSession.sharedInstance()
var options = session.categoryOptions
if (enabled) {
options.insert(.defaultToSpeaker)
} else {
options.remove(.defaultToSpeaker)
}
try! session.setCategory(AVAudioSessionCategoryPlayAndRecord, with: options)
return true
}
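The toNSData(PCMBuffer:) helper that the tap calls isn't shown in the answer; a minimal sketch of the inverse of toPCMBuffer(data:), assuming the same mono Float32 format:
import AVFoundation

// Sketch: flatten a mono Float32 buffer back into NSData for the socket.
func toNSData(PCMBuffer: AVAudioPCMBuffer) -> NSData {
    let samples = PCMBuffer.floatChannelData![0]          // channel 0 holds all samples (mono)
    let byteCount = Int(PCMBuffer.frameLength * PCMBuffer.format.streamDescription.pointee.mBytesPerFrame)
    return NSData(bytes: samples, length: byteCount)
}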

EZAudio doesn't work: Thread 1 EXC_BAD_ACCESS while creating an EZRecorder instance

My complete implementation of EZAudio:
class ViewController: UIViewController, EZMicrophoneDelegate, EZRecorderDelegate {
@IBOutlet var recordingAudioPlot: EZAudioPlot!
private var isRecording = false {
didSet {
if isRecording {
player.pause()
recordingAudioPlot.clear()
microphone.startFetchingAudio()
recorder = EZRecorder(url: filePathUrl(), clientFormat: microphone.audioStreamBasicDescription(), fileType: EZRecorderFileType.M4A, delegate: self)
// ** Here is where the error occurs **
} else {
recorder.delegate = nil
microphone.stopFetchingAudio()
recorder.closeAudioFile()
player.playAudioFile(EZAudioFile(url: filePathUrl()))
}
}
}
private var microphone = EZMicrophone()
private var recorder = EZRecorder()
private var player = EZAudioPlayer()
@IBAction func startStopRecordingButtonTapped(_ sender: UIButton) {
isRecording = !isRecording
}
override func viewDidLoad() {
super.viewDidLoad()
let session = AVAudioSession.sharedInstance()
try! session.setCategory(AVAudioSessionCategoryPlayAndRecord)
try! session.setActive(true)
microphone.delegate = self
try! session.overrideOutputAudioPort(.speaker)
}
func microphone(_ microphone: EZMicrophone!, hasAudioReceived buffer: UnsafeMutablePointer<UnsafeMutablePointer<Float>?>!, withBufferSize bufferSize: UInt32, withNumberOfChannels numberOfChannels: UInt32) {
DispatchQueue.main.async {
self.recordingAudioPlot.updateBuffer(buffer[0], withBufferSize: bufferSize)
}
}
func microphone(_ microphone: EZMicrophone!, hasBufferList bufferList: UnsafeMutablePointer<AudioBufferList>!, withBufferSize bufferSize: UInt32, withNumberOfChannels numberOfChannels: UInt32) {
if isRecording {
recorder.appendData(from: bufferList, withBufferSize: bufferSize)
}
}
private func filePathUrl() -> URL {
let path = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true).first ?? ""
return URL(fileURLWithPath: String(format: "%#/%#", path, "pathtofile.m4a"))
}
}
The error is the EXC_BAD_ACCESS from the title, raised on the line marked above.
What goes wrong?
The solution is to declare recorder as an optional type instead of eagerly creating an instance:
private var recorder: EZRecorder?
Something goes wrong the first time the initially created recorder is deallocated and replaced; starting from nil, there is nothing to deallocate, so the error no longer occurs.
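A sketch of the adjusted property and didSet with the optional recorder (same members as the ViewController above; the other microphone delegate callback then becomes recorder?.appendData(from:withBufferSize:)):
private var recorder: EZRecorder?

private var isRecording = false {
    didSet {
        if isRecording {
            player.pause()
            recordingAudioPlot.clear()
            microphone.startFetchingAudio()
            recorder = EZRecorder(url: filePathUrl(),
                                  clientFormat: microphone.audioStreamBasicDescription(),
                                  fileType: EZRecorderFileType.M4A,
                                  delegate: self)
        } else {
            recorder?.delegate = nil
            microphone.stopFetchingAudio()
            recorder?.closeAudioFile()
            player.playAudioFile(EZAudioFile(url: filePathUrl()))
        }
    }
}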
