How to get audio recording buffer data live? - iOS

I am working on getting audio data from the iPhone mic and sending it to a socket. I have already tried AVAudioEngine to get the audio buffer, but somehow it's not working, so can you please suggest a better way to get live recording buffer data?
override func viewDidLoad() {
super.viewDidLoad()
// initialize engine
engine = AVAudioEngine()
guard nil != engine?.inputNode else {
// #TODO: error out
return
}
SocketIOManager.sharedInstance.socket.on("listen") {data, ack in
let BuffurData:Data = data[0] as! Data
// let playData = self?.audioBufferToNSData(PCMBuffer: BuffurData as! AVAudioPCMBuffer)
do {
// let data = NSData(bytes: &BuffurData, length: BuffurData.count)
let player = try AVAudioPlayer(data:BuffurData)
player.play()
} catch let error as NSError {
print(error.description)
}
print("socket connected \(data)")
}
}
func installTap() {
engine = AVAudioEngine()
guard let engine = engine, let input = engine.inputNode else {
// #TODO: error out
return
}
let format = input.inputFormat(forBus: 0)
input.installTap(onBus: 0, bufferSize:4096, format:format, block: { [weak self] buffer, when in
guard let this = self else {
return
}
// writing to file: for testing purposes only
do {
try this.file!.write(from: buffer)
} catch {
}
if let channel1Buffer = buffer.floatChannelData?[0] {
let test = self?.copyAudioBufferBytes(buffer)
let stram = self?.toNSData(PCMBuffer: buffer)
SocketIOManager.sharedInstance.socket.emit("talk",stram!);
// socket.on("listen", function (data)
/*! @property floatChannelData
@abstract Access the buffer's float audio samples.
@discussion
floatChannelData returns pointers to the buffer's audio samples if the buffer's format is
32-bit float, or nil if it is another format.
The returned pointer is to format.channelCount pointers to float. Each of these pointers
is to "frameLength" valid samples, which are spaced by "stride" samples.
If format.interleaved is false (as with the standard deinterleaved float format), then
the pointers will be to separate chunks of memory. "stride" is 1.
If format.interleaved is true, then the pointers will refer into the same chunk of interleaved
samples, each offset by 1 frame. "stride" is the number of interleaved channels.
*/
// #TODO: send data, better to pass into separate queue for processing
}
})
engine.prepare()
do {
try engine.start()
} catch {
// #TODO: error out
}
}

Try this code:
var audioPlayerQueue = DispatchQueue(label: "audioPlayerQueue", qos: DispatchQoS.userInteractive)
var peerAudioEngine = AVAudioEngine()
var peerAudioPlayer: AVAudioPlayerNode = AVAudioPlayerNode()
var peerInputFormat: AVAudioFormat?
override func viewDidLoad() {
super.viewDidLoad()
// initialize engine
peerAudioEngine = AVAudioEngine()
guard nil != peerAudioEngine.inputNode else {
// #TODO: error out
return
}
peerAudioEngine.attach(self.peerAudioPlayer)
self.peerInputFormat = AVAudioFormat.init(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false)
self.peerAudioEngine.connect(peerAudioPlayer, to: self.peerAudioEngine.mainMixerNode, format: self.peerInputFormat)
do {
peerAudioEngine.prepare()
try peerAudioEngine.start()
} catch let error {
print(error.localizedDescription)
}
SocketIOManager.sharedInstance.socket.on("listen") { data, ack in
let pcmBuffer = self.toPCMBuffer(data: data[0] as! NSData)
self.audioPlayerQueue.async {
self.peerAudioPlayer.scheduleBuffer(pcmBuffer, completionHandler: nil)
if self.peerAudioEngine.isRunning {
self.peerAudioPlayer.play()
} else {
do {
try self.peerAudioEngine.start()
} catch {
print(error.localizedDescription)
}
}
}
print("socket connected \(data)")
}
}
func toPCMBuffer(data: NSData) -> AVAudioPCMBuffer {
let audioFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false) // given NSData audio format
let PCMBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: UInt32(data.length) / audioFormat.streamDescription.pointee.mBytesPerFrame)
PCMBuffer.frameLength = PCMBuffer.frameCapacity
let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData, count: Int(PCMBuffer.format.channelCount))
data.getBytes(UnsafeMutableRawPointer(channels[0]) , length: data.length)
return PCMBuffer
}
func installTap() {
engine = AVAudioEngine()
guard let engine = engine, let input = engine.inputNode else {
// #TODO: error out
return
}
let format = input.inputFormat(forBus: 0)
input.installTap(onBus: 0, bufferSize: 4410, format: format, block: { [weak self] (buffer: AVAudioPCMBuffer, time: AVAudioTime) in
guard let this = self else {
return
}
let stram = this.toNSData(PCMBuffer: buffer)
SocketIOManager.sharedInstance.socket.emit("talk", stram)
})
do {
engine.prepare()
try engine.start()
} catch {
// #TODO: error out
}
}
// Edit: to enable the loudspeaker
func speakerEnabled(_ enabled:Bool) -> Bool {
let session = AVAudioSession.sharedInstance()
var options = session.categoryOptions
if (enabled) {
options.insert(.defaultToSpeaker)
} else {
options.remove(.defaultToSpeaker)
}
try! session.setCategory(AVAudioSessionCategoryPlayAndRecord, with: options)
return true
}
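The toNSData(PCMBuffer:) helper that installTap calls isn't shown above; a minimal sketch, assuming the same mono, non-interleaved Float32 format used by toPCMBuffer (it is simply its inverse), could look like this:
func toNSData(PCMBuffer: AVAudioPCMBuffer) -> NSData {
    // Assumes a mono, non-interleaved Float32 buffer, matching the formats used above.
    let channelCount = 1
    let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData, count: channelCount)
    let length = Int(PCMBuffer.frameLength * PCMBuffer.format.streamDescription.pointee.mBytesPerFrame)
    return NSData(bytes: channels[0], length: length)
}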

Related

crash App iOS flutter when I use shazamkit package

I am using the shazamkit package to recognize sound in Flutter. The Android version works perfectly, but in the iOS version, when I start detection, I get this error:
ERROR: [0x190bf000] >avae> AVAudioNode.mm:568: CreateRecordingTap: required condition is false: IsFormatSampleRateAndChannelCountValid(format)
*** Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: IsFormatSampleRateAndChannelCountValid(format)'
This is my Swift code:
import Flutter
import UIKit
import ShazamKit
import AudioToolbox
public class SwiftFlutterShazamKitPlugin: NSObject, FlutterPlugin {
private var session: SHSession?
private let audioEngine = AVAudioEngine()
private let playerNode = AVAudioPlayerNode()
private let mixerNode = AVAudioMixerNode()
private var callbackChannel: FlutterMethodChannel?
private var sampleRate = 44800
public static func register(with registrar: FlutterPluginRegistrar) {
let channel = FlutterMethodChannel(name: "flutter_shazam_kit", binaryMessenger: registrar.messenger())
let instance = SwiftFlutterShazamKitPlugin(callbackChannel: FlutterMethodChannel(name: "flutter_shazam_kit_callback", binaryMessenger: registrar.messenger()))
registrar.addMethodCallDelegate(instance, channel: channel)
}
init(callbackChannel: FlutterMethodChannel? = nil) {
self.callbackChannel = callbackChannel
}
public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) {
switch call.method {
case "configureShazamKitSession":
let args = call.arguments as! Dictionary<String, Any>
configureShazamKitSession(
customCatalogPath: args["customCatalogPath"] as? String,
sampleRate: args["sampleRate"] as! Int
)
result(nil)
case "startDetectionWithMicrophone":
do{
configureAudio()
try startListening(result: result)
}catch{
callbackChannel?.invokeMethod("didHasError", arguments: error.localizedDescription)
}
case "endDetectionWithMicrophone":
stopListening()
result(nil)
case "endSession":
session = nil
result(nil)
default:
result(nil)
}
}
}
//MARK: ShazamKit session delegation here
//MARK: Methods for AVAudio
extension SwiftFlutterShazamKitPlugin {
func configureShazamKitSession(customCatalogPath: String?, sampleRate: Int) {
self.sampleRate = sampleRate
do {
if session == nil {
if (customCatalogPath == nil) {
session = SHSession()
} else {
let documentsUrl = FileManager.default.urls(
for: .documentDirectory,
in: .userDomainMask
).first!
let catalog = SHCustomCatalog()
try catalog.add(from: URL(fileURLWithPath: customCatalogPath!))
session = SHSession(catalog: catalog)
}
session?.delegate = self
}
} catch let error {
callbackChannel?.invokeMethod("didHasError",
arguments: "configureShazamKitSession() failed")
}
}
func addAudio(buffer: AVAudioPCMBuffer, audioTime: AVAudioTime) {
// Add the audio to the current match request
session?.matchStreamingBuffer(buffer, at: audioTime)
}
func configureAudio() {
playerNode.stop()
audioEngine.stop()
let inputFormat = audioEngine.inputNode.inputFormat(forBus: 0)
// Set an output format compatible with ShazamKit.
let outputFormat = AVAudioFormat(standardFormatWithSampleRate: Double(sampleRate), channels: 1)
// Create a mixer node to convert the input.
audioEngine.attach(mixerNode)
// Attach the mixer to the microphone input and the output of the audio engine.
audioEngine.connect(audioEngine.inputNode, to: mixerNode, format: inputFormat)
// audioEngine.connect(mixerNode, to: audioEngine.outputNode, format: outputFormat)
// Install a tap on the mixer node to capture the microphone audio.
mixerNode.installTap(onBus: 0,
bufferSize: 8192,
format: outputFormat) { buffer, audioTime in
// Add captured audio to the buffer used for making a match.
self.addAudio(buffer: buffer, audioTime: audioTime)
}
}
func startListening(result: FlutterResult) throws {
guard session != nil else{
callbackChannel?.invokeMethod("didHasError", arguments: "ShazamSession not found, please call configureShazamKitSession() first to initialize it.")
result(nil)
return
}
callbackChannel?.invokeMethod("detectStateChanged", arguments: 1)
// Throw an error if the audio engine is already running.
guard !audioEngine.isRunning else {
callbackChannel?.invokeMethod("didHasError", arguments: "Audio engine is currently running, please stop the audio engine first and then try again")
return
}
let audioSession = AVAudioSession.sharedInstance()
do {
try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
} catch {
print("audioSession properties weren't set because of an error.")
delegate?.showFeedbackError(title: "Sorry", message: "Mic is busy")
return false
}
// Ask the user for permission to use the mic if required then start the engine.
try audioSession.setCategory(.playAndRecord)
audioSession.requestRecordPermission { [weak self] success in
guard success else {
self?.callbackChannel?.invokeMethod("didHasError", arguments: "Recording permission not found, please allow permission first and then try again")
return
}
do{
self?.audioEngine.prepare()
try self?.audioEngine.start()
}catch{
self?.callbackChannel?.invokeMethod("didHasError", arguments: "Can't start the audio engine")
}
}
result(nil)
}
func stopListening() {
callbackChannel?.invokeMethod("detectStateChanged", arguments: 0)
// Check if the audio engine is already recording.
mixerNode.removeTap(onBus: 0)
audioEngine.stop()
}
}
//MARK: Delegate methods for SHSession
extension SwiftFlutterShazamKitPlugin: SHSessionDelegate{
public func session(_ session: SHSession, didFind match: SHMatch) {
var mediaItems: [[String: Any]] = []
match.mediaItems.forEach { rawItem in
var item: [String : Any] = [:]
var count: UInt32 = 0
let properties = class_copyPropertyList(class_getSuperclass(rawItem.classForCoder), &count)
for i in 0..<count {
guard let property = properties?[Int(i)] else { continue }
let name = String(cString: property_getName(property))
if (name == "properties") {
let props = rawItem.value(forKey: name) as! NSDictionary
for property in props.allKeys {
let prop = property as! String
var val = props.value(forKey: prop)!
if (String(describing: type(of: val)) == "__NSTaggedDate") {
let dateFormatter = DateFormatter()
dateFormatter.dateFormat = "yyyy-MM-dd'T'HH:mm:ss.SSSZ"
val = dateFormatter.string(from: val as! Date)
}
item[prop] = val
}
}
}
mediaItems.append(item)
free(properties)
}
do {
let jsonData = try JSONSerialization.data(withJSONObject: mediaItems)
let jsonString = String(data: jsonData, encoding: .utf8)
self.callbackChannel?.invokeMethod("matchFound", arguments: jsonString)
} catch {
callbackChannel?.invokeMethod("didHasError", arguments: "Error when trying to format data, please try again")
}
}
public func session(_ session: SHSession, didNotFindMatchFor signature: SHSignature, error: Error?) {
callbackChannel?.invokeMethod("notFound", arguments: nil)
callbackChannel?.invokeMethod("didHasError", arguments: error?.localizedDescription)
}
}
Your help is greatly appreciated
https://pub.dev/packages/flutter_shazam_kit
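A common trigger for the IsFormatSampleRateAndChannelCountValid assertion is installing a tap with a format whose sample rate or channel count is zero, which can happen when record permission hasn't been granted yet or the audio session isn't active at the moment configureAudio() asks the input node for its format. A hedged diagnostic sketch (configureAudioSafely is a hypothetical variant of the plugin's configureAudio(), reusing its audioEngine, mixerNode, callbackChannel, and addAudio):
func configureAudioSafely() {
    // Activate the session before asking the input node for its format.
    let audioSession = AVAudioSession.sharedInstance()
    try? audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
    try? audioSession.setActive(true, options: .notifyOthersOnDeactivation)
    let inputFormat = audioEngine.inputNode.inputFormat(forBus: 0)
    // A 0 Hz / 0-channel input format is exactly what trips IsFormatSampleRateAndChannelCountValid.
    guard inputFormat.sampleRate > 0, inputFormat.channelCount > 0 else {
        callbackChannel?.invokeMethod("didHasError", arguments: "Microphone input format is invalid; check record permission and the audio session.")
        return
    }
    audioEngine.attach(mixerNode)
    audioEngine.connect(audioEngine.inputNode, to: mixerNode, format: inputFormat)
    // Tap with the mixer's own output format rather than a hand-built one.
    mixerNode.installTap(onBus: 0, bufferSize: 8192, format: mixerNode.outputFormat(forBus: 0)) { buffer, audioTime in
        self.addAudio(buffer: buffer, audioTime: audioTime)
    }
}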

-50 error when converting PCM buffer with AVAudioConverter

I'm trying to convert an AVAudioPCMBuffer with a 44100 Hz sample rate to one with a 48000 Hz sample rate, but I always get an exception (-50 error) when converting. Here's the code:
guard let deviceFormat = AVAudioFormat(standardFormatWithSampleRate: 48000.0, channels: 1) else {
preconditionFailure()
}
// This file is saved as mono 44100
guard let lowToneURL = Bundle.main.url(forResource: "Tone220", withExtension: "wav") else {
preconditionFailure()
}
guard let audioFile = try? AVAudioFile(forReading: lowToneURL) else {
preconditionFailure()
}
let tempBuffer = AVAudioPCMBuffer(pcmFormat: audioFile.processingFormat,
frameCapacity: AVAudioFrameCount(audioFile.length))!
tempBuffer.frameLength = tempBuffer.frameCapacity
do { try audioFile.read(into: tempBuffer) } catch {
assertionFailure("*** Caught: \(error)")
}
guard let converter = AVAudioConverter(from: audioFile.processingFormat, to: deviceFormat) else {
preconditionFailure()
}
guard let convertedBuffer = AVAudioPCMBuffer(pcmFormat: deviceFormat,
frameCapacity: AVAudioFrameCount(audioFile.length)) else {
preconditionFailure()
}
convertedBuffer.frameLength = tempBuffer.frameCapacity
do { try converter.convert(to: convertedBuffer, from: tempBuffer) } catch {
assertionFailure("*** Caught: \(error)")
}
Any ideas?
An Apple engineer answered this on their dev forums. I had missed that the convert(to:from:) variant of AVAudioConverter can't convert the sample rate, so you have to use the withInputFrom variant. The docs on that aren't too clear, but I came up with:
private func pcmBufferForFile(filename: String, sampleRate: Float) -> AVAudioPCMBuffer {
guard let newFormat = AVAudioFormat(standardFormatWithSampleRate: Double(sampleRate), channels: 1) else {
preconditionFailure()
}
guard let url = Bundle.main.url(forResource: filename, withExtension: "wav") else {
preconditionFailure()
}
guard let audioFile = try? AVAudioFile(forReading: url) else {
preconditionFailure()
}
guard let tempBuffer = AVAudioPCMBuffer(pcmFormat: audioFile.processingFormat,
frameCapacity: AVAudioFrameCount(audioFile.length)) else {
preconditionFailure()
}
let conversionRatio = sampleRate / Float(tempBuffer.format.sampleRate)
let newLength = Float(audioFile.length) * conversionRatio
guard let newBuffer = AVAudioPCMBuffer(pcmFormat: newFormat,
frameCapacity: AVAudioFrameCount(newLength)) else {
preconditionFailure()
}
do { try audioFile.read(into: tempBuffer) } catch {
preconditionFailure()
}
guard let converter = AVAudioConverter(from: audioFile.processingFormat, to: newFormat) else {
preconditionFailure()
}
var error: NSError?
converter.convert(to: newBuffer, error: &error, withInputFrom: { (packetCount, statusPtr) -> AVAudioBuffer? in
statusPtr.pointee = .haveData
return tempBuffer
})
if error != nil {
print("*** Conversion error: \(error!)")
}
return newBuffer
}
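A usage sketch, resampling the same bundled 44.1 kHz mono file from the question to 48 kHz (the print is only for illustration):
let buffer48k = pcmBufferForFile(filename: "Tone220", sampleRate: 48000.0)
print("Converted \(buffer48k.frameLength) frames at \(buffer48k.format.sampleRate) Hz")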

WebRTC iOS: Record Remote Audio stream using WebRTC

I am working on an audio streaming application with recording functionality for the receiver.
I got stuck at the point where the user wants to record the audio stream on the receiver side.
Below is my code
Initialisation
var engine = AVAudioEngine()
var recordingFile: AVAudioFile?
var audioPlayer: AVAudioPlayer?
let player = AVAudioPlayerNode()
var isRecording: Bool = false
Initialise AudioEngine
func initializeAudioEngine() {
let input = self.engine.inputNode
let format = input.inputFormat(forBus: 0)
self.engine.attach(self.player)
let mainMixerNode = self.engine.mainMixerNode
self.engine.connect(input, to:mainMixerNode, format: format)
self.engine.prepare()
do {
try self.engine.start()
self.startRecording()
} catch (let error) {
print("START FAILED", error)
}
}
Start Recording
func startRecording() {
self.createRecordingFile()
self.engine.mainMixerNode.installTap(onBus: 0,
bufferSize: 1024,
format: self.engine.mainMixerNode.outputFormat(forBus: 0)) { (buffer, time) -> Void in
do {
self.isRecording = true
try self.recordingFile?.write(from: buffer)
} catch (let error) {
print("RECORD ERROR", error);
}
return
}
}
Create Buffer
private func createBuffer(forFileNamed fileName: String) -> AVAudioPCMBuffer? {
var res: AVAudioPCMBuffer?
if let fileURL = Bundle.main.url(forResource: fileName, withExtension: "caf") {
do {
let file = try AVAudioFile(forReading: fileURL)
res = AVAudioPCMBuffer(pcmFormat: file.processingFormat, frameCapacity:AVAudioFrameCount(file.length))
if let _ = res {
do {
try file.read(into: res!)
} catch (let error) {
print("ERROR read file", error)
}
}
} catch (let error) {
print("ERROR file creation", error)
}
}
return res
}
Stop Recording
func stopRecording() {
self.engine.mainMixerNode.removeTap(onBus: 0)
}
I am trying to record using earphones, but it's not working.
It will work because once you set up
let audiosession = AVAudioSession()
with the AVAudioSessionCategoryPlayAndRecord category and call
audiosession.setActive(true)
it will start recording whatever audio is routed to the device.
WebRTC does not have any internal API to start or stop recording.
We can try using AVAudioSession instead.
First, set up the audio session:
func setUPAudioSession() -> Bool {
let audiosession = AVAudioSession()
do {
try audiosession.setCategory(AVAudioSessionCategoryPlayAndRecord)
} catch(let error) {
print("--> \(error.localizedDescription)")
}
do {
try audiosession.setActive(true)
} catch (let error) {
print("--> \(error.localizedDescription)")
}
return audiosession.isInputAvailable;
}
After setting up the audio session, start recording as below:
func startRecording() -> Bool {
var settings: [String: Any] = [String: String]()
settings[AVFormatIDKey] = kAudioFormatLinearPCM
settings[AVSampleRateKey] = 8000.0
settings[AVNumberOfChannelsKey] = 1
settings[AVLinearPCMBitDepthKey] = 16
settings[AVLinearPCMIsBigEndianKey] = false
settings[AVLinearPCMIsFloatKey] = false
settings[AVEncoderAudioQualityKey] = AVAudioQuality.max.rawValue
// Create the directory where the recorded file will be saved automatically
let searchPaths: [String] = NSSearchPathForDirectoriesInDomains(.documentDirectory, .allDomainsMask, true)
let documentPath_ = searchPaths.first!
let pathToSave = "\(documentPath_)/\(dateString)" // dateString: a file name built elsewhere
let url: URL = URL(fileURLWithPath: pathToSave)
recorder = try? AVAudioRecorder(url: url, settings: settings)
// Initialize delegate, metering, etc.
recorder?.delegate = self
recorder?.isMeteringEnabled = true
recorder?.prepareToRecord()
if let recordIs = recorder {
return recordIs.record()
}
return false
}
Play recorded file
func playRecordingFile() {
// Get the path of the recorded file saved in the previous method
let searchPaths: [String] = NSSearchPathForDirectoriesInDomains(.documentDirectory, .allDomainsMask, true)
let documentPath_ = searchPaths.first!
let fileManager = FileManager.default
var arrayListOfRecordSound: [String] = []
if fileManager.fileExists(atPath: recordingFolder()) {
arrayListOfRecordSound = (try? fileManager.contentsOfDirectory(atPath: documentPath_)) ?? []
}
guard let firstSound = arrayListOfRecordSound.first else { return }
let selectedSound = "\(documentPath_)/\(firstSound)"
let url = URL.init(fileURLWithPath: selectedSound)
let player = try? AVAudioPlayer(contentsOf: url)
player?.delegate = self;
try? AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback)
player?.prepareToPlay()
player?.play()
}
Stop recording
func stopRecording() {
recorder?.stop()
}
Pause recording
func pauseRecording() {
recorder?.pause()
}

I can't record using AVAudioEngine

AVAudioSession.sharedInstance().setCategory(.playAndRecord, mode: .default)
I can record using the above code.
However, during playback, only the earpiece speaker at the top of the iPhone produces sound, so I would like to add defaultToSpeaker to the options.
With that option added, however, recording no longer works.
Is there a solution?
Please help me.
Here is the code I wrote.
final class RecordEngine {
private var engine: AVAudioEngine!
private var mixer: AVAudioMixerNode!
private var player: AVAudioPlayerNode!
private var outputFile: AVAudioFile!
let session = AVAudioSession.sharedInstance()
init() {
prepareAVAudioSession()
prepareNodes()
prepare()
}
func start() {
try! engine.start()
}
func stop() {
engine.pause()
engine.reset()
}
private func prepareAVAudioSession() {
do {
// try session.setCategory(.playAndRecord, mode: .default, options: [.allowBluetooth, .allowBluetoothA2DP])
try session.setCategory(.playAndRecord, mode: .default, options: .defaultToSpeaker)
try session.setActive(true)
} catch {
}
}
private func prepareNodes() {
engine = AVAudioEngine()
mixer = AVAudioMixerNode()
player = AVAudioPlayerNode()
engine.attach(mixer)
engine.attach(player)
}
private func prepare() {
let input = engine.inputNode
let mainMixer = engine.mainMixerNode
let format = input.outputFormat(forBus: 0)
engine.connect(player, to: mainMixer, format: format)
engine.connect(input, to: mixer, format: format)
engine.prepare()
}
func startRecord() {
start()
let format = mixer.outputFormat(forBus: 0)
let outputFileURL = URL(string: NSTemporaryDirectory() + "temp.caf")!
do {
outputFile = try AVAudioFile(forWriting: outputFileURL, settings: format.settings)
} catch {
print(error)
}
mixer.installTap(onBus: 0, bufferSize: 1024, format: format) { [weak self] buffer, when in
do {
try self?.outputFile.write(from: buffer)
print(buffer)
} catch {
print(error)
}
}
}
func stopRecord() {
mixer.removeTap(onBus: 0)
print(outputFile)
stop()
}
func startPlaying() {
start()
player.scheduleFile(outputFile, at: nil) {
print("complete")
}
player.play()
}
func pausePlaying() {
player.pause()
}
}
The problem isn't defaultToSpeaker but rather the mixer setup: you've got a mixer (the one you install a tap on) with no output, in addition to mainMixerNode.
Do you need both mixers? If so, you could connect mixer to mainMixer:
engine.connect(mixer, to: mainMixer, format: format)
or remove mixer and use mainMixer everywhere, or some third option that doesn't produce feedback.
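A minimal sketch of prepare() with that first option applied, assuming the same RecordEngine properties as above (the answer's caveat about feedback still applies when the mic is routed to the open speaker):
private func prepare() {
    let input = engine.inputNode
    let mainMixer = engine.mainMixerNode
    let format = input.outputFormat(forBus: 0)
    engine.connect(player, to: mainMixer, format: format)
    engine.connect(input, to: mixer, format: format)
    // Give the tapped mixer an output so the engine actually pulls audio through it.
    engine.connect(mixer, to: mainMixer, format: format)
    engine.prepare()
}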

MCSessionDelegate - Getting No Bytes When Receiving Stream

I'm pretty new to Swift/iOS; I just started learning a couple of days ago. I'm using Swift 3 and want to develop two iPhone apps that can send an audio stream from the microphone to other iPhone devices using Multipeer Connectivity. The first app would be the speaker's app and the other would be the listener's app.
Previously, I learned how to advertise, browse, and invite peers from this useful tutorial,
and I learned how to get audio data from the microphone and convert it to bytes from this answer and this answer. Thanks a lot to Rhythmic Fistman.
So my code is a combination of what those articles include.
This is the ViewController of the listener app:
import UIKit
import MultipeerConnectivity
import AVFoundation
class ColorSwitchViewController: UIViewController {
@IBOutlet weak var connectionsLabel: UILabel!
let colorService = ColorServiceManager()
var engine = AVAudioEngine()
let player = AVAudioPlayerNode()
// Somewhere, schedule the stream in the mainRunLoop, set the delegate and open it. Choose the peer that you want to connect
var inputStream = InputStream()
var inputStreamIsSet: Bool!
var outputStreamIsSet: Bool!
public let peerID = MCPeerID(displayName: UIDevice.current.name)
//MARK: Private Functions
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
func copyAudioBufferBytes(_ audioBuffer: AVAudioPCMBuffer) -> [UInt8] {
let srcLeft = audioBuffer.floatChannelData![0]
let bytesPerFrame = audioBuffer.format.streamDescription.pointee.mBytesPerFrame
let numBytes = Int(bytesPerFrame * audioBuffer.frameLength)
// initialize bytes to 0 (how to avoid?)
var audioByteArray = [UInt8] (repeating: 0, count: numBytes)
// copy data from buffer
srcLeft.withMemoryRebound(to: UInt8.self, capacity: numBytes) { srcByteData in
audioByteArray.withUnsafeMutableBufferPointer {
$0.baseAddress!.initialize(from: srcByteData, count: numBytes)
}
}
return audioByteArray
}
func bytesToAudioBuffer(_ buf: [UInt8]) -> AVAudioPCMBuffer {
let fmt = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: true)
let frameLength = UInt32(buf.count) / fmt.streamDescription.pointee.mBytesPerFrame
let audioBuffer = AVAudioPCMBuffer(pcmFormat: fmt, frameCapacity: frameLength)
audioBuffer.frameLength = frameLength
let dstLeft = audioBuffer.floatChannelData![0]
buf.withUnsafeBufferPointer {
let src = UnsafeRawPointer($0.baseAddress!).bindMemory(to: Float.self, capacity: Int(frameLength))
dstLeft.initialize(from: src, count: Int(frameLength))
}
return audioBuffer
}
override func viewDidLoad() {
super.viewDidLoad()
colorService.delegate = self
}
@IBAction func redTapped() {
self.change(color: .red)
colorService.send(colorName: "red")
}
@IBAction func yellowTapped() {
self.change(color: .yellow)
colorService.send(colorName: "yellow")
}
func change(color : UIColor) {
UIView.animate(withDuration: 0.2) {
self.view.backgroundColor = color
}
}
}
extension ColorSwitchViewController : ColorServiceManagerDelegate {
func connectedDevicesChanged(manager: ColorServiceManager, connectedDevices: [String]) {
OperationQueue.main.addOperation {
self.connectionsLabel.text = "Connections: \(connectedDevices)"
}
}
func colorChanged(manager: ColorServiceManager, colorString: String) {
OperationQueue.main.addOperation {
switch colorString {
case "red":
self.change(color: .red)
case "yellow":
self.change(color: .yellow)
default:
NSLog("%#", "Unknown color value received: \(colorString)")
}
}
}
func streamReceived(manager: ColorServiceManager, stream: InputStream, streamName: String, fromPeer: MCPeerID) {
NSLog("%#", "name " + fromPeer.displayName)
if streamName == "stream" && fromPeer != peerID {
NSLog("%#", "voice received")
stream.schedule(in: RunLoop.current, forMode: .defaultRunLoopMode)
stream.open()
var bytes = [UInt8](repeating: 0, count: 17640)
if (stream.hasBytesAvailable == true) {
NSLog("%#", "has bytes available...")
} else {
NSLog("%#", "has NO byte ...")
}
let result = stream.read(&bytes, maxLength: bytes.count)
if result == 0 {
print("Stream at capacity")
} else if result == -1 {
print("Operation failed: \(String(describing: stream.streamError))")
} else {
print("The number of bytes read is \(result)")
}
let audioBuffer = self.bytesToAudioBuffer(bytes) //Here is where the app crashes
engine.attach(player)
let outputFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: true)
engine.connect(player, to: engine.mainMixerNode, format: outputFormat)
do {
try engine.start()
player.scheduleBuffer(audioBuffer, completionHandler: nil)
player.play()
} catch let error {
print(error.localizedDescription)
}
}
}
}
And the ViewController of the speaker app is similar, except that it contains code for sending the stream and doesn't contain code for receiving:
// ....
override func viewDidLoad() {
super.viewDidLoad()
colorService.delegate = self
}
func startStream() {
let input = engine.inputNode!
engine.attach(player)
let bus = 0
let inputFormat = input.inputFormat(forBus: bus)
engine.connect(player, to: engine.mainMixerNode, format: inputFormat)
NSLog("%#", "sendStream: to \(self.colorService.session.connectedPeers.count) peers")
if self.colorService.session.connectedPeers.count > 0 {
do {
let outputStream = try self.colorService.session.startStream(withName: "stream", toPeer: self.colorService.session.connectedPeers.first!)
outputStream.schedule(in: RunLoop.main, forMode:RunLoopMode.defaultRunLoopMode)
outputStream.open()
let bus = 0
let inputFormat = input.inputFormat(forBus: bus)
input.installTap(onBus: bus, bufferSize: 2048, format: inputFormat, block: {
(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
self.player.scheduleBuffer(buffer)
let audioBuffer = self.copyAudioBufferBytes(buffer)
// NSLog("%#", "speaking...")
let result = outputStream.write(audioBuffer, maxLength: audioBuffer.count)
if result == 0 {
print("Stream at capacity")
} else if result == -1 {
print("Operation failed: \(String(describing: outputStream.streamError))")
} else {
print("The number of bytes written is \(result)")
}
})
try! engine.start()
player.play()
}
catch let error {
NSLog("%#", "Error for sending: \(error)")
}
}
}
func stopStream() {
engine.inputNode?.removeTap(onBus: 0)
player.stop()
}
@IBAction func redTapped() {
self.change(color: .red)
colorService.send(colorName: "red")
self.startStream()
}
@IBAction func yellowTapped() {
self.change(color: .yellow)
colorService.send(colorName: "yellow")
self.stopStream()
}
// ...
Unfortunately, on the listener side, the app receives the stream but reports no bytes available: NSLog("%@", "has NO byte ...") was called. I wonder whether the listener app really receives the audio stream or not.
So, what's my mistake here? Any help would be appreciated. Thank you in advance.
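streamReceived reads from the InputStream immediately after opening it, so the bytes may simply not have arrived yet. One common pattern is to keep a reference to the stream, set a StreamDelegate, and read only when .hasBytesAvailable fires; a minimal sketch, reusing the listener's existing bytesToAudioBuffer and player (and assuming the player has already been attached and the engine started, as in streamReceived):
extension ColorSwitchViewController: StreamDelegate {
    // Called on the run loop the stream was scheduled on whenever a stream event occurs.
    public func stream(_ aStream: Stream, handle eventCode: Stream.Event) {
        guard eventCode.contains(.hasBytesAvailable), let input = aStream as? InputStream else { return }
        var bytes = [UInt8](repeating: 0, count: 17640)
        let read = input.read(&bytes, maxLength: bytes.count)
        guard read > 0 else { return }
        // Convert only the bytes that were actually read.
        let audioBuffer = bytesToAudioBuffer(Array(bytes.prefix(read)))
        player.scheduleBuffer(audioBuffer, completionHandler: nil)
    }
}
In streamReceived you would then store the stream in a property (for example the existing inputStream), set stream.delegate = self, schedule it on the main run loop, and open it, instead of calling stream.read right away.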
