Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: _engine->IsRunning()'
I am getting this crash on an iPhone 13 mini.
public class SwiftSoundStreamPlugin: NSObject, FlutterPlugin {
private var channel: FlutterMethodChannel
private var registrar: FlutterPluginRegistrar
private var hasPermission: Bool = false
private var debugLogging: Bool = false
//========= Recorder's vars
private let mAudioEngine = AVAudioEngine()
private let mRecordBus = 0
private var mInputNode: AVAudioInputNode
private var mRecordSampleRate: Double = 16000 // 16 kHz
private var mRecordBufferSize: AVAudioFrameCount = 8192
private var mRecordChannel = 0
private var mRecordSettings: [String:Int]!
private var mRecordFormat: AVAudioFormat!
//========= Player's vars
private let PLAYER_OUTPUT_SAMPLE_RATE: Double = 44100 // 44.1 kHz
private let mPlayerBus = 0
private let mPlayerNode = AVAudioPlayerNode()
private var mPlayerSampleRate: Double = 44100 // 44.1 kHz
private var mPlayerBufferSize: AVAudioFrameCount = 127000
private var mPlayerOutputFormat: AVAudioFormat!
private var mPlayerInputFormat: AVAudioFormat!
/** ======== Basic Plugin initialization ======== **/
public static func register(with registrar: FlutterPluginRegistrar) {
let channel = FlutterMethodChannel(name: "vn.casperpas.sound_stream:methods", binaryMessenger: registrar.messenger())
let instance = SwiftSoundStreamPlugin( channel, registrar: registrar)
registrar.addMethodCallDelegate(instance, channel: channel)
}
init( _ channel: FlutterMethodChannel, registrar: FlutterPluginRegistrar ) {
self.channel = channel
self.registrar = registrar
self.mInputNode = mAudioEngine.inputNode
super.init()
self.attachPlayer()
mAudioEngine.prepare()
}
public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) {
switch call.method {
case "hasPermission":
hasPermission(result)
case "initializeRecorder":
initializeRecorder(call, result)
case "startRecording":
startRecording(result)
case "stopRecording":
stopRecording(result)
case "setBufferSize":
setBufferSize(call, result)
case "initializePlayer":
initializePlayer(call, result)
case "startPlayer":
startPlayer(result)
case "stopPlayer":
stopPlayer(result)
case "writeChunk":
writeChunk(call, result)
default:
print("Unrecognized method: \(call.method)")
sendResult(result, FlutterMethodNotImplemented)
}
}
private func sendResult(_ result: @escaping FlutterResult, _ arguments: Any?) {
DispatchQueue.main.async {
result( arguments )
}
}
private func invokeFlutter( _ method: String, _ arguments: Any? ) {
DispatchQueue.main.async {
self.channel.invokeMethod( method, arguments: arguments )
}
}
/** ======== Plugin methods ======== **/
private func checkAndRequestPermission(completion callback: @escaping ((Bool) -> Void)) {
if (hasPermission) {
callback(hasPermission)
return
}
var permission: AVAudioSession.RecordPermission
#if swift(>=4.2)
permission = AVAudioSession.sharedInstance().recordPermission
#else
permission = AVAudioSession.sharedInstance().recordPermission()
#endif
switch permission {
case .granted:
print("granted")
hasPermission = true
callback(hasPermission)
break
case .denied:
print("denied")
hasPermission = false
callback(hasPermission)
break
case .undetermined:
print("undetermined")
AVAudioSession.sharedInstance().requestRecordPermission() { [unowned self] allowed in
if allowed {
self.hasPermission = true
print("undetermined true")
callback(self.hasPermission)
} else {
self.hasPermission = false
print("undetermined false")
callback(self.hasPermission)
}
}
break
default:
callback(hasPermission)
break
}
}
private func hasPermission( _ result: @escaping FlutterResult) {
checkAndRequestPermission { value in
self.sendResult(result, value)
}
}
private func startEngine() {
guard !mAudioEngine.isRunning else {
return
}
if mAudioEngine.outputNode.outputFormat(forBus: mPlayerBus).channelCount == 0 {
// if the channel count is 0, starting the engine throws an exception / crashes
mAudioEngine.reset()
/*
or try changing object
mAudioEngine = AVAudioEngine()
*/
}
mAudioEngine.reset()
try? mAudioEngine.start()
}
private func stopEngine() {
mAudioEngine.stop()
mAudioEngine.reset()
}
private func sendEventMethod(_ name: String, _ data: Any) {
var eventData: [String: Any] = [:]
eventData["name"] = name
eventData["data"] = data
invokeFlutter("platformEvent", eventData)
}
private func initializeRecorder(_ call: FlutterMethodCall, _ result: @escaping FlutterResult) {
guard let argsArr = call.arguments as? Dictionary<String,AnyObject>
else {
sendResult(result, FlutterError( code: SoundStreamErrors.Unknown.rawValue,
message:"Incorrect parameters",
details: nil ))
return
}
mRecordSampleRate = argsArr["sampleRate"] as? Double ?? mRecordSampleRate
debugLogging = argsArr["showLogs"] as? Bool ?? debugLogging
mRecordFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatInt16, sampleRate: mRecordSampleRate, channels: 2, interleaved: true)
checkAndRequestPermission { isGranted in
if isGranted {
self.sendRecorderStatus(SoundStreamStatus.Initialized)
self.sendResult(result, true)
} else {
self.sendResult(result, FlutterError( code: SoundStreamErrors.Unknown.rawValue,
message:"Incorrect parameters",
details: nil ))
}
}
}
private func resetEngineForRecord() {
mAudioEngine.inputNode.removeTap(onBus: mRecordBus)
let input = mAudioEngine.inputNode
let inputFormat = input.outputFormat(forBus: mRecordBus)
let converter = AVAudioConverter(from: inputFormat, to: mRecordFormat!)!
let ratio: Float = Float(inputFormat.sampleRate)/Float(mRecordFormat.sampleRate)
input.installTap(onBus: mRecordBus, bufferSize: mRecordBufferSize, format: inputFormat) { (buffer, time) -> Void in
let inputCallback: AVAudioConverterInputBlock = { inNumPackets, outStatus in
outStatus.pointee = .haveData
return buffer
}
let convertedBuffer = AVAudioPCMBuffer(pcmFormat: self.mRecordFormat!, frameCapacity: UInt32(Float(buffer.frameCapacity) / ratio))!
var error: NSError?
let status = converter.convert(to: convertedBuffer, error: &error, withInputFrom: inputCallback)
assert(status != .error)
if (self.mRecordFormat?.commonFormat == AVAudioCommonFormat.pcmFormatInt16) {
let values = self.audioBufferToBytes(convertedBuffer)
self.sendMicData(values)
}
}
}
private func startRecording(_ result: @escaping FlutterResult) {
resetEngineForRecord()
startEngine()
sendRecorderStatus(SoundStreamStatus.Playing)
result(true)
}
private func stopRecording(_ result: @escaping FlutterResult) {
mAudioEngine.inputNode.removeTap(onBus: mRecordBus)
sendRecorderStatus(SoundStreamStatus.Stopped)
result(true)
}
private func sendMicData(_ data: [UInt8]) {
let channelData = FlutterStandardTypedData(bytes: NSData(bytes: data, length: data.count) as Data)
sendEventMethod("dataPeriod", channelData)
}
private func sendRecorderStatus(_ status: SoundStreamStatus) {
sendEventMethod("recorderStatus", status.rawValue)
}
private func setBufferSize(_ call: FlutterMethodCall, _ result: @escaping FlutterResult) {
guard let argsArr = call.arguments as? Dictionary<String,AnyObject>
else {
sendResult(result, FlutterError( code: SoundStreamErrors.Unknown.rawValue,
message:"Incorrect parameters",
details: nil ))
return
}
mPlayerBufferSize = argsArr["bufferSize"] as? AVAudioFrameCount ?? mPlayerBufferSize
// result(true)
}
private func initializePlayer(_ call: FlutterMethodCall, _ result: @escaping FlutterResult) {
guard let argsArr = call.arguments as? Dictionary<String,AnyObject>
else {
sendResult(result, FlutterError( code: SoundStreamErrors.Unknown.rawValue,
message:"Incorrect parameters",
details: nil ))
return
}
mPlayerSampleRate = argsArr["sampleRate"] as? Double ?? mPlayerSampleRate
debugLogging = argsArr["showLogs"] as? Bool ?? debugLogging
mPlayerInputFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatInt16, sampleRate: mPlayerSampleRate, channels: 1, interleaved: true)
sendPlayerStatus(SoundStreamStatus.Initialized)
}
private func attachPlayer() {
mPlayerOutputFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatFloat32, sampleRate: PLAYER_OUTPUT_SAMPLE_RATE, channels: 1, interleaved: true)
mAudioEngine.attach(mPlayerNode)
mAudioEngine.connect(mPlayerNode, to: mAudioEngine.outputNode, format: mPlayerOutputFormat)
// mAudioEngine.connect(mPlayerNode, to: mAudioEngine.mainMixerNode, format: mPlayerOutputFormat)
}
private func startPlayer(_ result: @escaping FlutterResult) {
startEngine()
if !mPlayerNode.isPlaying {
mPlayerNode.play()
}
sendPlayerStatus(SoundStreamStatus.Playing)
result(true)
}
private func stopPlayer(_ result: @escaping FlutterResult) {
if mPlayerNode.isPlaying {
mPlayerNode.stop()
}
sendPlayerStatus(SoundStreamStatus.Stopped)
result(true)
}
private func sendPlayerStatus(_ status: SoundStreamStatus) {
sendEventMethod("playerStatus", status.rawValue)
}
private func writeChunk(_ call: FlutterMethodCall, _ result: @escaping FlutterResult) {
guard let argsArr = call.arguments as? Dictionary<String,AnyObject>,
let data = argsArr["data"] as? FlutterStandardTypedData
else {
sendResult(result, FlutterError( code: SoundStreamErrors.FailedToWriteBuffer.rawValue,
message:"Failed to write Player buffer",
details: nil ))
return
}
let byteData = [UInt8](data.data)
pushPlayerChunk(byteData, result)
}
private func pushPlayerChunk(_ chunk: [UInt8], _ result: @escaping FlutterResult) {
let buffer = bytesToAudioBuffer(chunk)
mPlayerNode.scheduleBuffer(convertBufferFormat(
buffer,
from: mPlayerInputFormat,
to: mPlayerOutputFormat
));
result(true)
}
private func convertBufferFormat(_ buffer: AVAudioPCMBuffer, from: AVAudioFormat, to: AVAudioFormat) -> AVAudioPCMBuffer {
let formatConverter = AVAudioConverter(from: from, to: to)
let ratio: Float = Float(from.sampleRate)/Float(to.sampleRate)
let pcmBuffer = AVAudioPCMBuffer(pcmFormat: to, frameCapacity: UInt32(Float(buffer.frameCapacity) / ratio))!
var error: NSError? = nil
let inputBlock: AVAudioConverterInputBlock = {inNumPackets, outStatus in
outStatus.pointee = .haveData
return buffer
}
formatConverter?.convert(to: pcmBuffer, error: &error, withInputFrom: inputBlock)
return pcmBuffer
}
private func audioBufferToBytes(_ audioBuffer: AVAudioPCMBuffer) -> [UInt8] {
let srcLeft = audioBuffer.int16ChannelData![0]
let bytesPerFrame = audioBuffer.format.streamDescription.pointee.mBytesPerFrame
let numBytes = Int(bytesPerFrame * audioBuffer.frameLength)
// initialize bytes by 0
var audioByteArray = [UInt8](repeating: 0, count: numBytes)
srcLeft.withMemoryRebound(to: UInt8.self, capacity: numBytes) { srcByteData in
audioByteArray.withUnsafeMutableBufferPointer {
$0.baseAddress!.initialize(from: srcByteData, count: numBytes)
}
}
return audioByteArray
}
private func bytesToAudioBuffer(_ buf: [UInt8]) -> AVAudioPCMBuffer {
let frameLength = UInt32(buf.count) / mPlayerInputFormat.streamDescription.pointee.mBytesPerFrame
let audioBuffer = AVAudioPCMBuffer(pcmFormat: mPlayerInputFormat, frameCapacity: frameLength)!
audioBuffer.frameLength = frameLength
let dstLeft = audioBuffer.int16ChannelData![0]
buf.withUnsafeBufferPointer {
let src = UnsafeRawPointer($0.baseAddress!).bindMemory(to: Int16.self, capacity: Int(frameLength))
dstLeft.initialize(from: src, count: Int(frameLength))
}
return audioBuffer
}
}
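For context: AVAudioPlayerNode raises exactly this "required condition is false: _engine->IsRunning()" exception when play() (or a pending scheduleBuffer) executes while the engine the node is attached to is not running, which commonly happens after an audio-session interruption or route change has stopped the engine. The sketch below only illustrates that defensive pattern and is not the plugin's actual fix; SafePlayerStarter, engine, and playerNode are hypothetical stand-ins for mAudioEngine and mPlayerNode above.
import AVFoundation
// Sketch: only call play() once the engine is confirmed running, and restart
// the engine after an interruption instead of letting the next
// play()/scheduleBuffer() hit a stopped engine.
final class SafePlayerStarter: NSObject {
    private let engine: AVAudioEngine
    private let playerNode: AVAudioPlayerNode
    init(engine: AVAudioEngine, playerNode: AVAudioPlayerNode) {
        self.engine = engine
        self.playerNode = playerNode
        super.init()
        // A phone call, Siri, etc. stops the engine; observe the end of the
        // interruption so playback can be resumed safely.
        NotificationCenter.default.addObserver(self,
                                               selector: #selector(handleInterruption(_:)),
                                               name: AVAudioSession.interruptionNotification,
                                               object: AVAudioSession.sharedInstance())
    }
    func startPlaying() {
        if !engine.isRunning {
            engine.prepare()
            do {
                try engine.start()
            } catch {
                print("Could not start engine: \(error)")
                return
            }
        }
        // Only call play() once the engine is actually running.
        if engine.isRunning && !playerNode.isPlaying {
            playerNode.play()
        }
    }
    @objc private func handleInterruption(_ notification: Notification) {
        guard let info = notification.userInfo,
              let typeValue = info[AVAudioSessionInterruptionTypeKey] as? UInt,
              let type = AVAudioSession.InterruptionType(rawValue: typeValue) else { return }
        if type == .ended {
            // Restart after the interruption ends.
            startPlaying()
        }
    }
}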
Related
I am using the shazamkit package to recognize sound in Flutter. The Android version works perfectly, but in the iOS version I get this error when I start detection:
ERROR: [0x190bf000] >avae> AVAudioNode.mm:568: CreateRecordingTap: required condition is false: IsFormatSampleRateAndChannelCountValid(format)
*** Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: IsFormatSampleRateAndChannelCountValid(format)'
This is my Swift code:
import Flutter
import UIKit
import ShazamKit
import AudioToolbox
public class SwiftFlutterShazamKitPlugin: NSObject, FlutterPlugin {
private var session: SHSession?
private let audioEngine = AVAudioEngine()
private let playerNode = AVAudioPlayerNode()
private let mixerNode = AVAudioMixerNode()
private var callbackChannel: FlutterMethodChannel?
private var sampleRate = 44800
public static func register(with registrar: FlutterPluginRegistrar) {
let channel = FlutterMethodChannel(name: "flutter_shazam_kit", binaryMessenger: registrar.messenger())
let instance = SwiftFlutterShazamKitPlugin(callbackChannel: FlutterMethodChannel(name: "flutter_shazam_kit_callback", binaryMessenger: registrar.messenger()))
registrar.addMethodCallDelegate(instance, channel: channel)
}
init(callbackChannel: FlutterMethodChannel? = nil) {
self.callbackChannel = callbackChannel
}
public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) {
switch call.method {
case "configureShazamKitSession":
let args = call.arguments as! Dictionary<String, Any>
configureShazamKitSession(
customCatalogPath: args["customCatalogPath"] as? String,
sampleRate: args["sampleRate"] as! Int
)
result(nil)
case "startDetectionWithMicrophone":
do{
configureAudio()
try startListening(result: result)
}catch{
callbackChannel?.invokeMethod("didHasError", arguments: error.localizedDescription)
}
case "endDetectionWithMicrophone":
stopListening()
result(nil)
case "endSession":
session = nil
result(nil)
default:
result(nil)
}
}
}
//MARK: ShazamKit session delegation here
//MARK: Methods for AVAudio
extension SwiftFlutterShazamKitPlugin {
func configureShazamKitSession(customCatalogPath: String?, sampleRate: Int) {
self.sampleRate = sampleRate
do {
if session == nil {
if (customCatalogPath == nil) {
session = SHSession()
} else {
let documentsUrl = FileManager.default.urls(
for: .documentDirectory,
in: .userDomainMask
).first!
let catalog = SHCustomCatalog()
try catalog.add(from: URL(fileURLWithPath: customCatalogPath!))
session = SHSession(catalog: catalog)
}
session?.delegate = self
}
} catch let error {
callbackChannel?.invokeMethod("didHasError",
arguments: "configureShazamKitSession() failed")
}
}
func addAudio(buffer: AVAudioPCMBuffer, audioTime: AVAudioTime) {
// Add the audio to the current match request
session?.matchStreamingBuffer(buffer, at: audioTime)
}
func configureAudio() {
playerNode.stop()
audioEngine.stop()
let inputFormat = audioEngine.inputNode.inputFormat(forBus: 0)
// Set an output format compatible with ShazamKit.
let outputFormat = AVAudioFormat(standardFormatWithSampleRate: Double(sampleRate), channels: 1)
// Create a mixer node to convert the input.
audioEngine.attach(mixerNode)
// Attach the mixer to the microphone input and the output of the audio engine.
audioEngine.connect(audioEngine.inputNode, to: mixerNode, format: inputFormat)
// audioEngine.connect(mixerNode, to: audioEngine.outputNode, format: outputFormat)
// Install a tap on the mixer node to capture the microphone audio.
mixerNode.installTap(onBus: 0,
bufferSize: 8192,
format: outputFormat) { buffer, audioTime in
// Add captured audio to the buffer used for making a match.
self.addAudio(buffer: buffer, audioTime: audioTime)
}
}
func startListening(result: FlutterResult) throws {
guard session != nil else{
callbackChannel?.invokeMethod("didHasError", arguments: "ShazamSession not found, please call configureShazamKitSession() first to initialize it.")
result(nil)
return
}
callbackChannel?.invokeMethod("detectStateChanged", arguments: 1)
// Throw an error if the audio engine is already running.
guard !audioEngine.isRunning else {
callbackChannel?.invokeMethod("didHasError", arguments: "Audio engine is currently running, please stop the audio engine first and then try again")
return
}
let audioSession = AVAudioSession.sharedInstance()
do {
try audioSession.setCategory(AVAudioSession.Category.playAndRecord, mode: AVAudioSession.Mode.measurement, options: AVAudioSession.CategoryOptions.defaultToSpeaker)
try audioSession.setActive(true, options: .notifyOthersOnDeactivation)
} catch {
print("audioSession properties weren't set because of an error.")
callbackChannel?.invokeMethod("didHasError", arguments: "Audio session could not be configured")
return
}
// Ask the user for permission to use the mic if required then start the engine.
try audioSession.setCategory(.playAndRecord)
audioSession.requestRecordPermission { [weak self] success in
guard success else {
self?.callbackChannel?.invokeMethod("didHasError", arguments: "Recording permission not found, please allow permission first and then try again")
return
}
do{
self?.audioEngine.prepare()
try self?.audioEngine.start()
}catch{
self?.callbackChannel?.invokeMethod("didHasError", arguments: "Can't start the audio engine")
}
}
result(nil)
}
func stopListening() {
callbackChannel?.invokeMethod("detectStateChanged", arguments: 0)
// Check if the audio engine is already recording.
mixerNode.removeTap(onBus: 0)
audioEngine.stop()
}
}
//MARK: Delegate methods for SHSession
extension SwiftFlutterShazamKitPlugin: SHSessionDelegate{
public func session(_ session: SHSession, didFind match: SHMatch) {
var mediaItems: [[String: Any]] = []
match.mediaItems.forEach { rawItem in
var item: [String : Any] = [:]
var count: UInt32 = 0
let properties = class_copyPropertyList(class_getSuperclass(rawItem.classForCoder), &count)
for i in 0..<count {
guard let property = properties?[Int(i)] else { continue }
let name = String(cString: property_getName(property))
if (name == "properties") {
let props = rawItem.value(forKey: name) as! NSDictionary
for property in props.allKeys {
let prop = property as! String
var val = props.value(forKey: prop)!
if (String(describing: type(of: val)) == "__NSTaggedDate") {
let dateFormatter = DateFormatter()
dateFormatter.dateFormat = "yyyy-MM-dd'T'HH:mm:ss.SSSZ"
val = dateFormatter.string(from: val as! Date)
}
item[prop] = val
}
}
}
mediaItems.append(item)
free(properties)
}
do {
let jsonData = try JSONSerialization.data(withJSONObject: mediaItems)
let jsonString = String(data: jsonData, encoding: .utf8)
self.callbackChannel?.invokeMethod("matchFound", arguments: jsonString)
} catch {
callbackChannel?.invokeMethod("didHasError", arguments: "Error when trying to format data, please try again")
}
}
public func session(_ session: SHSession, didNotFindMatchFor signature: SHSignature, error: Error?) {
callbackChannel?.invokeMethod("notFound", arguments: nil)
callbackChannel?.invokeMethod("didHasError", arguments: error?.localizedDescription)
}
}
Your help is greatly appreciated
https://pub.dev/packages/flutter_shazam_kit
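For reference, the IsFormatSampleRateAndChannelCountValid(format) assertion is raised by installTap when the format it is given has a sample rate or channel count of 0, or does not match the node it is installed on. A common trigger is reading the input format before the AVAudioSession is active (it then reports 0 Hz / 0 channels), or tapping the mixer with an output format it was never connected with. A minimal sketch of the safer pattern, assuming the tap can simply use the input node's own format (makeMicrophoneTap is a hypothetical helper, not part of the plugin above):
import AVFoundation
func makeMicrophoneTap(engine: AVAudioEngine,
                       handler: @escaping (AVAudioPCMBuffer, AVAudioTime) -> Void) throws {
    let session = AVAudioSession.sharedInstance()
    try session.setCategory(.playAndRecord, mode: .measurement, options: .defaultToSpeaker)
    try session.setActive(true, options: .notifyOthersOnDeactivation)
    let inputNode = engine.inputNode
    // Read the format *after* the session is active; before that it can be
    // 0 Hz / 0 channels, which is exactly what the assertion rejects.
    let format = inputNode.outputFormat(forBus: 0)
    guard format.sampleRate > 0, format.channelCount > 0 else {
        throw NSError(domain: "Audio", code: -1,
                      userInfo: [NSLocalizedDescriptionKey: "Input format not ready"])
    }
    // Tap with the node's own format; convert afterwards if matching needs a
    // different sample rate.
    inputNode.installTap(onBus: 0, bufferSize: 8192, format: format, block: handler)
}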
We are trying to disable microphone access in our WebRTCClient class so that it is receive-only for video and audio; ReplayKit is used to render the remote video and audio.
This is the WebRTC iOS Swift class that ReplayKit uses to render the remote video:
class WebRTCClient: NSObject {
// The `RTCPeerConnectionFactory` is in charge of creating new RTCPeerConnection instances.
// A new RTCPeerConnection should be created for every new call, but the factory is shared.
private static let factory: RTCPeerConnectionFactory = {
RTCInitializeSSL()
let videoEncoderFactory = RTCDefaultVideoEncoderFactory()
let videoDecoderFactory = RTCDefaultVideoDecoderFactory()
return RTCPeerConnectionFactory(encoderFactory: videoEncoderFactory, decoderFactory: videoDecoderFactory)
}()
private let peerConnection: RTCPeerConnection
private let rtcAudioSession = RTCAudioSession.sharedInstance()
private let mediaConstrains = [kRTCMediaConstraintsOfferToReceiveAudio: kRTCMediaConstraintsValueTrue,
kRTCMediaConstraintsOfferToReceiveVideo: kRTCMediaConstraintsValueTrue]
private var videoCapturer: RTCVideoCapturer?
private var localVideoTrack: RTCVideoTrack?
private var remoteVideoTrack: RTCVideoTrack?
private var remoteDataChannel: RTCDataChannel?
private var remoteAudioTrack: RTCAudioTrack?
@available(*, unavailable)
override init() {
fatalError("WebRTCClient:init is unavailable")
}
required init(iceServers: [String]) {
let config = RTCConfiguration()
config.iceServers = [RTCIceServer(urlStrings: iceServers)]
// Unified Plan is preferred over Plan B
config.sdpSemantics = .unifiedPlan
// gatherContinually lets WebRTC listen for network changes and send any new candidates to the other client
config.continualGatheringPolicy = .gatherContinually
let constraints = RTCMediaConstraints(mandatoryConstraints: nil,
optionalConstraints: ["DtlsSrtpKeyAgreement":kRTCMediaConstraintsValueTrue,
kRTCMediaConstraintsOfferToReceiveAudio: kRTCMediaConstraintsValueTrue,
kRTCMediaConstraintsOfferToReceiveVideo: kRTCMediaConstraintsValueTrue])
self.peerConnection = WebRTCClient.factory.peerConnection(with: config, constraints: constraints, delegate: nil)
super.init()
self.createMediaSenders()
self.configureAudioSession()
self.peerConnection.delegate = self
}
func classSetup() { }
// MARK: Signaling
func offer(completion: @escaping (_ sdp: RTCSessionDescription) -> Void) {
let constrains = RTCMediaConstraints(mandatoryConstraints: self.mediaConstrains,
optionalConstraints: nil)
self.peerConnection.offer(for: constrains) { (sdp, error) in
guard let sdp = sdp else {
return
}
self.peerConnection.setLocalDescription(sdp, completionHandler: { (error) in
completion(sdp)
})
}
}
func answer(completion: @escaping (_ sdp: RTCSessionDescription) -> Void) {
let constrains = RTCMediaConstraints(mandatoryConstraints: self.mediaConstrains,
optionalConstraints: nil)
self.peerConnection.answer(for: constrains) { (sdp, error) in
guard let sdp = sdp else {
return
}
self.peerConnection.setLocalDescription(sdp, completionHandler: { (error) in
completion(sdp)
})
}
}
func set(remoteSdp: RTCSessionDescription, completion: @escaping (Error?) -> ()) {
self.peerConnection.setRemoteDescription(remoteSdp, completionHandler: completion)
}
func set(remoteCandidate: RTCIceCandidate) {
self.peerConnection.add(remoteCandidate)
}
// MARK: Media
func startCaptureLocalVideo(renderer: RTCVideoRenderer, isFrontCamera: Bool) {
guard let capturer = self.videoCapturer as? RTCCameraVideoCapturer else {
return
}
guard
let frontCamera = (RTCCameraVideoCapturer.captureDevices().first { $0.position == .front }),
// choose highest res
let format = (RTCCameraVideoCapturer.supportedFormats(for: frontCamera).sorted { (f1, f2) -> Bool in
let width1 = CMVideoFormatDescriptionGetDimensions(f1.formatDescription).width
let width2 = CMVideoFormatDescriptionGetDimensions(f2.formatDescription).width
return width1 < width2
}).last,
// choose highest fps
let fps = (format.videoSupportedFrameRateRanges.sorted { return $0.maxFrameRate < $1.maxFrameRate }.last) else {
return
}
capturer.startCapture(with: frontCamera,
format: format,
fps: Int(fps.maxFrameRate))
self.localVideoTrack?.add(renderer)
}
func renderRemoteVideo(to renderer: RTCVideoRenderer) {
self.remoteVideoTrack?.add(renderer)
}
func removeRenderRemoteVideo(to renderer: RTCVideoRenderer) {
self.remoteVideoTrack?.remove(renderer)
}
private func configureAudioSession() {
self.rtcAudioSession.lockForConfiguration()
do {
try self.rtcAudioSession.setCategory(AVAudioSessionCategoryPlayAndRecord)
try self.rtcAudioSession.setMode(AVAudioSessionModeVoiceChat)
} catch let error {
debugPrint("Error changeing AVAudioSession category: \(error)")
}
self.rtcAudioSession.unlockForConfiguration()
}
func createMediaSenders() {
let streamId = "XYZ"
self.rtcAudioSession.useManualAudio = true
self.rtcAudioSession.isAudioEnabled = false
// Audio
let audioTrack = self.createAudioTrack()
self.peerConnection.add(audioTrack, streamIds: [streamId])
self.remoteAudioTrack = self.peerConnection.transceivers.first { $0.mediaType == .audio }?.receiver.track as? RTCAudioTrack
}
private func createAudioTrack() -> RTCAudioTrack {
let audioConstrains = RTCMediaConstraints(mandatoryConstraints: mediaConstrains, optionalConstraints: nil)
let audioSource = WebRTCClient.factory.audioSource(with: audioConstrains)
let audioTrack = WebRTCClient.factory.audioTrack(with: audioSource, trackId: "ARDAMSa0")
return audioTrack
}
private func createVideoTrack() -> RTCVideoTrack {
let videoSource = WebRTCClient.factory.videoSource()
#if TARGET_OS_SIMULATOR
self.videoCapturer = RTCFileVideoCapturer(delegate: videoSource)
#else
self.videoCapturer = RTCCameraVideoCapturer(delegate: videoSource)
#endif
let videoTrack = WebRTCClient.factory.videoTrack(with: videoSource, trackId: "ARDAMSv0")
return videoTrack
}
// MARK: Data Channels
private func createDataChannel() -> RTCDataChannel? {
let config = RTCDataChannelConfiguration()
guard let dataChannel = self.peerConnection.dataChannel(forLabel: "WebRTCData", configuration: config) else {
debugPrint("Warning: Couldn't create data channel.")
return nil
}
return dataChannel
}
func sendData(_ data: Data) {
let buffer = RTCDataBuffer(data: data, isBinary: true)
self.remoteDataChannel?.sendData(buffer)
}
}
This class always gets microphone access by default once the socket connection succeeds.
To disable the microphone when creating the media senders, we tried:
self.rtcAudioSession.isAudioEnabled = false
but that did not disable microphone access.
Please suggest a solution for a receive-only WebRTCClient class.
Add this to your initializer:
let webRTCConfiguration = RTCAudioSessionConfiguration.init()
webRTCConfiguration.mode = AVAudioSession.Mode.moviePlayback.rawValue
webRTCConfiguration.category = AVAudioSession.Category.playback.rawValue
webRTCConfiguration.categoryOptions = AVAudioSession.CategoryOptions.duckOthers
RTCAudioSessionConfiguration.setWebRTC(webRTCConfiguration)
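If the goal is strictly receive-only media, another option (a sketch only, not verified against your WebRTC build) is to skip createAudioTrack()/peerConnection.add(...) entirely and declare receive-only transceivers, so WebRTC never opens a capture device:
// Requires a WebRTC build with Unified Plan / RTCRtpTransceiver support.
let audioInit = RTCRtpTransceiverInit()
audioInit.direction = .recvOnly
peerConnection.addTransceiver(of: .audio, init: audioInit)
let videoInit = RTCRtpTransceiverInit()
videoInit.direction = .recvOnly
peerConnection.addTransceiver(of: .video, init: videoInit)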
I have an AVAsset and I use AVAssetReaderAudioMixOutput to get a CMSampleBuffer, and I want to use that CMSampleBuffer to create an AVAudioPCMBuffer I can schedule on an AVAudioPlayerNode.
How can I do this? Can anyone help?
This might help:
https://developer.apple.com/documentation/speech/sfspeechaudiobufferrecognitionrequest/1649395-appendaudiosamplebuffer
func appendAudioSampleBuffer(_ sampleBuffer: CMSampleBuffer)
extension AVAudioPCMBuffer {
static func create(from sampleBuffer: CMSampleBuffer) -> AVAudioPCMBuffer? {
guard let description: CMFormatDescription = CMSampleBufferGetFormatDescription(sampleBuffer),
let sampleRate: Float64 = description.audioStreamBasicDescription?.mSampleRate,
let channelsPerFrame: UInt32 = description.audioStreamBasicDescription?.mChannelsPerFrame /*,
let numberOfChannels = description.audioChannelLayout?.numberOfChannels */
else { return nil }
guard let blockBuffer: CMBlockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer) else {
return nil
}
let samplesCount = CMSampleBufferGetNumSamples(sampleBuffer)
//let length: Int = CMBlockBufferGetDataLength(blockBuffer)
let audioFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: sampleRate, channels: AVAudioChannelCount(1), interleaved: false)
let buffer = AVAudioPCMBuffer(pcmFormat: audioFormat!, frameCapacity: AVAudioFrameCount(samplesCount))!
buffer.frameLength = buffer.frameCapacity
// GET BYTES
var dataPointer: UnsafeMutablePointer<Int8>?
CMBlockBufferGetDataPointer(blockBuffer, atOffset: 0, lengthAtOffsetOut: nil, totalLengthOut: nil, dataPointerOut: &dataPointer)
guard var channel: UnsafeMutablePointer<Float> = buffer.floatChannelData?[0],
let data = dataPointer else { return nil }
var data16 = UnsafeRawPointer(data).assumingMemoryBound(to: Int16.self)
for _ in 0...samplesCount - 1 {
channel.pointee = Float32(data16.pointee) / Float32(Int16.max)
channel += 1
for _ in 0...channelsPerFrame - 1 {
data16 += 1
}
}
return buffer
}
}
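A minimal usage sketch for the extension above, assuming the AVAssetReaderAudioMixOutput delivers 16-bit PCM (which is what the extension reads); playAudio is a hypothetical helper, and in real code the engine and player node should be kept alive in properties rather than locals:
func playAudio(from output: AVAssetReaderAudioMixOutput) throws {
    let engine = AVAudioEngine()
    let playerNode = AVAudioPlayerNode()
    engine.attach(playerNode)
    var connected = false
    // Pull each sample buffer, convert it with the extension above, and
    // schedule it on the player node.
    while let sampleBuffer = output.copyNextSampleBuffer() {
        guard let pcmBuffer = AVAudioPCMBuffer.create(from: sampleBuffer) else { continue }
        if !connected {
            // Connect using the converted buffers' format so scheduleBuffer()
            // receives a matching format.
            engine.connect(playerNode, to: engine.mainMixerNode, format: pcmBuffer.format)
            try engine.start()
            playerNode.play()
            connected = true
        }
        playerNode.scheduleBuffer(pcmBuffer, completionHandler: nil)
    }
}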
On watchOS, I am trying to get the heart-rate value in real time while a workout session is running.
func startHeartRateQuery(updateHandler: @escaping ([HKQuantitySample]?) -> Void) {
guard let quantityType = HKObjectType.quantityType(forIdentifier: .heartRate) else {
return
}
let heartRateQuery = HKAnchoredObjectQuery(type: quantityType, predicate: nil, anchor: anchor, limit: Int(HKObjectQueryNoLimit)) { (query, sampleObjects, deletedObjects, newAnchor, error) -> Void in
guard let newAnchor = newAnchor else {return}
self.anchor = newAnchor
updateHandler(sampleObjects as? [HKQuantitySample])
}
heartRateQuery.updateHandler = {(query, samples, deleteObjects, newAnchor, error) -> Void in
self.anchor = newAnchor!
updateHandler(samples as? [HKQuantitySample])
}
healthStore.execute(heartRateQuery)
activeDataQueries.append(heartRateQuery)
}
And this is how I start the workout session.
func session(_ session: WCSession, didReceiveUserInfo userInfo: [String : Any] = [:]) {
guard let d = delegate else {
return
}
if let u = userInfo["status"] as? String {
d.receivedWorkoutRunningStatus(u)
}
if let c = userInfo["clock"] as? String {
d.receiveWorkoutRestPeriodClock(c)
}
}
You can find the receivedWorkoutRunningStatus function in InterfaceController.
This is InterfaceController.swift, the first screen of the Watch app.
class InterfaceController: WKInterfaceController, HKWorkoutSessionDelegate {
@IBOutlet var lblHeartRate: WKInterfaceLabel!
@IBOutlet var lblSplitIntervalNumber: WKInterfaceLabel!
@IBOutlet var lblRestPeriodClock: WKInterfaceLabel!
private let healthStoreManager = WatchHealthKitManager()
private let parentConnector = ParentConnector()
private var workoutSession: HKWorkoutSession!
override func awake(withContext context: Any?) {
super.awake(withContext: context)
// Create a workout session with the workout configuration
do {
let workoutConfiguration = HKWorkoutConfiguration()
workoutConfiguration.locationType = .indoor
workoutConfiguration.activityType = .rowing
workoutSession = try HKWorkoutSession(configuration: workoutConfiguration)
} catch {
fatalError(error.localizedDescription)
}
//// initial setup
parentConnector.activate()
parentConnector.delegate = self
workoutSession.delegate = self
}
// MARK: - Data Accumulation
private func startAccumulatingData() {
healthStoreManager.startHeartRateQuery() { quantitySamples in
DispatchQueue.main.async {
guard !self.isPaused() else {
return
}
guard let heartRateSamples = quantitySamples else {
return
}
let hrUnit = HKUnit(from: "count/min")
guard let sample = heartRateSamples.first else {
return
}
let value = sample.quantity.doubleValue(for: hrUnit)
self.updateHeartRate(value: value)
self.parentConnector.transfer(value: value)
}
}
}
func workoutSession(_ workoutSession: HKWorkoutSession,
didChangeTo toState: HKWorkoutSessionState,
from fromState: HKWorkoutSessionState,
date: Date) {
switch (toState) {
case .running:
startAccumulatingData()
case .ended:
stopAccumulatingData()
default:
print("Error")
}
}
func receivedWorkoutRunningStatus(_ status: String) {
if (status == "Start") {
healthStoreManager.start(workoutSession)
} else if (status == "Finish") {
healthStoreManager.end(workoutSession)
lblHeartRate.setText("No Active Workout")
}
DispatchQueue.main.async {
self.lblSplitIntervalNumber.setText(status)
}
}
On the iPhone app, I send a "Start" string using the transferUserInfo function to trigger the start of the workout session. This is not working reliably: it only works sometimes and is very inconsistent.
I would appreciate any advice or alternative approaches.
Thank you in advance.
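One thing worth noting about the inconsistency described above: transferUserInfo is a queued background transfer with no guarantee about when the payload is delivered, so it is a poor fit for an immediate "Start" trigger. A sketch of an alternative on the iPhone side (sendStart is a hypothetical helper): use sendMessage while the watch app is reachable and fall back to the queued transfer otherwise. The watch side then also needs session(_:didReceiveMessage:) in addition to session(_:didReceiveUserInfo:).
import WatchConnectivity
func sendStart(using session: WCSession) {
    let payload = ["status": "Start"]
    if session.isReachable {
        // Delivered immediately while the counterpart app is reachable.
        session.sendMessage(payload, replyHandler: nil) { error in
            print("sendMessage failed: \(error)")
        }
    } else {
        // Queued for background delivery when not reachable.
        session.transferUserInfo(payload)
    }
}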
I'm pretty new to Swift/iOS; I just started learning it a couple of days ago. I'm using Swift 3 and want to develop two iPhone apps that can send an audio stream from the microphone to other iPhone devices using Multipeer Connectivity. The first app is the speaker's app and the other is the listener's app.
Previously, I learned how to advertise, browse, and invite peers from this useful tutorial,
and I learned how to get audio data from the microphone and convert it to bytes from this answer and this answer. Thanks a lot to Rhythmic Fistman.
So, my code is a combination of what those articles include.
This is the ViewController of the listener app:
import UIKit
import MultipeerConnectivity
import AVFoundation
class ColorSwitchViewController: UIViewController {
@IBOutlet weak var connectionsLabel: UILabel!
let colorService = ColorServiceManager()
var engine = AVAudioEngine()
let player = AVAudioPlayerNode()
// Somewhere, schedule the stream in the mainRunLoop, set the delegate and open it. Choose the peer that you want to connect
var inputStream = InputStream()
var inputStreamIsSet: Bool!
var outputStreamIsSet: Bool!
public let peerID = MCPeerID(displayName: UIDevice.current.name)
//MARK: Private Functions
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
func copyAudioBufferBytes(_ audioBuffer: AVAudioPCMBuffer) -> [UInt8] {
let srcLeft = audioBuffer.floatChannelData![0]
let bytesPerFrame = audioBuffer.format.streamDescription.pointee.mBytesPerFrame
let numBytes = Int(bytesPerFrame * audioBuffer.frameLength)
// initialize bytes to 0 (how to avoid?)
var audioByteArray = [UInt8] (repeating: 0, count: numBytes)
// copy data from buffer
srcLeft.withMemoryRebound(to: UInt8.self, capacity: numBytes) { srcByteData in
audioByteArray.withUnsafeMutableBufferPointer {
$0.baseAddress!.initialize(from: srcByteData, count: numBytes)
}
}
return audioByteArray
}
func bytesToAudioBuffer(_ buf: [UInt8]) -> AVAudioPCMBuffer {
let fmt = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: true)
let frameLength = UInt32(buf.count) / fmt.streamDescription.pointee.mBytesPerFrame
let audioBuffer = AVAudioPCMBuffer(pcmFormat: fmt, frameCapacity: frameLength)
audioBuffer.frameLength = frameLength
let dstLeft = audioBuffer.floatChannelData![0]
buf.withUnsafeBufferPointer {
let src = UnsafeRawPointer($0.baseAddress!).bindMemory(to: Float.self, capacity: Int(frameLength))
dstLeft.initialize(from: src, count: Int(frameLength))
}
return audioBuffer
}
override func viewDidLoad() {
super.viewDidLoad()
colorService.delegate = self
}
@IBAction func redTapped() {
self.change(color: .red)
colorService.send(colorName: "red")
}
@IBAction func yellowTapped() {
self.change(color: .yellow)
colorService.send(colorName: "yellow")
}
func change(color : UIColor) {
UIView.animate(withDuration: 0.2) {
self.view.backgroundColor = color
}
}
}
extension ColorSwitchViewController : ColorServiceManagerDelegate {
func connectedDevicesChanged(manager: ColorServiceManager, connectedDevices: [String]) {
OperationQueue.main.addOperation {
self.connectionsLabel.text = "Connections: \(connectedDevices)"
}
}
func colorChanged(manager: ColorServiceManager, colorString: String) {
OperationQueue.main.addOperation {
switch colorString {
case "red":
self.change(color: .red)
case "yellow":
self.change(color: .yellow)
default:
NSLog("%#", "Unknown color value received: \(colorString)")
}
}
}
func streamReceived(manager: ColorServiceManager, stream: InputStream, streamName: String, fromPeer: MCPeerID) {
NSLog("%#", "name " + fromPeer.displayName)
if streamName == "stream" && fromPeer != peerID {
NSLog("%#", "voice received")
stream.schedule(in: RunLoop.current, forMode: .defaultRunLoopMode)
stream.open()
var bytes = [UInt8](repeating: 0, count: 17640)
if (stream.hasBytesAvailable == true) {
NSLog("%#", "has bytes available...")
} else {
NSLog("%#", "has NO byte ...")
}
let result = stream.read(&bytes, maxLength: bytes.count)
if result == 0 {
print("Stream at capacity")
} else if result == -1 {
print("Operation failed: \(String(describing: stream.streamError))")
} else {
print("The number of bytes read is \(result)")
}
let audioBuffer = self.bytesToAudioBuffer(bytes) //Here is where the app crashes
engine.attach(player)
let outputFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: true)
engine.connect(player, to: engine.mainMixerNode, format: outputFormat)
do {
try engine.start()
player.scheduleBuffer(audioBuffer, completionHandler: nil)
player.play()
} catch let error {
print(error.localizedDescription)
}
}
}
}
And the ViewController of the speaker app is similar, except that it contains code for sending the stream and doesn't contain code for receiving:
// ....
override func viewDidLoad() {
super.viewDidLoad()
colorService.delegate = self
}
func startStream() {
let input = engine.inputNode!
engine.attach(player)
let bus = 0
let inputFormat = input.inputFormat(forBus: bus)
engine.connect(player, to: engine.mainMixerNode, format: inputFormat)
NSLog("%#", "sendStream: to \(self.colorService.session.connectedPeers.count) peers")
if self.colorService.session.connectedPeers.count > 0 {
do {
let outputStream = try self.colorService.session.startStream(withName: "stream", toPeer: self.colorService.session.connectedPeers.first!)
outputStream.schedule(in: RunLoop.main, forMode:RunLoopMode.defaultRunLoopMode)
outputStream.open()
let bus = 0
let inputFormat = input.inputFormat(forBus: bus)
input.installTap(onBus: bus, bufferSize: 2048, format: inputFormat, block: {
(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
self.player.scheduleBuffer(buffer)
let audioBuffer = self.copyAudioBufferBytes(buffer)
// NSLog("%#", "speaking...")
let result = outputStream.write(audioBuffer, maxLength: audioBuffer.count)
if result == 0 {
print("Stream at capacity")
} else if result == -1 {
print("Operation failed: \(String(describing: outputStream.streamError))")
} else {
print("The number of bytes written is \(result)")
}
})
try! engine.start()
player.play()
}
catch let error {
NSLog("%#", "Error for sending: \(error)")
}
}
}
func stopStream() {
engine.inputNode?.removeTap(onBus: 0)
player.stop()
}
@IBAction func redTapped() {
self.change(color: .red)
colorService.send(colorName: "red")
self.startStream()
}
@IBAction func yellowTapped() {
self.change(color: .yellow)
colorService.send(colorName: "yellow")
self.stopStream()
}
// ...
Unfortunately, on the listener side, the app receives the stream but reports no bytes available: NSLog("%@", "has NO byte ...") is called. I wonder whether the listener app is really receiving the audio stream at all.
So, what is my mistake here? Any help would be appreciated. Thank you in advance.
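A likely reason for "has NO byte": the code reads from the InputStream synchronously right after open(), before any data has actually arrived over the Multipeer connection. The usual pattern is to set a StreamDelegate and read only when .hasBytesAvailable fires. Below is a minimal sketch in the same Swift 3 style as the question (StreamReceiver is a hypothetical helper, not part of the original code); the engine/player setup should also happen once, not on every received chunk.
class StreamReceiver: NSObject, StreamDelegate {
    var onBytes: (([UInt8]) -> Void)?
    func attach(to stream: InputStream) {
        stream.delegate = self
        stream.schedule(in: RunLoop.main, forMode: .defaultRunLoopMode)
        stream.open()
    }
    func stream(_ aStream: Stream, handle eventCode: Stream.Event) {
        guard eventCode.contains(.hasBytesAvailable),
              let input = aStream as? InputStream else { return }
        var bytes = [UInt8](repeating: 0, count: 17640)
        let count = input.read(&bytes, maxLength: bytes.count)
        if count > 0 {
            // Hand only the bytes actually read to the audio pipeline.
            onBytes?(Array(bytes.prefix(count)))
        }
    }
}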