How to create an AVAudioPCMBuffer from a CMSampleBuffer? - ios

I have an AVAsset and I use AVAssetReaderAudioMixOutput to read CMSampleBuffers from it. I want to convert each CMSampleBuffer into an AVAudioPCMBuffer so that I can schedule it on an AVAudioPlayerNode with scheduleBuffer.
How can I do that? Can anyone help?

This might help: if speech recognition is what you are after, SFSpeechAudioBufferRecognitionRequest can consume a CMSampleBuffer directly:
https://developer.apple.com/documentation/speech/sfspeechaudiobufferrecognitionrequest/1649395-appendaudiosamplebuffer
func appendAudioSampleBuffer(_ sampleBuffer: CMSampleBuffer)
If you need an actual AVAudioPCMBuffer for AVAudioPlayerNode, the following extension builds one from a CMSampleBuffer:

import AVFoundation
import CoreMedia

extension AVAudioPCMBuffer {
    /// Creates a mono float32 PCM buffer from a CMSampleBuffer whose data is 16-bit interleaved PCM.
    static func create(from sampleBuffer: CMSampleBuffer) -> AVAudioPCMBuffer? {
        guard let description: CMFormatDescription = CMSampleBufferGetFormatDescription(sampleBuffer),
              let sampleRate: Float64 = description.audioStreamBasicDescription?.mSampleRate,
              let channelsPerFrame: UInt32 = description.audioStreamBasicDescription?.mChannelsPerFrame
        else { return nil }

        guard let blockBuffer: CMBlockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer) else {
            return nil
        }

        let samplesCount = CMSampleBufferGetNumSamples(sampleBuffer)

        guard let audioFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32,
                                              sampleRate: sampleRate,
                                              channels: AVAudioChannelCount(1),
                                              interleaved: false),
              let buffer = AVAudioPCMBuffer(pcmFormat: audioFormat,
                                            frameCapacity: AVAudioFrameCount(samplesCount))
        else { return nil }
        buffer.frameLength = buffer.frameCapacity

        // Get a pointer to the raw bytes in the block buffer.
        var dataPointer: UnsafeMutablePointer<Int8>?
        CMBlockBufferGetDataPointer(blockBuffer, atOffset: 0, lengthAtOffsetOut: nil,
                                    totalLengthOut: nil, dataPointerOut: &dataPointer)
        guard var channel: UnsafeMutablePointer<Float> = buffer.floatChannelData?[0],
              let data = dataPointer else { return nil }

        // Interpret the bytes as interleaved Int16 samples and copy the first channel as Float32.
        var data16 = UnsafeRawPointer(data).assumingMemoryBound(to: Int16.self)
        for _ in 0..<samplesCount {
            channel.pointee = Float32(data16.pointee) / Float32(Int16.max)
            channel += 1
            data16 += Int(channelsPerFrame) // skip the remaining interleaved channels
        }
        return buffer
    }
}
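For completeness, here is a minimal usage sketch (an illustration added for clarity, not part of the answer above) showing how the extension could be used for what the question asks: read sample buffers from an AVAssetReaderAudioMixOutput and schedule the converted buffers on an AVAudioPlayerNode. The audio settings force 16-bit interleaved PCM because the extension assumes Int16 source samples; the function name and parameters are invented for the example.

import AVFoundation

// Illustrative sketch: read an asset's audio, convert each CMSampleBuffer with the
// extension above, and schedule the result on an AVAudioPlayerNode.
func play(asset: AVAsset, engine: AVAudioEngine, player: AVAudioPlayerNode) throws {
    let reader = try AVAssetReader(asset: asset)
    let output = AVAssetReaderAudioMixOutput(
        audioTracks: asset.tracks(withMediaType: .audio),
        audioSettings: [
            AVFormatIDKey: kAudioFormatLinearPCM,   // the extension expects linear PCM,
            AVLinearPCMBitDepthKey: 16,             // 16-bit integer samples,
            AVLinearPCMIsFloatKey: false,
            AVLinearPCMIsNonInterleaved: false      // interleaved across channels
        ])
    reader.add(output)
    guard reader.startReading() else { throw reader.error ?? CocoaError(.fileReadUnknown) }

    var started = false
    while let sampleBuffer = output.copyNextSampleBuffer() {
        guard let pcmBuffer = AVAudioPCMBuffer.create(from: sampleBuffer) else { continue }
        if !started {
            // Connect with the converted buffer's format so scheduleBuffer accepts it.
            engine.attach(player)
            engine.connect(player, to: engine.mainMixerNode, format: pcmBuffer.format)
            try engine.start()
            player.play()
            started = true
        }
        player.scheduleBuffer(pcmBuffer, completionHandler: nil)
    }
}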

Related

mAudioEngine.start() crash in iOS

The app is terminating with:
Terminating app due to uncaught exception
'com.apple.coreaudio.avfaudio', reason: 'required condition is false:
_engine->IsRunning()'
The crash happens on an iPhone 13 mini.
public class SwiftSoundStreamPlugin: NSObject, FlutterPlugin {
private var channel: FlutterMethodChannel
private var registrar: FlutterPluginRegistrar
private var hasPermission: Bool = false
private var debugLogging: Bool = false
//========= Recorder's vars
private let mAudioEngine = AVAudioEngine()
private let mRecordBus = 0
private var mInputNode: AVAudioInputNode
private var mRecordSampleRate: Double = 16000 // 16Khz
private var mRecordBufferSize: AVAudioFrameCount = 8192
private var mRecordChannel = 0
private var mRecordSettings: [String:Int]!
private var mRecordFormat: AVAudioFormat!
//========= Player's vars
private let PLAYER_OUTPUT_SAMPLE_RATE: Double = 44100 // 44.1 kHz
private let mPlayerBus = 0
private let mPlayerNode = AVAudioPlayerNode()
private var mPlayerSampleRate: Double = 44100 // 44.1 kHz
private var mPlayerBufferSize: AVAudioFrameCount = 127000
private var mPlayerOutputFormat: AVAudioFormat!
private var mPlayerInputFormat: AVAudioFormat!
/** ======== Basic Plugin initialization ======== **/
public static func register(with registrar: FlutterPluginRegistrar) {
let channel = FlutterMethodChannel(name: "vn.casperpas.sound_stream:methods", binaryMessenger: registrar.messenger())
let instance = SwiftSoundStreamPlugin( channel, registrar: registrar)
registrar.addMethodCallDelegate(instance, channel: channel)
}
init( _ channel: FlutterMethodChannel, registrar: FlutterPluginRegistrar ) {
self.channel = channel
self.registrar = registrar
self.mInputNode = mAudioEngine.inputNode
super.init()
self.attachPlayer()
mAudioEngine.prepare()
}
public func handle(_ call: FlutterMethodCall, result: @escaping FlutterResult) {
switch call.method {
case "hasPermission":
hasPermission(result)
case "initializeRecorder":
initializeRecorder(call, result)
case "startRecording":
startRecording(result)
case "stopRecording":
stopRecording(result)
case "setBufferSize":
setBufferSize(call, result)
case "initializePlayer":
initializePlayer(call, result)
case "startPlayer":
startPlayer(result)
case "stopPlayer":
stopPlayer(result)
case "writeChunk":
writeChunk(call, result)
default:
print("Unrecognized method: \(call.method)")
sendResult(result, FlutterMethodNotImplemented)
}
}
private func sendResult(_ result: @escaping FlutterResult, _ arguments: Any?) {
DispatchQueue.main.async {
result( arguments )
}
}
private func invokeFlutter( _ method: String, _ arguments: Any? ) {
DispatchQueue.main.async {
self.channel.invokeMethod( method, arguments: arguments )
}
}
/** ======== Plugin methods ======== **/
private func checkAndRequestPermission(completion callback: @escaping ((Bool) -> Void)) {
if (hasPermission) {
callback(hasPermission)
return
}
var permission: AVAudioSession.RecordPermission
#if swift(>=4.2)
permission = AVAudioSession.sharedInstance().recordPermission
#else
permission = AVAudioSession.sharedInstance().recordPermission()
#endif
switch permission {
case .granted:
print("granted")
hasPermission = true
callback(hasPermission)
break
case .denied:
print("denied")
hasPermission = false
callback(hasPermission)
break
case .undetermined:
print("undetermined")
AVAudioSession.sharedInstance().requestRecordPermission() { [unowned self] allowed in
if allowed {
self.hasPermission = true
print("undetermined true")
callback(self.hasPermission)
} else {
self.hasPermission = false
print("undetermined false")
callback(self.hasPermission)
}
}
break
default:
callback(hasPermission)
break
}
}
private func hasPermission( _ result: @escaping FlutterResult) {
checkAndRequestPermission { value in
self.sendResult(result, value)
}
}
private func startEngine() {
guard !mAudioEngine.isRunning else {
return
}
if mAudioEngine.outputNode.outputFormat(forBus: mPlayerBus).channelCount == 0 {
// if count is 0 then it throws a exception or crash
mAudioEngine.reset()
/*
or try changing object
mAudioEngine = AVAudioEngine()
*/
}
mAudioEngine.reset()
try? mAudioEngine.start()
}
private func stopEngine() {
mAudioEngine.stop()
mAudioEngine.reset()
}
private func sendEventMethod(_ name: String, _ data: Any) {
var eventData: [String: Any] = [:]
eventData["name"] = name
eventData["data"] = data
invokeFlutter("platformEvent", eventData)
}
private func initializeRecorder(_ call: FlutterMethodCall, _ result: @escaping FlutterResult) {
guard let argsArr = call.arguments as? Dictionary<String,AnyObject>
else {
sendResult(result, FlutterError( code: SoundStreamErrors.Unknown.rawValue,
message:"Incorrect parameters",
details: nil ))
return
}
mRecordSampleRate = argsArr["sampleRate"] as? Double ?? mRecordSampleRate
debugLogging = argsArr["showLogs"] as? Bool ?? debugLogging
mRecordFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatInt16, sampleRate: mRecordSampleRate, channels: 2, interleaved: true)
checkAndRequestPermission { isGranted in
if isGranted {
self.sendRecorderStatus(SoundStreamStatus.Initialized)
self.sendResult(result, true)
} else {
self.sendResult(result, FlutterError( code: SoundStreamErrors.Unknown.rawValue,
message:"Incorrect parameters",
details: nil ))
}
}
}
private func resetEngineForRecord() {
mAudioEngine.inputNode.removeTap(onBus: mRecordBus)
let input = mAudioEngine.inputNode
let inputFormat = input.outputFormat(forBus: mRecordBus)
let converter = AVAudioConverter(from: inputFormat, to: mRecordFormat!)!
let ratio: Float = Float(inputFormat.sampleRate)/Float(mRecordFormat.sampleRate)
input.installTap(onBus: mRecordBus, bufferSize: mRecordBufferSize, format: inputFormat) { (buffer, time) -> Void in
let inputCallback: AVAudioConverterInputBlock = { inNumPackets, outStatus in
outStatus.pointee = .haveData
return buffer
}
let convertedBuffer = AVAudioPCMBuffer(pcmFormat: self.mRecordFormat!, frameCapacity: UInt32(Float(buffer.frameCapacity) / ratio))!
var error: NSError?
let status = converter.convert(to: convertedBuffer, error: &error, withInputFrom: inputCallback)
assert(status != .error)
if (self.mRecordFormat?.commonFormat == AVAudioCommonFormat.pcmFormatInt16) {
let values = self.audioBufferToBytes(convertedBuffer)
self.sendMicData(values)
}
}
}
private func startRecording(_ result: @escaping FlutterResult) {
resetEngineForRecord()
startEngine()
sendRecorderStatus(SoundStreamStatus.Playing)
result(true)
}
private func stopRecording(_ result: @escaping FlutterResult) {
mAudioEngine.inputNode.removeTap(onBus: mRecordBus)
sendRecorderStatus(SoundStreamStatus.Stopped)
result(true)
}
private func sendMicData(_ data: [UInt8]) {
let channelData = FlutterStandardTypedData(bytes: NSData(bytes: data, length: data.count) as Data)
sendEventMethod("dataPeriod", channelData)
}
private func sendRecorderStatus(_ status: SoundStreamStatus) {
sendEventMethod("recorderStatus", status.rawValue)
}
private func setBufferSize(_ call: FlutterMethodCall, _ result: @escaping FlutterResult) {
guard let argsArr = call.arguments as? Dictionary<String,AnyObject>
else {
sendResult(result, FlutterError( code: SoundStreamErrors.Unknown.rawValue,
message:"Incorrect parameters",
details: nil ))
return
}
mPlayerBufferSize = argsArr["bufferSize"] as? AVAudioFrameCount ?? mPlayerBufferSize
// result(true)
}
private func initializePlayer(_ call: FlutterMethodCall, _ result: @escaping FlutterResult) {
guard let argsArr = call.arguments as? Dictionary<String,AnyObject>
else {
sendResult(result, FlutterError( code: SoundStreamErrors.Unknown.rawValue,
message:"Incorrect parameters",
details: nil ))
return
}
mPlayerSampleRate = argsArr["sampleRate"] as? Double ?? mPlayerSampleRate
debugLogging = argsArr["showLogs"] as? Bool ?? debugLogging
mPlayerInputFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatInt16, sampleRate: mPlayerSampleRate, channels: 1, interleaved: true)
sendPlayerStatus(SoundStreamStatus.Initialized)
}
private func attachPlayer() {
mPlayerOutputFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatFloat32, sampleRate: PLAYER_OUTPUT_SAMPLE_RATE, channels: 1, interleaved: true)
mAudioEngine.attach(mPlayerNode)
mAudioEngine.connect(mPlayerNode, to: mAudioEngine.outputNode, format: mPlayerOutputFormat)
// mAudioEngine.connect(mPlayerNode, to: mAudioEngine.mainMixerNode, format: mPlayerOutputFormat)
}
private func startPlayer(_ result: @escaping FlutterResult) {
startEngine()
if !mPlayerNode.isPlaying {
mPlayerNode.play()
}
sendPlayerStatus(SoundStreamStatus.Playing)
result(true)
}
private func stopPlayer(_ result: @escaping FlutterResult) {
if mPlayerNode.isPlaying {
mPlayerNode.stop()
}
sendPlayerStatus(SoundStreamStatus.Stopped)
result(true)
}
private func sendPlayerStatus(_ status: SoundStreamStatus) {
sendEventMethod("playerStatus", status.rawValue)
}
private func writeChunk(_ call: FlutterMethodCall, _ result: @escaping FlutterResult) {
guard let argsArr = call.arguments as? Dictionary<String,AnyObject>,
let data = argsArr["data"] as? FlutterStandardTypedData
else {
sendResult(result, FlutterError( code: SoundStreamErrors.FailedToWriteBuffer.rawValue,
message:"Failed to write Player buffer",
details: nil ))
return
}
let byteData = [UInt8](data.data)
pushPlayerChunk(byteData, result)
}
private func pushPlayerChunk(_ chunk: [UInt8], _ result: @escaping FlutterResult) {
let buffer = bytesToAudioBuffer(chunk)
mPlayerNode.scheduleBuffer(convertBufferFormat(
buffer,
from: mPlayerInputFormat,
to: mPlayerOutputFormat
));
result(true)
}
private func convertBufferFormat(_ buffer: AVAudioPCMBuffer, from: AVAudioFormat, to: AVAudioFormat) -> AVAudioPCMBuffer {
let formatConverter = AVAudioConverter(from: from, to: to)
let ratio: Float = Float(from.sampleRate)/Float(to.sampleRate)
let pcmBuffer = AVAudioPCMBuffer(pcmFormat: to, frameCapacity: UInt32(Float(buffer.frameCapacity) / ratio))!
var error: NSError? = nil
let inputBlock: AVAudioConverterInputBlock = {inNumPackets, outStatus in
outStatus.pointee = .haveData
return buffer
}
formatConverter?.convert(to: pcmBuffer, error: &error, withInputFrom: inputBlock)
return pcmBuffer
}
private func audioBufferToBytes(_ audioBuffer: AVAudioPCMBuffer) -> [UInt8] {
let srcLeft = audioBuffer.int16ChannelData![0]
let bytesPerFrame = audioBuffer.format.streamDescription.pointee.mBytesPerFrame
let numBytes = Int(bytesPerFrame * audioBuffer.frameLength)
// initialize bytes by 0
var audioByteArray = [UInt8](repeating: 0, count: numBytes)
srcLeft.withMemoryRebound(to: UInt8.self, capacity: numBytes) { srcByteData in
audioByteArray.withUnsafeMutableBufferPointer {
$0.baseAddress!.initialize(from: srcByteData, count: numBytes)
}
}
return audioByteArray
}
private func bytesToAudioBuffer(_ buf: [UInt8]) -> AVAudioPCMBuffer {
let frameLength = UInt32(buf.count) / mPlayerInputFormat.streamDescription.pointee.mBytesPerFrame
let audioBuffer = AVAudioPCMBuffer(pcmFormat: mPlayerInputFormat, frameCapacity: frameLength)!
audioBuffer.frameLength = frameLength
let dstLeft = audioBuffer.int16ChannelData![0]
buf.withUnsafeBufferPointer {
let src = UnsafeRawPointer($0.baseAddress!).bindMemory(to: Int16.self, capacity: Int(frameLength))
dstLeft.initialize(from: src, count: Int(frameLength))
}
return audioBuffer
}
}
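No answer is shown for this question, but the '_engine->IsRunning()' exception is typically raised when AVAudioPlayerNode.play() or scheduleBuffer is called while the engine is not actually running, for example after an audio session interruption or route change stopped it. Purely as an illustrative sketch (not a confirmed fix for this plugin), here is a more defensive start that reuses the member names from the code above; the audio session configuration is an addition the original code does not show.

// Hypothetical, more defensive variant of startEngine (illustration only).
private func startEngineSafely() {
    guard !mAudioEngine.isRunning else { return }
    do {
        // Make sure the audio session is configured and active before starting the engine.
        let session = AVAudioSession.sharedInstance()
        try session.setCategory(.playAndRecord, mode: .default, options: [.defaultToSpeaker])
        try session.setActive(true)

        if mAudioEngine.outputNode.outputFormat(forBus: mPlayerBus).channelCount == 0 {
            // A zero channel count means the engine's I/O is in a bad state; reset before starting.
            mAudioEngine.reset()
        }
        try mAudioEngine.start()
    } catch {
        print("Failed to start AVAudioEngine: \(error)")
    }
    // Only touch the player node once the engine is confirmed running;
    // calling play()/scheduleBuffer earlier raises "_engine->IsRunning()".
    if mAudioEngine.isRunning, !mPlayerNode.isPlaying {
        mPlayerNode.play()
    }
}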

-50 error when converting PCM buffer with AVAudioConverter

I'm trying to convert an AVAudioPCMBuffer with a 44100 Hz sample rate to one with a 48000 Hz sample rate, but I always get an exception (a -50 error) when converting. Here's the code:
guard let deviceFormat = AVAudioFormat(standardFormatWithSampleRate: 48000.0, channels: 1) else {
preconditionFailure()
}
// This file is saved as mono 44100
guard let lowToneURL = Bundle.main.url(forResource: "Tone220", withExtension: "wav") else {
preconditionFailure()
}
guard let audioFile = try? AVAudioFile(forReading: lowToneURL) else {
preconditionFailure()
}
let tempBuffer = AVAudioPCMBuffer(pcmFormat: audioFile.processingFormat,
frameCapacity: AVAudioFrameCount(audioFile.length))!
tempBuffer.frameLength = tempBuffer.frameCapacity
do { try audioFile.read(into: tempBuffer) } catch {
assertionFailure("*** Caught: \(error)")
}
guard let converter = AVAudioConverter(from: audioFile.processingFormat, to: deviceFormat) else {
preconditionFailure()
}
guard let convertedBuffer = AVAudioPCMBuffer(pcmFormat: deviceFormat,
frameCapacity: AVAudioFrameCount(audioFile.length)) else {
preconditionFailure()
}
convertedBuffer.frameLength = tempBuffer.frameCapacity
do { try converter.convert(to: convertedBuffer, from: tempBuffer) } catch {
assertionFailure("*** Caught: \(error)")
}
Any ideas?
An Apple engineer answered this on their dev forums. I had missed that the convert(to:from:) variant of AVAudioConverter can't change the sample rate, so you have to use the convert(to:error:withInputFrom:) variant. The docs on that aren't very clear, but I came up with:
private func pcmBufferForFile(filename: String, sampleRate: Float) -> AVAudioPCMBuffer {
guard let newFormat = AVAudioFormat(standardFormatWithSampleRate: Double(sampleRate), channels: 1) else {
preconditionFailure()
}
guard let url = Bundle.main.url(forResource: filename, withExtension: "wav") else {
preconditionFailure()
}
guard let audioFile = try? AVAudioFile(forReading: url) else {
preconditionFailure()
}
guard let tempBuffer = AVAudioPCMBuffer(pcmFormat: audioFile.processingFormat,
frameCapacity: AVAudioFrameCount(audioFile.length)) else {
preconditionFailure()
}
let conversionRatio = sampleRate / Float(tempBuffer.format.sampleRate)
let newLength = Float(audioFile.length) * conversionRatio
guard let newBuffer = AVAudioPCMBuffer(pcmFormat: newFormat,
frameCapacity: AVAudioFrameCount(newLength)) else {
preconditionFailure()
}
do { try audioFile.read(into: tempBuffer) } catch {
preconditionFailure()
}
guard let converter = AVAudioConverter(from: audioFile.processingFormat, to: newFormat) else {
preconditionFailure()
}
var error: NSError?
converter.convert(to: newBuffer, error: &error, withInputFrom: { (packetCount, statusPtr) -> AVAudioBuffer? in
statusPtr.pointee = .haveData
return tempBuffer
})
if error != nil {
print("*** Conversion error: \(error!)")
}
return newBuffer
}
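One note worth adding (not part of the original answer): the input block above always reports .haveData and returns the same tempBuffer, so if the converter asks for input more than once it is fed the same samples again. A common refinement, reusing the converter, tempBuffer, newBuffer, and error variables from pcmBufferForFile, is to hand the buffer over exactly once and then signal end of stream:

// Sketch of an input block that supplies tempBuffer once, then reports end of stream.
var suppliedInput = false
let status = converter.convert(to: newBuffer, error: &error) { _, outStatus in
    if suppliedInput {
        // The whole file has already been handed over; tell the converter we are done.
        outStatus.pointee = .endOfStream
        return nil
    }
    suppliedInput = true
    outStatus.pointee = .haveData
    return tempBuffer
}
if status == .error || error != nil {
    print("*** Conversion error: \(String(describing: error))")
}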

How to get Audio recording buffer data live?

I am working on getting audio data from the iPhone microphone and sending it over a socket. I already tried AVAudioEngine to get the audio buffer, but somehow it is not working, so please suggest a better way to get the recording buffer data live.
override func viewDidLoad() {
super.viewDidLoad()
// initialize engine
engine = AVAudioEngine()
guard nil != engine?.inputNode else {
// #TODO: error out
return
}
SocketIOManager.sharedInstance.socket.on("listen") {data, ack in
let BuffurData:Data = data[0] as! Data
// let playData = self?.audioBufferToNSData(PCMBuffer: BuffurData as! AVAudioPCMBuffer)
do {
// let data = NSData(bytes: &BuffurData, length: BuffurData.count)
let player = try AVAudioPlayer(data:BuffurData)
player.play()
} catch let error as NSError {
print(error.description)
}
print("socket connected \(data)")
}
}
func installTap() {
engine = AVAudioEngine()
guard let engine = engine, let input = engine.inputNode else {
// #TODO: error out
return
}
let format = input.inputFormat(forBus: 0)
input.installTap(onBus: 0, bufferSize:4096, format:format, block: { [weak self] buffer, when in
guard let this = self else {
return
}
// writing to file: for testing purposes only
do {
try this.file!.write(from: buffer)
} catch {
}
if let channel1Buffer = buffer.floatChannelData?[0] {
let test = self?.copyAudioBufferBytes(buffer)
let stram = self?.toNSData(PCMBuffer: buffer)
SocketIOManager.sharedInstance.socket.emit("talk",stram!);
// socket.on("listen", function (data)
/*! #property floatChannelData
#abstract Access the buffer's float audio samples.
#discussion
floatChannelData returns pointers to the buffer's audio samples if the buffer's format is
32-bit float, or nil if it is another format.
The returned pointer is to format.channelCount pointers to float. Each of these pointers
is to "frameLength" valid samples, which are spaced by "stride" samples.
If format.interleaved is false (as with the standard deinterleaved float format), then
the pointers will be to separate chunks of memory. "stride" is 1.
If format.interleaved is true, then the pointers will refer into the same chunk of interleaved
samples, each offset by 1 frame. "stride" is the number of interleaved channels.
*/
// #TODO: send data, better to pass into separate queue for processing
}
})
engine.prepare()
do {
try engine.start()
} catch {
// #TODO: error out
}
}
Try this code:
var audioPlayerQueue = DispatchQueue(label: "audioPlayerQueue", qos: DispatchQoS.userInteractive)
var peerAudioEngine = AVAudioEngine()
var peerAudioPlayer: AVAudioPlayerNode = AVAudioPlayerNode()
var peerInputFormat: AVAudioFormat?
override func viewDidLoad() {
super.viewDidLoad()
// initialize engine
engine = AVAudioEngine()
guard nil != engine?.inputNode else {
// #TODO: error out
return
}
self.peerAudioEngine.attach(self.peerAudioPlayer)
self.peerInputFormat = AVAudioFormat.init(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false)
self.peerAudioEngine.connect(peerAudioPlayer, to: self.peerAudioEngine.mainMixerNode, format: self.peerInputFormat)
do {
peerAudioEngine.prepare()
try peerAudioEngine.start()
} catch let error {
print(error.localizedDescription)
}
SocketIOManager.sharedInstance.socket.on("listen") { data, ack in
let pcmBuffer = self.toPCMBuffer(data: data[0] as! NSData)
self.audioPlayerQueue.async {
self.peerAudioPlayer.scheduleBuffer(pcmBuffer, completionHandler: nil)
if self.peerAudioEngine.isRunning {
self.peerAudioPlayer.play()
} else {
do {
try self.peerAudioEngine.start()
} catch {
print(error.localizedDescription)
}
}
}
print("socket connected \(data)")
}
}
func toPCMBuffer(data: NSData) -> AVAudioPCMBuffer {
let audioFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false) // given NSData audio format
let PCMBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: UInt32(data.length) / audioFormat.streamDescription.pointee.mBytesPerFrame)
PCMBuffer.frameLength = PCMBuffer.frameCapacity
let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData, count: Int(PCMBuffer.format.channelCount))
data.getBytes(UnsafeMutableRawPointer(channels[0]) , length: data.length)
return PCMBuffer
}
func installTap() {
engine = AVAudioEngine()
guard let engine = engine, let input = engine.inputNode else {
// #TODO: error out
return
}
let format = input.inputFormat(forBus: 0)
input.installTap(onBus: 0, bufferSize: 4410, format: format, block: { [weak self] (buffer: AVAudioPCMBuffer, time: AVAudioTime) in
guard let this = self else {
return
}
let stram = this.toNSData(PCMBuffer: buffer)
SocketIOManager.sharedInstance.socket.emit("talk", stram)
})
do {
engine.prepare()
try engine.start()
} catch {
// #TODO: error out
}
}
// Edit: enable the loudspeaker
func speakerEnabled(_ enabled:Bool) -> Bool {
let session = AVAudioSession.sharedInstance()
var options = session.categoryOptions
if (enabled) {
options.insert(.defaultToSpeaker)
} else {
options.remove(.defaultToSpeaker)
}
try! session.setCategory(AVAudioSessionCategoryPlayAndRecord, with: options)
return true
}
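Both the question and the answer call a toNSData(PCMBuffer:) helper that is never shown. As an assumption about what it might look like, here is the inverse of toPCMBuffer above for a mono float buffer (a sketch, not code from the original post):

// Hypothetical counterpart to toPCMBuffer: copies a mono float32 buffer's samples into NSData.
func toNSData(PCMBuffer: AVAudioPCMBuffer) -> NSData {
    let channelCount = 1 // assuming mono, matching toPCMBuffer above
    let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData, count: channelCount)
    let length = Int(PCMBuffer.frameLength * PCMBuffer.format.streamDescription.pointee.mBytesPerFrame)
    return NSData(bytes: channels[0], length: length)
}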

MCSessionDelegate - Getting No Bytes When Receiving Stream

I'm pretty new to Swift/iOS; I just started learning a couple of days ago. I'm using Swift 3 and want to develop two iPhone apps that can stream microphone audio to other iPhone devices using Multipeer Connectivity. The first app would be the speaker's app and the other would be the listener's app.
Previously, I learned how to advertise, browse, and invite peers from this useful tutorial, and I learned how to get audio data from the microphone and convert it to bytes from this answer and this answer. Thanks a lot to Rhythmic Fistman.
So, my code is a combination of what those articles include.
This is the ViewController of the listener app
import UIKit
import MultipeerConnectivity
import AVFoundation
class ColorSwitchViewController: UIViewController {
@IBOutlet weak var connectionsLabel: UILabel!
let colorService = ColorServiceManager()
var engine = AVAudioEngine()
let player = AVAudioPlayerNode()
// Somewhere, schedule the stream in the mainRunLoop, set the delegate and open it. Choose the peer that you want to connect
var inputStream = InputStream()
var inputStreamIsSet: Bool!
var outputStreamIsSet: Bool!
public let peerID = MCPeerID(displayName: UIDevice.current.name)
//MARK: Private Functions
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
func copyAudioBufferBytes(_ audioBuffer: AVAudioPCMBuffer) -> [UInt8] {
let srcLeft = audioBuffer.floatChannelData![0]
let bytesPerFrame = audioBuffer.format.streamDescription.pointee.mBytesPerFrame
let numBytes = Int(bytesPerFrame * audioBuffer.frameLength)
// initialize bytes to 0 (how to avoid?)
var audioByteArray = [UInt8] (repeating: 0, count: numBytes)
// copy data from buffer
srcLeft.withMemoryRebound(to: UInt8.self, capacity: numBytes) { srcByteData in
audioByteArray.withUnsafeMutableBufferPointer {
$0.baseAddress!.initialize(from: srcByteData, count: numBytes)
}
}
return audioByteArray
}
func bytesToAudioBuffer(_ buf: [UInt8]) -> AVAudioPCMBuffer {
let fmt = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: true)
let frameLength = UInt32(buf.count) / fmt.streamDescription.pointee.mBytesPerFrame
let audioBuffer = AVAudioPCMBuffer(pcmFormat: fmt, frameCapacity: frameLength)
audioBuffer.frameLength = frameLength
let dstLeft = audioBuffer.floatChannelData![0]
buf.withUnsafeBufferPointer {
let src = UnsafeRawPointer($0.baseAddress!).bindMemory(to: Float.self, capacity: Int(frameLength))
dstLeft.initialize(from: src, count: Int(frameLength))
}
return audioBuffer
}
override func viewDidLoad() {
super.viewDidLoad()
colorService.delegate = self
}
@IBAction func redTapped() {
self.change(color: .red)
colorService.send(colorName: "red")
}
@IBAction func yellowTapped() {
self.change(color: .yellow)
colorService.send(colorName: "yellow")
}
func change(color : UIColor) {
UIView.animate(withDuration: 0.2) {
self.view.backgroundColor = color
}
}
}
extension ColorSwitchViewController : ColorServiceManagerDelegate {
func connectedDevicesChanged(manager: ColorServiceManager, connectedDevices: [String]) {
OperationQueue.main.addOperation {
self.connectionsLabel.text = "Connections: \(connectedDevices)"
}
}
func colorChanged(manager: ColorServiceManager, colorString: String) {
OperationQueue.main.addOperation {
switch colorString {
case "red":
self.change(color: .red)
case "yellow":
self.change(color: .yellow)
default:
NSLog("%#", "Unknown color value received: \(colorString)")
}
}
}
func streamReceived(manager: ColorServiceManager, stream: InputStream, streamName: String, fromPeer: MCPeerID) {
NSLog("%#", "name " + fromPeer.displayName)
if streamName == "stream" && fromPeer != peerID {
NSLog("%#", "voice received")
stream.schedule(in: RunLoop.current, forMode: .defaultRunLoopMode)
stream.open()
var bytes = [UInt8](repeating: 0, count: 17640)
if (stream.hasBytesAvailable == true) {
NSLog("%#", "has bytes available...")
} else {
NSLog("%#", "has NO byte ...")
}
let result = stream.read(&bytes, maxLength: bytes.count)
if result == 0 {
print("Stream at capacity")
} else if result == -1 {
print("Operation failed: \(String(describing: stream.streamError))")
} else {
print("The number of bytes read is \(result)")
}
let audioBuffer = self.bytesToAudioBuffer(bytes) //Here is where the app crashes
engine.attach(player)
let outputFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: true)
engine.connect(player, to: engine.mainMixerNode, format: outputFormat)
do {
try engine.start()
player.scheduleBuffer(audioBuffer, completionHandler: nil)
player.play()
} catch let error {
print(error.localizedDescription)
}
}
}
}
And the ViewController of the speaker app is similar, except that it contains the code for sending the stream and doesn't contain the code for receiving it.
// ....
override func viewDidLoad() {
super.viewDidLoad()
colorService.delegate = self
}
func startStream() {
let input = engine.inputNode!
engine.attach(player)
let bus = 0
let inputFormat = input.inputFormat(forBus: bus)
engine.connect(player, to: engine.mainMixerNode, format: inputFormat)
NSLog("%#", "sendStream: to \(self.colorService.session.connectedPeers.count) peers")
if self.colorService.session.connectedPeers.count > 0 {
do {
let outputStream = try self.colorService.session.startStream(withName: "stream", toPeer: self.colorService.session.connectedPeers.first!)
outputStream.schedule(in: RunLoop.main, forMode:RunLoopMode.defaultRunLoopMode)
outputStream.open()
let bus = 0
let inputFormat = input.inputFormat(forBus: bus)
input.installTap(onBus: bus, bufferSize: 2048, format: inputFormat, block: {
(buffer: AVAudioPCMBuffer!, time: AVAudioTime!) -> Void in
self.player.scheduleBuffer(buffer)
let audioBuffer = self.copyAudioBufferBytes(buffer)
// NSLog("%#", "speaking...")
let result = outputStream.write(audioBuffer, maxLength: audioBuffer.count)
if result == 0 {
print("Stream at capacity")
} else if result == -1 {
print("Operation failed: \(String(describing: outputStream.streamError))")
} else {
print("The number of bytes written is \(result)")
}
})
try! engine.start()
player.play()
}
catch let error {
NSLog("%#", "Error for sending: \(error)")
}
}
}
func stopStream() {
engine.inputNode?.removeTap(onBus: 0)
player.stop()
}
@IBAction func redTapped() {
self.change(color: .red)
colorService.send(colorName: "red")
self.startStream()
}
@IBAction func yellowTapped() {
self.change(color: .yellow)
colorService.send(colorName: "yellow")
self.stopStream()
}
// ...
Unfortunately, on the listener side, the app receives the stream but no bytes are available; NSLog("%#", "has NO byte ...") is called. I wonder whether the listener app really receives the audio stream or not.
So, what's my mistake here? Any help would be appreciated. Thank you in advance.
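No answer is shown here, but one thing worth checking (as an illustration, not a confirmed fix): streamReceived reads from the InputStream immediately after opening it, before any bytes have had a chance to arrive, which matches the "has NO byte" log. The usual pattern is to set a StreamDelegate and only read when the .hasBytesAvailable event fires, along these lines:

// Illustrative sketch: read the MultipeerConnectivity InputStream from its delegate
// instead of right after open(). Names mirror the listener code above; the engine/player
// setup from streamReceived is assumed to have run already.
extension ColorSwitchViewController: StreamDelegate {
    func stream(_ aStream: Stream, handle eventCode: Stream.Event) {
        guard eventCode.contains(.hasBytesAvailable), let input = aStream as? InputStream else { return }
        var bytes = [UInt8](repeating: 0, count: 17640)
        let count = input.read(&bytes, maxLength: bytes.count)
        guard count > 0 else { return }
        let audioBuffer = bytesToAudioBuffer(Array(bytes.prefix(count)))
        player.scheduleBuffer(audioBuffer, completionHandler: nil)
    }
}
// In streamReceived(...), before opening the stream:
// stream.delegate = self
// stream.schedule(in: RunLoop.main, forMode: .defaultRunLoopMode)
// stream.open()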

Black frames while converting array of UIImages to Video

I'm trying to convert an array of UIImages to a video, but I get a lot of black frames in the resulting file (for example, 4 black frames at the beginning, then 3 good frames, then 3 black frames and 2 good frames, and this pattern repeats until the end of the video).
My code is based on this solution, but I believe the main source of the problem is in this part of the code:
func build(progress: (NSProgress -> Void), success: (NSURL -> Void), failure: (NSError -> Void)) {
//videosizes and path to temp output file
let inputSize = CGSize(width: 568, height: 320)
let outputSize = CGSize(width: 568, height: 320)
var error: NSError?
let documentsPath = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0] as! NSString
let videoOutputURL = NSURL(fileURLWithPath: documentsPath.stringByAppendingPathComponent("TempVideo.mov"))!
NSFileManager.defaultManager().removeItemAtURL(videoOutputURL, error: nil)
videoWriter = AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeMPEG4, error: &error)
if let videoWriter = videoWriter {
let videoSettings: [NSObject : AnyObject] = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : outputSize.width,
AVVideoHeightKey : outputSize.height,
]
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(
assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: [
kCVPixelBufferPixelFormatTypeKey : kCVPixelFormatType_32ARGB,
kCVPixelBufferWidthKey : inputSize.width,
kCVPixelBufferHeightKey : inputSize.height,
]
)
assert(videoWriter.canAddInput(videoWriterInput))
videoWriter.addInput(videoWriterInput)
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
let fps: Int32 = 30
let frameDuration = CMTimeMake(1, fps)
let currentProgress = NSProgress(totalUnitCount: Int64(self.photoURLs.count))
var frameCount: Int64 = 0
for var i = 0; i < self.photoURLs.count - 1; i++ {
var currentFrame = self.photoURLs[i]
var lastFrameTime = CMTimeMake(Int64(i), fps)
var presentationTime = CMTimeAdd(lastFrameTime, frameDuration)
//this one is needed because sometimes videoWriter is not ready, and we have to wait for a while
while videoWriterInput.readyForMoreMediaData == false {
var maxDate = NSDate(timeIntervalSinceNow: 0.5)
var currentRunLoop = NSRunLoop()
currentRunLoop.runUntilDate(maxDate)
}
self.appendPixelBufferForImageAtURL(currentFrame, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime)
frameCount++
currentProgress.completedUnitCount = frameCount
progress(currentProgress)
}
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
if error == nil {
success(videoOutputURL)
}
}
})
} else {
error = NSError(
domain: kErrorDomain,
code: kFailedToStartAssetWriterError,
userInfo: ["description": "AVAssetWriter failed to start writing"]
)
}
}
if let error = error {
failure(error)
}
}
Obviously I'm doing something wrong, but what? I think the problem is here, because some of the images convert without any issues, but there are two more functions for the pixel buffer:
func appendPixelBufferForImageAtURL(image: UIImage, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
var appendSucceeded = true
autoreleasepool {
var pixelBuffer: Unmanaged<CVPixelBuffer>?
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(
kCFAllocatorDefault,
pixelBufferAdaptor.pixelBufferPool,
&pixelBuffer
)
if let pixelBuffer = pixelBuffer where status == 0 {
let managedPixelBuffer = pixelBuffer.takeRetainedValue()
fillPixelBufferFromImage(image, pixelBuffer: managedPixelBuffer)
appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(
managedPixelBuffer,
withPresentationTime: presentationTime
)
} else {
NSLog("error: Failed to allocate pixel buffer from pool")
}
}
return appendSucceeded
}
func fillPixelBufferFromImage(image: UIImage, pixelBuffer: CVPixelBufferRef) {
let imageData = CGDataProviderCopyData(CGImageGetDataProvider(image.CGImage))
let lockStatus:UInt8 = UInt8(CVPixelBufferLockBaseAddress(pixelBuffer, 0))
let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer)
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGBitmapContextCreate(
pixelData,
Int(568),
Int(320),
8,
Int(8 * 320),
rgbColorSpace,
bitmapInfo
)
var imageDataProvider = CGDataProviderCreateWithCFData(imageData)
var imageRef = CGImageCreateWithJPEGDataProvider(imageDataProvider, nil, true, kCGRenderingIntentDefault)
CGContextDrawImage(context, CGRectMake(0, 0, 568, 320), imageRef)
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0)
}
So I was able to solve this by rewriting fillPixelBufferFromImage using an example I found here: CVPixelBufferPool Error (kCVReturnInvalidArgument/-6661). The important changes are that fillPixelBufferFromImage now uses CVPixelBufferGetBytesPerRow(pixelBuffer) for the bitmap context instead of a hard-coded stride, and draws the CGImage directly instead of re-decoding the JPEG data.
Here's the Swift 2 / Xcode 7 GM solution that's working for me:
public func build(progress: (NSProgress -> Void), success: (NSURL -> Void), failure: (NSError -> Void)) {
let inputSize = CGSize(width: 600, height: 600)
let outputSize = CGSize(width: 600, height: 600)
var error: NSError?
let fileManager = NSFileManager.defaultManager()
let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
guard let documentDirectory: NSURL = urls.first else {
fatalError("documentDir Error")
}
let videoOutputURL = documentDirectory.URLByAppendingPathComponent("AssembledVideo.mov")
if NSFileManager.defaultManager().fileExistsAtPath(videoOutputURL.path!) {
do {
try NSFileManager.defaultManager().removeItemAtPath(videoOutputURL.path!)
}catch{
fatalError("Unable to delete file: \(error) : \(__FUNCTION__).")
}
}
guard let videoWriter = try? AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeQuickTimeMovie) else{
fatalError("AVAssetWriter error")
}
let outputSettings = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : NSNumber(float: Float(outputSize.width)),
AVVideoHeightKey : NSNumber(float: Float(outputSize.height)),
]
guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
let sourcePixelBufferAttributesDictionary = [
kCVPixelBufferPixelFormatTypeKey as String: NSNumber(unsignedInt: kCVPixelFormatType_32ARGB),
kCVPixelBufferWidthKey as String: NSNumber(float: Float(inputSize.width)),
kCVPixelBufferHeightKey as String: NSNumber(float: Float(inputSize.height)),
]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(
assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary
)
assert(videoWriter.canAddInput(videoWriterInput))
videoWriter.addInput(videoWriterInput)
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
let fps: Int32 = 1
let frameDuration = CMTimeMake(1, fps)
let currentProgress = NSProgress(totalUnitCount: Int64(self.photoURLs.count))
var frameCount: Int64 = 0
var remainingPhotoURLs = [String](self.photoURLs)
while (videoWriterInput.readyForMoreMediaData && !remainingPhotoURLs.isEmpty) {
let nextPhotoURL = remainingPhotoURLs.removeAtIndex(0)
let lastFrameTime = CMTimeMake(frameCount, fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
if !self.appendPixelBufferForImageAtURL(nextPhotoURL, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
error = NSError(domain: kErrorDomain, code: kFailedToAppendPixelBufferError,
userInfo: [
"description": "AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer",
"rawError": videoWriter.error ?? "(none)"
])
break
}
frameCount++
currentProgress.completedUnitCount = frameCount
progress(currentProgress)
}
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
if error == nil {
success(videoOutputURL)
}
}
})
} else {
error = NSError(domain: kErrorDomain, code: kFailedToStartAssetWriterError,
userInfo: ["description": "AVAssetWriter failed to start writing"]
)
}
if let error = error {
failure(error)
}
}
public func appendPixelBufferForImageAtURL(urlString: String, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
var appendSucceeded = true
autoreleasepool {
if let image = UIImage(contentsOfFile: urlString) {
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer where status == 0 {
let managedPixelBuffer = pixelBuffer
fillPixelBufferFromImage(image.CGImage!, pixelBuffer: managedPixelBuffer)
appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
} else {
NSLog("error: Failed to allocate pixel buffer from pool")
}
}
}
return appendSucceeded
}
func fillPixelBufferFromImage(image: CGImage, pixelBuffer: CVPixelBuffer){
let frameSize = CGSizeMake(CGFloat(CGImageGetWidth(image)), CGFloat(CGImageGetHeight(image)))
CVPixelBufferLockBaseAddress(pixelBuffer, 0)
let data = CVPixelBufferGetBaseAddress(pixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGBitmapContextCreate(data, Int(frameSize.width), Int(frameSize.height), 8, CVPixelBufferGetBytesPerRow(pixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)
CGContextDrawImage(context, CGRectMake(0, 0, CGFloat(CGImageGetWidth(image)), CGFloat(CGImageGetHeight(image))), image)
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0)
}
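For reference, a hypothetical call site for the build function above, assuming it lives in a class named VideoBuilder that is initialized with the photo paths (names invented for the example; Swift 2 syntax to match the answer):

// Hypothetical usage of build(progress:success:failure:).
let builder = VideoBuilder(photoURLs: imagePaths)
builder.build(
    { progress in
        print("Progress: \(progress.completedUnitCount)/\(progress.totalUnitCount)")
    },
    success: { url in
        print("Video written to \(url)")
    },
    failure: { error in
        print("Failed: \(error.localizedDescription)")
    }
)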
Working project files here:
https://github.com/justinlevi/imagesToVideo
