-50 error when converting PCM buffer with AVAudioConverter (iOS)

I'm trying to convert an AVAudioPCMBuffer with a 44100 Hz sample rate to one with a 48000 Hz sample rate, but I always get an exception (-50 error) when converting. Here's the code:
guard let deviceFormat = AVAudioFormat(standardFormatWithSampleRate: 48000.0, channels: 1) else {
    preconditionFailure()
}
// This file is saved as mono 44100
guard let lowToneURL = Bundle.main.url(forResource: "Tone220", withExtension: "wav") else {
    preconditionFailure()
}
guard let audioFile = try? AVAudioFile(forReading: lowToneURL) else {
    preconditionFailure()
}
let tempBuffer = AVAudioPCMBuffer(pcmFormat: audioFile.processingFormat,
                                  frameCapacity: AVAudioFrameCount(audioFile.length))!
tempBuffer.frameLength = tempBuffer.frameCapacity
do { try audioFile.read(into: tempBuffer) } catch {
    assertionFailure("*** Caught: \(error)")
}
guard let converter = AVAudioConverter(from: audioFile.processingFormat, to: deviceFormat) else {
    preconditionFailure()
}
guard let convertedBuffer = AVAudioPCMBuffer(pcmFormat: deviceFormat,
                                             frameCapacity: AVAudioFrameCount(audioFile.length)) else {
    preconditionFailure()
}
convertedBuffer.frameLength = tempBuffer.frameCapacity
do { try converter.convert(to: convertedBuffer, from: tempBuffer) } catch {
    assertionFailure("*** Caught: \(error)")
}
Any ideas?

An Apple engineer answered this on their dev forums. I had missed that the convert(to:from:) variant of AVAudioConverter can't convert sample rate, so you have to use the convert(to:error:withInputFrom:) variant. The docs on that aren't too clear, but I came up with:
private func pcmBufferForFile(filename: String, sampleRate: Float) -> AVAudioPCMBuffer {
    guard let newFormat = AVAudioFormat(standardFormatWithSampleRate: Double(sampleRate), channels: 1) else {
        preconditionFailure()
    }
    guard let url = Bundle.main.url(forResource: filename, withExtension: "wav") else {
        preconditionFailure()
    }
    guard let audioFile = try? AVAudioFile(forReading: url) else {
        preconditionFailure()
    }
    guard let tempBuffer = AVAudioPCMBuffer(pcmFormat: audioFile.processingFormat,
                                            frameCapacity: AVAudioFrameCount(audioFile.length)) else {
        preconditionFailure()
    }
    let conversionRatio = sampleRate / Float(tempBuffer.format.sampleRate)
    let newLength = Float(audioFile.length) * conversionRatio
    guard let newBuffer = AVAudioPCMBuffer(pcmFormat: newFormat,
                                           frameCapacity: AVAudioFrameCount(newLength)) else {
        preconditionFailure()
    }
    do { try audioFile.read(into: tempBuffer) } catch {
        preconditionFailure()
    }
    guard let converter = AVAudioConverter(from: audioFile.processingFormat, to: newFormat) else {
        preconditionFailure()
    }
    var error: NSError?
    // Supply the source buffer exactly once; answering .haveData on every
    // callback would feed the same samples to the converter repeatedly.
    var haveSuppliedData = false
    converter.convert(to: newBuffer, error: &error, withInputFrom: { (packetCount, statusPtr) -> AVAudioBuffer? in
        if haveSuppliedData {
            statusPtr.pointee = .noDataNow
            return nil
        }
        haveSuppliedData = true
        statusPtr.pointee = .haveData
        return tempBuffer
    })
    if let error = error {
        print("*** Conversion error: \(error)")
    }
    return newBuffer
}
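A minimal usage sketch (the call below is hypothetical, reusing the Tone220.wav from the question):
// Resample the bundled 44100 Hz tone to 48000 Hz for the output device.
let buffer = pcmBufferForFile(filename: "Tone220", sampleRate: 48000.0)
print("converted \(buffer.frameLength) frames at \(buffer.format.sampleRate) Hz")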

Related

WebRTC iOS: Record Remote Audio stream using WebRTC

I am working on an audio streaming application with recording functionality for a receiver.
I got stuck at the point where the user wants to record the audio stream on the receiver side.
Below is my code.
Initialisation
var engine = AVAudioEngine()
var recordingFile: AVAudioFile?
var audioPlayer: AVAudioPlayer?
let player = AVAudioPlayerNode()
var isRecording: Bool = false
Initialise AudioEngine
func initializeAudioEngine() {
    let input = self.engine.inputNode
    let format = input.inputFormat(forBus: 0)
    self.engine.attach(self.player)
    let mainMixerNode = self.engine.mainMixerNode
    self.engine.connect(input, to: mainMixerNode, format: format)
    self.engine.prepare()
    do {
        try self.engine.start()
        self.startRecording()
    } catch (let error) {
        print("START FAILED", error)
    }
}
Start Recording
func startRecording() {
    self.createRecordingFile()
    self.engine.mainMixerNode.installTap(onBus: 0,
                                         bufferSize: 1024,
                                         format: self.engine.mainMixerNode.outputFormat(forBus: 0)) { (buffer, time) -> Void in
        do {
            self.isRecording = true
            try self.recordingFile?.write(from: buffer)
        } catch (let error) {
            print("RECORD ERROR", error)
        }
    }
}
Create Buffer
private func createBuffer(forFileNamed fileName: String) -> AVAudioPCMBuffer? {
    var res: AVAudioPCMBuffer?
    if let fileURL = Bundle.main.url(forResource: fileName, withExtension: "caf") {
        do {
            let file = try AVAudioFile(forReading: fileURL)
            res = AVAudioPCMBuffer(pcmFormat: file.processingFormat, frameCapacity: AVAudioFrameCount(file.length))
            if let res = res {
                do {
                    try file.read(into: res)
                } catch (let error) {
                    print("ERROR read file", error)
                }
            }
        } catch (let error) {
            print("ERROR file creation", error)
        }
    }
    return res
}
Stop Recording
func stopRecording() {
    self.engine.mainMixerNode.removeTap(onBus: 0)
}
I am trying to record using earphones, but it's not working.
It will work: once you set up an AVAudioSession with the category AVAudioSessionCategoryPlayAndRecord and call audiosession.setActive(true), it records whatever audio is routed to the device.
WebRTC does not have any internal API to start or stop recording, so we can try using AVAudioSession instead.
First, set up the audio session:
func setUPAudioSession() -> Bool {
    // Use the shared session; a freshly created AVAudioSession() instance does not configure the app's audio.
    let audiosession = AVAudioSession.sharedInstance()
    do {
        try audiosession.setCategory(AVAudioSessionCategoryPlayAndRecord)
    } catch (let error) {
        print("--> \(error.localizedDescription)")
    }
    do {
        try audiosession.setActive(true)
    } catch (let error) {
        print("--> \(error.localizedDescription)")
    }
    return audiosession.isInputAvailable
}
After setting up the audio session, start recording as below:
func startRecording() -> Bool {
    var settings: [String: Any] = [:]
    settings[AVFormatIDKey] = kAudioFormatLinearPCM
    settings[AVSampleRateKey] = 8000.0
    settings[AVNumberOfChannelsKey] = 1
    settings[AVLinearPCMBitDepthKey] = 16
    settings[AVLinearPCMIsBigEndianKey] = false
    settings[AVLinearPCMIsFloatKey] = false
    settings[AVEncoderAudioQualityKey] = AVAudioQuality.max.rawValue // key and value were swapped in the original
    // The recorded file is saved into the Documents directory
    let searchPaths: [String] = NSSearchPathForDirectoriesInDomains(.documentDirectory, .allDomainsMask, true)
    guard let documentPath_ = searchPaths.first else { return false }
    let pathToSave = "\(documentPath_)/\(dateString)" // dateString assumed defined elsewhere
    let url = URL(fileURLWithPath: pathToSave)
    recorder = try? AVAudioRecorder(url: url, settings: settings)
    // Initialize delegate, metering, etc.
    recorder?.delegate = self
    recorder?.isMeteringEnabled = true
    recorder?.prepareToRecord()
    if let recordIs = recorder {
        return recordIs.record()
    }
    return false
}
Play recorded file
func playrecodingFile() {
    // Get the path of the file recorded by the previous method
    let searchPaths: [String] = NSSearchPathForDirectoriesInDomains(.documentDirectory, .allDomainsMask, true)
    guard let documentPath_ = searchPaths.first else { return }
    let fileManager = FileManager.default
    guard let arrayListOfRecordSound = try? fileManager.contentsOfDirectory(atPath: documentPath_),
          let firstSound = arrayListOfRecordSound.first else { return }
    let selectedSound = "\(documentPath_)/\(firstSound)"
    let url = URL(fileURLWithPath: selectedSound)
    // Keep a strong reference (player assumed to be a property); a local AVAudioPlayer is deallocated before it plays
    player = try? AVAudioPlayer(contentsOf: url)
    player?.delegate = self
    try? AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback)
    player?.prepareToPlay()
    player?.play()
}
Stop recording
func stopRecording() {
    recorder?.stop()
}
Pause recording
func pauseRecording() {
    recorder?.pause()
}

base64 String to Audio mp3 in iOS?

I have a base64 string and want to convert it into an mp3 audio file.
var audioData = Data(base64Encoded: strBase64, options: .ignoreUnknownCharacters)
print(audioData)
It always returns nil.
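Data(base64Encoded:) returns nil whenever the input isn't valid Base64. One common cause (an assumption about your input, not something shown above) is a data URI prefix such as data:audio/mp3;base64, in front of the payload; strip everything before the payload first:
// Hypothetical cleanup: keep only what follows the "base64," marker, if present.
let cleaned = strBase64.components(separatedBy: "base64,").last ?? strBase64
let audioData = Data(base64Encoded: cleaned, options: .ignoreUnknownCharacters)
Once the string decodes, write the data to a file and play it: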
let base64String: String = "some sample base64"
let audioData = Data(base64Encoded: base64String, options: .ignoreUnknownCharacters)
if let audData = audioData {
    self.playAudio(audioData: audData)
}
func playAudio(audioData: Data) {
    // documentsDirectory assumed defined elsewhere, e.g. the first URL from
    // FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
    let filename = documentsDirectory.appendingPathComponent("output.mp3")
    do {
        try audioData.write(to: filename, options: .atomicWrite)
        do {
            audioPlayer = try AVAudioPlayer(contentsOf: filename)
            guard let player = audioPlayer else { return }
            player.prepareToPlay()
            player.play()
        } catch let error {
            print(error.localizedDescription)
        }
    } catch {
        print("write failed: \(error.localizedDescription)")
    }
}

ReplayKit asset writer status fails randomly

Use Case
I'm using the iOS 11 ReplayKit framework to record frames from the screen, and audio from both the app and the microphone.
Problem
Randomly, when I call .append(sampleBuffer), I get AVAssetWriterStatus.failed, with the AssetWriter.Error showing
Error Domain=AVFoundationErrorDomain Code=-11823 "Cannot Save" UserInfo={NSLocalizedRecoverySuggestion=Try saving again., NSLocalizedDescription=Cannot Save, NSUnderlyingError=0x1c044c360 {Error Domain=NSOSStatusErrorDomain Code=-12412 "(null)"}}
Side issue: I play a repeating sound while the app is recording, to verify that audio is captured, but the sound stops when I start recording, even when the video and external mic audio are working.
If you require more info, I can upload the other code to GitHub too.
Ideas
Since the recording sometimes saves (I can export it to the Photos app and replay the video), I think it must be an async issue where I'm doing things out of order. Please let me know if you see any!
One idea I will try is saving to my own folder inside /Documents instead of directly to /Documents, in case of weird permission errors. Although I'd expect that to cause consistent errors rather than only sometimes breaking.
My Code
func startRecording() {
    guard let firstDocumentDirectoryPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true).first else { return }
    let directoryContents = try! FileManager.default.contentsOfDirectory(at: URL(fileURLWithPath: firstDocumentDirectoryPath), includingPropertiesForKeys: nil, options: [])
    print(directoryContents)
    videoURL = URL(fileURLWithPath: firstDocumentDirectoryPath.appending("/\(arc4random()).mp4"))
    print(videoURL.absoluteString)
    assetWriter = try! AVAssetWriter(url: videoURL, fileType: AVFileType.mp4)
    let compressionProperties: [String: Any] = [...]
    let videoSettings: [String: Any] = [...]
    let audioSettings: [String: Any] = [...]
    videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
    audioMicInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
    audioAppInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
    guard let assetWriter = assetWriter else { return }
    guard let videoInput = videoInput else { return }
    guard let audioAppInput = audioAppInput else { return }
    guard let audioMicInput = audioMicInput else { return }
    videoInput.mediaTimeScale = 60
    videoInput.expectsMediaDataInRealTime = true
    audioMicInput.expectsMediaDataInRealTime = true
    audioAppInput.expectsMediaDataInRealTime = true
    if assetWriter.canAdd(videoInput) {
        assetWriter.add(videoInput)
    }
    if assetWriter.canAdd(audioAppInput) {
        assetWriter.add(audioAppInput)
    }
    if assetWriter.canAdd(audioMicInput) {
        assetWriter.add(audioMicInput)
    }
    assetWriter.movieTimeScale = 60
    RPScreenRecorder.shared().startCapture(handler: recordingHandler(sampleBuffer:sampleBufferType:error:)) { (error: Error?) in
        if error != nil {
            print("RPScreenRecorder.shared().startCapture: \(error.debugDescription)")
        } else {
            print("start capture complete")
        }
    }
}
func recordingHandler(sampleBuffer: CMSampleBuffer, sampleBufferType: RPSampleBufferType, error: Error?) {
    if error != nil {
        print("recordingHandler: \(error.debugDescription)")
    }
    if CMSampleBufferDataIsReady(sampleBuffer) {
        guard let assetWriter = assetWriter else { return }
        guard let videoInput = videoInput else { return }
        guard let audioAppInput = audioAppInput else { return }
        guard let audioMicInput = audioMicInput else { return }
        if assetWriter.status == AVAssetWriterStatus.unknown {
            print("AVAssetWriterStatus.unknown")
            if !assetWriter.startWriting() {
                return
            }
            assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
        }
        if assetWriter.status == AVAssetWriterStatus.failed {
            print("AVAssetWriterStatus.failed")
            print("assetWriter.error: \(assetWriter.error.debugDescription)")
            return
        }
        if sampleBufferType == RPSampleBufferType.video {
            if videoInput.isReadyForMoreMediaData {
                print("=appending video data")
                videoInput.append(sampleBuffer)
            }
        }
        if sampleBufferType == RPSampleBufferType.audioApp {
            if audioAppInput.isReadyForMoreMediaData {
                print("==appending app audio data")
                audioAppInput.append(sampleBuffer)
            }
        }
        if sampleBufferType == RPSampleBufferType.audioMic {
            if audioMicInput.isReadyForMoreMediaData {
                print("===appending mic audio data")
                audioMicInput.append(sampleBuffer)
            }
        }
    }
}
func stopRecording() {
    RPScreenRecorder.shared().stopCapture { (error) in
        guard let assetWriter = self.assetWriter else { return }
        guard let videoInput = self.videoInput else { return }
        guard let audioAppInput = self.audioAppInput else { return }
        guard let audioMicInput = self.audioMicInput else { return }
        if error != nil {
            print("recordingHandler: \(error.debugDescription)")
        } else {
            videoInput.markAsFinished()
            audioMicInput.markAsFinished()
            audioAppInput.markAsFinished()
            assetWriter.finishWriting(completionHandler: {
                print(self.videoURL)
                self.saveToCameraRoll(URL: self.videoURL)
            })
        }
    }
}
I got it to work. I believe it was indeed an async issue. The problem, for some reason, is that you must make sure
assetWriter.startWriting()
assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
happen strictly serially.
Change your code from this:
if assetWriter.status == AVAssetWriterStatus.unknown {
    print("AVAssetWriterStatus.unknown")
    if !assetWriter.startWriting() {
        return
    }
    assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
}
to this:
DispatchQueue.main.async { [weak self] in
    // Bind a strong reference first; `!self?.assetWriter.startWriting()` would not compile on an optional chain.
    guard let assetWriter = self?.assetWriter else { return }
    if assetWriter.status == AVAssetWriterStatus.unknown {
        print("AVAssetWriterStatus.unknown")
        if !assetWriter.startWriting() {
            return
        }
        assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
    }
}
Or even better, move the whole block inside CMSampleBufferDataIsReady, i.e.
if CMSampleBufferDataIsReady(sampleBuffer) {
    DispatchQueue.main.async { [weak self] in
        ...
        ...
    }
}
Let me know if it works!
I had a similar issue. I fixed it by first checking whether the file at videoURL already exists; if so, remove it first and the error goes away.
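A minimal sketch of that check (assuming the same videoURL property used above):
// Remove any stale file at videoURL before creating the AVAssetWriter.
if FileManager.default.fileExists(atPath: videoURL.path) {
    try? FileManager.default.removeItem(at: videoURL)
}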

How to get Audio recording buffer data live?

I am working on getting audio data from the iPhone mic and sending it to a socket. I already tried AVAudioEngine to get the audio buffer, but somehow it's not working. Can you please suggest a better way to get recording buffer data live?
override func viewDidLoad() {
    super.viewDidLoad()
    // initialize engine
    engine = AVAudioEngine()
    guard nil != engine?.inputNode else {
        // #TODO: error out
        return
    }
    SocketIOManager.sharedInstance.socket.on("listen") { data, ack in
        let BuffurData: Data = data[0] as! Data
        // let playData = self?.audioBufferToNSData(PCMBuffer: BuffurData as! AVAudioPCMBuffer)
        do {
            // let data = NSData(bytes: &BuffurData, length: BuffurData.count)
            let player = try AVAudioPlayer(data: BuffurData)
            player.play()
        } catch let error as NSError {
            print(error.description)
        }
        print("socket connected \(data)")
    }
}
func installTap() {
    engine = AVAudioEngine()
    guard let engine = engine, let input = engine.inputNode else {
        // #TODO: error out
        return
    }
    let format = input.inputFormat(forBus: 0)
    input.installTap(onBus: 0, bufferSize: 4096, format: format, block: { [weak self] buffer, when in
        guard let this = self else {
            return
        }
        // writing to file: for testing purposes only
        do {
            try this.file!.write(from: buffer)
        } catch {
        }
        if let channel1Buffer = buffer.floatChannelData?[0] {
            let test = this.copyAudioBufferBytes(buffer)
            let stram = this.toNSData(PCMBuffer: buffer)
            SocketIOManager.sharedInstance.socket.emit("talk", stram)
            /*! @property floatChannelData
                @abstract Access the buffer's float audio samples.
                @discussion
                    floatChannelData returns pointers to the buffer's audio samples if the buffer's format is
                    32-bit float, or nil if it is another format.
                    The returned pointer is to format.channelCount pointers to float. Each of these pointers
                    is to "frameLength" valid samples, which are spaced by "stride" samples.
                    If format.interleaved is false (as with the standard deinterleaved float format), then
                    the pointers will be to separate chunks of memory. "stride" is 1.
                    If format.interleaved is true, then the pointers will refer into the same chunk of interleaved
                    samples, each offset by 1 frame. "stride" is the number of interleaved channels.
            */
            // #TODO: send data, better to pass into separate queue for processing
        }
    })
    engine.prepare()
    do {
        try engine.start()
    } catch {
        // #TODO: error out
    }
}
Try this code:
var audioPlayerQueue = DispatchQueue(label: "audioPlayerQueue", qos: DispatchQoS.userInteractive)
var peerAudioPlayer: AVAudioPlayerNode = AVAudioPlayerNode()
var peerInputFormat: AVAudioFormat?
override func viewDidLoad() {
    super.viewDidLoad()
    // initialize engine
    engine = AVAudioEngine()
    guard nil != engine?.inputNode else {
        // #TODO: error out
        return
    }
    // peerAudioEngine is assumed to be a second, playback-only engine property declared alongside `engine`
    self.peerAudioEngine.attach(self.peerAudioPlayer)
    self.peerInputFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false)
    self.peerAudioEngine.connect(self.peerAudioPlayer, to: self.peerAudioEngine.mainMixerNode, format: self.peerInputFormat)
    do {
        self.peerAudioEngine.prepare()
        try self.peerAudioEngine.start()
    } catch let error {
        print(error.localizedDescription)
    }
    SocketIOManager.sharedInstance.socket.on("listen") { data, ack in
        // the remote samples arrive as raw bytes; rebuild a PCM buffer from them
        let pcmBuffer = self.toPCMBuffer(data: data[0] as! NSData)
        self.audioPlayerQueue.async {
            self.peerAudioPlayer.scheduleBuffer(pcmBuffer, completionHandler: nil)
            if self.peerAudioEngine.isRunning {
                self.peerAudioPlayer.play()
            } else {
                do {
                    try self.peerAudioEngine.start()
                } catch {
                    print(error.localizedDescription)
                }
            }
        }
        print("socket connected \(data)")
    }
}
func toPCMBuffer(data: NSData) -> AVAudioPCMBuffer {
    // the incoming NSData is assumed to be deinterleaved 32-bit float mono at 44100 Hz
    let audioFormat = AVAudioFormat(commonFormat: AVAudioCommonFormat.pcmFormatFloat32, sampleRate: 44100, channels: 1, interleaved: false)
    let PCMBuffer = AVAudioPCMBuffer(pcmFormat: audioFormat, frameCapacity: UInt32(data.length) / audioFormat.streamDescription.pointee.mBytesPerFrame)
    PCMBuffer.frameLength = PCMBuffer.frameCapacity
    let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData, count: Int(PCMBuffer.format.channelCount))
    data.getBytes(UnsafeMutableRawPointer(channels[0]), length: data.length)
    return PCMBuffer
}
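Note that toNSData(PCMBuffer:) is referenced in both snippets but never shown; a minimal inverse of toPCMBuffer above might look like this (a sketch, under the same mono float32 assumption):
func toNSData(PCMBuffer: AVAudioPCMBuffer) -> NSData {
    // Wrap the single float channel's samples in NSData.
    let channels = UnsafeBufferPointer(start: PCMBuffer.floatChannelData, count: 1)
    let length = Int(PCMBuffer.frameLength * PCMBuffer.format.streamDescription.pointee.mBytesPerFrame)
    return NSData(bytes: channels[0], length: length)
}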
func installTap() {
    engine = AVAudioEngine()
    guard let engine = engine, let input = engine.inputNode else {
        // #TODO: error out
        return
    }
    let format = input.inputFormat(forBus: 0)
    // [weak self] was missing in the original, so `self` could not be optional inside the block
    input.installTap(onBus: 0, bufferSize: 4410, format: format, block: { [weak self] (buffer: AVAudioPCMBuffer, time: AVAudioTime) in
        guard let this = self else {
            return
        }
        let stram = this.toNSData(PCMBuffer: buffer)
        SocketIOManager.sharedInstance.socket.emit("talk", stram)
    })
    do {
        engine.prepare()
        try engine.start()
    } catch {
        // #TODO: error out
    }
}
// Edit: to enable the loudspeaker
func speakerEnabled(_ enabled: Bool) -> Bool {
    let session = AVAudioSession.sharedInstance()
    var options = session.categoryOptions
    if enabled {
        options.insert(.defaultToSpeaker)
    } else {
        options.remove(.defaultToSpeaker)
    }
    try! session.setCategory(AVAudioSessionCategoryPlayAndRecord, with: options)
    return true
}

How to start video recording automatically when the camera opens?

I have read lots of tutorials on video players, but how do I start recording as soon as the camera opens, in a custom view, with a time limit?
Here is a working solution in Swift:
var session: AVCaptureSession?
var userreponsevideoData = NSData()
var userreponsethumbimageData = NSData()

override func viewDidLoad() {
    super.viewDidLoad()
    createSession()
}

func stopRecording() {
    session?.stopRunning()
}

func createSession() {
    var input: AVCaptureDeviceInput?
    let movieFileOutput = AVCaptureMovieFileOutput()
    videosPreviewLayer?.frame.size = photoPreviewImageView.frame.size
    session = AVCaptureSession()
    do {
        input = try AVCaptureDeviceInput(device: self.cameraWithPosition(position: .back)!)
    } catch {
        print("camera input error: \(error)")
        return
    }
    session?.addInput(input!)
    videosPreviewLayer = AVCaptureVideoPreviewLayer(session: session!)
    videosPreviewLayer?.frame.size = self.photoPreviewImageView.frame.size
    videosPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    videosPreviewLayer?.connection?.videoOrientation = .portrait
    photoPreviewImageView.layer.sublayers?.forEach { $0.removeFromSuperlayer() }
    photoPreviewImageView.layer.addSublayer(videosPreviewLayer!)
    switchCameraButton.isHidden = true
    flashButton.isHidden = true
    msgLabel.isHidden = true
    galleryCollectionView.isHidden = true
    timerLabel.isHidden = false
    let documentsURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    fileURL = URL(string: "\(documentsURL.appendingPathComponent("temp"))" + ".mov")
    print("*****fileurl%#", fileURL ?? "00000")
    // 600 at a timescale of 10 gives a 60-second recording limit
    let maxDuration: CMTime = CMTimeMake(600, 10)
    movieFileOutput.maxRecordedDuration = maxDuration
    movieFileOutput.minFreeDiskSpaceLimit = 1024 * 1024
    if self.session!.canAddOutput(movieFileOutput) {
        self.session!.addOutput(movieFileOutput)
    }
    session?.startRunning()
    movieFileOutput.startRecording(to: fileURL!, recordingDelegate: self)
}

func cameraWithPosition(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    let devices = AVCaptureDevice.devices(for: AVMediaType.video)
    for device in devices {
        if device.position == position {
            return device
        }
    }
    return nil
}
}
extension SwipeGallerymainViewController: AVCaptureFileOutputRecordingDelegate {
    func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
        print(outputFileURL)
        let filemainurl = outputFileURL
        do {
            let asset = AVURLAsset(url: filemainurl, options: nil)
            print(asset)
            // Grab a thumbnail image from the first frame
            let imgGenerator = AVAssetImageGenerator(asset: asset)
            imgGenerator.appliesPreferredTrackTransform = true
            let cgImage = try imgGenerator.copyCGImage(at: CMTimeMake(0, 1), actualTime: nil)
            let uiImage = UIImage(cgImage: cgImage)
            previewImage = uiImage
            userreponsethumbimageData = NSData(contentsOf: filemainurl)!
            print(userreponsethumbimageData.length)
            print(uiImage)
        } catch let error as NSError {
            print(error)
            return
        }
        let VideoFilePath = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent("mergeVideo\(arc4random() % 1000)d").appendingPathExtension("mp4").absoluteString
        if FileManager.default.fileExists(atPath: VideoFilePath) {
            print("exist")
            do {
                try FileManager.default.removeItem(atPath: VideoFilePath)
            } catch { }
        }
        let tempfilemainurl = NSURL(string: VideoFilePath)!
        let sourceAsset = AVURLAsset(url: filemainurl, options: nil)
        let assetExport: AVAssetExportSession = AVAssetExportSession(asset: sourceAsset, presetName: AVAssetExportPresetMediumQuality)!
        assetExport.outputFileType = AVFileType.mov // note: writes a QuickTime container even though the path ends in .mp4
        assetExport.outputURL = tempfilemainurl as URL
        assetExport.exportAsynchronously { () -> Void in
            switch assetExport.status {
            case AVAssetExportSessionStatus.completed:
                DispatchQueue.main.async {
                    do {
                        self.userreponsevideoData = try NSData(contentsOf: tempfilemainurl as URL, options: NSData.ReadingOptions())
                        print("MB - \(self.userreponsevideoData.length) byte")
                        self.isVideoLoad = true
                        self.performSegue(withIdentifier: "previewSegue", sender: self)
                    } catch {
                        print(error)
                    }
                }
            case AVAssetExportSessionStatus.failed:
                print("failed \(String(describing: assetExport.error))")
            case AVAssetExportSessionStatus.cancelled:
                print("cancelled \(String(describing: assetExport.error))")
            default:
                print("complete")
            }
        }
    }
    func fileOutput(_ output: AVCaptureFileOutput, didStartRecordingTo fileURL: URL, from connections: [AVCaptureConnection]) {
        // Swift 4 delegate signature; the original used the obsolete Objective-C style method name
        print(fileURL)
    }
}
I have done the same thing in Java using the Robot class on a laptop. The Robot class can send keystrokes and mouse movements automatically. You could try the same idea on your platform: automate the button-press event so that recording starts.
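In Swift terms, "automating the button press" just means invoking the recording path yourself once the view is on screen, which the viewDidLoad call to createSession() above already does; a hypothetical viewDidAppear variant:
// Hypothetical: start capture as soon as the camera view becomes visible.
override func viewDidAppear(_ animated: Bool) {
    super.viewDidAppear(animated)
    createSession() // starts the session and begins recording immediately (see above)
}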
