ReplayKit asset writer status fails randomly - iOS

Use Case
I'm using the iOS 11 ReplayKit framework to record frames from the screen, and audio from both the app and the microphone.
Problem
Randomly, when I call .append(sampleBuffer), I get AVAssetWriterStatus.failed, with assetWriter.error showing:
Error Domain=AVFoundationErrorDomain Code=-11823 "Cannot Save" UserInfo={NSLocalizedRecoverySuggestion=Try saving again., NSLocalizedDescription=Cannot Save, NSUnderlyingError=0x1c044c360 {Error Domain=NSOSStatusErrorDomain Code=-12412 "(null)"}}
Side issue: I play a repeating sound while the app is recording to verify that the audio is recorded, but the sound stops when I start recording, even though the video and external mic audio are working.
If you require more info, I can upload the other code to GitHub too.
Ideas
Since the recording sometimes saves (I can export to the Photos app and replay the video), I think it must be an async issue where I'm loading things out of order. Please let me know if you see any!
One idea I will try is saving to my own folder in /Documents instead of directly to /Documents, in case of weird permission errors, although I'd expect that to cause consistent errors rather than only sometimes breaking. A sketch of that idea is below.
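For what it's worth, a minimal sketch of the subfolder idea, assuming a folder name of "Recordings" (my choice, not from the original code):

func makeRecordingsDirectory() -> URL? {
    // A dedicated subfolder under Documents for recordings.
    guard let documents = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else { return nil }
    let recordingsURL = documents.appendingPathComponent("Recordings", isDirectory: true)
    do {
        // withIntermediateDirectories: true makes this a no-op if the folder already exists.
        try FileManager.default.createDirectory(at: recordingsURL, withIntermediateDirectories: true, attributes: nil)
        return recordingsURL
    } catch {
        print("Couldn't create recordings directory: \(error)")
        return nil
    }
}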
My Code
func startRecording() {
    guard let firstDocumentDirectoryPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true).first else { return }
    let directoryContents = try! FileManager.default.contentsOfDirectory(at: URL(fileURLWithPath: firstDocumentDirectoryPath), includingPropertiesForKeys: nil, options: [])
    print(directoryContents)
    videoURL = URL(fileURLWithPath: firstDocumentDirectoryPath.appending("/\(arc4random()).mp4"))
    print(videoURL.absoluteString)
    assetWriter = try! AVAssetWriter(url: videoURL, fileType: AVFileType.mp4)

    let compressionProperties: [String: Any] = [...]
    let videoSettings: [String: Any] = [...]
    let audioSettings: [String: Any] = [...]

    videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
    audioMicInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
    audioAppInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)

    guard let assetWriter = assetWriter else { return }
    guard let videoInput = videoInput else { return }
    guard let audioAppInput = audioAppInput else { return }
    guard let audioMicInput = audioMicInput else { return }

    videoInput.mediaTimeScale = 60
    videoInput.expectsMediaDataInRealTime = true
    audioMicInput.expectsMediaDataInRealTime = true
    audioAppInput.expectsMediaDataInRealTime = true

    if assetWriter.canAdd(videoInput) {
        assetWriter.add(videoInput)
    }
    if assetWriter.canAdd(audioAppInput) {
        assetWriter.add(audioAppInput)
    }
    if assetWriter.canAdd(audioMicInput) {
        assetWriter.add(audioMicInput)
    }
    assetWriter.movieTimeScale = 60

    RPScreenRecorder.shared().startCapture(handler: recordingHandler(sampleBuffer:sampleBufferType:error:)) { (error: Error?) in
        if error != nil {
            print("RPScreenRecorder.shared().startCapture: \(error.debugDescription)")
        } else {
            print("start capture complete")
        }
    }
}
func recordingHandler(sampleBuffer: CMSampleBuffer, sampleBufferType: RPSampleBufferType, error: Error?) {
    if error != nil {
        print("recordingHandler: \(error.debugDescription)")
    }
    if CMSampleBufferDataIsReady(sampleBuffer) {
        guard let assetWriter = assetWriter else { return }
        guard let videoInput = videoInput else { return }
        guard let audioAppInput = audioAppInput else { return }
        guard let audioMicInput = audioMicInput else { return }

        if assetWriter.status == AVAssetWriterStatus.unknown {
            print("AVAssetWriterStatus.unknown")
            if !assetWriter.startWriting() {
                return
            }
            assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
        }
        if assetWriter.status == AVAssetWriterStatus.failed {
            print("AVAssetWriterStatus.failed")
            print("assetWriter.error: \(assetWriter.error.debugDescription)")
            return
        }
        if sampleBufferType == RPSampleBufferType.video {
            if videoInput.isReadyForMoreMediaData {
                print("=appending video data")
                videoInput.append(sampleBuffer)
            }
        }
        if sampleBufferType == RPSampleBufferType.audioApp {
            if audioAppInput.isReadyForMoreMediaData {
                print("==appending app audio data")
                audioAppInput.append(sampleBuffer)
            }
        }
        if sampleBufferType == RPSampleBufferType.audioMic {
            if audioMicInput.isReadyForMoreMediaData {
                print("===appending mic audio data")
                audioMicInput.append(sampleBuffer)
            }
        }
    }
}
func stopRecording() {
    RPScreenRecorder.shared().stopCapture { (error) in
        guard let assetWriter = self.assetWriter else { return }
        guard let videoInput = self.videoInput else { return }
        guard let audioAppInput = self.audioAppInput else { return }
        guard let audioMicInput = self.audioMicInput else { return }
        if error != nil {
            print("recordingHandler: \(error.debugDescription)")
        } else {
            videoInput.markAsFinished()
            audioMicInput.markAsFinished()
            audioAppInput.markAsFinished()
            assetWriter.finishWriting(completionHandler: {
                print(self.videoURL)
                self.saveToCameraRoll(URL: self.videoURL)
            })
        }
    }
}

I got it to work. I believe it was indeed an async issue. The problem, for some reason, is that you must make sure
assetWriter.startWriting()
assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
happen strictly serially.
Change your code from this:
if assetWriter.status == AVAssetWriterStatus.unknown {
    print("AVAssetWriterStatus.unknown")
    if !assetWriter.startWriting() {
        return
    }
    assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
}
to this:
DispatchQueue.main.async { [weak self] in
    guard let self = self, let assetWriter = self.assetWriter else { return }
    if assetWriter.status == AVAssetWriterStatus.unknown {
        print("AVAssetWriterStatus.unknown")
        if !assetWriter.startWriting() {
            return
        }
        assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
    }
}
Or even better, wrap the whole block inside CMSampleBufferDataIsReady, i.e.:
if CMSampleBufferDataIsReady(sampleBuffer) {
    DispatchQueue.main.async { [weak self] in
        ...
        ...
    }
}
Let me know if it works!
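A variation on the same fix, if you'd rather keep writer work off the main thread: funnel startWriting/startSession (and the appends) through one dedicated serial queue. This is a sketch under that assumption, not the original answer's code; writerQueue is a name I made up:

// A dedicated serial queue gives the same strict ordering as
// DispatchQueue.main.async without tying writer work to the main thread.
let writerQueue = DispatchQueue(label: "com.example.assetWriterQueue") // serial by default

func recordingHandler(sampleBuffer: CMSampleBuffer, sampleBufferType: RPSampleBufferType, error: Error?) {
    writerQueue.async { [weak self] in
        guard let self = self, let assetWriter = self.assetWriter else { return }
        if assetWriter.status == .unknown {
            guard assetWriter.startWriting() else { return }
            assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
        }
        // ... append sample buffers exactly as in the question's handler ...
    }
}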

I had a similar issue. I fixed it by first checking whether a file already exists at videoURL. If so, remove it first, and the error will go away.
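That check could look something like this (a sketch against the question's videoURL; error handling kept minimal):

// AVAssetWriter refuses to write over an existing file, so clear any
// leftover file at videoURL before creating the writer.
if FileManager.default.fileExists(atPath: videoURL.path) {
    do {
        try FileManager.default.removeItem(at: videoURL)
    } catch {
        print("Couldn't remove existing file: \(error)")
    }
}
assetWriter = try! AVAssetWriter(url: videoURL, fileType: .mp4)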

Related

Is it possible to run multiple instances of SFSpeechRecognizer?

I've implemented Apple's SpeechRecognizer to convert speech to text. I have multiple audio recordings, so I'm creating multiple SFSpeechRecognizer instances so that all of them are converted in parallel, and I've also used DispatchGroup so that I get a completion callback when the last one finishes. But I keep getting the error kAFAssistantErrorDomain error 209.
private var dispatchGroup = DispatchGroup()

allURLs.forEach { (singleURL) in
    DispatchQueue.main.async {
        thisSelf.dispatchGroup.enter()
        let request = SFSpeechURLRecognitionRequest(url: singleURL)
        guard let recognizer = SFSpeechRecognizer() else {
            thisSelf.dispatchGroup.leave()
            completion(.failure(thisSelf.speechReconInitError))
            return
        }
        request.shouldReportPartialResults = false
        if !recognizer.isAvailable {
            thisSelf.dispatchGroup.leave()
            return
        }
        recognizer.recognitionTask(with: request) { [weak thisSelf] (result, error) in
            guard let reconSelf = thisSelf else { return }
            if let error = error {
                completion(.failure(error))
                if let nsError = error as NSError? {
                    print("Error while transcribing audio: \(singleURL.path), Code, Domain, Description: \(nsError.code), \(nsError.domain), \(nsError.localizedDescription)")
                } else {
                    print("Error while transcribing audio: \(singleURL.path), Error: \(error.localizedDescription)")
                }
                reconSelf.dispatchGroup.leave()
            } else if let transcriptionResult = result, transcriptionResult.isFinal {
                transcribedText += transcriptionResult.bestTranscription.formattedString
                reconSelf.dispatchGroup.leave()
            }
        }
        thisSelf.dispatchGroup.notify(queue: .main) {
            if !transcribedText.isEmpty {
                completion(.success(transcribedText))
            }
        }
    }
}
And if I transcribe only one audio file at a time, then I don't get any error.
TIA
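Given the question's own observation that one-at-a-time transcription works, here is a workaround sketch that serializes the requests; this is a hypothetical helper of mine, not an answer from the thread:

// Transcribe URLs strictly one after another, recursing on completion.
// This sidesteps whatever concurrency limit triggers kAFAssistantErrorDomain 209.
func transcribeSerially(_ urls: [URL], accumulated: String = "", completion: @escaping (String) -> Void) {
    guard let url = urls.first else {
        completion(accumulated) // all files processed
        return
    }
    let request = SFSpeechURLRecognitionRequest(url: url)
    request.shouldReportPartialResults = false
    guard let recognizer = SFSpeechRecognizer(), recognizer.isAvailable else {
        // Skip this file if no recognizer is available.
        transcribeSerially(Array(urls.dropFirst()), accumulated: accumulated, completion: completion)
        return
    }
    recognizer.recognitionTask(with: request) { result, error in
        var text = accumulated
        if let result = result, result.isFinal {
            text += result.bestTranscription.formattedString
        }
        // Move on only when this file is finished (final result or error).
        if result?.isFinal == true || error != nil {
            transcribeSerially(Array(urls.dropFirst()), accumulated: text, completion: completion)
        }
    }
}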

WebRTC iOS: Record Remote Audio stream using WebRTC

I am working on an audio streaming application with recording functionality for a receiver.
I got stuck at the point where the user wants to record the audio stream on the receiver side.
Below is my code
Initialisation
var engine = AVAudioEngine()
var recordingFile: AVAudioFile?
var audioPlayer: AVAudioPlayer?
let player = AVAudioPlayerNode()
var isRecording: Bool = false
Initialise AudioEngine
func initializeAudioEngine() {
    let input = self.engine.inputNode
    let format = input.inputFormat(forBus: 0)
    self.engine.attach(self.player)
    let mainMixerNode = self.engine.mainMixerNode
    self.engine.connect(input, to: mainMixerNode, format: format)
    self.engine.prepare()
    do {
        try self.engine.start()
        self.startRecording()
    } catch (let error) {
        print("START FAILED", error)
    }
}
Start Recording
func startRecording() {
    self.createRecordingFile()
    self.engine.mainMixerNode.installTap(onBus: 0,
                                         bufferSize: 1024,
                                         format: self.engine.mainMixerNode.outputFormat(forBus: 0)) { (buffer, time) -> Void in
        do {
            self.isRecording = true
            try self.recordingFile?.write(from: buffer)
        } catch (let error) {
            print("RECORD ERROR", error)
        }
        return
    }
}
Create Buffer
private func createBuffer(forFileNamed fileName: String) -> AVAudioPCMBuffer? {
    var res: AVAudioPCMBuffer?
    if let fileURL = Bundle.main.url(forResource: fileName, withExtension: "caf") {
        do {
            let file = try AVAudioFile(forReading: fileURL)
            res = AVAudioPCMBuffer(pcmFormat: file.processingFormat, frameCapacity: AVAudioFrameCount(file.length))
            if let _ = res {
                do {
                    try file.read(into: res!)
                } catch (let error) {
                    print("ERROR read file", error)
                }
            }
        } catch (let error) {
            print("ERROR file creation", error)
        }
    }
    return res
}
Stop Recording
func stopRecording() {
    self.engine.mainMixerNode.removeTap(onBus: 0)
}
I am trying to record using earphones, but it's not working.
It will work: once you set up an AVAudioSession with the category AVAudioSessionCategoryPlayAndRecord and call setActive(true), the session will record whatever audio is routed to the device.
WebRTC does not have any internal API to start or stop recording, so we can use AVAudioSession instead.
First, set up the audio session:
func setUPAudioSession() -> Bool {
    let audiosession = AVAudioSession.sharedInstance()
    do {
        try audiosession.setCategory(AVAudioSessionCategoryPlayAndRecord)
    } catch (let error) {
        print("--> \(error.localizedDescription)")
    }
    do {
        try audiosession.setActive(true)
    } catch (let error) {
        print("--> \(error.localizedDescription)")
    }
    return audiosession.isInputAvailable
}
After setting up the audio session, start recording as below:
func startRecording() -> Bool {
    var settings: [String: Any] = [:]
    settings[AVFormatIDKey] = kAudioFormatLinearPCM
    settings[AVSampleRateKey] = 8000.0
    settings[AVNumberOfChannelsKey] = 1
    settings[AVLinearPCMBitDepthKey] = 16
    settings[AVLinearPCMIsBigEndianKey] = false
    settings[AVLinearPCMIsFloatKey] = false
    settings[AVEncoderAudioQualityKey] = AVAudioQuality.max.rawValue
    // Create the path where the recorded file will be saved automatically
    let searchPaths: [String] = NSSearchPathForDirectoriesInDomains(.documentDirectory, .allDomainsMask, true)
    guard let documentPath_ = searchPaths.first else { return false }
    let pathToSave = "\(documentPath_)/\(dateString)"
    let url = URL(fileURLWithPath: pathToSave)
    recorder = try? AVAudioRecorder(url: url, settings: settings)
    // Initialize delegate, metering, etc.
    recorder?.delegate = self
    recorder?.isMeteringEnabled = true
    recorder?.prepareToRecord()
    if let recordIs = recorder {
        return recordIs.record()
    }
    return false
}
Play recorded file
func playrecodingFile() {
    // Get the path of the recorded file saved in the previous method
    let searchPaths: [String] = NSSearchPathForDirectoriesInDomains(.documentDirectory, .allDomainsMask, true)
    guard let documentPath_ = searchPaths.first else { return }
    let fileManager = FileManager.default
    var arrayListOfRecordSound: [String] = []
    if fileManager.fileExists(atPath: recordingFolder()) {
        arrayListOfRecordSound = (try? fileManager.contentsOfDirectory(atPath: documentPath_)) ?? []
    }
    guard let firstRecordedSound = arrayListOfRecordSound.first else { return }
    let selectedSound = "\(documentPath_)/\(firstRecordedSound)"
    let url = URL(fileURLWithPath: selectedSound)
    // Keep a strong reference in a property; a local player would be
    // deallocated before playback finishes.
    audioPlayer = try? AVAudioPlayer(contentsOf: url)
    audioPlayer?.delegate = self
    try? AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback)
    audioPlayer?.prepareToPlay()
    audioPlayer?.play()
}
Stop recording
func stopRecording() {
    recorder?.stop()
}
Pause recording
func pauseRecording() {
    recorder?.pause()
}
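For completeness, the calling order would look roughly like this (a usage sketch; it assumes the functions above live on the same object, with recorder as its AVAudioRecorder property):

if setUPAudioSession() {   // configure and activate the session first
    _ = startRecording()   // begin writing the recorded file
}
// ... later ...
stopRecording()            // finalize the file
playrecodingFile()         // play back the most recent recording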

How to replace/update data being saved in Core Data

I have saved JSON data in Core Data. The problem in my app is that whenever I call the API again, the fetched data is appended as new rows in Core Data. What I want is for the earlier data to be replaced or updated whenever I call the API again. How can I achieve this? I read that I can use NSBatchUpdateRequest but I still don't understand how to implement it.
This is my code
ApiSession.shared.send(request) { (response) in
    if let err = response.error {
        print("Failed to get youtube data:", err)
        return
    }
    guard let results = response.value else { return }
    self.nextPageToken = results.nextPageToken
    results.items.forEach({ (result) in
        let context = CoreDataManager.shared.persistenceContainer.viewContext
        if let video = NSEntityDescription.insertNewObject(forEntityName: "Video", into: context) as? Video {
            video.videoTitle = result.snippet.title
            video.videoId = result.id.videoID
        }
        let request: NSFetchRequest<Video> = Video.fetchRequest()
        do {
            try context.save()
            let videos = try context.fetch(request)
            DispatchQueue.main.async {
                self.videos = videos
                self.tableView.reloadData()
            }
        } catch let saveErr {
            print("Failed to save video:", saveErr)
        }
    })
}
Try something like this:
let fetchRequest: NSFetchRequest<Video> = Video.fetchRequest()
fetchRequest.predicate = NSPredicate(format: "videoId == %@", result.id.videoID)
var videoList: [Video] = []
var video: Video?
do {
    videoList = try context.fetch(fetchRequest)
} catch {
    print("error executing fetch request: \(error)")
}
if videoList.count > 0 {
    video = videoList[0]
} else {
    video = NSEntityDescription.insertNewObject(forEntityName: "Video", into: context) as? Video
    video?.videoId = result.id.videoID
}
if let v = video {
    v.videoTitle = result.snippet.title
}
To update an existing record you have to fetch it by the given ID and check whether the record exists.
If it exists, update it.
If not, create a new one.
ApiSession.shared.send(request) { response in
    if let err = response.error {
        print("Failed to get youtube data:", err)
        return
    }
    guard let results = response.value else { return }
    self.nextPageToken = results.nextPageToken
    let context = CoreDataManager.shared.persistenceContainer.viewContext
    results.items.forEach({ result in
        let request: NSFetchRequest<Video> = Video.fetchRequest()
        request.predicate = NSPredicate(format: "videoId == %@", result.id.videoID) // if videoID is Int change the placeholder to `%ld`
        do {
            let videos = try context.fetch(request)
            if let video = videos.first {
                video.videoTitle = result.snippet.title
                self.videos.append(video)
            } else {
                let video = NSEntityDescription.insertNewObject(forEntityName: "Video", into: context) as! Video
                video.videoTitle = result.snippet.title
                video.videoId = result.id.videoID
            }
        } catch {
            print("Failed to fetch video:", error)
        }
    })
    do {
        try context.save()
    } catch {
        print("Failed to save video:", error)
    }
    DispatchQueue.main.async {
        self.tableView.reloadData()
    }
}
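A design alternative, not from either answer: if the Video entity gets a unique constraint on videoId in the model editor, Core Data can perform the upsert itself and the per-item fetch goes away. A sketch, assuming that constraint is configured:

// With a unique constraint on "videoId" in the managed object model,
// re-inserting the same ID updates the existing row when the context saves.
let context = CoreDataManager.shared.persistenceContainer.viewContext
context.mergePolicy = NSMergeByPropertyObjectTrumpMergePolicy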

How to get an AVAssetReader to loop?

Hi, I have been trying to figure out how to implement movie looping in GPUImage2, but have been unsuccessful so far. The MovieInput class in GPUImage2 uses AVAssetReader to play back movie files, so I researched ways to loop AVAssetReader. I found this question on Stack Overflow dealing with the topic: AVFoundation to reproduce a video loop
The best answer was
"AVAssetReader doesn't support seeking or restarting, it is essentially a sequential decoder. You have to create a new AVAssetReader object to read the same samples again."
I tried to figure out how to connect the old assetReader to a new one, but I was not very successful, and it crashed every time.
I was recommended to try something like this, but I am not exactly sure how to write the function generateAssetReader.
public func start() {
    self.assetReader = generateAssetReader(asset: asset, readAudio: readAudio, videoOutputSettings: videoOutputSettings, audioOutputSettings: audioOutputSettings)
    asset.loadValuesAsynchronously(forKeys: ["tracks"], completionHandler: {
        DispatchQueue.global(priority: DispatchQueue.GlobalQueuePriority.default).async(execute: {
            guard (self.asset.statusOfValue(forKey: "tracks", error: nil) == .loaded) else { return }
            guard self.assetReader.startReading() else {
                print("Couldn't start reading")
                return
            }
            var readerVideoTrackOutput: AVAssetReaderOutput? = nil
            for output in self.assetReader.outputs {
                if (output.mediaType == AVMediaTypeVideo) {
                    readerVideoTrackOutput = output
                }
            }
            while (self.assetReader.status == .reading) {
                self.readNextVideoFrame(from: readerVideoTrackOutput!)
            }
            if (self.assetReader.status == .completed) {
                self.assetReader.cancelReading()
                self.assetReader = nil
                if self.loop {
                    self.start()
                } else {
                    self.endProcessing()
                }
            }
        })
    })
}
Would anyone have a clue about solving this looping problem? This is a link to the entire code of the MovieInput class:
https://github.com/BradLarson/GPUImage2/blob/master/framework/Source/iOS/MovieInput.swift
I found the answer, in case anyone is wondering.
public func createReader() -> AVAssetReader {
    var assetRead: AVAssetReader!
    do {
        assetRead = try AVAssetReader(asset: self.asset)
        let outputSettings: [String: AnyObject] = [(kCVPixelBufferPixelFormatTypeKey as String): NSNumber(value: Int32(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange))]
        let readerVideoTrackOutput = AVAssetReaderTrackOutput(track: self.asset.tracks(withMediaType: AVMediaTypeVideo)[0], outputSettings: outputSettings)
        readerVideoTrackOutput.alwaysCopiesSampleData = false
        assetRead.add(readerVideoTrackOutput)
    } catch {
        print("Couldn't create asset reader: \(error)")
    }
    return assetRead
}
public func start() {
    self.assetReader = createReader()
    asset.loadValuesAsynchronously(forKeys: ["tracks"], completionHandler: {
        DispatchQueue.global(priority: DispatchQueue.GlobalQueuePriority.default).async(execute: {
            guard (self.asset.statusOfValue(forKey: "tracks", error: nil) == .loaded) else { return }
            guard self.assetReader.startReading() else {
                print("Couldn't start reading")
                return
            }
            var readerVideoTrackOutput: AVAssetReaderOutput? = nil
            for output in self.assetReader.outputs {
                if (output.mediaType == AVMediaTypeVideo) {
                    readerVideoTrackOutput = output
                }
            }
            while (self.assetReader.status == .reading) {
                self.readNextVideoFrame(from: readerVideoTrackOutput!)
            }
            if (self.assetReader.status == .completed) {
                self.assetReader.cancelReading()
                if (self.loop) {
                    // TODO: Restart movie processing
                    self.start()
                } else {
                    self.endProcessing()
                }
            }
        })
    })
}

AVAssetWriter startWriting() crash

I'm trying to make an iOS recorder app, but sometimes (really rarely) I get a weird crash:
public func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    guard CMSampleBufferDataIsReady(sampleBuffer) else { return }
    if assetWriter == nil { return }
    let lastSampleTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
    if assetWriter.status == .Unknown {
        if assetWriter.startWriting() {
            assetWriter.startSessionAtSourceTime(lastSampleTime)
        } else {
            print("assetWriter.startWriting error")
            return
        }
    }
    guard assetWriter.status == .Writing else { return }
    if captureOutput == videoOutput && videoInput.readyForMoreMediaData {
        if !videoInput.appendSampleBuffer(sampleBuffer) {
            print("Unable to write to video input")
        }
    } else if captureOutput == audioOutput && audioInput.readyForMoreMediaData {
        if !audioInput.appendSampleBuffer(sampleBuffer) {
            print("Unable to write to audio input")
        }
    }
}
The app stops on the assetWriter.startWriting() method. The error I get:
*** Terminating app due to uncaught exception 'NSInvalidArgumentException', reason: '*** -[AVAssetWriterFailedTerminalHelper initWithConfigurationState:terminalError:] invalid parameter not satisfying: terminalError != ((void *)0)'
EDIT: Below you can see the AssetWriter setup code:
private func initWriter(url: NSURL) throws {
    do {
        assetWriter = try AVAssetWriter(URL: url, fileType: AVFileTypeMPEG4)

        let videoSettings = videoOutput.recommendedVideoSettingsForAssetWriterWithOutputFileType(AVFileTypeMPEG4) as! [String: AnyObject]
        guard assetWriter.canApplyOutputSettings(videoSettings, forMediaType: AVMediaTypeVideo) else {
            throw NSError(domain: "com.conn.VideoKit", code: 0, userInfo: [NSLocalizedDescriptionKey: "Couldn't apply video output settings"])
        }
        videoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
        videoInput.transform = CGAffineTransformIdentity
        videoInput.transform = CGAffineTransformMakeRotation(CGFloat(270 * M_PI) / 180.0)
        videoInput.expectsMediaDataInRealTime = true
        if assetWriter.canAddInput(videoInput) {
            assetWriter.addInput(videoInput)
        } else {
            print("Cannot add video input to asset writer")
        }

        let audioSettings = audioOutput.recommendedAudioSettingsForAssetWriterWithOutputFileType(AVFileTypeMPEG4) as! [String: AnyObject]
        guard assetWriter.canApplyOutputSettings(audioSettings, forMediaType: AVMediaTypeAudio) else {
            throw NSError(domain: "com.conn.VideoKit", code: 0, userInfo: [NSLocalizedDescriptionKey: "Couldn't apply audio output settings"])
        }
        audioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioSettings)
        audioInput.expectsMediaDataInRealTime = true
        if assetWriter.canAddInput(audioInput) {
            assetWriter.addInput(audioInput)
        } else {
            print("Cannot add audio input to asset writer")
        }
    } catch let error { throw error }
}
The problem was in buffering. I had to use separate concurrent queues for video and audio:
let videoConcurrentQueue = dispatch_queue_create("com.videoConcurrentQueue", DISPATCH_QUEUE_CONCURRENT)
let audioConcurrentQueue = dispatch_queue_create("com.audioConcurrentQueue", DISPATCH_QUEUE_CONCURRENT)
videoOutput.setSampleBufferDelegate(self, queue: videoConcurrentQueue)
audioOutput.setSampleBufferDelegate(self, queue: audioConcurrentQueue)
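For reference, the same setup in later Swift reads roughly as below (a sketch). One caveat: Apple's documentation asks for a serial queue in setSampleBufferDelegate(_:queue:) so that frames are delivered in order, so serial per-output queues may be the safer variant of this fix:

// Modern DispatchQueue equivalent of the fix above.
// DispatchQueue(label:) is serial by default, which matches Apple's guidance.
let videoQueue = DispatchQueue(label: "com.videoQueue")
let audioQueue = DispatchQueue(label: "com.audioQueue")
videoOutput.setSampleBufferDelegate(self, queue: videoQueue)
audioOutput.setSampleBufferDelegate(self, queue: audioQueue)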
