I'm streaming the content of my app to my RTMP server using RPBroadcastSampleHandler.
One of the methods is
override func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, with sampleBufferType: RPSampleBufferType) {
switch sampleBufferType {
case .video:
streamer.appendSampleBuffer(sampleBuffer, withType: .video)
captureOutput(sampleBuffer)
case .audioApp:
streamer.appendSampleBuffer(sampleBuffer, withType: .audio)
captureAudioOutput(sampleBuffer)
case .audioMic:
()
}
}
And the captureOutput method is
self.lastSampleTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
// Append the sampleBuffer into videoWriterInput
if self.isRecordingVideo {
if self.videoWriterInput!.isReadyForMoreMediaData {
if self.videoWriter!.status == AVAssetWriterStatus.writing {
let whetherAppendSampleBuffer = self.videoWriterInput!.append(sampleBuffer)
print(">>>>>>>>>>>>>The time::: \(self.lastSampleTime.value)/\(self.lastSampleTime.timescale)")
if whetherAppendSampleBuffer {
print("DEBUG::: Append sample buffer successfully")
} else {
print("WARN::: Append sample buffer failed")
}
} else {
print("WARN:::The videoWriter status is not writing")
}
} else {
print("WARN:::Cannot append sample buffer into videoWriterInput")
}
}
Since this sample buffer contains audio/video data, I figured I could use AVFoundation's AVAssetWriter to save it locally while streaming. So what I'm doing is creating an asset writer at the start of the stream:
let fileManager = FileManager.default
let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
self.videoOutputFullFileName = documentsPath.stringByAppendingPathComponent(str: "test_capture_video.mp4")
if self.videoOutputFullFileName == nil {
print("ERROR:The video output file name is nil")
return
}
self.isRecordingVideo = true
if fileManager.fileExists(atPath: self.videoOutputFullFileName!) {
print("WARN:::The file: \(self.videoOutputFullFileName!) exists, will delete the existing file")
do {
try fileManager.removeItem(atPath: self.videoOutputFullFileName!)
} catch let error as NSError {
print("WARN:::Cannot delete existing file: \(self.videoOutputFullFileName!), error: \(error.debugDescription)")
}
} else {
print("DEBUG:::The file \(self.videoOutputFullFileName!) doesn't exist")
}
let screenBounds = UIScreen.main.bounds.size
let videoCompressionProperties = [
AVVideoAverageBitRateKey: screenBounds.width * screenBounds.height * 10.1
]
let videoSettings: [String: Any] = [
AVVideoCodecKey: AVVideoCodecH264,
AVVideoWidthKey: screenBounds.width,
AVVideoHeightKey: screenBounds.height,
AVVideoCompressionPropertiesKey: videoCompressionProperties
]
self.videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
guard let videoWriterInput = self.videoWriterInput else {
print("ERROR:::No video writer input")
return
}
videoWriterInput.expectsMediaDataInRealTime = true
// Add the audio input
var acl = AudioChannelLayout()
memset(&acl, 0, MemoryLayout<AudioChannelLayout>.size)
acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono;
let audioOutputSettings: [String: Any] =
[ AVFormatIDKey: kAudioFormatMPEG4AAC,
AVSampleRateKey : 44100,
AVNumberOfChannelsKey : 1,
AVEncoderBitRateKey : 64000,
AVChannelLayoutKey : Data(bytes: &acl, count: MemoryLayout<AudioChannelLayout>.size)]
audioWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioOutputSettings)
guard let audioWriterInput = self.audioWriterInput else {
print("ERROR:::No audio writer input")
return
}
audioWriterInput.expectsMediaDataInRealTime = true
do {
self.videoWriter = try AVAssetWriter(outputURL: URL(fileURLWithPath: self.videoOutputFullFileName!), fileType: AVFileTypeMPEG4)
} catch let error as NSError {
print("ERROR:::::>>>>>>>>>>>>>Cannot init videoWriter, error:\(error.localizedDescription)")
}
guard let videoWriter = self.videoWriter else {
print("ERROR:::No video writer")
return
}
if videoWriter.canAdd(videoWriterInput) {
videoWriter.add(videoWriterInput)
} else {
print("ERROR:::Cannot add videoWriterInput into videoWriter")
}
//Add audio input
if videoWriter.canAdd(audioWriterInput) {
videoWriter.add(audioWriterInput)
} else {
print("ERROR:::Cannot add audioWriterInput into videoWriter")
}
if videoWriter.status != AVAssetWriterStatus.writing {
print("DEBUG::::::::::::::::The videoWriter status is not writing, and will start writing the video.")
let hasStartedWriting = videoWriter.startWriting()
if hasStartedWriting {
videoWriter.startSession(atSourceTime: self.lastSampleTime)
print("DEBUG:::Have started writting on videoWriter, session at source time: \(self.lastSampleTime)")
LOG(videoWriter.status.rawValue)
} else {
print("WARN:::Fail to start writing on videoWriter")
}
} else {
print("WARN:::The videoWriter.status is writing now, so cannot start writing action on videoWriter")
}
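One thing worth noting about the setup above: startSession(atSourceTime: self.lastSampleTime) is called at the moment the writer is created, when lastSampleTime may still be zero or stale because no buffer has arrived yet. A common alternative (not what the question does, just a minimal sketch; didStartSession and appendVideo(_:) are made-up names) is to defer startWriting/startSession until the first video sample shows up, inside the handler class:
private var didStartSession = false

func appendVideo(_ sampleBuffer: CMSampleBuffer) {
    let pts = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
    if !didStartSession, let writer = videoWriter, writer.status == .unknown {
        if writer.startWriting() {
            // Anchor the timeline to the first frame we actually received
            writer.startSession(atSourceTime: pts)
            didStartSession = true
        }
    }
    if let input = videoWriterInput,
       videoWriter?.status == .writing,
       input.isReadyForMoreMediaData {
        input.append(sampleBuffer)
    }
}
That way the file's timeline starts exactly at the first appended frame instead of at whatever timestamp happened to be stored when the writer was configured.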
And then saving and finishing writing at the end of the stream:
print("DEBUG::: Starting to process recorder final...")
print("DEBUG::: videoWriter status: \(self.videoWriter!.status.rawValue)")
self.isRecordingVideo = false
guard let videoWriterInput = self.videoWriterInput else {
print("ERROR:::No video writer input")
return
}
guard let videoWriter = self.videoWriter else {
print("ERROR:::No video writer")
return
}
guard let audioWriterInput = self.audioWriterInput else {
print("ERROR:::No audio writer input")
return
}
videoWriterInput.markAsFinished()
audioWriterInput.markAsFinished()
videoWriter.finishWriting {
if videoWriter.status == AVAssetWriterStatus.completed {
print("DEBUG:::The videoWriter status is completed")
let fileManager = FileManager.default
if fileManager.fileExists(atPath: self.videoOutputFullFileName!) {
print("DEBUG:::The file: \(self.videoOutputFullFileName ?? "") has been saved in documents folder, and is ready to be moved to camera roll")
let sharedFileURL = fileManager.containerURL(forSecurityApplicationGroupIdentifier: "group.jp.awalker.co.Hotter")
guard let documentsPath = sharedFileURL?.path else {
LOG("ERROR:::No shared file URL path")
return
}
let finalFilename = documentsPath.stringByAppendingPathComponent(str: "test_capture_video.mp4")
//Check whether file exists
if fileManager.fileExists(atPath: finalFilename) {
print("WARN:::The file: \(finalFilename) exists, will delete the existing file")
do {
try fileManager.removeItem(atPath: finalFilename)
} catch let error as NSError {
print("WARN:::Cannot delete existing file: \(finalFilename), error: \(error.debugDescription)")
}
} else {
print("DEBUG:::The file \(self.videoOutputFullFileName!) doesn't exist")
}
do {
try fileManager.copyItem(at: URL(fileURLWithPath: self.videoOutputFullFileName!), to: URL(fileURLWithPath: finalFilename))
}
catch let error as NSError {
LOG("ERROR:::\(error.debugDescription)")
}
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: URL(fileURLWithPath: finalFilename))
}) { completed, error in
if completed {
print("Video \(self.videoOutputFullFileName ?? "") has been moved to camera roll")
}
if error != nil {
print ("ERROR:::Cannot move the video \(self.videoOutputFullFileName ?? "") to camera roll, error: \(error!.localizedDescription)")
}
}
} else {
print("ERROR:::The file: \(self.videoOutputFullFileName ?? "") doesn't exist, so can't move this file camera roll")
}
} else {
print("WARN:::The videoWriter status is not completed, stauts: \(videoWriter.status)")
}
}
The problem I'm having is that the finishWriting completion block is never reached. The writer stays in "writing" status, and therefore the video file is not saved.
If I remove the "finishWriting" call and just let the completion code run, a file is saved, but it is not properly finalized; when I try to view it, it's unplayable, probably because it's missing metadata.
Is there any other way to do this? I don't want to start a separate capture just to save the recording, because that takes too much CPU, and the RPBroadcastSampleHandler's CMSampleBuffer already has the video data. But maybe using AVFoundation here at all is the wrong move?
What should I change? How do I save the video from that CMSampleBuffer?
From https://developer.apple.com/documentation/avfoundation/avassetwriter/1390432-finishwritingwithcompletionhandl
This method returns immediately and causes its work to be performed asynchronously
When broadcastFinished returns, your extension is killed. The only way I've been able to get this to work is by blocking the method from returning until the video processing is done. I'm not sure if this is the correct way to do it (seems weird), but it works. Something like this:
var finishedWriting = false
videoWriter.finishWriting {
NSLog("DEBUG:::The videoWriter finished writing.")
if videoWriter.status == .completed {
NSLog("DEBUG:::The videoWriter status is completed")
let fileManager = FileManager.default
if fileManager.fileExists(atPath: self.videoOutputFullFileName!) {
NSLog("DEBUG:::The file: \(self.videoOutputFullFileName ?? "") has been saved in documents folder, and is ready to be moved to camera roll")
let sharedFileURL = fileManager.containerURL(forSecurityApplicationGroupIdentifier: "group.xxx.com")
guard let documentsPath = sharedFileURL?.path else {
NSLog("ERROR:::No shared file URL path")
finishedWriting = true
return
}
let finalFilename = documentsPath + "/test_capture_video.mp4"
//Check whether file exists
if fileManager.fileExists(atPath: finalFilename) {
NSLog("WARN:::The file: \(finalFilename) exists, will delete the existing file")
do {
try fileManager.removeItem(atPath: finalFilename)
} catch let error as NSError {
NSLog("WARN:::Cannot delete existing file: \(finalFilename), error: \(error.debugDescription)")
}
} else {
NSLog("DEBUG:::The file \(self.videoOutputFullFileName!) doesn't exist")
}
do {
try fileManager.copyItem(at: URL(fileURLWithPath: self.videoOutputFullFileName!), to: URL(fileURLWithPath: finalFilename))
}
catch let error as NSError {
NSLog("ERROR:::\(error.debugDescription)")
}
PHPhotoLibrary.shared().performChanges({
PHAssetCollectionChangeRequest.creationRequestForAssetCollection(withTitle: "xxx")
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: URL(fileURLWithPath: finalFilename))
}) { completed, error in
if completed {
NSLog("Video \(self.videoOutputFullFileName ?? "") has been moved to camera roll")
}
if error != nil {
NSLog("ERROR:::Cannot move the video \(self.videoOutputFullFileName ?? "") to camera roll, error: \(error!.localizedDescription)")
}
finishedWriting = true
}
} else {
NSLog("ERROR:::The file: \(self.videoOutputFullFileName ?? "") doesn't exist, so can't move this file camera roll")
finishedWriting = true
}
} else {
NSLog("WARN:::The videoWriter status is not completed, status: \(videoWriter.status)")
finishedWriting = true
}
}
while finishedWriting == false {
// NSLog("DEBUG:::Waiting to finish writing...")
}
I would think you'd have to also call extensionContext.completeRequest at some point, but mine's working fine without it shrug.
Marty's answer should be accepted because he pointed out the problem, and a DispatchGroup solution to it works perfectly.
Since he used a while loop and didn't describe how to use DispatchGroups, here's the way I implemented it.
override func broadcastFinished() {
let dispatchGroup = DispatchGroup()
dispatchGroup.enter()
self.writerInput.markAsFinished()
self.writer.finishWriting {
// Do your work to here to make video available
dispatchGroup.leave()
}
dispatchGroup.wait() // <= blocks the thread here
}
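If you're worried about broadcastFinished() blocking forever when the completion handler never fires, DispatchGroup also supports a timed wait. A small variation on the code above (the 10-second budget is an arbitrary illustrative choice, not something ReplayKit specifies):
override func broadcastFinished() {
    let dispatchGroup = DispatchGroup()
    dispatchGroup.enter()
    self.writerInput.markAsFinished()
    self.writer.finishWriting {
        // Do your work here to make the video available
        dispatchGroup.leave()
    }
    // Wait, but give up after a bounded interval instead of blocking indefinitely
    if dispatchGroup.wait(timeout: .now() + 10) == .timedOut {
        NSLog("WARN:::finishWriting did not complete before the extension was torn down")
    }
}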
You can try this:
override func broadcastFinished() {
Log(#function)
...
// Need to give an end CMTime; if it is not set, the resulting video cannot be used
videoWriter.endSession(atSourceTime: ...)
videoWriter.finishWriting {
// The completion callback never gets a chance to execute here before the extension exits
}
...
// Execution reaches this point and the extension finishes
}
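Fleshing that out with the DispatchGroup idea from the answer above, and with lastSampleTime tracked in processSampleBuffer as in the original question, a rough sketch (illustrative, not this answer's actual code) might be:
override func broadcastFinished() {
    guard let videoWriter = self.videoWriter,
          let videoWriterInput = self.videoWriterInput,
          let audioWriterInput = self.audioWriterInput else { return }

    let group = DispatchGroup()
    group.enter()

    // Close the timeline at the last presentation timestamp we actually saw
    videoWriter.endSession(atSourceTime: self.lastSampleTime)
    videoWriterInput.markAsFinished()
    audioWriterInput.markAsFinished()
    videoWriter.finishWriting {
        // Copy the finished file to the shared container / photo library here
        group.leave()
    }

    // Keep broadcastFinished() from returning until the completion has run
    group.wait()
}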
When trying to record and save audio/video with ReplayKit I keep getting errors. I'm using
Xcode: Version 11.2.1
Swift 5
iOS 13
iPhone 7+ physical device
When I set the filePath I'm already using URL(fileURLWithPath:). The file extension and AVFileType are both .mp4. I check whether the file already exists via FileManager and, if so, remove it: do { try FileManager.default.removeItem(at: videoURL) }. I also tried changing the path itself to "Library/Caches/" as in @florianSAP's answer, which didn't work.
Here are the 3 error checks:
// 1. from recording
if !self.assetWriter.startWriting() {
print("Can't write")
return
}
// 2. from recording
if self.assetWriter.status == AVAssetWriter.Status.failed {
print("StartCapture Error Occurred, Status = \(self.assetWriter.status.rawValue), \(self.assetWriter.error?.localizedDescription) \(self.assetWriter.error?.debugDescription)")
return
}
// 3. this one is when trying to save the url in the PHAssetChangeRequest.creationRequestForAssetFromVideo completionHandler
if let error = error {
print("PHAssetChangeRequest Video Error: \(error.localizedDescription)")
return
}
// 4. this isn't an error but inside the switch rpSampleBufferType { } statement "not a video sample" kept printing out
The error messages are:
StartCapture Error Occurred, Status = 3, The operation could not be
completed Optional(Error Domain=AVFoundationErrorDomain Code=-11800
"The operation could not be completed"
UserInfo={NSLocalizedFailureReason=An unknown error occurred (-17508),
NSLocalizedDescription=The operation could not be completed,
NSUnderlyingError=0x2833a93b0 {Error Domain=NSOSStatusErrorDomain
Code=-17508 "(null)"}})
PHAssetChangeRequest Video Error: The operation couldn’t be completed. (PHPhotosErrorDomain
error -1.)
Where am I going wrong?
Start Recording
let recorder = RPScreenRecorder.shared()
var assetWriter: AVAssetWriter!
var videoURL: URL!
var videoInput: AVAssetWriterInput!
var audioMicInput: AVAssetWriterInput!
guard let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true).first else { return }
videoURL = URL(fileURLWithPath: documentsPath.appending(UUID().uuidString + ".mp4"))
guard let videoURL = videoURL else { return }
do {
try FileManager.default.removeItem(at: videoURL)
} catch {}
do {
try assetWriter = AVAssetWriter(outputURL: videoURL, fileType: .mp4) // AVAssetWriter(url: videoURL, fileType: .mp4) didn't make a difference
} catch {}
let videoSettings: [String : Any] = [
AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoWidthKey: view.bounds.width,
AVVideoHeightKey: view.bounds.height
]
videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
videoInput.expectsMediaDataInRealTime = true
if assetWriter.canAdd(videoInput) {
assetWriter.add(videoInput)
}
let audioSettings: [String:Any] = [AVFormatIDKey : kAudioFormatMPEG4AAC,
AVNumberOfChannelsKey : 2,
AVSampleRateKey : 44100.0,
AVEncoderBitRateKey: 192000
]
audioMicInput = AVAssetWriterInput(mediaType: .audio, outputSettings: audioSettings)
audioMicInput.expectsMediaDataInRealTime = true
if assetWriter.canAdd(audioMicInput) {
assetWriter.add(audioMicInput)
}
guard recorder.isAvailable else { return }
recorder.startCapture(handler: { (cmSampleBuffer, rpSampleBufferType, err) in
if let err = err { return }
// I tried to check if this was ready and added the below code to it but it made no difference
// if CMSampleBufferDataIsReady(cmSampleBuffer) { ... the code below was put in here ... }
DispatchQueue.main.async {
switch rpSampleBufferType {
case .video:
if self.assetWriter.status == AVAssetWriter.Status.unknown {
if !self.assetWriter.startWriting() {
print("Can't write")
return
}
print("Starting writing")
self.assetWriter.startWriting()
self.assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(cmSampleBuffer))
}
if self.assetWriter.status == AVAssetWriter.Status.failed {
print("StartCapture Error Occurred, Status = \(self.assetWriter.status.rawValue), \(self.assetWriter.error?.localizedDescription) \(self.assetWriter.error?.debugDescription)")
return
}
if self.assetWriter.status == AVAssetWriter.Status.writing {
if self.videoInput.isReadyForMoreMediaData {
if self.videoInput.append(cmSampleBuffer) == false {
print("problem writing video")
}
}
}
case .audioMic:
if self.audioMicInput.isReadyForMoreMediaData {
print("audioMic data added")
self.audioMicInput.append(cmSampleBuffer)
}
default:
print("not a video sample")
}
}
}, completionHandler: { (error) in
if let error = error { return }
})
Stop Recording:
recorder.stopCapture { (error) in
if let error = error { return }
guard let videoInput = self.videoInput else { return }
guard let audioMicInput = self.audioMicInput else { return }
guard let assetWriter = self.assetWriter else { return }
guard let videoURL = videoURL else { return }
videoInput.markAsFinished()
audioMicInput.markAsFinished()
assetWriter.finishWriting(completionHandler: {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: videoURL)
}) { (saved, error) in
if let error = error {
print("PHAssetChangeRequest Video Error: \(error.localizedDescription)")
return
}
if saved {
// ... show success message
}
}
})
}
RPScreenRecorder Delegate that never gets called:
func screenRecorder(_ screenRecorder: RPScreenRecorder, didStopRecordingWith previewViewController: RPPreviewViewController?, error: Error?) {
if let error = error {
print(error.localizedDescription)
}
}
I was able to solve this by doing 2 things:
1 - The first thing I did was change the videoURL's file path from:
// Old Way that was causing some sort of path error
videoURL = URL(fileURLWithPath: documentsPath.appending(UUID().uuidString + ".mp4"))
// This is what the Old Path looked like. Look at the series of numbers beginning with 506... directly after Documents
///var/mobile/Containers/Data/Application/AAEF38A2-7AF1-4A32-A612-296B1584A764/Documents506D36BA-0C27-466A-A0BA-C197481F471A.mp4
to
// New Way that got the path to work
let dirPath = "\(documentsPath)/Videos_\(UUID().uuidString).mp4"
videoURL = URL(fileURLWithPath: dirPath)
// This is what the new path looks like. After Documents there is now a forward slash, the word Videos with an underscore, and then the series of numbers beginning with 506...
///var/mobile/Containers/Data/Application/AAEF38A2-7AF1-4A32-A612-296B1584A764/Documents/Videos_506D36BA-0C27-466A-A0BA-C197481F471A.mp4
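For what it's worth, the same result can be had by building the URL with FileManager's URL-based API, which avoids string-concatenation slips like the missing slash entirely (a sketch with a made-up helper name, not the code from this answer):
// Hypothetical helper illustrating a URL-based alternative to string concatenation
func makeRecordingURL() -> URL? {
    guard let documents = FileManager.default.urls(for: .documentDirectory,
                                                   in: .userDomainMask).first else { return nil }
    return documents.appendingPathComponent("Videos_\(UUID().uuidString).mp4")
}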
2 - The second thing I did was change the code inside recorder.startCapture(handler: { (cmSampleBuffer, rpSampleBufferType, err) in ... }):
recorder.startCapture(handler: { (cmSampleBuffer, rpSampleBufferType, err) in
if let err = err { return }
if CMSampleBufferDataIsReady(cmSampleBuffer) {
DispatchQueue.main.async {
switch rpSampleBufferType {
case .video:
print("writing sample....")
if self.assetWriter?.status == AVAssetWriter.Status.unknown {
print("Started writing")
self.assetWriter?.startWriting()
self.assetWriter?.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(cmSampleBuffer))
}
if self.assetWriter.status == AVAssetWriter.Status.failed {
print("StartCapture Error Occurred, Status = \(self.assetWriter.status.rawValue), \(self.assetWriter.error!.localizedDescription) \(self.assetWriter.error.debugDescription)")
return
}
if self.assetWriter.status == AVAssetWriter.Status.writing {
if self.videoInput.isReadyForMoreMediaData {
print("Writing a sample")
if self.videoInput.append(cmSampleBuffer) == false {
print("problem writing video")
}
}
}
case .audioMic:
if self.audioMicInput.isReadyForMoreMediaData {
print("audioMic data added")
self.audioMicInput.append(cmSampleBuffer)
}
default:
print("not a video sample")
}
}
}
}, completionHandler: { (error) in
if let error = error { return }
})
This has nothing to do with the actual problem I ran into, but if audio isn't syncing you have to add the code below to viewDidLoad. I got it from the comments section here.
do {
try AVAudioSession.sharedInstance().setCategory(.playAndRecord, mode: .videoRecording, options: [.defaultToSpeaker])
try AVAudioSession.sharedInstance().setActive(true, options: .notifyOthersOnDeactivation)
} catch {
#if DEBUG
print("Setting category to AVAudioSessionCategoryPlayback failed.")
#endif
}
If you need to find the meaning of error codes you can look at https://www.osstatus.com. It helped me find -11800 for this problem, but not -17508.
In iOS/Swift I am working with ReplayKit to use AVAssetWriter to create a mov or MP4 video of the user's screen and microphone audio.
When I create a video, it plays fine locally and the audio and video are in sync. However, when I convert this video to HLS (HTTP Live Streaming) format using AWS MediaConvert, the audio is out of sync with the video. Does anyone know what could be causing this? I read about timecoding; maybe I need to add a timecode to my video? Is there an easier way to fix this, or has anyone experienced similar issues?
private func startRecordingVideo(){
//Initialize MP4 Output File for Screen Recorded Video
let fileManager = FileManager.default
let urls = fileManager.urls(for: .documentDirectory, in: .userDomainMask)
guard let documentDirectory: NSURL = urls.first as NSURL? else {
fatalError("documentDir Error")
}
videoOutputURL = documentDirectory.appendingPathComponent("OutputVideo.mov")
if FileManager.default.fileExists(atPath: videoOutputURL!.path) {
do {
try FileManager.default.removeItem(atPath: videoOutputURL!.path)
} catch {
fatalError("Unable to delete file: \(error) : \(#function).")
}
}
//Initialize Asset Writer to Write Video to User's Storage
assetWriter = try! AVAssetWriter(outputURL: videoOutputURL!, fileType:
AVFileType.mov)
let videoOutputSettings: Dictionary<String, Any> = [
AVVideoCodecKey : AVVideoCodecType.h264,
AVVideoWidthKey : UIScreen.main.bounds.size.width,
AVVideoHeightKey : UIScreen.main.bounds.size.height,
];
let audioSettings = [
AVFormatIDKey : kAudioFormatMPEG4AAC,
AVNumberOfChannelsKey : 1,
AVSampleRateKey : 44100.0,
AVEncoderBitRateKey: 96000,
] as [String : Any]
videoInput = AVAssetWriterInput(mediaType: AVMediaType.video,outputSettings: videoOutputSettings)
audioInput = AVAssetWriterInput(mediaType: AVMediaType.audio,outputSettings:audioSettings )
videoInput?.expectsMediaDataInRealTime = true
audioInput?.expectsMediaDataInRealTime = true
assetWriter?.add(videoInput!)
assetWriter?.add(audioInput!)
let sharedRecorder = RPScreenRecorder.shared()
sharedRecorder.isMicrophoneEnabled = true
sharedRecorder.startCapture(handler: {
(sample, bufferType, error) in
//Audio/Video Buffer Data returned from the Screen Recorder
if CMSampleBufferDataIsReady(sample) {
DispatchQueue.main.async { [weak self] in
//Start the Asset Writer if it has not yet started
if self?.assetWriter?.status == AVAssetWriter.Status.unknown {
if !(self?.assetWriter?.startWriting())! {
return
}
self?.assetWriter?.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sample))
self?.startSession = true
}
}
//Handle errors
if self.assetWriter?.status == AVAssetWriter.Status.failed {
print("Error occured, status = \(String(describing: self.assetWriter?.status.rawValue)), \(String(describing: self.assetWriter?.error!.localizedDescription)) \(String(describing: self.assetWriter?.error))")
return
}
//Add video buffer to AVAssetWriter Video Input
if (bufferType == .video)
{
if(self.videoInput!.isReadyForMoreMediaData) && self.startSession {
self.videoInput?.append(sample)
}
}
//Add audio microphone buffer to AVAssetWriter Audio Input
if (bufferType == .audioMic)
{
print("MIC BUFFER RECEIVED")
if self.audioInput!.isReadyForMoreMediaData
{
print("Audio Buffer Came")
self.audioInput?.append(sample)
}
}
}
}, completionHandler: {
error in
print("COMP HANDLER ERROR", error?.localizedDescription)
})
}
private func stopRecordingVideo(){
self.startSession = false
RPScreenRecorder.shared().stopCapture{ (error) in
self.videoInput?.markAsFinished()
self.audioInput?.markAsFinished()
if error == nil{
self.assetWriter?.finishWriting{
self.startSession = false
print("FINISHED WRITING!")
DispatchQueue.main.async {
self.setUpVideoPreview()
}
}
}else{
//DELETE DIRECTORY
}
}
}
I’m sure you’ve either figured this out or moved on, but for all Googlers you basically have to set the mediaTimeScale on the video input. You can see an example here
Here's the relevant part of that code (this code uses an AVSampleBufferDisplayLayer, but the same concept applies):
double pts = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer));
if(!timebaseSet && pts != 0)
{
timebaseSet = true;
CMTimebaseRef controlTimebase;
CMTimebaseCreateWithMasterClock( CFAllocatorGetDefault(), CMClockGetHostTimeClock(), &controlTimebase );
displayLayer.controlTimebase = controlTimebase;
CMTimebaseSetTime(displayLayer.controlTimebase, CMTimeMake(pts, 1));
CMTimebaseSetRate(displayLayer.controlTimebase, 1.0);
}
if([displayLayer isReadyForMoreMediaData])
{
[displayLayer enqueueSampleBuffer:sampleBuffer];
}
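Translated to the AVAssetWriter setup in the question, the idea would be something like the sketch below: give the video input (and optionally the writer itself) an explicit time scale before startWriting() is called. The value 600 is just the conventional video time scale, an assumption on my part rather than something the linked example mandates:
// Sketch only: set an explicit time scale so downstream HLS tooling sees consistent timing
let videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoOutputSettings)
videoInput.expectsMediaDataInRealTime = true
videoInput.mediaTimeScale = CMTimeScale(600)    // per-track time scale
assetWriter?.movieTimeScale = CMTimeScale(600)  // overall movie time scale (optional)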
I am making an app that records video. Up until now, I have been able to successfully record video and audio using AVCaptureMovieFileOutput; however, I now need to edit the video frames in real time to overlay some data onto the video. I began the switch to AVAssetWriter.
After the switch, I am able to record video (with my overlays) just fine using AVCaptureVideoDataOutput; however, AVCaptureAudioDataOutput never calls the delegate method, so my audio doesn't record.
This is how I set up my AVCaptureSession:
fileprivate func setupCamera() {
//Set queues
queue = DispatchQueue(label: "myqueue", qos: .utility, attributes: .concurrent, autoreleaseFrequency: DispatchQueue.AutoreleaseFrequency.inherit, target: DispatchQueue.global())
//The size of output video will be 720x1280
print("Established AVCaptureSession")
cameraSession.sessionPreset = AVCaptureSession.Preset.hd1280x720
//Setup your camera
//Detect which type of camera should be used via `isUsingFrontFacingCamera`
let videoDevice: AVCaptureDevice
videoDevice = AVCaptureDevice.default(AVCaptureDevice.DeviceType.builtInWideAngleCamera, for: AVMediaType.video, position: AVCaptureDevice.Position.front)!
print("Created AVCaptureDeviceInput: video")
//Setup your microphone
var audioDevice: AVCaptureDevice
//audioDevice = AVCaptureDevice.default(for: AVMediaType.audio)!
audioDevice = AVCaptureDevice.default(AVCaptureDevice.DeviceType.builtInMicrophone, for: AVMediaType.audio, position: AVCaptureDevice.Position.unspecified)!
print("Created AVCaptureDeviceInput: audio")
do {
cameraSession.beginConfiguration()
cameraSession.automaticallyConfiguresApplicationAudioSession = false
cameraSession.usesApplicationAudioSession = true
// Add camera to your session
let videoInput = try AVCaptureDeviceInput(device: videoDevice)
if cameraSession.canAddInput(videoInput) {
cameraSession.addInput(videoInput)
print("Added AVCaptureDeviceInput: video")
} else
{
print("Could not add VIDEO!!!")
}
// Add microphone to your session
let audioInput = try AVCaptureDeviceInput(device: audioDevice)
if cameraSession.canAddInput(audioInput) {
cameraSession.addInput(audioInput)
print("Added AVCaptureDeviceInput: audio")
} else
{
print("Could not add MIC!!!")
}
//Define your video output
videoDataOutput.videoSettings = [
kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
]
videoDataOutput.alwaysDiscardsLateVideoFrames = true
if cameraSession.canAddOutput(videoDataOutput) {
videoDataOutput.setSampleBufferDelegate(self, queue: queue)
cameraSession.addOutput(videoDataOutput)
print("Added AVCaptureDataOutput: video")
}
//Define your audio output
if cameraSession.canAddOutput(audioDataOutput) {
audioDataOutput.setSampleBufferDelegate(self, queue: queue)
cameraSession.addOutput(audioDataOutput)
print("Added AVCaptureDataOutput: audio")
}
//Set up the AVAssetWriter (to write to file)
do {
videoWriter = try AVAssetWriter(outputURL: getURL()!, fileType: AVFileType.mp4)
print("Setup AVAssetWriter")
//Video Settings
let videoSettings: [String : Any] = [
AVVideoCodecKey : AVVideoCodecType.h264,
AVVideoWidthKey : 720,
AVVideoHeightKey : 1280,
]
videoWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
videoWriterVideoInput?.expectsMediaDataInRealTime = true;
print("Setup AVAssetWriterInput: Video")
if (videoWriter?.canAdd(videoWriterVideoInput!))!
{
videoWriter?.add(videoWriterVideoInput!)
print("Added AVAssetWriterInput: Video")
} else{
print("Could not add VideoWriterInput to VideoWriter")
}
// Add the audio input
//Audio Settings
let audioSettings : [String : Any] = [
AVFormatIDKey : kAudioFormatMPEG4AAC,
AVSampleRateKey : 44100,
AVEncoderBitRateKey : 64000,
AVNumberOfChannelsKey: 1
]
videoWriterAudioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioSettings)
videoWriterAudioInput?.expectsMediaDataInRealTime = true;
print("Setup AVAssetWriterInput: Audio")
if (videoWriter?.canAdd(videoWriterAudioInput!))!
{
videoWriter?.add(videoWriterAudioInput!)
print("Added AVAssetWriterInput: Audio")
} else{
print("Could not add AudioWriterInput to VideoWriter")
}
}
catch {
print("ERROR")
return
}
//PixelWriter
videoWriterInputPixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterVideoInput!, sourcePixelBufferAttributes: [
kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
kCVPixelBufferWidthKey as String: 1280,
kCVPixelBufferHeightKey as String: 768,
kCVPixelFormatOpenGLESCompatibility as String: true,
])
print("Created AVAssetWriterInputPixelBufferAdaptor")
//Present the preview of video
previewLayer = AVCaptureVideoPreviewLayer(session: cameraSession)
previewLayer.position = CGPoint.init(x: CGFloat(self.view.frame.width/2), y: CGFloat(self.view.frame.height/2))
previewLayer.bounds = self.view.bounds
previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraView.layer.addSublayer(previewLayer)
print("Created AVCaptureVideoPreviewLayer")
//Don't forget start running your session
//this doesn't mean start record!
cameraSession.commitConfiguration()
cameraSession.startRunning()
}
catch let error {
debugPrint(error.localizedDescription)
}
}
Start recording:
func startRecording()
{
print("Begin Recording...")
let recordingClock = self.cameraSession.masterClock
isRecording = true
videoWriter?.startWriting()
videoWriter?.startSession(atSourceTime: CMClockGetTime(recordingClock!))
}
Stop recording:
func stopRecording()
{
if (videoWriter?.status.rawValue == 1) {
videoWriterVideoInput?.markAsFinished()
videoWriterAudioInput?.markAsFinished()
print("video finished")
print("audio finished")
}else{
print("not writing")
}
self.videoWriter?.finishWriting(){
self.isRecording = false
print("finished writing")
DispatchQueue.main.async{
if self.videoWriter?.status == AVAssetWriterStatus.failed {
print("status: failed")
}else if self.videoWriter?.status == AVAssetWriterStatus.completed{
print("status: completed")
}else if self.videoWriter?.status == AVAssetWriterStatus.cancelled{
print("status: cancelled")
}else{
print("status: unknown")
}
if let e=self.videoWriter?.error{
print("stop record error:", e)
}
}
}
print("Stop Recording!")
}
And this is the delegate method, which gets called for video, but not for audio:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
autoreleasepool {
guard captureOutput != nil,
sampleBuffer != nil,
connection != nil,
CMSampleBufferDataIsReady(sampleBuffer) else { return }
guard CMSampleBufferDataIsReady(sampleBuffer) else{
return
}
if (connection.isVideoOrientationSupported) {
connection.videoOrientation = currentVideoOrientation()
} else
{
return
}
if (connection.isVideoStabilizationSupported) {
//connection.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
}
if !self.isRecording
{
return
}
var string = ""
if let audio = self.videoWriterAudioInput
{
if connection.audioChannels.count > 0
{
//EXECUTION NEVER REACHES HERE
if audio.isReadyForMoreMediaData
{
queue!.async() {
audio.append(sampleBuffer)
}
return
}
}
}
print ("\(string)")
if let camera = self.videoWriterVideoInput, camera.isReadyForMoreMediaData {
//This is getting called!!!
queue!.async() {
self.videoWriterInputPixelBufferAdaptor.append(self.imageToBuffer(from: image!)!, withPresentationTime: timestamp)
}
}
}//End autoreleasepool
}
}
I am sure the problem does not lie with my devices or inputs, as I was able to successfully record video and audio using AVCaptureMovieFileOutput. I have also read other relevant posts with no luck:
Corrupt video capturing audio and video using AVAssetWriter
AVAssetWriter audio with video together
Ripped my hair out for days on this. My mistake was simple: the delegate method was being called, but it was returning BEFORE it reached the audio statements. These were the culprits, which needed to be moved to after the audio-processing portion of my code:
if (connection.isVideoOrientationSupported) {
connection.videoOrientation = currentVideoOrientation()
} else
{
return
}
if (connection.isVideoStabilizationSupported) {
//connection.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
}
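In other words, for the audio connection isVideoOrientationSupported is false, so the else { return } bailed out before the audio append ever ran. A rough sketch of the reordered delegate, using the property names from the question (and simplified: the video branch here appends the sample buffer directly instead of going through the pixel buffer adaptor and overlay):
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard CMSampleBufferDataIsReady(sampleBuffer), isRecording else { return }

    // Handle audio first so video-only connection checks can't short-circuit it
    if output == audioDataOutput {
        if let audio = videoWriterAudioInput, audio.isReadyForMoreMediaData {
            audio.append(sampleBuffer)
        }
        return
    }

    // Video-only connection configuration comes after the audio path
    if connection.isVideoOrientationSupported {
        connection.videoOrientation = currentVideoOrientation()
    }
    if let camera = videoWriterVideoInput, camera.isReadyForMoreMediaData {
        camera.append(sampleBuffer)
    }
}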
I am trying to capture camera video in memory using AVCaptureSession so that I can later write the video data to a movie file. While I have been able to successfully start a capture session, I am not able to successfully write the captured CMSampleBuffers to a compressed movie file using AVAssetWriter.
Appending sample buffers using AVAssetWriterInput's append method fails and when I inspect the AVAssetWriter's error property, I get the following:
Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could not be completed" UserInfo={NSUnderlyingError=0x17005d070 {Error Domain=NSOSStatusErrorDomain Code=-12780 "(null)"}, NSLocalizedFailureReason=An unknown error occurred (-12780), NSLocalizedDescription=The operation could not be completed}
As far as I can tell, -11800 indicates AVErrorUnknown; however, I have not been able to find information about the -12780 error code, which as far as I can tell is undocumented. Below I have pasted the main files from the example project I set up to demonstrate the issue.
Any guidance would be greatly appreciated. Thanks!
ViewController.swift
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
private let recordingClipQueue = DispatchQueue(label: "com.example.recordingClipQueue")
private let videoDataOutputQueue = DispatchQueue(label: "com.example.videoDataOutputQueue")
private let session = AVCaptureSession()
private var backfillSampleBufferList = [CMSampleBuffer]()
override func viewDidLoad() {
super.viewDidLoad()
session.sessionPreset = AVCaptureSessionPreset640x480
let videoDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo);
let videoDeviceInput: AVCaptureDeviceInput;
do {
videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
} catch {
print("Error creating device input from video device: \(error).")
return
}
guard session.canAddInput(videoDeviceInput) else {
print("Could not add video device input to capture session.")
return
}
session.addInput(videoDeviceInput)
let videoDataOutput = AVCaptureVideoDataOutput()
videoDataOutput.videoSettings = [ kCVPixelBufferPixelFormatTypeKey as NSString : Int(kCMPixelFormat_32BGRA) ]
videoDataOutput.alwaysDiscardsLateVideoFrames = true
videoDataOutput.setSampleBufferDelegate(self, queue: videoDataOutputQueue)
guard session.canAddOutput(videoDataOutput) else {
print("Could not add video data output to capture session.")
return
}
session.addOutput(videoDataOutput)
videoDataOutput.connection(withMediaType: AVMediaTypeVideo).isEnabled = true
session.startRunning()
}
private func backfillSizeInSeconds() -> Double {
if backfillSampleBufferList.count < 1 {
return 0.0
}
let earliestSampleBuffer = backfillSampleBufferList.first!
let latestSampleBuffer = backfillSampleBufferList.last!
let earliestSampleBufferPTS = CMSampleBufferGetOutputPresentationTimeStamp(earliestSampleBuffer).value
let latestSampleBufferPTS = CMSampleBufferGetOutputPresentationTimeStamp(latestSampleBuffer).value
let timescale = CMSampleBufferGetOutputPresentationTimeStamp(latestSampleBuffer).timescale
return Double(latestSampleBufferPTS - earliestSampleBufferPTS) / Double(timescale)
}
private func createClipFromBackfill() {
guard backfillSampleBufferList.count > 0 else {
print("createClipFromBackfill() called before any samples were recorded.")
return
}
let clipURL = URL(fileURLWithPath:
NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0] +
"/recorded_clip.mp4")
if FileManager.default.fileExists(atPath: clipURL.path) {
do {
try FileManager.default.removeItem(atPath: clipURL.path)
} catch {
print("Could not delete existing clip file: \(error).")
}
}
var _videoFileWriter: AVAssetWriter?
do {
_videoFileWriter = try AVAssetWriter(url: clipURL, fileType: AVFileTypeQuickTimeMovie)
} catch {
print("Could not create video file writer: \(error).")
return
}
guard let videoFileWriter = _videoFileWriter else {
print("Video writer was nil.")
return
}
let settingsAssistant = AVOutputSettingsAssistant(preset: AVOutputSettingsPreset640x480)!
guard videoFileWriter.canApply(outputSettings: settingsAssistant.videoSettings, forMediaType: AVMediaTypeVideo) else {
print("Video file writer could not apply video output settings.")
return
}
let earliestRecordedSampleBuffer = backfillSampleBufferList.first!
let _formatDescription = CMSampleBufferGetFormatDescription(earliestRecordedSampleBuffer)
guard let formatDescription = _formatDescription else {
print("Earliest recording pixel buffer format description was nil.")
return
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo,
outputSettings: settingsAssistant.videoSettings,
sourceFormatHint: formatDescription)
guard videoFileWriter.canAdd(videoWriterInput) else {
print("Could not add video writer input to video file writer.")
return
}
videoFileWriter.add(videoWriterInput)
guard videoFileWriter.startWriting() else {
print("Video file writer not ready to write file.")
return
}
videoFileWriter.startSession(atSourceTime: CMSampleBufferGetOutputPresentationTimeStamp(earliestRecordedSampleBuffer))
videoWriterInput.requestMediaDataWhenReady(on: recordingClipQueue) {
while videoWriterInput.isReadyForMoreMediaData {
if self.backfillSampleBufferList.count > 0 {
let sampleBufferToAppend = self.backfillSampleBufferList.first!.deepCopy()
let appendSampleBufferSucceeded = videoWriterInput.append(sampleBufferToAppend)
if !appendSampleBufferSucceeded {
print("Failed to append sample buffer to asset writer input: \(videoFileWriter.error!)")
print("Video file writer status: \(videoFileWriter.status.rawValue)")
}
self.backfillSampleBufferList.remove(at: 0)
} else {
videoWriterInput.markAsFinished()
videoFileWriter.finishWriting {
print("Saved clip to \(clipURL)")
}
break
}
}
}
}
// MARK: AVCaptureVideoDataOutputSampleBufferDelegate
func captureOutput(_ captureOutput: AVCaptureOutput!,
didOutputSampleBuffer sampleBuffer: CMSampleBuffer!,
from connection: AVCaptureConnection!) {
guard let buffer = sampleBuffer else {
print("Captured sample buffer was nil.")
return
}
let sampleBufferCopy = buffer.deepCopy()
backfillSampleBufferList.append(sampleBufferCopy)
if backfillSizeInSeconds() > 3.0 {
session.stopRunning()
createClipFromBackfill()
}
}
func captureOutput(_ captureOutput: AVCaptureOutput!,
didDrop sampleBuffer: CMSampleBuffer!,
from connection: AVCaptureConnection!) {
print("Sample buffer dropped.")
}
}
CVPixelBuffer+Copy.swift:
import CoreVideo
extension CVPixelBuffer {
func deepCopy() -> CVPixelBuffer {
precondition(CFGetTypeID(self) == CVPixelBufferGetTypeID(), "deepCopy() cannot copy a non-CVPixelBuffer")
var _copy : CVPixelBuffer?
CVPixelBufferCreate(
nil,
CVPixelBufferGetWidth(self),
CVPixelBufferGetHeight(self),
CVPixelBufferGetPixelFormatType(self),
CVBufferGetAttachments(self, CVAttachmentMode.shouldPropagate),
&_copy)
guard let copy = _copy else {
print("Pixel buffer copy was nil.")
fatalError()
}
CVBufferPropagateAttachments(self, copy)
CVPixelBufferLockBaseAddress(self, CVPixelBufferLockFlags.readOnly)
CVPixelBufferLockBaseAddress(copy, CVPixelBufferLockFlags(rawValue: 0))
let sourceBaseAddress = CVPixelBufferGetBaseAddress(self)
let copyBaseAddress = CVPixelBufferGetBaseAddress(copy)
memcpy(copyBaseAddress, sourceBaseAddress, CVPixelBufferGetHeight(self) * CVPixelBufferGetBytesPerRow(self))
CVPixelBufferUnlockBaseAddress(copy, CVPixelBufferLockFlags(rawValue: 0))
CVPixelBufferUnlockBaseAddress(self, CVPixelBufferLockFlags.readOnly)
return copy
}
}
CMSampleBuffer+Copy.swift:
import CoreMedia
extension CMSampleBuffer {
func deepCopy() -> CMSampleBuffer {
let _pixelBuffer = CMSampleBufferGetImageBuffer(self)
guard let pixelBuffer = _pixelBuffer else {
print("Pixel buffer to copy was nil.")
fatalError()
}
let pixelBufferCopy = pixelBuffer.deepCopy()
let _formatDescription = CMSampleBufferGetFormatDescription(self)
guard let formatDescription = _formatDescription else {
print("Format description to copy was nil.")
fatalError()
}
var timingInfo = kCMTimingInfoInvalid
let getTimingInfoResult = CMSampleBufferGetSampleTimingInfo(self, 0, &timingInfo)
guard getTimingInfoResult == noErr else {
print("Could not get timing info to copy: \(getTimingInfoResult).")
fatalError()
}
timingInfo.presentationTimeStamp = CMSampleBufferGetOutputPresentationTimeStamp(self)
var _copy : CMSampleBuffer?
let createCopyResult = CMSampleBufferCreateForImageBuffer(kCFAllocatorDefault,
pixelBufferCopy,
true,
nil,
nil,
formatDescription,
&timingInfo,
&_copy);
guard createCopyResult == noErr else {
print("Error creating copy of sample buffer: \(createCopyResult).")
fatalError()
}
guard let copy = _copy else {
print("Copied sample buffer was nil.")
fatalError()
}
return copy
}
}
I also ran into this while trying to synthesize videos. I finally figured out that -[AVAssetWriterInput appendSampleBuffer:] only works on device (as of iOS 11.2.6 anyway) if the underlying pixel buffer is backed by an IOSurface.
If you modify your CVPixelBuffer.deepCopy() method so that the attributes dictionary you pass to CVPixelBufferCreate includes the kCVPixelBufferIOSurfacePropertiesKey key with an empty dictionary as its value, it'll probably work.
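Applied to the deepCopy() extension from the question, that might look roughly like this (a sketch: it shows only the attributes dictionary and the CVPixelBufferCreate call, and drops the attachments the question propagated, which you may want to merge back in):
// Ask CoreVideo for an IOSurface-backed copy
let attributes: [String: Any] = [
    kCVPixelBufferIOSurfacePropertiesKey as String: [String: Any]()
]

var _copy: CVPixelBuffer?
CVPixelBufferCreate(
    nil,
    CVPixelBufferGetWidth(self),
    CVPixelBufferGetHeight(self),
    CVPixelBufferGetPixelFormatType(self),
    attributes as CFDictionary,
    &_copy)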
After more research and experimentation, it appears using AVAssetWriterInputPixelBufferAdaptor to append the CVPixelBuffers of the CMSampleBuffers I'm storing to the AVAssetWriterInput works without generating an error.
Below is the modified version of ViewController.swift implementation that uses AVAssetWriterInputPixelBufferAdaptor to append pixel buffers.
ViewController.swift
import UIKit
import AVFoundation
import Photos
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
private let recordingClipQueue = DispatchQueue(label: "com.example.recordingClipQueue")
private let videoDataOutputQueue = DispatchQueue(label: "com.example.videoDataOutputQueue")
private let session = AVCaptureSession()
private var backfillSampleBufferList = [CMSampleBuffer]()
override func viewDidLoad() {
super.viewDidLoad()
session.sessionPreset = AVCaptureSessionPreset640x480
let videoDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo);
let videoDeviceInput: AVCaptureDeviceInput;
do {
videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
} catch {
print("Error creating device input from video device: \(error).")
return
}
guard session.canAddInput(videoDeviceInput) else {
print("Could not add video device input to capture session.")
return
}
session.addInput(videoDeviceInput)
let videoDataOutput = AVCaptureVideoDataOutput()
videoDataOutput.videoSettings = [ kCVPixelBufferPixelFormatTypeKey as NSString : Int(kCMPixelFormat_32BGRA) ]
videoDataOutput.alwaysDiscardsLateVideoFrames = true
videoDataOutput.setSampleBufferDelegate(self, queue: videoDataOutputQueue)
guard session.canAddOutput(videoDataOutput) else {
print("Could not add video data output to capture session.")
return
}
session.addOutput(videoDataOutput)
videoDataOutput.connection(withMediaType: AVMediaTypeVideo).isEnabled = true
session.startRunning()
}
private func backfillSizeInSeconds() -> Double {
if backfillSampleBufferList.count < 1 {
return 0.0
}
let earliestSampleBuffer = backfillSampleBufferList.first!
let latestSampleBuffer = backfillSampleBufferList.last!
let earliestSampleBufferPTS = CMSampleBufferGetOutputPresentationTimeStamp(earliestSampleBuffer).value
let latestSampleBufferPTS = CMSampleBufferGetOutputPresentationTimeStamp(latestSampleBuffer).value
let timescale = CMSampleBufferGetOutputPresentationTimeStamp(latestSampleBuffer).timescale
return Double(latestSampleBufferPTS - earliestSampleBufferPTS) / Double(timescale)
}
private func createClipFromBackfill() {
guard backfillSampleBufferList.count > 0 else {
print("createClipFromBackfill() called before any samples were recorded.")
return
}
let clipURL = URL(fileURLWithPath:
NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0] +
"/recorded_clip.mp4")
if FileManager.default.fileExists(atPath: clipURL.path) {
do {
try FileManager.default.removeItem(atPath: clipURL.path)
} catch {
print("Could not delete existing clip file: \(error).")
}
}
var _videoFileWriter: AVAssetWriter?
do {
_videoFileWriter = try AVAssetWriter(url: clipURL, fileType: AVFileTypeMPEG4)
} catch {
print("Could not create video file writer: \(error).")
return
}
guard let videoFileWriter = _videoFileWriter else {
print("Video writer was nil.")
return
}
let settingsAssistant = AVOutputSettingsAssistant(preset: AVOutputSettingsPreset640x480)!
guard videoFileWriter.canApply(outputSettings: settingsAssistant.videoSettings, forMediaType: AVMediaTypeVideo) else {
print("Video file writer could not apply video output settings.")
return
}
let earliestRecordedSampleBuffer = backfillSampleBufferList.first!
let _formatDescription = CMSampleBufferGetFormatDescription(earliestRecordedSampleBuffer)
guard let formatDescription = _formatDescription else {
print("Earliest recording pixel buffer format description was nil.")
return
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo,
outputSettings: settingsAssistant.videoSettings,
sourceFormatHint: formatDescription)
guard videoFileWriter.canAdd(videoWriterInput) else {
print("Could not add video writer input to video file writer.")
return
}
videoFileWriter.add(videoWriterInput)
let pixelAdapterBufferAttributes = [ kCVPixelBufferPixelFormatTypeKey as String : Int(kCMPixelFormat_32BGRA) ]
let pixelAdapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: pixelAdapterBufferAttributes)
guard videoFileWriter.startWriting() else {
print("Video file writer not ready to write file.")
return
}
videoFileWriter.startSession(atSourceTime: CMSampleBufferGetOutputPresentationTimeStamp(earliestRecordedSampleBuffer))
videoWriterInput.requestMediaDataWhenReady(on: recordingClipQueue) {
while videoWriterInput.isReadyForMoreMediaData {
if self.backfillSampleBufferList.count > 0 {
let sampleBufferToAppend = self.backfillSampleBufferList.first!.deepCopy()
let appendSampleBufferSucceeded = pixelAdapter.append(CMSampleBufferGetImageBuffer(sampleBufferToAppend)!,
withPresentationTime: CMSampleBufferGetOutputPresentationTimeStamp(sampleBufferToAppend))
if !appendSampleBufferSucceeded {
print("Failed to append sample buffer to asset writer input: \(videoFileWriter.error!)")
print("Video file writer status: \(videoFileWriter.status.rawValue)")
}
self.backfillSampleBufferList.remove(at: 0)
} else {
videoWriterInput.markAsFinished()
videoFileWriter.finishWriting {
print("Saving clip to \(clipURL)")
}
break
}
}
}
}
// MARK: AVCaptureVideoDataOutputSampleBufferDelegate
func captureOutput(_ captureOutput: AVCaptureOutput!,
didOutputSampleBuffer sampleBuffer: CMSampleBuffer!,
from connection: AVCaptureConnection!) {
guard let buffer = sampleBuffer else {
print("Captured sample buffer was nil.")
return
}
let sampleBufferCopy = buffer.deepCopy()
backfillSampleBufferList.append(sampleBufferCopy)
if backfillSizeInSeconds() > 3.0 {
session.stopRunning()
createClipFromBackfill()
}
}
func captureOutput(_ captureOutput: AVCaptureOutput!,
didDrop sampleBuffer: CMSampleBuffer!,
from connection: AVCaptureConnection!) {
print("Sample buffer dropped.")
}
}
I ran into issues with the same error codes when creating CVPixelBuffers and CMSampleBuffers manually to build a video from individual frames rendered by CoreGraphics. I could solve the problem by using an AVAssetWriterInputPixelBufferAdaptor instead, like you suggested in your own answer. For some reason, this was only needed when the code was run on an actual device; on the simulator, manually creating the buffers worked fine.
I noticed that the same error codes AVFoundationErrorDomain Code -11800 and NSOSStatusErrorDomain Code -12780 can also occur for other reasons, for example:
There exists already a file at the destination URL provided to AVAssetWriter
The destination URL is not a file URL (it must be created with URL.init(fileURLWithPath:) and not with URL.init(string:)).
(Posting this for the sake of completeness, your code already handles this correctly.)
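For completeness, guarding against those two causes looks roughly like this (illustrative paths and names, not taken from the question):
import AVFoundation

func makeWriter() throws -> AVAssetWriter {
    // The destination must be a file URL (URL(fileURLWithPath:), not URL(string:))
    let outputURL = URL(fileURLWithPath: NSTemporaryDirectory())
        .appendingPathComponent("clip.mp4")
    // AVAssetWriter refuses to write over an existing file, so remove any leftover first
    try? FileManager.default.removeItem(at: outputURL)
    return try AVAssetWriter(outputURL: outputURL, fileType: .mp4)
}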
I am writing a video to the photo library / documents directory using a capture session and AVAssetWriter. What I want to know is why, when I append a pixel buffer to the adapter, I get false here: print("video is \(bobo)"). The same happens with audio.
This doesn't save my output file, and I get an error on export and saving.
I have been working on this for a long time; any suggestions or pointers to my mistake would help me a lot.
The main problem is that this issue is very random, say 1 in 10 times, but it does persist and I want to eliminate it.
My code where I am appending the pixel buffer to the adapter:
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!)
{
starTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
if captureOutput == videoOutput
{
if self.record == true{
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
if self.record == true
{
if self.writerInput.isReadyForMoreMediaData
{
DispatchQueue(label: "newQeueLocalFeedVideo2", attributes: DispatchQueue.Attributes.concurrent).sync(execute: {
starTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
let bobo = self.adapter.append(pixelBuffer!, withPresentationTime: self.starTime)
print("video is \(bobo)")
})
}
}
}
}else if captureOutput == audioOutput{
if self.record == true
{
if audioWriterInput.isReadyForMoreMediaData
{
let bo = audioWriterInput.append(sampleBuffer)
print("audio conversion is \(bo)")
}
}
}
}
/*****------******/
Code where I am setting up the asset writer:
{
let fileUrl = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent("\(getCurrentDate())-capturedvideo.mp4")
lastPath = fileUrl.path
videoWriter = try? AVAssetWriter(outputURL: fileUrl, fileType: AVFileTypeMPEG4)
lastPathURL = fileUrl
let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width) as Float), AVVideoHeightKey : NSNumber(value: Float(outputSize.height) as Float)] as [String : Any]
writerInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
writerInput.expectsMediaDataInRealTime = true
// writerInput.performsMultiPassEncodingIfSupported = true
audioWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: DejalActivityView.getAudioDictionary() as? [String:AnyObject])
videoWriter.add(writerInput)
videoWriter.add(audioWriterInput)
adapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: writerInput, sourcePixelBufferAttributes: DejalActivityView.getAdapterDictionary() as? [String:AnyObject])
videoWriter.startWriting()
videoWriter.startSession(atSourceTime: starTime)
//self.client?.recordCaptureSession.captureSession.startRunning()
record = true
}
And to export the file to a video, I am using this code:
self.videoWriter.finishWriting { () -> Void in
Thread.sleep(forTimeInterval: 1.0)
if self.videoWriter.status == AVAssetWriterStatus.failed {
print("oh noes, an error: \(self.videoWriter.error.debugDescription)")
completionHandler(true)
} else {
let content = FileManager.default.contents(atPath: self.lastPathURL.path)
print("wrote video: \(self.lastPathURL.path) at size: \(content?.count)")
// This below line will save the video to photo library
HEPhotoLibraryHelper.saveVideosToPhotoLibrary(self.lastPathURL, withCompletionBlock: { (result) in
if result == true
{
do
{
try HEDocDirectory.shared.fileManagerDefault .removeItem(atPath: self.lastPath)
}catch let err as NSError
{
print("Error in removing file from doc dir \(err.localizedDescription)")
}
}
})
}
}