I am using an AVCaptureSession to capture video and audio input and encode an H.264 video with AVAssetWriter.
If I don't write the audio, the video is encoded as expected. But if I write the audio, I get a corrupt video.
If I inspect the audio CMSampleBuffer being supplied to the AVAssetWriter it shows this information:
invalid = NO
dataReady = YES
makeDataReadyCallback = 0x0
makeDataReadyRefcon = 0x0
formatDescription = <CMAudioFormatDescription 0x17410ba30 [0x1b3a70bb8]> {
mediaType:'soun'
mediaSubType:'lpcm'
mediaSpecific: {
ASBD: {
mSampleRate: 44100.000000
mFormatID: 'lpcm'
mFormatFlags: 0xc
mBytesPerPacket: 2
mFramesPerPacket: 1
mBytesPerFrame: 2
mChannelsPerFrame: 1
mBitsPerChannel: 16 }
cookie: {(null)}
ACL: {(null)}
FormatList Array: {(null)}
}
extensions: {(null)}
Since it is supplying lpcm audio (the mFormatFlags value 0xc is kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked, i.e. packed 16-bit signed integer samples), I have configured the AVAssetWriterInput with this setting for sound (I have tried both one and two channels):
var channelLayout = AudioChannelLayout()
memset(&channelLayout, 0, MemoryLayout<AudioChannelLayout>.size)
channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_Mono
let audioOutputSettings: [String: Any] = [
AVFormatIDKey: UInt(kAudioFormatLinearPCM),
AVNumberOfChannelsKey: 1,
AVSampleRateKey: 44100.0,
AVLinearPCMIsBigEndianKey: false,
AVLinearPCMIsFloatKey: false,
AVLinearPCMBitDepthKey: 16,
AVLinearPCMIsNonInterleaved: false,
AVChannelLayoutKey: NSData(bytes: &channelLayout, length: MemoryLayout<AudioChannelLayout>.size)
]
self.assetWriterAudioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioOutputSettings)
self.assetWriter.add(self.assetWriterAudioInput)
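A note in passing (an observation, not from the original post): for capture-driven writing, the AVAssetWriterInput should be told to expect real-time data, as the other snippets further down this page do. A sketch:

self.assetWriterAudioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioOutputSettings)
// Without this, isReadyForMoreMediaData is tuned for offline transcoding and can stall a live capture pipeline.
self.assetWriterAudioInput.expectsMediaDataInRealTime = true
self.assetWriter.add(self.assetWriterAudioInput)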
When I use the lpcm settings above, I cannot open the video with any application. I have also tried kAudioFormatMPEG4AAC and kAudioFormatAppleLossless; I still get a corrupt video, but with those I am able to view it using QuickTime Player 8 (not QuickTime Player 7). The player is confused about the duration of the video, and no sound is played.
When recording is complete, I am calling:
func endRecording(_ completionHandler: @escaping () -> ()) {
isRecording = false
assetWriterVideoInput.markAsFinished()
assetWriterAudioInput.markAsFinished()
assetWriter.finishWriting(completionHandler: completionHandler)
}
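A defensive variant (a sketch; the status check mirrors the one used further down this page) avoids finishing a writer that never started:

func endRecording(_ completionHandler: @escaping () -> ()) {
    isRecording = false
    // markAsFinished() and finishWriting() are only meaningful while the writer is in the .writing state.
    guard assetWriter.status == .writing else { return }
    assetWriterVideoInput.markAsFinished()
    assetWriterAudioInput.markAsFinished()
    assetWriter.finishWriting(completionHandler: completionHandler)
}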
This is how the AVCaptureSession is being configured:
func setupCapture() {
captureSession = AVCaptureSession()
if (captureSession == nil) {
fatalError("ERROR: Couldnt create a capture session")
}
captureSession?.beginConfiguration()
captureSession?.sessionPreset = AVCaptureSessionPreset1280x720
let frontDevices = AVCaptureDevice.devices().filter{ ($0 as AnyObject).hasMediaType(AVMediaTypeVideo) && ($0 as AnyObject).position == AVCaptureDevicePosition.front }
if let captureDevice = frontDevices.first as? AVCaptureDevice {
do {
let videoDeviceInput: AVCaptureDeviceInput
do {
videoDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
}
catch {
fatalError("Could not create AVCaptureDeviceInput instance with error: \(error).")
}
guard (captureSession?.canAddInput(videoDeviceInput))! else {
fatalError()
}
captureSession?.addInput(videoDeviceInput)
}
}
do {
let audioDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeAudio)
let audioDeviceInput: AVCaptureDeviceInput
do {
audioDeviceInput = try AVCaptureDeviceInput(device: audioDevice)
}
catch {
fatalError("Could not create AVCaptureDeviceInput instance with error: \(error).")
}
guard (captureSession?.canAddInput(audioDeviceInput))! else {
fatalError()
}
captureSession?.addInput(audioDeviceInput)
}
do {
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String : kCVPixelFormatType_32BGRA]
dataOutput.alwaysDiscardsLateVideoFrames = true
let queue = DispatchQueue(label: "com.3DTOPO.videosamplequeue")
dataOutput.setSampleBufferDelegate(self, queue: queue)
guard (captureSession?.canAddOutput(dataOutput))! else {
fatalError()
}
captureSession?.addOutput(dataOutput)
videoConnection = dataOutput.connection(withMediaType: AVMediaTypeVideo)
}
do {
let audioDataOutput = AVCaptureAudioDataOutput()
let queue = DispatchQueue(label: "com.3DTOPO.audiosamplequeue")
audioDataOutput.setSampleBufferDelegate(self, queue: queue)
guard (captureSession?.canAddOutput(audioDataOutput))! else {
fatalError()
}
captureSession?.addOutput(audioDataOutput)
audioConnection = audioDataOutput.connection(withMediaType: AVMediaTypeAudio)
}
captureSession?.commitConfiguration()
// this will trigger capture on its own queue
captureSession?.startRunning()
}
The AVCaptureVideoDataOutput delegate method:
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
if (connection == audioConnection) {
delegate?.audioSampleUpdated(sampleBuffer: sampleBuffer)
return
}
// ... Write video buffer ... //
}
Which calls:
func audioSampleUpdated(sampleBuffer: CMSampleBuffer) {
if (isRecording) {
while !assetWriterAudioInput.isReadyForMoreMediaData {}
if (!assetWriterAudioInput.append(sampleBuffer)) {
print("Unable to write to audio input");
}
}
}
If I disable the assetWriterAudioInput.append() call above, then the video isn't corrupt but of course I have no audio encoded. How can I get both video and audio encoding to work?
I figured it out. I was setting the assetWriter.startSession source time to zero, and then subtracting that start time from the current CACurrentMediaTime() when writing the pixel data.
I changed the assetWriter.startSession source time to the current CACurrentMediaTime(), and I no longer subtract the start time when writing the video frames.
Old start session code:
assetWriter.startWriting()
assetWriter.startSession(atSourceTime: kCMTimeZero)
New code that works:
let presentationStartTime = CMTimeMakeWithSeconds(CACurrentMediaTime(), 240)
assetWriter.startWriting()
assetWriter.startSession(atSourceTime: presentationStartTime)
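An equivalent approach (a sketch, not part of the original fix) is to defer startSession until the first captured buffer arrives and use that buffer's own timestamp, so the writer's timeline matches the capture timeline without any clock arithmetic:

// In the sample buffer callback, before the first append; `sessionStarted` is a hypothetical flag.
if !sessionStarted {
    assetWriter.startWriting()
    assetWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
    sessionStarted = true
}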
Related
I have created a video file from an iPhone's camera/microphone using AVAssetWriter, AVAssetWriterInput, etc., and when I play the resulting video in iMovie, QuickTime Player, and DaVinci Resolve, there appears to be no audio. iMovie's audio waveform is empty, although the QuickTime inspector shows an audio track present.
However, I opened the same video file using Ocenaudio and it reads and plays the audio completely fine, so I know the audio is somewhere in the file.
I used widely available code for using AVAssetWriter, such as:
previewVideoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoOutputSettings)
previewVideoWriterInput.expectsMediaDataInRealTime = true
previewAudioWriterInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioOutputSettings)
previewAudioWriterInput.expectsMediaDataInRealTime = true
if previewVideoWriter.canAdd(previewVideoWriterInput) == true { previewVideoWriter.add(previewVideoWriterInput) }
if previewVideoWriter.canAdd(previewAudioWriterInput) == true { previewVideoWriter.add(previewAudioWriterInput) }
The main difference is that I'm using a CIFilter on the video data in the captureOutput() function. Here's a simplified version of my captureOutput() code:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
recordingQueue.async {
autoreleasepool {
if output == self.previewVideoOutput {
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let cameraImage = CIImage(cvPixelBuffer: pixelBuffer!)
if self.currentFilter != nil { self.currentFilter.setValue(cameraImage, forKey: kCIInputImageKey) }
let filteredImage = UIImage(ciImage: self.currentFilter != nil ? self.currentFilter.value(forKey: kCIOutputImageKey) as! CIImage : cameraImage)
let timeStampie = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
self.previewProgressTimeStamp = timeStampie
if self.previewFirstFrameTimeStamp == CMTime.zero {
self.previewFirstFrameTimeStamp = self.previewProgressTimeStamp
}
self.previewProgressTimeStamp = CMTimeSubtract(self.previewProgressTimeStamp, self.previewFirstFrameTimeStamp)
if self.previewVideoWriterInput.isReadyForMoreMediaData {
let appendSuccess = self.previewVideoAdapter.append(self.pixelBufferFromImage(filteredImage)!, withPresentationTime: self.previewProgressTimeStamp)
}
}
else if output == self.previewAudioOutput {
if self.previewAudioWriterInput.isReadyForMoreMediaData {
let appendSuccess = self.previewAudioWriterInput.append(sampleBuffer)
}
}
}
}
}
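As an aside (a sketch with assumed names: ciContext would be a reused CIContext property, and filteredCIImage the CIImage before it is wrapped in a UIImage), the UIImage round trip through pixelBufferFromImage() can be skipped by rendering straight into a buffer from the adaptor's pool:

var renderedBuffer: CVPixelBuffer?
// pixelBufferPool becomes non-nil once the writer session has started.
CVPixelBufferPoolCreatePixelBuffer(nil, self.previewVideoAdapter.pixelBufferPool!, &renderedBuffer)
if let renderedBuffer = renderedBuffer {
    self.ciContext.render(filteredCIImage, to: renderedBuffer)
    self.previewVideoAdapter.append(renderedBuffer, withPresentationTime: self.previewProgressTimeStamp)
}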
This is the code I use when it's time to finish recording:
if previewVideoWriterInput != nil {
previewVideoWriterInput.markAsFinished()
previewAudioWriterInput.markAsFinished()
}
if previewVideoWriter != nil {
previewVideoWriter.endSession(atSourceTime: previewProgressTimeStamp)
previewVideoWriter.finishWriting { () -> Void in
self.recordingCompletedHandler?()
}
}
Any ideas why the audio being written to the video file is not being seen by video players? And how would I fix that?
Edit:
Here's the code I'm using to setup the AVCapture outputs as requested.
captureSession = AVCaptureSession()
captureSession.beginConfiguration()
captureSession.sessionPreset = .vga640x480
captureSession.usesApplicationAudioSession = true
if frontCameraInput != nil && captureSession.canAddInput(frontCameraInput) == true { captureSession.addInput(frontCameraInput) }
if let device = AVCaptureDevice.default(for: .audio) { audioCaptureDevice = device }
if audioCaptureDevice != nil {
do {
audioCaptureDeviceInput = try AVCaptureDeviceInput(device: audioCaptureDevice)
} catch let error {
print("\(error.localizedDescription)")
}
}
if audioCaptureDeviceInput != nil && captureSession.canAddInput(audioCaptureDeviceInput) == true { captureSession.addInput(audioCaptureDeviceInput) }
previewVideoOutput.setSampleBufferDelegate(self, queue: recordingQueue)
previewAudioOutput.setSampleBufferDelegate(self, queue: recordingQueue)
if captureSession.canAddOutput(previewVideoOutput) { captureSession.addOutput(previewVideoOutput) }
if captureSession.canAddOutput(previewAudioOutput) { captureSession.addOutput(previewAudioOutput) }
captureSession.commitConfiguration()
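One possible cause, judging by the accepted fix to the first question on this page (an assumption, not a confirmed answer): the video timestamps are rebased to start at zero via CMTimeSubtract, but the audio sample buffers are appended with their original capture timestamps, so the audio samples land outside the movie's time range. A sketch of retiming the audio onto the same rebased timeline (modern CoreMedia Swift signatures):

func retimed(_ sampleBuffer: CMSampleBuffer, by offset: CMTime) -> CMSampleBuffer? {
    var count: CMItemCount = 0
    CMSampleBufferGetSampleTimingInfoArray(sampleBuffer, entryCount: 0, arrayToFill: nil, entriesNeededOut: &count)
    var timing = [CMSampleTimingInfo](repeating: CMSampleTimingInfo(), count: count)
    CMSampleBufferGetSampleTimingInfoArray(sampleBuffer, entryCount: count, arrayToFill: &timing, entriesNeededOut: &count)
    for i in 0..<count {
        // Shift each sample back by the first video frame's timestamp.
        timing[i].presentationTimeStamp = CMTimeSubtract(timing[i].presentationTimeStamp, offset)
        timing[i].decodeTimeStamp = CMTimeSubtract(timing[i].decodeTimeStamp, offset)
    }
    var adjusted: CMSampleBuffer?
    CMSampleBufferCreateCopyWithNewTiming(allocator: kCFAllocatorDefault,
                                          sampleBuffer: sampleBuffer,
                                          sampleTimingEntryCount: count,
                                          sampleTimingArray: &timing,
                                          sampleBufferOut: &adjusted)
    return adjusted
}

// Usage in the audio branch:
// if let adjusted = retimed(sampleBuffer, by: self.previewFirstFrameTimeStamp) {
//     self.previewAudioWriterInput.append(adjusted)
// }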
Related
I'm trying to record video and audio and send them over the network so that they can be played back in real time on other clients. I've managed to record and play back video successfully, but audio still cannot be played back (see the AVAudioPlayer at the bottom of the code below). What am I doing wrong, or what is missing? (There are a couple of other Stack Overflow questions which seem to address the same issue, but even where their comments show that some people were able to make it work, none of them gives an explicit, working answer.) Thank you in advance for any input.
let captureSession = AVCaptureSession()
private func startVideoFeed() {
let sessionPreset = AVCaptureSession.Preset.low
if captureSession.canSetSessionPreset(sessionPreset) {
captureSession.sessionPreset = sessionPreset
}
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .notDetermined:
AVCaptureDevice.requestAccess(for: .video) { success in
self.startVideoFeed()
}
case .authorized:
captureSession.beginConfiguration()
let captureVideoDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)!
let captureVideoInput = try! AVCaptureDeviceInput(device: captureVideoDevice)
if captureSession.canAddInput(captureVideoInput) {
captureSession.addInput(captureVideoInput)
}
let captureVideoOutput = AVCaptureVideoDataOutput()
captureVideoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
if captureSession.canAddOutput(captureVideoOutput) {
captureSession.addOutput(captureVideoOutput)
}
captureSession.commitConfiguration()
captureSession.startRunning()
default:
break
}
}
private func startAudioFeed() {
switch AVCaptureDevice.authorizationStatus(for: .audio) {
case .notDetermined:
AVCaptureDevice.requestAccess(for: .audio) { success in
self.startAudioFeed()
}
case .authorized:
captureSession.beginConfiguration()
let captureAudioDevice = AVCaptureDevice.default(for: .audio)!
let captureAudioInput = try! AVCaptureDeviceInput(device: captureAudioDevice)
if captureSession.canAddInput(captureAudioInput) {
captureSession.addInput(captureAudioInput)
}
let captureAudioOutput = AVCaptureAudioDataOutput()
captureAudioOutput.audioSettings = [AVFormatIDKey: kAudioFormatLinearPCM, AVNumberOfChannelsKey: NSNumber(value: 1), AVSampleRateKey: NSNumber(value: 44100)]
captureAudioOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
if captureSession.canAddOutput(captureAudioOutput) {
captureSession.addOutput(captureAudioOutput)
}
captureSession.commitConfiguration()
default:
break
}
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
if let imageBuffer = sampleBuffer.imageBuffer {
let ciImage = CIImage(cvPixelBuffer: imageBuffer)
let cgImage = CIContext().createCGImage(ciImage, from: ciImage.extent)!
let data = CFDataCreateMutable(nil, 0)!
let imageDestination = CGImageDestinationCreateWithData(data, kUTTypeJPEG, 1, nil)!
CGImageDestinationAddImage(imageDestination, cgImage, [kCGImageDestinationLossyCompressionQuality: NSNumber(value: 0)] as CFDictionary)
CGImageDestinationFinalize(imageDestination)
play(data: data as Data)
} else if let dataBuffer = sampleBuffer.dataBuffer {
let data = try! dataBuffer.dataBytes()
play(data: data)
}
}
private func play(data: Data) {
if let image = CGImage(jpegDataProviderSource: CGDataProvider(data: data as CFData)!, decode: nil, shouldInterpolate: false, intent: .defaultIntent) {
// image is a valid image
} else if let audioPlayer = try? AVAudioPlayer(data: data) {
audioPlayer.play()
// audioPlayer is always nil with error: Error Domain=NSOSStatusErrorDomain Code=1954115647 "(null)"
}
}
The CMSampleBufferGetDataBuffer documentation (https://developer.apple.com/documentation/coremedia/1489629-cmsamplebuffergetdatabuffer) says:
"The caller does not own the returned dataBuffer, and must retain it explicitly if the caller needs to maintain a reference to it."
You will need to do a CFRetain and CFRelease on the data.
The player is nil because of the data you are initializing it with: AVAudioPlayer expects data in a recognized audio file container (WAV, M4A, CAF, and so on), not raw LPCM bytes. The OSStatus in NSOSStatusErrorDomain Code=1954115647 is the four-character code 'typ?', i.e. kAudioFileUnsupportedFileTypeError.
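A minimal playback sketch for the raw LPCM case (assuming the capture settings above: mono, 44.1 kHz; engine and playerNode would be long-lived properties in real code, and AVAudioPlayerNode prefers the engine's float format, hence the conversion):

import AVFoundation

let engine = AVAudioEngine()
let playerNode = AVAudioPlayerNode()

func play(audioSampleBuffer sampleBuffer: CMSampleBuffer) {
    guard let description = CMSampleBufferGetFormatDescription(sampleBuffer),
          var asbd = CMAudioFormatDescriptionGetStreamBasicDescription(description)?.pointee else { return }
    guard let sourceFormat = AVAudioFormat(streamDescription: &asbd) else { return }
    let frames = AVAudioFrameCount(CMSampleBufferGetNumSamples(sampleBuffer))
    guard let sourceBuffer = AVAudioPCMBuffer(pcmFormat: sourceFormat, frameCapacity: frames) else { return }
    sourceBuffer.frameLength = frames
    // Copy the LPCM samples out of the CMSampleBuffer.
    CMSampleBufferCopyPCMDataIntoAudioBufferList(sampleBuffer, at: 0, frameCount: Int32(frames), into: sourceBuffer.mutableAudioBufferList)
    // Convert to the canonical float format before scheduling.
    guard let floatFormat = AVAudioFormat(standardFormatWithSampleRate: sourceFormat.sampleRate, channels: sourceFormat.channelCount),
          let converter = AVAudioConverter(from: sourceFormat, to: floatFormat),
          let floatBuffer = AVAudioPCMBuffer(pcmFormat: floatFormat, frameCapacity: frames) else { return }
    try? converter.convert(to: floatBuffer, from: sourceBuffer)
    if !engine.isRunning {
        engine.attach(playerNode)
        engine.connect(playerNode, to: engine.mainMixerNode, format: floatFormat)
        try? engine.start()
        playerNode.play()
    }
    playerNode.scheduleBuffer(floatBuffer, completionHandler: nil)
}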
Related
I am making an app that records video. Up until now, I have been able to successfully record video and audio using AVCaptureMovieFileOutput; however, I now need to edit the video frames in real time to overlay some data onto the video, so I began the switch to AVAssetWriter.
After the switch, I am able to record video (with my overlays) just fine using AVCaptureVideoDataOutput, however, AVCaptureAudioDataOutput never calls the delegate method so my audio doesn't record.
This is how I set up my AVCaptureSession:
fileprivate func setupCamera() {
//Set queues
queue = DispatchQueue(label: "myqueue", qos: .utility, attributes: .concurrent, autoreleaseFrequency: DispatchQueue.AutoreleaseFrequency.inherit, target: DispatchQueue.global())
//The size of output video will be 720x1280
print("Established AVCaptureSession")
cameraSession.sessionPreset = AVCaptureSession.Preset.hd1280x720
//Setup your camera
//Detect which type of camera should be used via `isUsingFrontFacingCamera`
let videoDevice: AVCaptureDevice
videoDevice = AVCaptureDevice.default(AVCaptureDevice.DeviceType.builtInWideAngleCamera, for: AVMediaType.video, position: AVCaptureDevice.Position.front)!
print("Created AVCaptureDeviceInput: video")
//Setup your microphone
var audioDevice: AVCaptureDevice
//audioDevice = AVCaptureDevice.default(for: AVMediaType.audio)!
audioDevice = AVCaptureDevice.default(AVCaptureDevice.DeviceType.builtInMicrophone, for: AVMediaType.audio, position: AVCaptureDevice.Position.unspecified)!
print("Created AVCaptureDeviceInput: audio")
do {
cameraSession.beginConfiguration()
cameraSession.automaticallyConfiguresApplicationAudioSession = false
cameraSession.usesApplicationAudioSession = true
// Add camera to your session
let videoInput = try AVCaptureDeviceInput(device: videoDevice)
if cameraSession.canAddInput(videoInput) {
cameraSession.addInput(videoInput)
print("Added AVCaptureDeviceInput: video")
} else
{
print("Could not add VIDEO!!!")
}
// Add microphone to your session
let audioInput = try AVCaptureDeviceInput(device: audioDevice)
if cameraSession.canAddInput(audioInput) {
cameraSession.addInput(audioInput)
print("Added AVCaptureDeviceInput: audio")
} else
{
print("Could not add MIC!!!")
}
//Define your video output
videoDataOutput.videoSettings = [
kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
]
videoDataOutput.alwaysDiscardsLateVideoFrames = true
if cameraSession.canAddOutput(videoDataOutput) {
videoDataOutput.setSampleBufferDelegate(self, queue: queue)
cameraSession.addOutput(videoDataOutput)
print("Added AVCaptureDataOutput: video")
}
//Define your audio output
if cameraSession.canAddOutput(audioDataOutput) {
audioDataOutput.setSampleBufferDelegate(self, queue: queue)
cameraSession.addOutput(audioDataOutput)
print("Added AVCaptureDataOutput: audio")
}
//Set up the AVAssetWriter (to write to file)
do {
videoWriter = try AVAssetWriter(outputURL: getURL()!, fileType: AVFileType.mp4)
print("Setup AVAssetWriter")
//Video Settings
let videoSettings: [String : Any] = [
AVVideoCodecKey : AVVideoCodecType.h264,
AVVideoWidthKey : 720,
AVVideoHeightKey : 1280,
]
videoWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
videoWriterVideoInput?.expectsMediaDataInRealTime = true;
print("Setup AVAssetWriterInput: Video")
if (videoWriter?.canAdd(videoWriterVideoInput!))!
{
videoWriter?.add(videoWriterVideoInput!)
print("Added AVAssetWriterInput: Video")
} else{
print("Could not add VideoWriterInput to VideoWriter")
}
// Add the audio input
//Audio Settings
let audioSettings : [String : Any] = [
AVFormatIDKey : kAudioFormatMPEG4AAC,
AVSampleRateKey : 44100,
AVEncoderBitRateKey : 64000,
AVNumberOfChannelsKey: 1
]
videoWriterAudioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioSettings)
videoWriterAudioInput?.expectsMediaDataInRealTime = true;
print("Setup AVAssetWriterInput: Audio")
if (videoWriter?.canAdd(videoWriterAudioInput!))!
{
videoWriter?.add(videoWriterAudioInput!)
print("Added AVAssetWriterInput: Audio")
} else{
print("Could not add AudioWriterInput to VideoWriter")
}
}
catch {
print("ERROR")
return
}
//PixelWriter
videoWriterInputPixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterVideoInput!, sourcePixelBufferAttributes: [
kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
kCVPixelBufferWidthKey as String: 1280,
kCVPixelBufferHeightKey as String: 768,
kCVPixelBufferOpenGLESCompatibilityKey as String: true,
])
print("Created AVAssetWriterInputPixelBufferAdaptor")
//Present the preview of video
previewLayer = AVCaptureVideoPreviewLayer(session: cameraSession)
previewLayer.position = CGPoint.init(x: CGFloat(self.view.frame.width/2), y: CGFloat(self.view.frame.height/2))
previewLayer.bounds = self.view.bounds
previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraView.layer.addSublayer(previewLayer)
print("Created AVCaptureVideoPreviewLayer")
//Don't forget start running your session
//this doesn't mean start record!
cameraSession.commitConfiguration()
cameraSession.startRunning()
}
catch let error {
debugPrint(error.localizedDescription)
}
}
Start recording:
func startRecording()
{
print("Begin Recording...")
let recordingClock = self.cameraSession.masterClock
isRecording = true
videoWriter?.startWriting()
videoWriter?.startSession(atSourceTime: CMClockGetTime(recordingClock!))
}
Stop recording:
func stopRecording()
{
if videoWriter?.status == .writing {
videoWriterVideoInput?.markAsFinished()
videoWriterAudioInput?.markAsFinished()
print("video finished")
print("audio finished")
}else{
print("not writing")
}
self.videoWriter?.finishWriting(){
self.isRecording = false
print("finished writing")
DispatchQueue.main.async{
if self.videoWriter?.status == AVAssetWriterStatus.failed {
print("status: failed")
}else if self.videoWriter?.status == AVAssetWriterStatus.completed{
print("status: completed")
}else if self.videoWriter?.status == AVAssetWriterStatus.cancelled{
print("status: cancelled")
}else{
print("status: unknown")
}
if let e=self.videoWriter?.error{
print("stop record error:", e)
}
}
}
print("Stop Recording!")
}
And this is the delegate method, which gets called for video, but not for audio:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
autoreleasepool {
guard CMSampleBufferDataIsReady(sampleBuffer) else {
return
}
if (connection.isVideoOrientationSupported) {
connection.videoOrientation = currentVideoOrientation()
} else
{
return
}
if (connection.isVideoStabilizationSupported) {
//connection.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
}
if !self.isRecording
{
return
}
if let audio = self.videoWriterAudioInput
{
if connection.audioChannels.count > 0
{
//EXECUTION NEVER REACHES HERE
if audio.isReadyForMoreMediaData
{
queue!.async() {
audio.append(sampleBuffer)
}
return
}
}
}
if let camera = self.videoWriterVideoInput, camera.isReadyForMoreMediaData {
//This is getting called!!!
queue!.async() {
self.videoWriterInputPixelBufferAdaptor.append(self.imageToBuffer(from: image!)!, withPresentationTime: timestamp)
}
}
}//End autoreleasepool
}
}
I am sure the problem does not lie with my devices or inputs, as I was able to successfully record video and audio using AVCaptureMovieFileOutput. I have also read other relevant posts with no luck:
Corrupt video capturing audio and video using AVAssetWriter
AVAssetWriter audio with video together
Ripped my hair out for days on this. My mistake was simple: the delegate method was being called, but it was returning BEFORE it reached the audio statements. These were the culprits, which needed to be moved to after the audio-processing portion of my code:
if (connection.isVideoOrientationSupported) {
connection.videoOrientation = currentVideoOrientation()
} else
{
return
}
if (connection.isVideoStabilizationSupported) {
//connection.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
}
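In other words, handle the audio branch before any video-specific connection checks; audio connections report isVideoOrientationSupported as false, so the early return above silently drops every audio buffer. A sketch of the reordered delegate body, with names taken from the question's code:

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard CMSampleBufferDataIsReady(sampleBuffer), isRecording else { return }
    // Audio first, so no video-only check can return early on an audio connection.
    if connection.audioChannels.count > 0 {
        if let audio = videoWriterAudioInput, audio.isReadyForMoreMediaData {
            audio.append(sampleBuffer)
        }
        return
    }
    // Only video connections reach this point.
    if connection.isVideoOrientationSupported {
        connection.videoOrientation = currentVideoOrientation()
    }
    // ... append the (filtered) video frame here ...
}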
Related
I am trying to capture camera video in memory using AVCaptureSession so that I can later write the video data to a movie file. While I have been able to successfully start a capture session, I am not able to successfully write the CMSampleBuffers I've captured to a compressed movie file using AVAssetWriter.
Appending sample buffers using AVAssetWriterInput's append method fails, and when I inspect the AVAssetWriter's error property, I get the following:
Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could not be completed" UserInfo={NSUnderlyingError=0x17005d070 {Error Domain=NSOSStatusErrorDomain Code=-12780 "(null)"}, NSLocalizedFailureReason=An unknown error occurred (-12780), NSLocalizedDescription=The operation could not be completed}
As far as I can tell, -11800 indicates AVErrorUnknown; however, I have not been able to find any information about the -12780 error code, which appears to be undocumented. Below I have pasted the main files of the example project I set up to demonstrate the issue.
Any guidance would be greatly appreciated. Thanks!
ViewController.swift
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
private let recordingClipQueue = DispatchQueue(label: "com.example.recordingClipQueue")
private let videoDataOutputQueue = DispatchQueue(label: "com.example.videoDataOutputQueue")
private let session = AVCaptureSession()
private var backfillSampleBufferList = [CMSampleBuffer]()
override func viewDidLoad() {
super.viewDidLoad()
session.sessionPreset = AVCaptureSessionPreset640x480
let videoDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo);
let videoDeviceInput: AVCaptureDeviceInput;
do {
videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
} catch {
print("Error creating device input from video device: \(error).")
return
}
guard session.canAddInput(videoDeviceInput) else {
print("Could not add video device input to capture session.")
return
}
session.addInput(videoDeviceInput)
let videoDataOutput = AVCaptureVideoDataOutput()
videoDataOutput.videoSettings = [ kCVPixelBufferPixelFormatTypeKey as NSString : Int(kCMPixelFormat_32BGRA) ]
videoDataOutput.alwaysDiscardsLateVideoFrames = true
videoDataOutput.setSampleBufferDelegate(self, queue: videoDataOutputQueue)
guard session.canAddOutput(videoDataOutput) else {
print("Could not add video data output to capture session.")
return
}
session.addOutput(videoDataOutput)
videoDataOutput.connection(withMediaType: AVMediaTypeVideo).isEnabled = true
session.startRunning()
}
private func backfillSizeInSeconds() -> Double {
if backfillSampleBufferList.count < 1 {
return 0.0
}
let earliestSampleBuffer = backfillSampleBufferList.first!
let latestSampleBuffer = backfillSampleBufferList.last!
let earliestSampleBufferPTS = CMSampleBufferGetOutputPresentationTimeStamp(earliestSampleBuffer).value
let latestSampleBufferPTS = CMSampleBufferGetOutputPresentationTimeStamp(latestSampleBuffer).value
let timescale = CMSampleBufferGetOutputPresentationTimeStamp(latestSampleBuffer).timescale
return Double(latestSampleBufferPTS - earliestSampleBufferPTS) / Double(timescale)
}
private func createClipFromBackfill() {
guard backfillSampleBufferList.count > 0 else {
print("createClipFromBackfill() called before any samples were recorded.")
return
}
let clipURL = URL(fileURLWithPath:
NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0] +
"/recorded_clip.mp4")
if FileManager.default.fileExists(atPath: clipURL.path) {
do {
try FileManager.default.removeItem(atPath: clipURL.path)
} catch {
print("Could not delete existing clip file: \(error).")
}
}
var _videoFileWriter: AVAssetWriter?
do {
_videoFileWriter = try AVAssetWriter(url: clipURL, fileType: AVFileTypeQuickTimeMovie)
} catch {
print("Could not create video file writer: \(error).")
return
}
guard let videoFileWriter = _videoFileWriter else {
print("Video writer was nil.")
return
}
let settingsAssistant = AVOutputSettingsAssistant(preset: AVOutputSettingsPreset640x480)!
guard videoFileWriter.canApply(outputSettings: settingsAssistant.videoSettings, forMediaType: AVMediaTypeVideo) else {
print("Video file writer could not apply video output settings.")
return
}
let earliestRecordedSampleBuffer = backfillSampleBufferList.first!
let _formatDescription = CMSampleBufferGetFormatDescription(earliestRecordedSampleBuffer)
guard let formatDescription = _formatDescription else {
print("Earliest recording pixel buffer format description was nil.")
return
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo,
outputSettings: settingsAssistant.videoSettings,
sourceFormatHint: formatDescription)
guard videoFileWriter.canAdd(videoWriterInput) else {
print("Could not add video writer input to video file writer.")
return
}
videoFileWriter.add(videoWriterInput)
guard videoFileWriter.startWriting() else {
print("Video file writer not ready to write file.")
return
}
videoFileWriter.startSession(atSourceTime: CMSampleBufferGetOutputPresentationTimeStamp(earliestRecordedSampleBuffer))
videoWriterInput.requestMediaDataWhenReady(on: recordingClipQueue) {
while videoWriterInput.isReadyForMoreMediaData {
if self.backfillSampleBufferList.count > 0 {
let sampleBufferToAppend = self.backfillSampleBufferList.first!.deepCopy()
let appendSampleBufferSucceeded = videoWriterInput.append(sampleBufferToAppend)
if !appendSampleBufferSucceeded {
print("Failed to append sample buffer to asset writer input: \(videoFileWriter.error!)")
print("Video file writer status: \(videoFileWriter.status.rawValue)")
}
self.backfillSampleBufferList.remove(at: 0)
} else {
videoWriterInput.markAsFinished()
videoFileWriter.finishWriting {
print("Saved clip to \(clipURL)")
}
break
}
}
}
}
// MARK: AVCaptureVideoDataOutputSampleBufferDelegate
func captureOutput(_ captureOutput: AVCaptureOutput!,
didOutputSampleBuffer sampleBuffer: CMSampleBuffer!,
from connection: AVCaptureConnection!) {
guard let buffer = sampleBuffer else {
print("Captured sample buffer was nil.")
return
}
let sampleBufferCopy = buffer.deepCopy()
backfillSampleBufferList.append(sampleBufferCopy)
if backfillSizeInSeconds() > 3.0 {
session.stopRunning()
createClipFromBackfill()
}
}
func captureOutput(_ captureOutput: AVCaptureOutput!,
didDrop sampleBuffer: CMSampleBuffer!,
from connection: AVCaptureConnection!) {
print("Sample buffer dropped.")
}
}
CVPixelBuffer+Copy.swift:
import CoreVideo
extension CVPixelBuffer {
func deepCopy() -> CVPixelBuffer {
precondition(CFGetTypeID(self) == CVPixelBufferGetTypeID(), "deepCopy() cannot copy a non-CVPixelBuffer")
var _copy : CVPixelBuffer?
CVPixelBufferCreate(
nil,
CVPixelBufferGetWidth(self),
CVPixelBufferGetHeight(self),
CVPixelBufferGetPixelFormatType(self),
CVBufferGetAttachments(self, CVAttachmentMode.shouldPropagate),
&_copy)
guard let copy = _copy else {
print("Pixel buffer copy was nil.")
fatalError()
}
CVBufferPropagateAttachments(self, copy)
CVPixelBufferLockBaseAddress(self, CVPixelBufferLockFlags.readOnly)
CVPixelBufferLockBaseAddress(copy, CVPixelBufferLockFlags(rawValue: 0))
let sourceBaseAddress = CVPixelBufferGetBaseAddress(self)
let copyBaseAddress = CVPixelBufferGetBaseAddress(copy)
memcpy(copyBaseAddress, sourceBaseAddress, CVPixelBufferGetHeight(self) * CVPixelBufferGetBytesPerRow(self))
CVPixelBufferUnlockBaseAddress(copy, CVPixelBufferLockFlags(rawValue: 0))
CVPixelBufferUnlockBaseAddress(self, CVPixelBufferLockFlags.readOnly)
return copy
}
}
CMSampleBuffer+Copy.swift:
import CoreMedia
extension CMSampleBuffer {
func deepCopy() -> CMSampleBuffer {
let _pixelBuffer = CMSampleBufferGetImageBuffer(self)
guard let pixelBuffer = _pixelBuffer else {
print("Pixel buffer to copy was nil.")
fatalError()
}
let pixelBufferCopy = pixelBuffer.deepCopy()
let _formatDescription = CMSampleBufferGetFormatDescription(self)
guard let formatDescription = _formatDescription else {
print("Format description to copy was nil.")
fatalError()
}
var timingInfo = kCMTimingInfoInvalid
let getTimingInfoResult = CMSampleBufferGetSampleTimingInfo(self, 0, &timingInfo)
guard getTimingInfoResult == noErr else {
print("Could not get timing info to copy: \(getTimingInfoResult).")
fatalError()
}
timingInfo.presentationTimeStamp = CMSampleBufferGetOutputPresentationTimeStamp(self)
var _copy : CMSampleBuffer?
let createCopyResult = CMSampleBufferCreateForImageBuffer(kCFAllocatorDefault,
pixelBufferCopy,
true,
nil,
nil,
formatDescription,
&timingInfo,
&_copy);
guard createCopyResult == noErr else {
print("Error creating copy of sample buffer: \(createCopyResult).")
fatalError()
}
guard let copy = _copy else {
print("Copied sample buffer was nil.")
fatalError()
}
return copy
}
}
I also ran into this while trying to synthesize videos. I finally figured out that -[AVAssetWriterInput appendSampleBuffer:] only works on device (as of iOS 11.2.6, anyway) if the underlying pixel buffer is backed by an IOSurface.
If you modify your CVPixelBuffer.deepCopy() method to include the kCVPixelBufferIOSurfacePropertiesKey: [:] key-value pair in the attributes dictionary you pass to CVPixelBufferCreate, it'll probably work.
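In Swift, the suggested change inside deepCopy() might look like this (a sketch; merging the propagated attachments into the attributes dictionary is an assumption about how the copy is built):

var attributes = CVBufferGetAttachments(self, .shouldPropagate) as? [String: Any] ?? [:]
// An empty dictionary opts the new buffer into IOSurface backing.
attributes[kCVPixelBufferIOSurfacePropertiesKey as String] = [:]
CVPixelBufferCreate(nil,
                    CVPixelBufferGetWidth(self),
                    CVPixelBufferGetHeight(self),
                    CVPixelBufferGetPixelFormatType(self),
                    attributes as CFDictionary,
                    &_copy)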
After more research and experimentation, it appears using AVAssetWriterInputPixelBufferAdaptor to append the CVPixelBuffers of the CMSampleBuffers I'm storing to the AVAssetWriterInput works without generating an error.
Below is the modified version of ViewController.swift implementation that uses AVAssetWriterInputPixelBufferAdaptor to append pixel buffers.
ViewController.swift
import UIKit
import AVFoundation
import Photos
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
private let recordingClipQueue = DispatchQueue(label: "com.example.recordingClipQueue")
private let videoDataOutputQueue = DispatchQueue(label: "com.example.videoDataOutputQueue")
private let session = AVCaptureSession()
private var backfillSampleBufferList = [CMSampleBuffer]()
override func viewDidLoad() {
super.viewDidLoad()
session.sessionPreset = AVCaptureSessionPreset640x480
let videoDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo);
let videoDeviceInput: AVCaptureDeviceInput;
do {
videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
} catch {
print("Error creating device input from video device: \(error).")
return
}
guard session.canAddInput(videoDeviceInput) else {
print("Could not add video device input to capture session.")
return
}
session.addInput(videoDeviceInput)
let videoDataOutput = AVCaptureVideoDataOutput()
videoDataOutput.videoSettings = [ kCVPixelBufferPixelFormatTypeKey as NSString : Int(kCMPixelFormat_32BGRA) ]
videoDataOutput.alwaysDiscardsLateVideoFrames = true
videoDataOutput.setSampleBufferDelegate(self, queue: videoDataOutputQueue)
guard session.canAddOutput(videoDataOutput) else {
print("Could not add video data output to capture session.")
return
}
session.addOutput(videoDataOutput)
videoDataOutput.connection(withMediaType: AVMediaTypeVideo).isEnabled = true
session.startRunning()
}
private func backfillSizeInSeconds() -> Double {
if backfillSampleBufferList.count < 1 {
return 0.0
}
let earliestSampleBuffer = backfillSampleBufferList.first!
let latestSampleBuffer = backfillSampleBufferList.last!
let earliestSampleBufferPTS = CMSampleBufferGetOutputPresentationTimeStamp(earliestSampleBuffer).value
let latestSampleBufferPTS = CMSampleBufferGetOutputPresentationTimeStamp(latestSampleBuffer).value
let timescale = CMSampleBufferGetOutputPresentationTimeStamp(latestSampleBuffer).timescale
return Double(latestSampleBufferPTS - earliestSampleBufferPTS) / Double(timescale)
}
private func createClipFromBackfill() {
guard backfillSampleBufferList.count > 0 else {
print("createClipFromBackfill() called before any samples were recorded.")
return
}
let clipURL = URL(fileURLWithPath:
NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0] +
"/recorded_clip.mp4")
if FileManager.default.fileExists(atPath: clipURL.path) {
do {
try FileManager.default.removeItem(atPath: clipURL.path)
} catch {
print("Could not delete existing clip file: \(error).")
}
}
var _videoFileWriter: AVAssetWriter?
do {
_videoFileWriter = try AVAssetWriter(url: clipURL, fileType: AVFileTypeMPEG4)
} catch {
print("Could not create video file writer: \(error).")
return
}
guard let videoFileWriter = _videoFileWriter else {
print("Video writer was nil.")
return
}
let settingsAssistant = AVOutputSettingsAssistant(preset: AVOutputSettingsPreset640x480)!
guard videoFileWriter.canApply(outputSettings: settingsAssistant.videoSettings, forMediaType: AVMediaTypeVideo) else {
print("Video file writer could not apply video output settings.")
return
}
let earliestRecordedSampleBuffer = backfillSampleBufferList.first!
let _formatDescription = CMSampleBufferGetFormatDescription(earliestRecordedSampleBuffer)
guard let formatDescription = _formatDescription else {
print("Earliest recording pixel buffer format description was nil.")
return
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo,
outputSettings: settingsAssistant.videoSettings,
sourceFormatHint: formatDescription)
guard videoFileWriter.canAdd(videoWriterInput) else {
print("Could not add video writer input to video file writer.")
return
}
videoFileWriter.add(videoWriterInput)
let pixelAdapterBufferAttributes = [ kCVPixelBufferPixelFormatTypeKey as String : Int(kCMPixelFormat_32BGRA) ]
let pixelAdapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: pixelAdapterBufferAttributes)
guard videoFileWriter.startWriting() else {
print("Video file writer not ready to write file.")
return
}
videoFileWriter.startSession(atSourceTime: CMSampleBufferGetOutputPresentationTimeStamp(earliestRecordedSampleBuffer))
videoWriterInput.requestMediaDataWhenReady(on: recordingClipQueue) {
while videoWriterInput.isReadyForMoreMediaData {
if self.backfillSampleBufferList.count > 0 {
let sampleBufferToAppend = self.backfillSampleBufferList.first!.deepCopy()
let appendSampleBufferSucceeded = pixelAdapter.append(CMSampleBufferGetImageBuffer(sampleBufferToAppend)!,
withPresentationTime: CMSampleBufferGetOutputPresentationTimeStamp(sampleBufferToAppend))
if !appendSampleBufferSucceeded {
print("Failed to append sample buffer to asset writer input: \(videoFileWriter.error!)")
print("Video file writer status: \(videoFileWriter.status.rawValue)")
}
self.backfillSampleBufferList.remove(at: 0)
} else {
videoWriterInput.markAsFinished()
videoFileWriter.finishWriting {
print("Saving clip to \(clipURL)")
}
break
}
}
}
}
// MARK: AVCaptureVideoDataOutputSampleBufferDelegate
func captureOutput(_ captureOutput: AVCaptureOutput!,
didOutputSampleBuffer sampleBuffer: CMSampleBuffer!,
from connection: AVCaptureConnection!) {
guard let buffer = sampleBuffer else {
print("Captured sample buffer was nil.")
return
}
let sampleBufferCopy = buffer.deepCopy()
backfillSampleBufferList.append(sampleBufferCopy)
if backfillSizeInSeconds() > 3.0 {
session.stopRunning()
createClipFromBackfill()
}
}
func captureOutput(_ captureOutput: AVCaptureOutput!,
didDrop sampleBuffer: CMSampleBuffer!,
from connection: AVCaptureConnection!) {
print("Sample buffer dropped.")
}
}
I ran into issues with the same error codes when creating CVPixelBuffers and CMSampleBuffers manually to create a video with individual frames rendered by CoreGraphics. I could solve the problem by using an AVAssetWriterInputPixelBufferAdaptor instead, like you suggested in your own answer. For some reason, this was only needed when the code was run on an actual device; on the simulator, manually creating the buffers worked fine.
I noticed that the same error codes AVFoundationErrorDomain Code -11800 and NSOSStatusErrorDomain Code -12780 can also occur for other reasons, for example:
There exists already a file at the destination URL provided to AVAssetWriter
The destination URL is not a file URL (it must be created with URL.init(fileURLWithPath:) and not with URL.init(string:)).
(Posting this for the sake of completeness, your code already handles this correctly.)
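A minimal pre-flight sketch covering those two pitfalls (the path is illustrative):

let outputURL = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent("clip.mp4") // built as a file URL, not with URL(string:)
try? FileManager.default.removeItem(at: outputURL) // startWriting() fails if a file already exists at the destination
let writer = try! AVAssetWriter(outputURL: outputURL, fileType: AVFileTypeMPEG4)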
Related
I am writing a video to the photo library / documents directory using a capture session and AVAssetWriter. What I want to know is: when I append a pixel buffer to the adapter, I get false from the append call (the print("video is \(bobo)") line below), and the same happens with audio.
This doesn't save my output file, and I get an error on export and saving.
I have been working on this for a long time, so any suggestions or pointers to my mistake would help me a lot.
The main problem is that the issue is very random, occurring roughly 1 in 10 times, but it does persist and I want to eliminate it.
My code, where I am appending the pixel buffer to the adapter:
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!)
{
starTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
if captureOutput == videoOutput
{
if self.record == true{
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
if self.record == true
{
if self.writerInput.isReadyForMoreMediaData
{
DispatchQueue(label: "newQeueLocalFeedVideo2", attributes: DispatchQueue.Attributes.concurrent).sync(execute: {
starTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
let bobo = self.adapter.append(pixelBuffer!, withPresentationTime: self.starTime)
print("video is \(bobo)")
})
}
}
}
}else if captureOutput == audioOutput{
if self.record == true
{
if audioWriterInput.isReadyForMoreMediaData
{
let bo = audioWriterInput.append(sampleBuffer)
print("audio conversion is \(bo)")
}
}
}
}
/*****------******/
The code where I am setting up the asset writer:
{
let fileUrl = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent("\(getCurrentDate())-capturedvideo.mp4")
lastPath = fileUrl.path
videoWriter = try? AVAssetWriter(outputURL: fileUrl, fileType: AVFileTypeMPEG4)
lastPathURL = fileUrl
let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width) as Float), AVVideoHeightKey : NSNumber(value: Float(outputSize.height) as Float)] as [String : Any]
writerInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
writerInput.expectsMediaDataInRealTime = true
// writerInput.performsMultiPassEncodingIfSupported = true
audioWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: DejalActivityView.getAudioDictionary() as? [String:AnyObject])
videoWriter.add(writerInput)
videoWriter.add(audioWriterInput)
adapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: writerInput, sourcePixelBufferAttributes: DejalActivityView.getAdapterDictionary() as? [String:AnyObject])
videoWriter.startWriting()
videoWriter.startSession(atSourceTime: starTime)
//self.client?.recordCaptureSession.captureSession.startRunning()
record = true
}
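Not a confirmed answer, but one thing worth checking given the accepted fix to the first question on this page: starTime here is populated from the capture callback, so if record is enabled before the first buffer arrives, startSession(atSourceTime:) runs with an uninitialized time. A sketch that defers the session start to the first appended buffer (sessionStarted is a hypothetical flag, and the startSession call would be removed from the setup above):

if self.record && videoWriter.status == .writing && !sessionStarted {
    videoWriter.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
    sessionStarted = true
}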
And to export the file to a video, I am using this code:
self.videoWriter.finishWriting { () -> Void in
Thread.sleep(forTimeInterval: 1.0)
if self.videoWriter.status == AVAssetWriterStatus.failed {
print("oh noes, an error: \(self.videoWriter.error.debugDescription)")
completionHandler(true)
} else {
let content = FileManager.default.contents(atPath: self.lastPathURL.path)
print("wrote video: \(self.lastPathURL.path) at size: \(content?.count)")
// This below line will save the video to photo library
HEPhotoLibraryHelper.saveVideosToPhotoLibrary(self.lastPathURL, withCompletionBlock: { (result) in
if result == true
{
do
{
try HEDocDirectory.shared.fileManagerDefault .removeItem(atPath: self.lastPath)
}catch let err as NSError
{
print("Error in removing file from doc dir \(err.localizedDescription)")
}
}
})
}
}