Exporting time lapse with AVAssetExportSession results in black video - iOS

I need to be able to merge videos taken with the time lapse function in the Camera app on iOS and export them as a single video.
However, even if I try to export a single, unchanged time lapse video to the Photo Library, it saves as a completely black video (with the correct duration). Here is the sample code I wrote to just export a single, unchanged video (most of which is adapted from a Ray Wenderlich tutorial):
@IBAction func saveVideo(_ sender: UIBarButtonItem) {
// 1 - Early exit if there's no video file selected
guard let videoAsset = self.avAsset else {
let alert = UIAlertController(title: "Error", message: "Failed to load video asset.", preferredStyle: .alert)
let cancelAction = UIAlertAction(title: "OK", style: .cancel, handler: nil)
alert.addAction(cancelAction)
self.present(alert, animated: true, completion: nil)
return
}
// 2 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
print("Preparing AVMutableComposition...")
let mixComposition = AVMutableComposition()
// 3 - Video track
let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
if let videoAssetTrack = videoAsset.tracks(withMediaType: .video).first {
try videoTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: videoAssetTrack, at: kCMTimeZero)
}
if let audioAssetTrack = videoAsset.tracks(withMediaType: .audio).first {
let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
try audioTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: audioAssetTrack, at: kCMTimeZero)
}
} catch let error as NSError {
self.presentAlert(title: "Export Error", message: "Unable to complete export due to the following error: \(error). Please try again.", block: nil)
print("error: \(error)")
}
// 3.1 - Create AVMutableVideoCompositionInstruction
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, videoAsset.duration)
// 3.2 - Create an AVMutableVideoCompositionLayerInstruction for the video track and fix the orientation.
let videoLayerInstruction: AVMutableVideoCompositionLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack!)
let videoAssetTrack = videoAsset.tracks(withMediaType: .video).first
var assetOrientation: UIImageOrientation = .up
var isPortrait = false
let t = videoAssetTrack!.preferredTransform
if t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0 {
assetOrientation = .right
isPortrait = true
} else if t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0 {
assetOrientation = .left
isPortrait = true
} else if t.a == 1.0 && t.b == 0 && t.c == 0 && t.d == 1.0 {
assetOrientation = .up
} else if t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0 {
assetOrientation = .down
}
videoLayerInstruction.setTransform(videoAssetTrack!.preferredTransform, at: kCMTimeZero)
videoLayerInstruction.setOpacity(0.0, at: videoAsset.duration)
// 3.3 - Add instructions
mainInstruction.layerInstructions = [videoLayerInstruction]
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(1, 30)
var naturalSize: CGSize
if isPortrait {
naturalSize = CGSize(width: videoAssetTrack!.naturalSize.height, height: videoAssetTrack!.naturalSize.width)
} else {
naturalSize = videoAssetTrack!.naturalSize
}
mainComposition.renderSize = CGSize(width: naturalSize.width, height: naturalSize.height)
// set up file destination
let tempName = "temp-thread.mov"
let tempURL = URL(fileURLWithPath: (NSTemporaryDirectory() as NSString).appendingPathComponent(tempName))
do {
if FileManager.default.fileExists(atPath: tempURL.path) {
try FileManager.default.removeItem(at: tempURL)
}
} catch {
print("Error removing temp file.")
}
// create final video using export session
guard let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
exportSession.outputURL = tempURL
exportSession.outputFileType = AVFileType.mov
exportSession.shouldOptimizeForNetworkUse = true
exportSession.videoComposition = mainComposition
print("Exporting video...")
exportSession.exportAsynchronously {
DispatchQueue.main.async {
switch exportSession.status {
// Success
case .completed:
print("Saving to Photos Library...")
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: exportSession.outputURL!)
}) { success, error in
if success {
print("Added video to library - success: \(success), error: \(String(describing: error?.localizedDescription))")
} else {
print("Added video to library - success: \(success), error: \(String(describing: error!.localizedDescription))")
}
let _ = try? FileManager.default.removeItem(at: tempURL)
}
print("Export session completed")
// Status other than success
case .cancelled, .exporting, .failed, .unknown, .waiting:
print("Export status: \(exportSession.status.rawValue)")
print("Reason: \(String(describing: exportSession.error))")
}
}
}
}
Why would the resulting video show up completely black? I can't find much documentation on Apple's time lapse videos, so I'm not sure how they might differ from a regular video file. They seem to have a frame rate of 30 fps, and if I inspect one on my Mac it's just a regular QuickTime movie file without an audio track. Any ideas? Exporting any other video with this code (even ones without audio) works flawlessly.

The problem code is:
videoLayerInstruction.setTransform(videoAssetTrack!.preferredTransform, at: kCMTimeZero)
This transform only works for the default "up" orientation; for any other orientation it leaves the video completely black. You need to build a corrected transform for each orientation, e.g.:
var transform = videoAssetTrack.preferredTransform
// Right
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
isPortrait = true
let rotate = CGAffineTransform.identity.translatedBy(x: videoAssetTrack.naturalSize.height - videoAssetTrack.preferredTransform.tx, y: -videoAssetTrack.preferredTransform.ty)
transform = videoAssetTrack.preferredTransform.concatenating(rotate)
}
// Left
else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
isPortrait = true
let rotate = CGAffineTransform.identity.translatedBy(x: -videoAssetTrack.preferredTransform.tx, y: videoAssetTrack.naturalSize.width - videoAssetTrack.preferredTransform.ty)
transform = videoAssetTrack.preferredTransform.concatenating(rotate)
}
// Up
else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
transform = videoAssetTrack.preferredTransform
}
// Down
else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
let rotate = CGAffineTransform.identity.translatedBy(x: videoAssetTrack.naturalSize.width - videoAssetTrack.preferredTransform.tx, y: videoAssetTrack.naturalSize.height - videoAssetTrack.preferredTransform.ty)
transform = videoAssetTrack.preferredTransform.concatenating(rotate)
}
videoLayerInstruction.setTransform(transform, at: .zero)
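If you apply this in more than one place, the same mapping can be wrapped in a small helper that also returns the portrait flag you use for the render size. A sketch based on the snippet above (the helper name is mine, not part of the original code):
// Sketch: the orientation fix above as a reusable helper.
// Returns the corrected transform plus whether the track is portrait (useful for renderSize).
func correctedTransform(for track: AVAssetTrack) -> (transform: CGAffineTransform, isPortrait: Bool) {
    let t = track.preferredTransform
    let size = track.naturalSize
    if t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0 {            // rotated right (portrait)
        return (t.concatenating(CGAffineTransform(translationX: size.height - t.tx, y: -t.ty)), true)
    } else if t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0 {     // rotated left (portrait)
        return (t.concatenating(CGAffineTransform(translationX: -t.tx, y: size.width - t.ty)), true)
    } else if t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0 {    // upside down (landscape)
        return (t.concatenating(CGAffineTransform(translationX: size.width - t.tx, y: size.height - t.ty)), false)
    }
    return (t, false)                                                  // already "up"
}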

Related

AVExportSession exporting video super slow

I am simply trying to speed up the export of my merged video.
Here is the code. From my extensive research online and on SO, I've found that the passthrough preset makes the export super fast; however, as I note in a comment in the code, my merging code does not seem to work with that preset for export :/
static func videoCompositionInstruction(_ track: AVCompositionTrack, asset: AVAsset)
-> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: .video)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform)
var scaleToFitRatio = 1080 / assetTrack.naturalSize.width
if assetInfo.isPortrait {
scaleToFitRatio = 1080 / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var finalTransform = assetTrack.preferredTransform.concatenating(scaleFactor)
//was needed in my case (if video not taking entire screen and leaving some parts black - don't know when actually needed so you'll have to try and see when it's needed)
if assetInfo.orientation == .rightMirrored || assetInfo.orientation == .leftMirrored {
finalTransform = finalTransform.translatedBy(x: -transform.ty, y: 0)
}
instruction.setTransform(finalTransform, at: CMTime.zero)
} else {
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor)
.concatenating(CGAffineTransform(translationX: 0, y: UIScreen.main.bounds.width / 2))
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
let windowBounds = UIScreen.main.bounds
let yFix = assetTrack.naturalSize.height + windowBounds.height
let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: yFix)
concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
}
instruction.setTransform(concat, at: CMTime.zero)
}
return instruction
}
static func orientationFromTransform(_ transform: CGAffineTransform)
-> (orientation: UIImage.Orientation, isPortrait: Bool) {
var assetOrientation = UIImage.Orientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == 1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .rightMirrored
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .leftMirrored
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
func mergeVideosTestSQ(arrayVideos:[AVAsset], completion:@escaping (URL?, Error?) -> ()) {
let mixComposition = AVMutableComposition()
var instructions: [AVMutableVideoCompositionLayerInstruction] = []
var insertTime = CMTime(seconds: 0, preferredTimescale: 1)
/// for each URL add the video and audio tracks and their duration to the composition
for sourceAsset in arrayVideos {
let frameRange = CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1), duration: sourceAsset.duration)
guard
let nthVideoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
let nthAudioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)), //0 used to be kCMPersistentTrackID_Invalid
let assetVideoTrack = sourceAsset.tracks(withMediaType: .video).first
else {
print("didnt work")
return
}
var assetAudioTrack: AVAssetTrack?
assetAudioTrack = sourceAsset.tracks(withMediaType: .audio).first
print(assetAudioTrack, ",-- assetAudioTrack???", assetAudioTrack?.asset, "<-- hes", sourceAsset)
do {
try nthVideoTrack.insertTimeRange(frameRange, of: assetVideoTrack, at: insertTime)
try nthAudioTrack.insertTimeRange(frameRange, of: assetAudioTrack!, at: insertTime)
//instructions:
let nthInstruction = MainCamVC.videoCompositionInstruction(nthVideoTrack, asset: sourceAsset)
nthInstruction.setOpacity(0.0, at: CMTimeAdd(insertTime, sourceAsset.duration)) //sourceasset.duration
instructions.append(nthInstruction)
insertTime = insertTime + sourceAsset.duration //sourceAsset.duration
} catch {
DispatchQueue.main.async {
print("didnt wor2k")
}
}
}
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1), duration: insertTime)
mainInstruction.layerInstructions = instructions
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mainComposition.renderSize = CGSize(width: 1080, height: 1920)
let outputFileURL = URL(fileURLWithPath: NSTemporaryDirectory() + "merge.mp4")
//below: remove any previous merged video from the temporary directory before exporting the new one
let fileManager = FileManager()
try? fileManager.removeItem(at: outputFileURL)
/// try to start an export session and set the path and file type
if let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) { //DOES NOT WORK WITH AVAssetExportPresetPassthrough
exportSession.outputFileType = .mov
exportSession.outputURL = outputFileURL
exportSession.videoComposition = mainComposition
exportSession.shouldOptimizeForNetworkUse = true
/// try to export the file and handle the status cases
exportSession.exportAsynchronously {
if let url = exportSession.outputURL{
completion(url, nil)
}
if let error = exportSession.error {
completion(nil, error)
}
}
}
}
Note that I use the layer instructions in order to preserve the correct orientations.
Thanks for any help! I just need the export to be faster; right now it takes roughly half the video's duration to export.
After implementing your code in my project, it seems that what is making your export slow is the way you handle the renderSize, as well as the resolution of the video. On top of that, using a lower quality preset may speed it up.
Specifically, I would note this part:
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1), duration: insertTime)
mainInstruction.layerInstructions = instructions
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mainComposition.renderSize = CGSize(width: 1080, height: 1920)
Changing the renderSize to match the video composition's size (the name may differ in your project) does the trick.
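For example, something along these lines (a sketch; arrayVideos and orientationFromTransform are the names already used in your code, and it assumes all clips share the first clip's dimensions):
// Derive the render size from the first clip instead of hardcoding 1080x1920.
if let firstVideoTrack = arrayVideos.first?.tracks(withMediaType: .video).first {
    let info = MainCamVC.orientationFromTransform(firstVideoTrack.preferredTransform)
    let size = firstVideoTrack.naturalSize
    // swap width/height for portrait clips so the render size matches the composition
    mainComposition.renderSize = info.isPortrait ? CGSize(width: size.height, height: size.width) : size
}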
Then, where you export, I suggest changing this part:
/// try to start an export session and set the path and file type
if let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) { //DOES NOT WORK WITH AVAssetExportPresetPassthrough
exportSession.outputFileType = .mov
exportSession.outputURL = outputFileURL
exportSession.videoComposition = mainComposition
exportSession.shouldOptimizeForNetworkUse = true
/// try to export the file and handle the status cases
exportSession.exportAsynchronously {
if let url = exportSession.outputURL{
completion(url, nil)
}
if let error = exportSession.error {
completion(nil, error)
}
}
}
As I said before, drop the preset down a quality level or so; it will vastly improve the speed! Some transforms were also duplicated, which is something to look out for.
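For instance, swapping the preset is a one-line change (these are standard AVFoundation preset names; pick whichever resolution your output actually needs):
// Lower presets trade resolution for export speed.
if let exportSession = AVAssetExportSession(asset: mixComposition,
                                            presetName: AVAssetExportPreset1280x720) {
    // configure outputURL, outputFileType, videoComposition as before
}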

How to merge mp4 and MOV type videos using AVMutableComposition iOS Swift?

I am using AVMutableComposition to merge videos. It works fine when both videos are of the same type, such as two mp4 videos or two mov videos.
But if I try to merge one mp4 video and one mov video, the merged video stops after the first video finishes.
You can use this code:
private func mergeAssets(arrayAssets:[AVAsset],audioAsset:AVAsset,completionHandler:@escaping MergedVideoCompletionHandler){
var insertTime = kCMTimeZero
let animation = true
var arrayLayerInstructions:[AVMutableVideoCompositionLayerInstruction] = []
var outputSize = CGSize.init(width: 0, height: 0)
// Determine video output size
for videoAsset in arrayAssets {
let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
if videoSize.height > outputSize.height {
outputSize = videoSize
}
}
if outputSize.width == 0 || outputSize.height == 0 {
outputSize = defaultSize
}
let audioSoundTrack = audioAsset.tracks(withMediaType: AVMediaType.audio).first
// Init composition
let mixComposition = AVMutableComposition.init()
for videoAsset in arrayAssets {
// Get video track
guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else { continue }
// Get audio track
// Init video & audio composition track
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
let startTime = kCMTimeZero
let duration = videoAsset.duration
// Add video track to video composition at specific time
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, duration),
of: videoTrack,
at: insertTime)
// Add audio track to audio composition at specific time
if let audioTrack = audioSoundTrack {
try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, duration),
of: audioTrack,
at: insertTime)
}
// Add instruction for video track
let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!,
asset: videoAsset,
standardSize: outputSize,
atTime: insertTime)
// Hide video track before changing to new track
let endTime = CMTimeAdd(insertTime, duration)
if animation {
let timeScale = videoAsset.duration.timescale
let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
}
else {
layerInstruction.setOpacity(0, at: endTime)
}
arrayLayerInstructions.append(layerInstruction)
// Increase the insert time
insertTime = CMTimeAdd(insertTime, duration)
}
catch {
let appError = getAppError(error, message: "Failed to load video track")
completionHandler(nil,appError)
}
}
// Main video composition instruction
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, insertTime)
mainInstruction.layerInstructions = arrayLayerInstructions
// Main video composition
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(1, 30)
mainComposition.renderSize = outputSize
let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
let exportURL = URL.init(fileURLWithPath: path)
// Remove file if existed
FileManager.default.removeItemIfExisted(exportURL)
// //4 - Get path
// let fileName = IGMediaUtil.createNewFile(fileName: "mergeVideo", fileExtension: "mp4")
// guard let docSubFolder = IGMediaUtil.createFolderInDoc(folderName: Constants.kMergedVideosFolder) else{
// return
// }
// let mergingURL = docSubFolder.appendingPathComponent(fileName)
// // Remove file if existed
// FileManager.default.removeItemIfExisted(mergingURL)
let exporter = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = exportURL
exporter?.outputFileType = AVFileType.mp4
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = mainComposition
// Do export
exporter?.exportAsynchronously(completionHandler: {
DispatchQueue.main.async {
self.exportDidFinish(exporter: exporter, videoURL: exportURL)
}
})
}
fileprivate func exportDidFinish(exporter:AVAssetExportSession?, videoURL:URL) {
var progressValue:Float = 0
if let status = exporter?.status {
switch status{
case .exporting:
progressValue = (exporter?.progress)!
case .failed, .cancelled,.unknown:
progressValue = 1.0
let appError = self.getAppError(exporter?.error,message:"Failed to create Data")
print( "localizedDescription ::::::AVExport ********** \(exporter?.error?.localizedDescription)" ?? "No Error")
print(exporter?.error.debugDescription)
if let exportBlock = self.callback{
exportBlock(nil ,appError)
}
case .waiting:
break
case .completed:
progressValue = 1.0;
print("Exported file: \(videoURL.absoluteString)")
if let exportBlock = self.callback{
exportBlock(videoURL ,nil)
}
}
if let progressBlock = self.progressCallback{
DispatchQueue.main.async {
progressBlock(progressValue)
}
}
}
}
}
extension MediaAudioMergerServiceManager{
fileprivate func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
var assetOrientation = UIImageOrientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
fileprivate func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset, standardSize:CGSize, atTime: CMTime) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform: transform)
var aspectFillRatio:CGFloat = 1
if assetTrack.naturalSize.height < assetTrack.naturalSize.width {
aspectFillRatio = standardSize.height / assetTrack.naturalSize.height
}
else {
aspectFillRatio = standardSize.width / assetTrack.naturalSize.width
}
if assetInfo.isPortrait {
let scaleFactor = CGAffineTransform(scaleX: aspectFillRatio, y: aspectFillRatio)
let posX = standardSize.width/2 - (assetTrack.naturalSize.height * aspectFillRatio)/2
let posY = standardSize.height/2 - (assetTrack.naturalSize.width * aspectFillRatio)/2
let moveFactor = CGAffineTransform(translationX: posX, y: posY)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(moveFactor), at: atTime)
} else {
let scaleFactor = CGAffineTransform(scaleX: aspectFillRatio, y: aspectFillRatio)
let posX = standardSize.width/2 - (assetTrack.naturalSize.width * aspectFillRatio)/2
let posY = standardSize.height/2 - (assetTrack.naturalSize.height * aspectFillRatio)/2
let moveFactor = CGAffineTransform(translationX: posX, y: posY)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(moveFactor)
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
concat = fixUpsideDown.concatenating(scaleFactor).concatenating(moveFactor)
}
instruction.setTransform(concat, at: atTime)
}
return instruction
}
}
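A hypothetical call site, invoked from within the same service class, assuming MergedVideoCompletionHandler is (URL?, Error?) -> Void (the typealias is not shown above) and that the URLs come from your own picker:
let clips = [AVAsset(url: firstClipURL), AVAsset(url: secondClipURL)] // e.g. one .mp4 and one .mov
let soundtrack = AVAsset(url: audioURL)                               // the separate audio track this method expects
mergeAssets(arrayAssets: clips, audioAsset: soundtrack) { url, error in
    if let url = url {
        print("Merged video written to \(url)")
    } else {
        print("Merge failed: \(String(describing: error))")
    }
}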

Swift: Square video composition

I am following the below code for square video composition
func completeWithVideoAtURL(input: NSURL) {
let asset = AVAsset(url: input as URL)
let output = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/Video.mp4")
let session = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetMediumQuality)!
session.videoComposition = self.squareVideoCompositionForAsset(asset: asset)
session.outputURL = output as URL
session.outputFileType = AVFileTypeMPEG4
session.shouldOptimizeForNetworkUse = true
session.exportAsynchronously(completionHandler: { () -> Void in
DispatchQueue.main.async(execute: { () -> Void in
// do something with the output
print("\(output)")
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: output as URL)
}) { saved, error in
if saved {
print("saved to gallery")
}
}
})
})
}
func squareVideoCompositionForAsset(asset: AVAsset) -> AVVideoComposition {
let track = asset.tracks(withMediaType: AVMediaTypeVideo)[0]
let length = max(track.naturalSize.width, track.naturalSize.height)
var transform = track.preferredTransform
let size = track.naturalSize
let scale: CGFloat = (transform.a == -1 && transform.b == 0 && transform.c == 0 && transform.d == -1) ? -1 : 1 // check for inversion
transform = transform.translatedBy(x: scale * -(size.width - length) / 2, y: scale * -(size.height - length) / 2)
let transformer = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
transformer.setTransform(transform, at: kCMTimeZero)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(start: kCMTimeZero, duration: kCMTimePositiveInfinity)
instruction.layerInstructions = [transformer]
let composition = AVMutableVideoComposition()
composition.frameDuration = CMTime(value: 1, timescale: 30)
composition.renderSize = CGSize(width: length, height: length)
composition.instructions = [instruction]
return composition
}
In squareVideoCompositionForAsset() I take the max of track.naturalSize.width and track.naturalSize.height for length because I don't want to crop away any part of the video. If I take the min value, a portrait video has its upper and lower portions cropped, and a landscape video loses some of its left and right portions.
For a landscape video the output is okay,
but for a portrait video the output looks like the following image:
the video is pushed to the left side. Is it possible to center the video? Any assistance would be great, and sorry for the long explanation.
Instead of this line:
let scale: CGFloat = (transform.a == -1 && transform.b == 0 &&
transform.c == 0 && transform.d == -1) ? -1 : 1
I just used this:
var scale = CGFloat()
if (transform.a == 0 && transform.b == 1 && transform.c == -1 && transform.d == 0) {
scale = -1
}
else if (transform.a == 0 && transform.b == -1 && transform.c == 1 && transform.d == 0) {
scale = -1
}
else if (transform.a == 1 && transform.b == 0 && transform.c == 0 && transform.d == 1) {
scale = 1
}
else if (transform.a == -1 && transform.b == 0 && transform.c == 0 && transform.d == -1) {
scale = 1
}
and it worked like a charm.
Swift 4.2:
func suqareCropVideo(videoURL: URL, withSide sideLength: CGFloat, completion: @escaping (_ resultURL: URL?, _ error: Error?) -> ()) {
let asset = AVAsset(url: videoURL)
if let assetVideoTrack = asset.tracks(withMediaType: .video).last {
let originalSize = assetVideoTrack.naturalSize
var scale: CGFloat
if originalSize.width < originalSize.height {
scale = sideLength / originalSize.width
} else {
scale = sideLength / originalSize.height
}
let scaledSize = CGSize(width: originalSize.width * scale, height: originalSize.height * scale)
let topLeft = CGPoint(x: sideLength * 0.5 - scaledSize.width * 0.5, y: sideLength * 0.5 - scaledSize.height * 0.5)
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: assetVideoTrack)
var orientationTransform = assetVideoTrack.preferredTransform
if (orientationTransform.tx == originalSize.width || orientationTransform.tx == originalSize.height) {
orientationTransform.tx = sideLength
}
if (orientationTransform.ty == originalSize.width || orientationTransform.ty == originalSize.height) {
orientationTransform.ty = sideLength
}
let transform = CGAffineTransform(scaleX: scale, y: scale).concatenating(CGAffineTransform(translationX: topLeft.x, y: topLeft.y)).concatenating(orientationTransform)
layerInstruction.setTransform(transform, at: .zero)
let instruction = AVMutableVideoCompositionInstruction()
instruction.layerInstructions = [layerInstruction]
instruction.timeRange = assetVideoTrack.timeRange
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = CGSize(width: sideLength, height: sideLength)
videoComposition.renderScale = 1.0
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
videoComposition.instructions = [instruction]
if let export = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetHighestQuality) {
export.videoComposition = videoComposition
export.outputURL = NSURL.fileURL(withPath: "\(NSTemporaryDirectory())\(NSUUID().uuidString).mp4")
export.outputFileType = AVFileType.mp4
export.shouldOptimizeForNetworkUse = true
export.exportAsynchronously {
DispatchQueue.main.async {
if export.status == .completed {
completion(export.outputURL, nil)
} else {
completion(nil, export.error)
}
}
}
} else {
completion(nil, nil)
}
}
}
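A hypothetical call site (the URL and side length below are placeholders):
// pickedVideoURL would come from your picker; 480 is an arbitrary square side.
suqareCropVideo(videoURL: pickedVideoURL, withSide: 480) { url, error in
    if let url = url {
        print("Square video written to \(url)")
    } else {
        print("Crop failed: \(String(describing: error))")
    }
}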

AVAssetExportSession export takes a lot of time

My goal is to let the user select a video from Photos and then let them add labels over it.
Here is what I've got:
let audioAsset = AVURLAsset(url: selectedVideoURL)
let videoAsset = AVURLAsset(url: selectedVideoURL)
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let compositionAudioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
let clipAudioTrack = audioAsset.tracks(withMediaType: AVMediaTypeAudio)[0]
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, audioAsset.duration), of: clipAudioTrack, at: kCMTimeZero)
compositionVideoTrack.preferredTransform = clipVideoTrack.preferredTransform
} catch {
print(error)
}
var videoSize = clipVideoTrack.naturalSize
if isVideoPortrait(asset: videoAsset) {
videoSize = CGSize(width: videoSize.height, height: videoSize.width)
}
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(videoLayer)
// adding label
let helloLabelLayer = CATextLayer()
helloLabelLayer.string = "Hello"
helloLabelLayer.font = "Signika-Semibold" as CFTypeRef?
helloLabelLayer.fontSize = 30.0
helloLabelLayer.contentsScale = mainScreen.scale
helloLabelLayer.alignmentMode = kCAAlignmentNatural
helloLabelLayer.frame = CGRect(x: 0.0, y: 0.0, width: 100.0, height: 50.0)
parentLayer.addSublayer(helloLabelLayer)
// creating composition
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, 30)
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
let layerInstruction = videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
if let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset640x480) {
let filename = NSTemporaryDirectory().appending("video.mov")
if FileManager.default.fileExists(atPath: filename) {
do {
try FileManager.default.removeItem(atPath: filename)
} catch {
print(error)
}
}
let url = URL(fileURLWithPath: filename)
assetExport.outputURL = url
assetExport.outputFileType = AVFileTypeMPEG4
assetExport.videoComposition = videoComp
print(NSDate().timeIntervalSince1970)
assetExport.exportAsynchronously {
print(NSDate().timeIntervalSince1970)
let library = ALAssetsLibrary()
library.writeVideoAtPath(toSavedPhotosAlbum: url, completionBlock: {
(url, error) in
switch assetExport.status {
case AVAssetExportSessionStatus.failed:
p("failed \(assetExport.error)")
case AVAssetExportSessionStatus.cancelled:
p("cancelled \(assetExport.error)")
default:
p("complete")
p(NSDate().timeIntervalSince1970)
if FileManager.default.fileExists(atPath: filename) {
do {
try FileManager.default.removeItem(atPath: filename)
} catch {
p(error)
}
}
print("Exported")
}
})
}
Implementation of the isVideoPortrait function:
func isVideoPortrait(asset: AVAsset) -> Bool {
var isPortrait = false
let tracks = asset.tracks(withMediaType: AVMediaTypeVideo)
if tracks.count > 0 {
let videoTrack = tracks[0]
let t = videoTrack.preferredTransform
if t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0 {
isPortrait = true
}
if t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0 {
isPortrait = true
}
if t.a == 1.0 && t.b == 0 && t.c == 0 && t.d == 1.0 {
isPortrait = false
}
if t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0 {
isPortrait = false
}
}
return isPortrait
}
And the last function for video composition layer instruction:
func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaTypeVideo)[0]
let transform = assetTrack.preferredTransform
instruction.setTransform(transform, at: kCMTimeZero)
return instruction
}
The code works well and the output video has the label, but if I select a 1-minute video the export takes 28 seconds.
I've searched around and tried removing the layerInstruction transform, with no effect.
I tried adding:
assetExport.shouldOptimizeForNetworkUse = false
with no effect either.
I also tried setting AVAssetExportPresetPassthrough on the AVAssetExportSession; in that case the video exports in about 1 second, but the labels are gone.
Any help would be appreciated, because I'm stuck. Thanks for your time.
The only way I can think of is to reduce the quality via the bit rate and resolution.
This is done through a dictionary applied to the videoSettings of the asset exporter; for this to work I had to use a framework called SDAVAssetExportSession.
Then, by changing the videoSettings, I could tune the quality to get an optimal quality/speed trade-off.
let compression: [String: Any] = [AVVideoAverageBitRateKey: 2_097_152, // desired bit rate
                                  AVVideoProfileLevelKey: AVVideoProfileLevelH264BaselineAutoLevel]
let videoSettings: [String: Any] = [AVVideoCodecKey: AVVideoCodecH264,
                                    AVVideoWidthKey: maxWidth,
                                    AVVideoHeightKey: maxHeight,
                                    AVVideoCompressionPropertiesKey: compression]
This was the only way I could speed things up.
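For reference, this is roughly how those dictionaries plug into SDAVAssetExportSession; the property names follow that library's README, so verify them against the version you use:
// Sketch only: SDAVAssetExportSession mirrors AVAssetExportSession but accepts explicit settings.
let encoder = SDAVAssetExportSession(asset: mixComposition)
encoder.outputFileType = AVFileTypeMPEG4
encoder.outputURL = url                                // the output URL from your code above
encoder.videoSettings = videoSettings                  // the dictionary shown above
encoder.audioSettings = [AVFormatIDKey: kAudioFormatMPEG4AAC,
                         AVNumberOfChannelsKey: 2,
                         AVSampleRateKey: 44100,
                         AVEncoderBitRateKey: 128000]
encoder.exportAsynchronously {
    // inspect encoder.status / encoder.error just as you would with AVAssetExportSession
}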
This is not directly relevant to your question, but your code here is backwards:
assetExport.exportAsynchronously {
let library = ALAssetsLibrary()
library.writeVideoAtPath(toSavedPhotosAlbum: url, completionBlock: {
switch assetExport.status {
No no no. First you complete the asset export. Then, if that's what you want to do, you can copy the result somewhere else. So this needs to go like this:
assetExport.exportAsynchronously {
switch assetExport.status {
case .completed:
let library = ALAssetsLibrary()
library.writeVideoAtPath...
Other comments:
ALAssetsLibrary is dead. This is not the way to copy into the user's photo library. Use the Photos framework.
Your original code is very odd, because there are a lot of other cases you are not testing for. You are just assuming that default means .completed. That's dangerous.
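For the Photos framework part, the save would look roughly like this inside the .completed case (a sketch; it assumes the app already has permission to add to the photo library):
// requires: import Photos
assetExport.exportAsynchronously {
    switch assetExport.status {
    case .completed:
        PHPhotoLibrary.shared().performChanges({
            PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: url)
        }) { saved, error in
            print("Saved to Photos: \(saved), error: \(String(describing: error))")
        }
    case .failed, .cancelled:
        print("Export failed: \(String(describing: assetExport.error))")
    default:
        break
    }
}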

iOS - How to fix video orientation and square center crop?

I want to crop a square video from a video selected from the gallery, with its orientation fixed. I have searched many Stack Overflow posts and a Ray Wenderlich post.
My code works for some videos but not all. For example, I selected a portrait video from the gallery with a resolution of 352x640. During debugging, the natural size of the asset is 640x352 and, given the preferredTransform, it is detected as portrait, but the crop size is set to 640x640 instead of 352x352 for the center square crop.
This is my code:
let asset = AVAsset(URL: url)
let composition = AVMutableComposition()
let compositionVideoTrack = composition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let compositionAudioTrack = composition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let videoComposition: AVMutableVideoComposition?
let audioMix: AVMutableAudioMix?
let timeRange = self.timeRange(asset)
if let videoAssetTrack = asset.tracksWithMediaType(AVMediaTypeVideo).first {
videoComposition = AVMutableVideoComposition()
var error: NSError?
do {
try compositionVideoTrack.insertTimeRange(timeRange, ofTrack: videoAssetTrack, atTime: kCMTimeZero)
} catch var error1 as NSError {
error = error1
} catch {
fatalError()
}
let naturalSize = videoAssetTrack.naturalSize
let videoSize: CGSize
var transform = videoAssetTrack.preferredTransform
var isFirstAssetPortrait_ = false
if(transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0) {
isFirstAssetPortrait_ = true}
if(transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0) {
isFirstAssetPortrait_ = true
}
var FirstAssetScaleToFitRatio = naturalSize.height/naturalSize.width
if(isFirstAssetPortrait_){
videoSize = CGSizeMake(naturalSize.width, naturalSize.width)
FirstAssetScaleToFitRatio = naturalSize.width/naturalSize.height
var FirstAssetScaleFactor = CGAffineTransformMakeScale(FirstAssetScaleToFitRatio,FirstAssetScaleToFitRatio)
FirstAssetScaleFactor = CGAffineTransformTranslate(FirstAssetScaleFactor, 0, (naturalSize.height - naturalSize.width) / 2.0 )
transform = CGAffineTransformConcat(transform, FirstAssetScaleFactor)
}else{
videoSize = CGSizeMake(naturalSize.height, naturalSize.height)
if transform.a >= 0 {
transform = CGAffineTransformTranslate(transform, -(naturalSize.width - naturalSize.height) / 2.0, 0.0)
} else {
transform = CGAffineTransformTranslate(transform, (naturalSize.width - naturalSize.height) / 2.0, 0.0)
}
}
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionVideoTrack)
layerInstruction.setTransform(transform, atTime: kCMTimeZero)
let videoInstructions = AVMutableVideoCompositionInstruction()
videoInstructions.timeRange = CMTimeRangeMake(kCMTimeZero, asset.duration)
videoInstructions.layerInstructions = [layerInstruction]
videoComposition?.renderSize = videoSize
videoComposition?.frameDuration = CMTimeMake(1, 30)
videoComposition?.renderScale = 1.0
videoComposition?.instructions = [videoInstructions]
} else {
videoComposition = nil
}
if let audioTrack = asset.tracksWithMediaType(AVMediaTypeAudio).first {
var error: NSError?
do {
try compositionAudioTrack.insertTimeRange(timeRange, ofTrack: audioTrack, atTime: kCMTimeZero)
} catch var error1 as NSError {
error = error1
} catch {
fatalError()
}
let mixParameters = AVMutableAudioMixInputParameters(track: compositionAudioTrack)
mixParameters.setVolume(1.0, atTime: kCMTimeZero)
audioMix = AVMutableAudioMix()
audioMix?.inputParameters = [mixParameters]
} else {
audioMix = nil
}
self.exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality)
self.exporter?.videoComposition = videoComposition
self.exporter?.audioMix = audioMix
self.exporter?.outputURL = NSURL.tempFileURL("mp4")
self.exporter?.outputFileType = AVFileTypeQuickTimeMovie
self.exporter?.exportAsynchronouslyWithCompletionHandler() {
if let exporter = self.exporter {
switch exporter.status {
case .Failed:
break
case .Completed:
break
default:
break
}
}
}
What am I doing wrong? Is there a better way to fix the video orientation and center-crop (using the minimum of the width and height)?
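One way to see where the 640 comes from: naturalSize is reported before the preferredTransform is applied, so a portrait recording still reports landscape dimensions. The square side therefore has to be taken from the orientation-corrected size, not from naturalSize directly. A sketch of that calculation (written in current Swift syntax; not a complete fix):
// Apply the preferredTransform to naturalSize to get the display size,
// then use the smaller side for the centered square crop.
let rawSize = videoAssetTrack.naturalSize                               // 640x352 in the example
let transformed = rawSize.applying(videoAssetTrack.preferredTransform)
let displaySize = CGSize(width: abs(transformed.width), height: abs(transformed.height)) // 352x640
let squareSide = min(displaySize.width, displaySize.height)            // 352, not 640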
