Swift: Adding Audio to AVMutableComposition - ios

I'm using the code in the answer to the question posted here (added below):
Swift 3: How to add watermark on video ? AVVideoCompositionCoreAnimationTool iOS 10 issue
The code strips the video track from a recorded video and adds a watermark. It doesn't merge the audio track, so the output is the watermarked video with no audio.
How can I merge the audio back into the watermarked video?
The code:
import UIKit
import AssetsLibrary
import AVFoundation
import Photos
enum QUWatermarkPosition {
case TopLeft
case TopRight
case BottomLeft
case BottomRight
case Default
}
class QUWatermarkManager: NSObject {
func watermark(video videoAsset:AVAsset, watermarkText text : String, saveToLibrary flag : Bool, watermarkPosition position : QUWatermarkPosition, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
self.watermark(video: videoAsset, watermarkText: text, imageName: nil, saveToLibrary: flag, watermarkPosition: position) { (status, session, outputURL) -> () in
completion!(status, session, outputURL)
}
}
func watermark(video videoAsset:AVAsset, imageName name : String, saveToLibrary flag : Bool, watermarkPosition position : QUWatermarkPosition, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
self.watermark(video: videoAsset, watermarkText: nil, imageName: name, saveToLibrary: flag, watermarkPosition: position) { (status, session, outputURL) -> () in
completion!(status, session, outputURL)
}
}
private func watermark(video videoAsset:AVAsset, watermarkText text : String!, imageName name : String!, saveToLibrary flag : Bool, watermarkPosition position : QUWatermarkPosition, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
DispatchQueue.global(qos: DispatchQoS.QoSClass.default).async {
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
}
catch {
print(error.localizedDescription)
}
let videoSize = clipVideoTrack.naturalSize
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(videoLayer)
if text != nil {
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.red.cgColor
titleLayer.string = text
titleLayer.font = "Helvetica" as CFTypeRef
titleLayer.fontSize = 15
titleLayer.alignmentMode = kCAAlignmentCenter
titleLayer.bounds = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(titleLayer)
} else if name != nil {
let watermarkImage = UIImage(named: name)
let imageLayer = CALayer()
imageLayer.contents = watermarkImage?.cgImage
var xPosition : CGFloat = 0.0
var yPosition : CGFloat = 0.0
let imageSize : CGFloat = 57.0
switch (position) {
case .TopLeft:
xPosition = 0
yPosition = 0
break
case .TopRight:
xPosition = videoSize.width - imageSize
yPosition = 0
break
case .BottomLeft:
xPosition = 0
yPosition = videoSize.height - imageSize
break
case .BottomRight, .Default:
xPosition = videoSize.width - imageSize
yPosition = videoSize.height - imageSize
break
}
imageLayer.frame = CGRect(x: xPosition, y: yPosition, width: imageSize, height: imageSize)
imageLayer.opacity = 0.65
parentLayer.addSublayer(imageLayer)
}
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, 30)
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
_ = mixComposition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: Date())
let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("watermarkVideo-\(date).mov")
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = url
exporter?.outputFileType = AVFileTypeQuickTimeMovie
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = videoComp
exporter?.exportAsynchronously() {
DispatchQueue.main.async {
if exporter?.status == AVAssetExportSessionStatus.completed {
let outputURL = exporter?.outputURL
if flag {
// Save to library
// let library = ALAssetsLibrary()
if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
}) { saved, error in
if saved {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
}
}
// if library.videoAtPathIs(compatibleWithSavedPhotosAlbum: outputURL) {
// library.writeVideoAtPathToSavedPhotosAlbum(outputURL,
// completionBlock: { (assetURL:NSURL!, error:NSError!) -> Void in
//
// completion!(AVAssetExportSessionStatus.Completed, exporter, outputURL)
// })
// }
} else {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
} else {
// Error
completion!(exporter?.status, exporter, nil)
}
}
}
}
}
private func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
var assetOrientation = UIImageOrientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
private func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaTypeVideo)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform: transform)
var scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.width
if assetInfo.isPortrait {
scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor),
at: kCMTimeZero)
} else {
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(CGAffineTransform(translationX: 0, y: UIScreen.main.bounds.width / 2))
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
let windowBounds = UIScreen.main.bounds
let yFix = assetTrack.naturalSize.height + windowBounds.height
let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: yFix)
concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
}
instruction.setTransform(concat, at: kCMTimeZero)
}
return instruction
}
}

Adding the following code achieved my goal:
let compositionAudioVideo: AVMutableCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID())!
let audioMix: AVMutableAudioMix = AVMutableAudioMix()
var audioMixParam: [AVMutableAudioMixInputParameters] = []
let assetVideoTrack: AVAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.audio)[0]
let videoParam: AVMutableAudioMixInputParameters = AVMutableAudioMixInputParameters(track: assetVideoTrack)
videoParam.trackID = compositionAudioVideo.trackID
audioMixParam.append(videoParam)
do {
try compositionAudioVideo.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: assetVideoTrack, at: kCMTimeZero)
} catch _ {
assertionFailure()
}
audioMix.inputParameters = audioMixParam
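For context, here is a rough sketch of where this slots into the watermark function above; the exact placement and the optional audioMix assignment are my assumptions, not tested code, and the names (mixComposition, videoAsset, exporter, audioMix) come from the code above:
// Sketch: right after compositionVideoTrack.insertTimeRange(...) succeeds,
// add an audio composition track and copy the source audio into it.
let compositionAudioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio,
                                                           preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
if let sourceAudioTrack = videoAsset.tracks(withMediaType: AVMediaTypeAudio).first {
    do {
        try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration),
                                                  of: sourceAudioTrack,
                                                  at: kCMTimeZero)
    } catch {
        print(error.localizedDescription)
    }
}
// If you also build an AVMutableAudioMix as in the snippet above, hand it to the exporter as well:
// exporter?.audioMix = audioMix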

Related

Merging video with original orientation

I have a link I can DM for a minimum working example!
Recording Videos
For recording, on the AVCaptureConnection of the AVCaptureSession I set isVideoMirrored to true when using the front camera and false when using the back camera, all in portrait orientation.
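For reference, the capture side looks roughly like this (a sketch; movieFileOutput and usingFrontCamera stand in for my actual capture setup):
// Sketch of the capture configuration described above; movieFileOutput and
// usingFrontCamera are placeholders for the real capture pipeline.
if let connection = movieFileOutput.connection(with: .video) {
    connection.videoOrientation = .portrait
    if connection.isVideoMirroringSupported {
        connection.automaticallyAdjustsVideoMirroring = false
        connection.isVideoMirrored = usingFrontCamera
    }
}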
Saving Videos
When I save videos, I perform an AVAssetExportSession. If I used the front camera, I want to maintain isVideoMirrored = true, so I create an AVMutableComposition and set the AVAsset video track's preferredTransform to CGAffineTransform(scaleX: -1.0, y: 1.0).rotated(by: CGFloat(Double.pi/2)). For the back camera, I export the AVAsset as-is.
Part of my saving code:
if didCaptureWithFrontCamera {
let composition = AVMutableComposition()
let assetVideoTrack = asset.tracks(withMediaType: .video).last!
let assetAudioTrack = asset.tracks(withMediaType: .audio).last!
let compositionVideoTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
let compositionAudioTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
try? compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: asset.duration), of: assetVideoTrack, at: CMTime.zero)
try? compositionAudioTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: asset.duration), of: assetAudioTrack, at: CMTime.zero)
compositionVideoTrack?.preferredTransform = CGAffineTransform(scaleX: -1.0, y: 1.0).rotated(by: CGFloat(Double.pi/2))
guard let exportSession = AVAssetExportSession(asset: composition, presetName: AVAssetExportPreset1280x720) else {
handler(nil)
return
}
exportSession.outputURL = outputURL
exportSession.outputFileType = .mp4
exportSession.shouldOptimizeForNetworkUse = true
exportSession.exportAsynchronously { handler(exportSession) }
} else {
guard let exportSession = AVAssetExportSession(asset: asset, presetName: AVAssetExportPreset1280x720) else {
handler(nil)
return
}
exportSession.outputURL = outputURL
exportSession.outputFileType = .mp4
exportSession.shouldOptimizeForNetworkUse = true
exportSession.exportAsynchronously { handler(exportSession) }
}
Merging Videos
Later, to view the saved videos, I want to merge them together into a single video via AVMutableComposition while maintaining each video's original orientation.
What has partially worked is setting the AVMutableComposition video track's preferredTransform to the preferredTransform of an individual AVAsset's video track. The only problem is that a single orientation is then applied to all the videos (i.e. mirroring is correctly absent from a back-camera video, but that same non-mirrored transform gets applied to the front-camera videos as well).
From solutions I've come across, it appears I need to apply an AVMutableVideoCompositionInstruction, but when I try, the AVAssetExportSession doesn't seem to factor in the videoComposition instructions at all.
Any guidance would be extremely appreciated as I haven't been able to solve it for the life of me...
My attempted merge code:
func merge(videos: [AVURLAsset], for date: Date, completion: @escaping (_ url: URL, _ asset: AVAssetExportSession)->()) {
let videoComposition = AVMutableComposition()
var lastTime: CMTime = .zero
var count = 0
var instructions = [AVMutableVideoCompositionInstruction]()
let renderSize = CGSize(width: 720, height: 1280)
guard let videoCompositionTrack = videoComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
guard let audioCompositionTrack = videoComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
for video in videos {
if let videoTrack = video.tracks(withMediaType: .video)[safe: 0] {
//this is the only thing that seems to work, but not in the way I'd hoped, where each video keeps its original orientation
//videoCompositionTrack.preferredTransform = videoTrack.preferredTransform
if let audioTrack = video.tracks(withMediaType: .audio)[safe: 0] {
do {
try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration), of: videoTrack, at: lastTime)
try audioCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration), of: audioTrack, at: lastTime)
let layerInstruction = videoCompositionInstruction(videoTrack, asset: video, count: count)
let videoCompositionInstruction = AVMutableVideoCompositionInstruction()
videoCompositionInstruction.timeRange = CMTimeRangeMake(start: lastTime, duration: video.duration)
videoCompositionInstruction.layerInstructions = [layerInstruction]
instructions.append(videoCompositionInstruction)
} catch {
return
}
lastTime = CMTimeAdd(lastTime, video.duration)
count += 1
} else {
do {
try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration), of: videoTrack, at: lastTime)
let layerInstruction = videoCompositionInstruction(videoTrack, asset: video, count: count)
let videoCompositionInstruction = AVMutableVideoCompositionInstruction()
videoCompositionInstruction.timeRange = CMTimeRangeMake(start: lastTime, duration: video.duration)
videoCompositionInstruction.layerInstructions = [layerInstruction]
instructions.append(videoCompositionInstruction)
} catch {
return
}
lastTime = CMTimeAdd(lastTime, video.duration)
count += 1
}
}
}
let mutableVideoComposition = AVMutableVideoComposition()
mutableVideoComposition.instructions = instructions
mutableVideoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mutableVideoComposition.renderSize = renderSize
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: date)
let mergedURL = NSURL.fileURL(withPath: NSTemporaryDirectory() + "merged-\(date)" + ".mp4")
guard let exporter = AVAssetExportSession(asset: videoComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
exporter.outputURL = mergedURL
exporter.outputFileType = .mp4
exporter.videoComposition = mutableVideoComposition
exporter.shouldOptimizeForNetworkUse = true
completion(mergedURL, exporter)
}
func videoCompositionInstruction(_ firstTrack: AVAssetTrack, asset: AVAsset, count: Int) -> AVMutableVideoCompositionLayerInstruction {
let renderSize = CGSize(width: 720, height: 1280)
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstTrack)
let assetTrack = asset.tracks(withMediaType: .video)[0]
let t = assetTrack.fixedPreferredTransform // new transform fix
let assetInfo = orientationFromTransform(t)
if assetInfo.isPortrait {
let scaleToFitRatio = renderSize.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var finalTransform = assetTrack.fixedPreferredTransform.concatenating(scaleFactor)
if assetInfo.orientation == .rightMirrored || assetInfo.orientation == .leftMirrored {
finalTransform = finalTransform.translatedBy(x: -t.ty, y: 0)
}
instruction.setTransform(t, at: CMTime.zero)
} else {
let renderRect = CGRect(x: 0, y: 0, width: renderSize.width, height: renderSize.height)
let videoRect = CGRect(origin: .zero, size: assetTrack.naturalSize).applying(assetTrack.fixedPreferredTransform)
let scale = renderRect.width / videoRect.width
let transform = CGAffineTransform(scaleX: renderRect.width / videoRect.width, y: (videoRect.height * scale) / assetTrack.naturalSize.height)
let translate = CGAffineTransform(translationX: .zero, y: ((renderSize.height - (videoRect.height * scale))) / 2)
instruction.setTransform(assetTrack.fixedPreferredTransform.concatenating(transform).concatenating(translate), at: .zero)
}
if count == 0 {
instruction.setOpacity(0.0, at: asset.duration)
}
return instruction
}
func orientationFromTransform(_ transform: CGAffineTransform) -> (orientation: UIImage.Orientation, isPortrait: Bool) {
var assetOrientation = UIImage.Orientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == 1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .rightMirrored
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .leftMirrored
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
extension AVAssetTrack {
var fixedPreferredTransform: CGAffineTransform {
var t = preferredTransform
switch(t.a, t.b, t.c, t.d) {
case (1, 0, 0, 1):
t.tx = 0
t.ty = 0
case (1, 0, 0, -1):
t.tx = 0
t.ty = naturalSize.height
case (-1, 0, 0, 1):
t.tx = naturalSize.width
t.ty = 0
case (-1, 0, 0, -1):
t.tx = naturalSize.width
t.ty = naturalSize.height
case (0, -1, 1, 0):
t.tx = 0
t.ty = naturalSize.width
case (0, 1, -1, 0):
t.tx = naturalSize.height
t.ty = 0
case (0, 1, 1, 0):
t.tx = 0
t.ty = 0
case (0, -1, -1, 0):
t.tx = naturalSize.height
t.ty = naturalSize.width
default:
break
}
return t
}
}
Assuming your transformations are correct, I updated your merge function.
The main change is using a single AVMutableVideoCompositionInstruction with multiple AVMutableVideoCompositionLayerInstructions, and passing the correct CMTime value for each layer instruction to be executed at.
func merge(videos: [AVURLAsset],
for date: Date,
completion: @escaping (_ url: URL, _ asset: AVAssetExportSession)->()) {
let videoComposition = AVMutableComposition()
guard let videoCompositionTrack = videoComposition.addMutableTrack(withMediaType: .video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
let audioCompositionTrack = videoComposition.addMutableTrack(withMediaType: .audio,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
else { return }
var lastTime: CMTime = .zero
var layerInstructions = [AVMutableVideoCompositionLayerInstruction]()
for video in videos {
guard let videoTrack = video.tracks(withMediaType: .video)[safe: 0] else { return }
// add audio track if available
if let audioTrack = video.tracks(withMediaType: .audio)[safe: 0] {
do {
try audioCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration),
of: audioTrack,
at: lastTime)
} catch {
return
}
}
// add video track
do {
try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration),
of: videoTrack,
at: lastTime)
let layerInstruction = makeVideoCompositionInstruction(videoTrack,
asset: video,
atTime: lastTime)
layerInstructions.append(layerInstruction)
} catch {
return
}
lastTime = CMTimeAdd(lastTime, video.duration)
} // end for..in videos
let renderSize = CGSize(width: 720, height: 1280)
let videoInstruction = AVMutableVideoCompositionInstruction()
videoInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: lastTime)
videoInstruction.layerInstructions = layerInstructions
let mutableVideoComposition = AVMutableVideoComposition()
mutableVideoComposition.instructions = [videoInstruction]
mutableVideoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mutableVideoComposition.renderSize = renderSize
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: date)
let mergedURL = NSURL.fileURL(withPath: NSTemporaryDirectory() + "merged-\(date)" + ".mp4")
guard let exporter = AVAssetExportSession(asset: videoComposition,
presetName: AVAssetExportPresetHighestQuality) else { return }
exporter.outputURL = mergedURL
exporter.outputFileType = .mp4
exporter.videoComposition = mutableVideoComposition
exporter.shouldOptimizeForNetworkUse = true
completion(mergedURL, exporter)
}
func makeVideoCompositionInstruction(_ videoTrack: AVAssetTrack,
asset: AVAsset,
atTime: CMTime) -> AVMutableVideoCompositionLayerInstruction {
let renderSize = CGSize(width: 720, height: 1280)
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let assetTrack = asset.tracks(withMediaType: .video)[0]
let t = assetTrack.fixedPreferredTransform // new transform fix
let assetInfo = orientationFromTransform(t)
if assetInfo.isPortrait {
let scaleToFitRatio = renderSize.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var finalTransform = assetTrack.fixedPreferredTransform.concatenating(scaleFactor)
if assetInfo.orientation == .rightMirrored || assetInfo.orientation == .leftMirrored {
finalTransform = finalTransform.translatedBy(x: -t.ty, y: 0)
}
instruction.setTransform(t, at: atTime)
} else {
let renderRect = CGRect(x: 0, y: 0, width: renderSize.width, height: renderSize.height)
let videoRect = CGRect(origin: .zero, size: assetTrack.naturalSize).applying(assetTrack.fixedPreferredTransform)
let scale = renderRect.width / videoRect.width
let transform = CGAffineTransform(scaleX: renderRect.width / videoRect.width,
y: (videoRect.height * scale) / assetTrack.naturalSize.height)
let translate = CGAffineTransform(translationX: .zero,
y: ((renderSize.height - (videoRect.height * scale))) / 2)
instruction.setTransform(assetTrack.fixedPreferredTransform.concatenating(transform).concatenating(translate),
at: atTime)
}
// if atTime = 0, we can assume this is the first track being added
if atTime == .zero {
instruction.setOpacity(0.0,
at: asset.duration)
}
return instruction
}
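Note that, as written, the completion hands back an exporter that has not started exporting yet, so the caller still needs to kick it off. A call might look like this (a sketch; assets stands in for your [AVURLAsset]):
// Sketch: calling merge(videos:for:completion:) and driving the export from the caller.
merge(videos: assets, for: Date()) { url, exporter in
    exporter.exportAsynchronously {
        DispatchQueue.main.async {
            if exporter.status == .completed {
                print("Merged video written to \(url)")
            } else {
                print("Export failed: \(String(describing: exporter.error))")
            }
        }
    }
}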

AVAssetExportSession AVFoundationErrorDomain Code -11800 The operation could not be completed, NSOSStatusErrorDomain Code=-12780 "(null) in Swift iOS

I am developing a video-based application in Swift, in which I export a video clip with a watermark logo and a fade in/out effect. Here is my code:
func watermark(video videoAsset:AVAsset, videoModal:VideoModel, watermarkText text : String!, imageName name : String!, saveToLibrary flag : Bool, watermarkPosition position : PDWatermarkPosition, withMode mode: SpeedoVideoMode, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
var arrayLayerInstructions:[AVMutableVideoCompositionLayerInstruction] = []
let servicemodel = ServiceModel()
DispatchQueue.global(qos: DispatchQoS.QoSClass.default).sync {
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
if videoAsset.tracks(withMediaType: AVMediaTypeVideo).count == 0
{
completion!(nil, nil, nil)
return
}
let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
self.addAudioTrack(composition: mixComposition, videoAsset: videoAsset as! AVURLAsset, withMode: mode, videoModal:videoModal)
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
}
catch {
print(error.localizedDescription)
}
let videoSize = clipVideoTrack.naturalSize //CGSize(width: 375, height: 300)
//to add Watermark
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
//videoLayer.backgroundColor = UIColor.red.cgColor
parentLayer.addSublayer(videoLayer)
if name != nil {
let watermarkImage = UIImage(named: name)
let imageLayer = CALayer()
//imageLayer.backgroundColor = UIColor.purple.cgColor
imageLayer.contents = watermarkImage?.cgImage
var xPosition : CGFloat = 0.0
var yPosition : CGFloat = 0.0
let imageSize : CGFloat = 150
switch (position) {
case .TopLeft:
xPosition = 0
yPosition = 0
break
case .TopRight:
xPosition = videoSize.width - imageSize - 100
yPosition = 80
break
case .BottomLeft:
xPosition = 0
yPosition = videoSize.height - imageSize
break
case .BottomRight, .Default:
xPosition = videoSize.width - imageSize
yPosition = videoSize.height - imageSize
break
}
imageLayer.frame = CGRect(x: xPosition, y: yPosition, width: imageSize, height: imageSize)
imageLayer.opacity = 0.75
parentLayer.addSublayer(imageLayer)
if text != nil {
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.clear.cgColor
titleLayer.string = text
titleLayer.font = "Helvetica" as CFTypeRef
titleLayer.fontSize = 20
titleLayer.alignmentMode = kCAAlignmentRight
titleLayer.frame = CGRect(x: 0, y: yPosition - imageSize, width: videoSize.width - imageSize/2 - 4, height: 57)
titleLayer.foregroundColor = UIColor.lightGray.cgColor
parentLayer.addSublayer(titleLayer)
}
}
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, 30)
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
_ = mixComposition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
//Add Fade In Out effects
let startTime = CMTime(seconds: Double(0), preferredTimescale: 1000)
let endTime = CMTime(seconds: Double(1), preferredTimescale: 1000)
let timeRange = CMTimeRange(start: startTime, end: endTime)
layerInstruction.setOpacityRamp(fromStartOpacity: 0.1, toEndOpacity: 1.0, timeRange: timeRange)
let startTime1 = CMTime(seconds: videoAsset.duration.seconds-1, preferredTimescale: 1000)
let endTime1 = CMTime(seconds: videoAsset.duration.seconds, preferredTimescale: 1000)
let timeRange1 = CMTimeRange(start: startTime1, end: endTime1)
layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.1, timeRange: timeRange1)
arrayLayerInstructions.append(layerInstruction)
instruction.layerInstructions = arrayLayerInstructions
videoComp.instructions = [instruction]
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("\(videoModal.fileID).mov")
let filePath = url.path
let fileManager = FileManager.default
do {
if fileManager.fileExists(atPath: filePath) {
print("FILE AVAILABLE")
try fileManager.removeItem(atPath:filePath)
} else {
print("FILE NOT AVAILABLE")
}
} catch _ {
}
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = url
exporter?.outputFileType = AVFileTypeQuickTimeMovie
let timeRangetoTrim = CMTimeRange(start: CMTime(seconds: Double(videoModal.leftRangeValue), preferredTimescale: 1000),
end: CMTime(seconds: Double(videoModal.rightRangeValue), preferredTimescale: 1000))
exporter?.timeRange = timeRangetoTrim
exporter?.shouldOptimizeForNetworkUse = false
exporter?.videoComposition = videoComp
exporter?.exportAsynchronously() {
DispatchQueue.main.async {
if exporter?.status == AVAssetExportSessionStatus.completed {
let outputURL = exporter?.outputURL
if flag {
if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
}) { saved, error in
if saved {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
}
}
} else {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
} else {
// Error
completion!(exporter?.status, exporter, nil)// Getting error here
}
}
}
}
}
func addAudioTrack(composition: AVMutableComposition, videoAsset: AVURLAsset, withMode mode: SpeedoVideoMode, videoModal:VideoFileModel) {
let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())
let audioTracks = videoAsset.tracks(withMediaType: AVMediaTypeAudio)
for audioTrack in audioTracks {
try! compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
}
}
func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaTypeVideo)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform: transform)
var scaleToFitRatio = UIScreen.main.bounds.width / 375
if assetInfo.isPortrait {
scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor),
at: kCMTimeZero)
} else {
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(CGAffineTransform(translationX: 0, y: 0))
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
let windowBounds = UIScreen.main.bounds
let yFix = 375 + windowBounds.height
let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: CGFloat(yFix))
concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
}
instruction.setTransform(concat, at: kCMTimeZero)
}
return instruction
}
private func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
var assetOrientation = UIImageOrientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
My code works fine for some videos but not for others. When it fails, the AVAssetExportSessionStatus is failed and I get the error below:
Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could
not be completed" UserInfo={NSLocalizedFailureReason=An unknown error
occurred (-12780), NSLocalizedDescription=The operation could not be
completed, NSUnderlyingError=0x28262c240 {Error
Domain=NSOSStatusErrorDomain Code=-12780 "(null)"}}
Can anyone help me on this? Thank you in advance.
The method func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction is wrong because you need to provide an AVAssetTrack, which holds the actual video.
Instead, you are passing an AVCompositionTrack that still needs to be composed, so change the signature to func videoCompositionInstructionForTrack(track: AVAssetTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction.
Now when you are calling the actual method, you need to pass clipVideoTrack, i.e., let layerInstruction = self.videoCompositionInstructionForTrack(track: clipVideoTrack, asset: videoAsset).
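In other words, the two changes look roughly like this (a sketch, using the same names as in the question's code):
// 1. Accept the source AVAssetTrack instead of the (still empty) AVCompositionTrack:
func videoCompositionInstructionForTrack(track: AVAssetTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    // ... body unchanged; it already reads the transform from the asset's video track ...
    return instruction
}
// 2. Call it with the asset's video track rather than the composition track:
let layerInstruction = self.videoCompositionInstructionForTrack(track: clipVideoTrack, asset: videoAsset)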
Let me know if you are still facing the error!

How to merge mp4 and MOV type videos using AVMutableComposition iOS Swift?

I am using AVMutableComposition for merging videos. It works fine when the two videos are of the same type, like two mp4 videos or two mov videos.
But if I try to merge one mp4 video and one mov video, the merged video stops after the first video finishes.
You can use this code:
private func mergeAssets(arrayAssets:[AVAsset],audioAsset:AVAsset,completionHandler: @escaping MergedVideoCompletionHandler){
var insertTime = kCMTimeZero
let animation = true
var arrayLayerInstructions:[AVMutableVideoCompositionLayerInstruction] = []
var outputSize = CGSize.init(width: 0, height: 0)
// Determine video output size
for videoAsset in arrayAssets {
let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
if videoSize.height > outputSize.height {
outputSize = videoSize
}
}
if outputSize.width == 0 || outputSize.height == 0 {
outputSize = defaultSize
}
let audioSoundTrack = audioAsset.tracks(withMediaType: AVMediaType.audio).first
// Init composition
let mixComposition = AVMutableComposition.init()
for videoAsset in arrayAssets {
// Get video track
guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else { continue }
// Get audio track
// Init video & audio composition track
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
let startTime = kCMTimeZero
let duration = videoAsset.duration
// Add video track to video composition at specific time
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, duration),
of: videoTrack,
at: insertTime)
// Add audio track to audio composition at specific time
if let audioTrack = audioSoundTrack {
try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, duration),
of: audioTrack,
at: insertTime)
}
// Add instruction for video track
let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!,
asset: videoAsset,
standardSize: outputSize,
atTime: insertTime)
// Hide video track before changing to new track
let endTime = CMTimeAdd(insertTime, duration)
if animation {
let timeScale = videoAsset.duration.timescale
let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
}
else {
layerInstruction.setOpacity(0, at: endTime)
}
arrayLayerInstructions.append(layerInstruction)
// Increase the insert time
insertTime = CMTimeAdd(insertTime, duration)
}
catch {
let appError = getAppError(error, message: "Failed to load videoTrack")
completionHandler(nil,appError)
}
}
// Main video composition instruction
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, insertTime)
mainInstruction.layerInstructions = arrayLayerInstructions
// Main video composition
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(1, 30)
mainComposition.renderSize = outputSize
let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
let exportURL = URL.init(fileURLWithPath: path)
// Remove file if existed
FileManager.default.removeItemIfExisted(exportURL)
// //4 - Get path
// let fileName = IGMediaUtil.createNewFile(fileName: "mergeVideo", fileExtension: "mp4")
// guard let docSubFolder = IGMediaUtil.createFolderInDoc(folderName: Constants.kMergedVideosFolder) else{
// return
// }
// let mergingURL = docSubFolder.appendingPathComponent(fileName)
// // Remove file if existed
// FileManager.default.removeItemIfExisted(mergingURL)
let exporter = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = exportURL
exporter?.outputFileType = AVFileType.mp4
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = mainComposition
// Do export
exporter?.exportAsynchronously(completionHandler: {
DispatchQueue.main.async {
self.exportDidFinish(exporter: exporter, videoURL: exportURL)
}
})
}
fileprivate func exportDidFinish(exporter:AVAssetExportSession?, videoURL:URL) {
var progressValue:Float = 0
if let status = exporter?.status {
switch status{
case .exporting:
progressValue = (exporter?.progress)!
case .failed, .cancelled,.unknown:
progressValue = 1.0
let appError = self.getAppError(exporter?.error,message:"Failed to create Data")
print( "localizedDescription ::::::AVExport ********** \(exporter?.error?.localizedDescription)" ?? "No Error")
print(exporter?.error.debugDescription)
if let exportBlock = self.callback{
exportBlock(nil ,appError)
}
case .waiting:
break
case .completed:
progressValue = 1.0;
print("Exported file: \(videoURL.absoluteString)")
if let exportBlock = self.callback{
exportBlock(videoURL ,nil)
}
}
if let progressBlock = self.progressCallback{
DispatchQueue.main.async {
progressBlock(progressValue)
}
}
}
}
}
extension MediaAudioMergerServiceManager{
fileprivate func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
var assetOrientation = UIImageOrientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
fileprivate func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset, standardSize:CGSize, atTime: CMTime) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform: transform)
var aspectFillRatio:CGFloat = 1
if assetTrack.naturalSize.height < assetTrack.naturalSize.width {
aspectFillRatio = standardSize.height / assetTrack.naturalSize.height
}
else {
aspectFillRatio = standardSize.width / assetTrack.naturalSize.width
}
if assetInfo.isPortrait {
let scaleFactor = CGAffineTransform(scaleX: aspectFillRatio, y: aspectFillRatio)
let posX = standardSize.width/2 - (assetTrack.naturalSize.height * aspectFillRatio)/2
let posY = standardSize.height/2 - (assetTrack.naturalSize.width * aspectFillRatio)/2
let moveFactor = CGAffineTransform(translationX: posX, y: posY)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(moveFactor), at: atTime)
} else {
let scaleFactor = CGAffineTransform(scaleX: aspectFillRatio, y: aspectFillRatio)
let posX = standardSize.width/2 - (assetTrack.naturalSize.width * aspectFillRatio)/2
let posY = standardSize.height/2 - (assetTrack.naturalSize.height * aspectFillRatio)/2
let moveFactor = CGAffineTransform(translationX: posX, y: posY)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(moveFactor)
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
concat = fixUpsideDown.concatenating(scaleFactor).concatenating(moveFactor)
}
instruction.setTransform(concat, at: atTime)
}
return instruction
}
}
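For reference, a call from inside the same manager class might look like this (a sketch; the asset URLs and the shape of MergedVideoCompletionHandler are assumptions based on the code above):
// Sketch: invoking mergeAssets with one .mp4 clip, one .mov clip and a backing audio asset.
// mp4URL, movURL and audioURL are placeholder file URLs.
let clipA = AVAsset(url: mp4URL)
let clipB = AVAsset(url: movURL)
let music = AVAsset(url: audioURL)
mergeAssets(arrayAssets: [clipA, clipB], audioAsset: music) { mergedURL, error in
    if let mergedURL = mergedURL {
        print("Merged file at \(mergedURL)")
    } else {
        print("Merge failed: \(String(describing: error))")
    }
}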

Swift 3 : How to export video with text using AVVideoComposition

I am trying to use AVVideoComposition to add some text on top of a video and save the video.
This is the code I use:
I. Create an AVMutableComposition and an AVVideoComposition
var mutableComp = AVMutableComposition()
var mutableVidComp = AVMutableVideoComposition()
var compositionSize : CGSize?
func configureAsset(){
let options = [AVURLAssetPreferPreciseDurationAndTimingKey : "true"]
let videoAsset = AVURLAsset(url: Bundle.main.url(forResource: "Car", withExtension: "mp4")! , options : options)
let videoAssetSourceTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo).first! as AVAssetTrack
compositionSize = videoAssetSourceTrack.naturalSize
let mutableVidTrack = mutableComp.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
let trackRange = CMTimeRangeMake(kCMTimeZero, videoAsset.duration)
do {
try mutableVidTrack.insertTimeRange( trackRange, of: videoAssetSourceTrack, at: kCMTimeZero)
mutableVidTrack.preferredTransform = videoAssetSourceTrack.preferredTransform
}catch { print(error) }
snapshot = mutableComp
mutableVidComp = AVMutableVideoComposition(propertiesOf: videoAsset)
}
II. Set up the layers
func applyVideoEffectsToComposition() {
// 1 - Set up the text layer
let subTitle1Text = CATextLayer()
subTitle1Text.font = "Helvetica-Bold" as CFTypeRef
subTitle1Text.frame = CGRect(x: self.view.frame.midX - 60 , y: self.view.frame.midY - 50, width: 120, height: 100)
subTitle1Text.string = "Bench"
subTitle1Text.foregroundColor = UIColor.black.cgColor
subTitle1Text.alignmentMode = kCAAlignmentCenter
// 2 - The usual overlay
let overlayLayer = CALayer()
overlayLayer.addSublayer(subTitle1Text)
overlayLayer.frame = CGRect(x: 0, y: 0, width: compositionSize!.width, height: compositionSize!.height)
overlayLayer.masksToBounds = true
// 3 - set up the parent layer
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: compositionSize!.width, height: compositionSize!.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: compositionSize!.width, height: compositionSize!.height)
parentLayer.addSublayer(videoLayer)
parentLayer.addSublayer(overlayLayer)
mutableVidComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
}
III. Save the video with AVMutableVideoComposition
func saveAsset (){
func deleteFile(_ filePath:URL) {
guard FileManager.default.fileExists(atPath: filePath.path) else { return }
do {
try FileManager.default.removeItem(atPath: filePath.path) }
catch {fatalError("Unable to delete file: \(error) : \(#function).")} }
let documentsDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0] as URL
let filePath = documentsDirectory.appendingPathComponent("rendered-audio.mp4")
deleteFile(filePath)
if let exportSession = AVAssetExportSession(asset: mutableComp , presetName: AVAssetExportPresetHighestQuality){
exportSession.videoComposition = mutableVidComp
// exportSession.canPerformMultiplePassesOverSourceMediaData = true
exportSession.outputURL = filePath
exportSession.shouldOptimizeForNetworkUse = true
exportSession.timeRange = CMTimeRangeMake(kCMTimeZero, mutableComp.duration)
exportSession.outputFileType = AVFileTypeQuickTimeMovie
exportSession.exportAsynchronously {
print("finished: \(filePath) : \(exportSession.status.rawValue) ")
if exportSession.status.rawValue == 4 {
print("Export failed -> Reason: \(exportSession.error!.localizedDescription))")
print(exportSession.error!)
}
}
}
}
Then I run all three methods in the viewDidLoad method for a quick test. The problem is that when I run the app, the result of the export is the original video without the title on it.
What am I missing here?
UPDATE
I notice that setting a subTitle1Text.backgroundColor property in part II of the code makes a colored rectangle corresponding to subTitle1Text.frame appear on top of the video when exported.
When this code is modified for playback using AVSynchronizedLayer, the desired layer, text included, can be seen on top of the video.
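The playback setup I mean looks roughly like this (a sketch; playerItem, playerLayer and view come from my player setup, and overlayLayer is the one built in part II):
// Sketch of the AVSynchronizedLayer playback workaround mentioned above.
let syncLayer = AVSynchronizedLayer(playerItem: playerItem)
syncLayer.frame = playerLayer.frame
syncLayer.addSublayer(overlayLayer)
view.layer.addSublayer(syncLayer)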
So perhaps this is a bug in AVFoundation itself.
I suppose I am only left with the option of using a customVideoCompositorClass. The problem with that is that it takes a lot of time to render the video. Here is an example that uses AVVideoCompositing.
Here is the full working code that I used in my project. It shows a CATextLayer at the bottom (0,0), and when the export session finishes it swaps the new file path into the player item. I used a model written in Objective-C to get the orientation. Please test on a device; AVPlayer will not show the text layer properly in the simulator.
let composition = AVMutableComposition.init()
let videoComposition = AVMutableVideoComposition()
videoComposition.frameDuration = CMTimeMake(1, 30)
videoComposition.renderScale = 1.0
let compositionCommentaryTrack: AVMutableCompositionTrack? = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
let compositionVideoTrack: AVMutableCompositionTrack? = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
let clipVideoTrack:AVAssetTrack = self.currentAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
let audioTrack: AVAssetTrack? = self.currentAsset.tracks(withMediaType: AVMediaTypeAudio)[0]
try? compositionCommentaryTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, self.currentAsset.duration), of: audioTrack!, at: kCMTimeZero)
try? compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, self.currentAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
let orientation = VideoModel.videoOrientation(self.currentAsset)
var isPortrait = false
switch orientation {
case .landscapeRight:
isPortrait = false
case .landscapeLeft:
isPortrait = false
case .portrait:
isPortrait = true
case .portraitUpsideDown:
isPortrait = true
}
var naturalSize = clipVideoTrack.naturalSize
if isPortrait
{
naturalSize = CGSize.init(width: naturalSize.height, height: naturalSize.width)
}
videoComposition.renderSize = naturalSize
let scale = CGFloat(1.0)
var transform = CGAffineTransform.init(scaleX: CGFloat(scale), y: CGFloat(scale))
switch orientation {
case .landscapeRight: break
// isPortrait = false
case .landscapeLeft:
transform = transform.translatedBy(x: naturalSize.width, y: naturalSize.height)
transform = transform.rotated(by: .pi)
case .portrait:
transform = transform.translatedBy(x: naturalSize.width, y: 0)
transform = transform.rotated(by: CGFloat(M_PI_2))
case .portraitUpsideDown:break
}
let frontLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionVideoTrack!)
frontLayerInstruction.setTransform(transform, at: kCMTimeZero)
let MainInstruction = AVMutableVideoCompositionInstruction()
MainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
MainInstruction.layerInstructions = [frontLayerInstruction]
videoComposition.instructions = [MainInstruction]
let parentLayer = CALayer.init()
parentLayer.frame = CGRect.init(x: 0, y: 0, width: naturalSize.width, height: naturalSize.height)
let videoLayer = CALayer.init()
videoLayer.frame = parentLayer.frame
let layer = CATextLayer()
layer.string = "HELLO ALL"
layer.foregroundColor = UIColor.white.cgColor
layer.backgroundColor = UIColor.orange.cgColor
layer.fontSize = 32
layer.frame = CGRect.init(x: 0, y: 0, width: 300, height: 100)
var rct = layer.frame;
let widthScale = self.playerView.frame.size.width/naturalSize.width
rct.size.width /= widthScale
rct.size.height /= widthScale
rct.origin.x /= widthScale
rct.origin.y /= widthScale
parentLayer.addSublayer(videoLayer)
parentLayer.addSublayer(layer)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool.init(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let videoPath = documentsPath+"/cropEditVideo.mov"
let fileManager = FileManager.default
if fileManager.fileExists(atPath: videoPath)
{
try! fileManager.removeItem(atPath: videoPath)
}
print("video path \(videoPath)")
var exportSession = AVAssetExportSession.init(asset: composition, presetName: AVAssetExportPresetHighestQuality)
exportSession?.videoComposition = videoComposition
exportSession?.outputFileType = AVFileTypeQuickTimeMovie
exportSession?.outputURL = URL.init(fileURLWithPath: videoPath)
exportSession?.videoComposition = videoComposition
var exportProgress: Float = 0
let queue = DispatchQueue(label: "Export Progress Queue")
queue.async(execute: {() -> Void in
while exportSession != nil {
// int prevProgress = exportProgress;
exportProgress = (exportSession?.progress)!
print("current progress == \(exportProgress)")
sleep(1)
}
})
exportSession?.exportAsynchronously(completionHandler: {
if exportSession?.status == AVAssetExportSessionStatus.failed
{
print("Failed \(exportSession?.error)")
}else if exportSession?.status == AVAssetExportSessionStatus.completed
{
exportSession = nil
let asset = AVAsset.init(url: URL.init(fileURLWithPath: videoPath))
DispatchQueue.main.async {
let item = AVPlayerItem.init(asset: asset)
self.player.replaceCurrentItem(with: item)
let assetDuration = CMTimeGetSeconds(composition.duration)
self.progressSlider.maximumValue = Float(assetDuration)
self.syncLayer.removeFromSuperlayer()
self.lblIntro.isHidden = true
self.player.play()
// let url = URL.init(fileURLWithPath: videoPath)
// let activityVC = UIActivityViewController(activityItems: [url], applicationActivities: [])
// self.present(activityVC, animated: true, completion: nil)
}
}
})
Below is the code of my VideoModel class:
-(AVCaptureVideoOrientation)videoOrientation:(AVAsset *)asset
{
AVCaptureVideoOrientation result = 0;
NSArray *tracks = [asset tracksWithMediaType:AVMediaTypeVideo];
if([tracks count] > 0) {
AVAssetTrack *videoTrack = [tracks objectAtIndex:0];
CGAffineTransform t = videoTrack.preferredTransform;
// Portrait
if(t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0)
{
result = AVCaptureVideoOrientationPortrait;
}
// PortraitUpsideDown
if(t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0) {
result = AVCaptureVideoOrientationPortraitUpsideDown;
}
// LandscapeRight
if(t.a == 1.0 && t.b == 0 && t.c == 0 && t.d == 1.0)
{
result = AVCaptureVideoOrientationLandscapeRight;
}
// LandscapeLeft
if(t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0)
{
result = AVCaptureVideoOrientationLandscapeLeft;
}
}
return result;
}
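If you would rather keep everything in Swift, a direct translation of that helper might look like this (my own sketch, not part of the original answer):
// Sketch: a Swift equivalent of the Objective-C videoOrientation: helper above.
// Falls back to .portrait when no video track is found or the transform is unrecognised.
func videoOrientation(for asset: AVAsset) -> AVCaptureVideoOrientation {
    guard let t = asset.tracks(withMediaType: AVMediaTypeVideo).first?.preferredTransform else {
        return .portrait
    }
    if t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0 { return .portrait }
    if t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0 { return .portraitUpsideDown }
    if t.a == 1.0 && t.b == 0 && t.c == 0 && t.d == 1.0 { return .landscapeRight }
    if t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0 { return .landscapeLeft }
    return .portrait
}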
Let me know if you need any more help in this.

How can I add a watermark in a captured video on iOS - Swift Programming

Can someone please help me add a watermark to a recorded video using Swift? For reference, I'm working with the AVFoundation framework.
Below are the two requirements:
Watermark using UILabel as text
Watermark using UIImage
Thanks in advance.
I have found the solution and am updating the answer given by @m177312 (check link).
import UIKit
import AssetsLibrary
import AVFoundation
enum QUWatermarkPosition {
case TopLeft
case TopRight
case BottomLeft
case BottomRight
case Default
}
class QUWatermarkManager: NSObject {
func watermark(video videoAsset:AVAsset, watermarkText text : String, saveToLibrary flag : Bool, watermarkPosition position : QUWatermarkPosition, completion : ((status : AVAssetExportSessionStatus!, session: AVAssetExportSession!, outputURL : NSURL!) -> ())?) {
self.watermark(video: videoAsset, watermarkText: text, imageName: nil, saveToLibrary: flag, watermarkPosition: position) { (status, session, outputURL) -> () in
completion!(status: status, session: session, outputURL: outputURL)
}
}
func watermark(video videoAsset:AVAsset, imageName name : String, saveToLibrary flag : Bool, watermarkPosition position : QUWatermarkPosition, completion : ((status : AVAssetExportSessionStatus!, session: AVAssetExportSession!, outputURL : NSURL!) -> ())?) {
self.watermark(video: videoAsset, watermarkText: nil, imageName: name, saveToLibrary: flag, watermarkPosition: position) { (status, session, outputURL) -> () in
completion!(status: status, session: session, outputURL: outputURL)
}
}
private func watermark(video videoAsset:AVAsset, watermarkText text : String!, imageName name : String!, saveToLibrary flag : Bool, watermarkPosition position : QUWatermarkPosition, completion : ((status : AVAssetExportSessionStatus!, session: AVAssetExportSession!, outputURL : NSURL!) -> ())?) {
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), { () -> Void in
// 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
var mixComposition = AVMutableComposition()
// 2 - Create video tracks
var compositionVideoTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
var clipVideoTrack = videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0] as! AVAssetTrack
compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), ofTrack: clipVideoTrack, atTime: kCMTimeZero, error: nil)
clipVideoTrack.preferredTransform
// Video size
let videoSize = clipVideoTrack.naturalSize
// sorts the layer in proper order and add title layer
var parentLayer = CALayer()
var videoLayer = CALayer()
parentLayer.frame = CGRectMake(0, 0, videoSize.width, videoSize.height)
videoLayer.frame = CGRectMake(0, 0, videoSize.width, videoSize.height)
parentLayer.addSublayer(videoLayer)
if text != nil {
// Adding watermark text
var titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.redColor().CGColor
titleLayer.string = text
titleLayer.font = "Helvetica"
titleLayer.fontSize = 15
titleLayer.alignmentMode = kCAAlignmentCenter
titleLayer.bounds = CGRectMake(0, 0, videoSize.width, videoSize.height)
parentLayer.addSublayer(titleLayer)
println("\(videoSize.width)")
println("\(videoSize.height)")
} else if name != nil {
// Adding image
var watermarkImage = UIImage(named: name)
var imageLayer = CALayer()
imageLayer.contents = watermarkImage?.CGImage
var xPosition : CGFloat = 0.0
var yPosition : CGFloat = 0.0
let imageSize : CGFloat = 57.0
switch (position) {
case .TopLeft:
xPosition = 0
yPosition = 0
break
case .TopRight:
xPosition = videoSize.width - imageSize
yPosition = 0
break
case .BottomLeft:
xPosition = 0
yPosition = videoSize.height - imageSize
break
case .BottomRight, .Default:
xPosition = videoSize.width - imageSize
yPosition = videoSize.height - imageSize
break
default:
break
}
println("\(xPosition)")
println("\(yPosition)")
imageLayer.frame = CGRectMake(xPosition, yPosition, imageSize, imageSize)
imageLayer.opacity = 0.65
parentLayer.addSublayer(imageLayer)
}
var videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, 30)
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, inLayer: parentLayer)
/// instruction
var instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
var videoTrack = mixComposition.tracksWithMediaType(AVMediaTypeVideo)[0] as! AVAssetTrack
let layerInstruction = self.videoCompositionInstructionForTrack(compositionVideoTrack, asset: videoAsset)
//var layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
// 4 - Get path
let documentDirectory = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0] as! String
var dateFormatter = NSDateFormatter()
dateFormatter.dateStyle = .LongStyle
dateFormatter.timeStyle = .ShortStyle
let date = dateFormatter.stringFromDate(NSDate())
let savePath = documentDirectory.stringByAppendingPathComponent("watermarkVideo-\(date).mov")
let url = NSURL(fileURLWithPath: savePath)
// 5 - Create Exporter
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter.outputURL = url
exporter.outputFileType = AVFileTypeQuickTimeMovie
exporter.shouldOptimizeForNetworkUse = true
exporter.videoComposition = videoComp
// 6 - Perform the Export
exporter.exportAsynchronouslyWithCompletionHandler() {
dispatch_async(dispatch_get_main_queue(), { () -> Void in
if exporter.status == AVAssetExportSessionStatus.Completed {
let outputURL = exporter.outputURL
if flag {
// Save to library
let library = ALAssetsLibrary()
if library.videoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL) {
library.writeVideoAtPathToSavedPhotosAlbum(outputURL,
completionBlock: { (assetURL:NSURL!, error:NSError!) -> Void in
completion!(status: AVAssetExportSessionStatus.Completed, session: exporter, outputURL: outputURL)
})
}
} else {
// Don't save to library
completion!(status: AVAssetExportSessionStatus.Completed, session: exporter, outputURL: outputURL)
}
} else {
// Error
completion!(status: exporter.status, session: exporter, outputURL: nil)
}
})
}
})
}
private func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
var assetOrientation = UIImageOrientation.Up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .Right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .Left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .Up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .Down
}
return (assetOrientation, isPortrait)
}
private func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracksWithMediaType(AVMediaTypeVideo)[0] as! AVAssetTrack
var transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform)
var scaleToFitRatio = UIScreen.mainScreen().bounds.width / assetTrack.naturalSize.width
if assetInfo.isPortrait {
scaleToFitRatio = UIScreen.mainScreen().bounds.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransformMakeScale(scaleToFitRatio, scaleToFitRatio)
instruction.setTransform(CGAffineTransformConcat(assetTrack.preferredTransform, scaleFactor),
atTime: kCMTimeZero)
} else {
let scaleFactor = CGAffineTransformMakeScale(scaleToFitRatio, scaleToFitRatio)
var concat = CGAffineTransformConcat(CGAffineTransformConcat(assetTrack.preferredTransform, scaleFactor), CGAffineTransformMakeTranslation(0, UIScreen.mainScreen().bounds.width / 2))
if assetInfo.orientation == .Down {
let fixUpsideDown = CGAffineTransformMakeRotation(CGFloat(M_PI))
let windowBounds = UIScreen.mainScreen().bounds
let yFix = assetTrack.naturalSize.height + windowBounds.height
let centerFix = CGAffineTransformMakeTranslation(assetTrack.naturalSize.width, yFix)
concat = CGAffineTransformConcat(CGAffineTransformConcat(fixUpsideDown, centerFix), scaleFactor)
}
instruction.setTransform(concat, atTime: kCMTimeZero)
}
return instruction
}
}
Thanks to the many people who have helped me on Stack Overflow. I have taken many references from Stack Overflow; if I did not tag anyone whose answer is used here, I apologize to them. Thank you for helping me out. I hope this answer will be helpful to everyone.
The answer of #Ankit-Jain, in Swift 3:
import UIKit
import AssetsLibrary
import AVFoundation
import Photos
enum QUWatermarkPosition {
case TopLeft
case TopRight
case BottomLeft
case BottomRight
case Default
}
class QUWatermarkManager: NSObject {
func watermark(video videoAsset:AVAsset, watermarkText text : String, saveToLibrary flag : Bool, watermarkPosition position : QUWatermarkPosition, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
self.watermark(video: videoAsset, watermarkText: text, imageName: nil, saveToLibrary: flag, watermarkPosition: position) { (status, session, outputURL) -> () in
completion!(status, session, outputURL)
}
}
func watermark(video videoAsset:AVAsset, imageName name : String, saveToLibrary flag : Bool, watermarkPosition position : QUWatermarkPosition, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
self.watermark(video: videoAsset, watermarkText: nil, imageName: name, saveToLibrary: flag, watermarkPosition: position) { (status, session, outputURL) -> () in
completion!(status, session, outputURL)
}
}
private func watermark(video videoAsset:AVAsset, watermarkText text : String!, imageName name : String!, saveToLibrary flag : Bool, watermarkPosition position : QUWatermarkPosition, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
DispatchQueue.global(qos: DispatchQoS.QoSClass.default).async {
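// 1 - Create an AVMutableComposition to hold the composition tracks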
let mixComposition = AVMutableComposition()
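// 2 - Add a video track to the composition and copy the clip's video into it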
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
}
catch {
print(error.localizedDescription)
}
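// Video size and layer tree: the parent layer holds the video layer plus the watermark overlay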
let videoSize = clipVideoTrack.naturalSize
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(videoLayer)
if text != nil {
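// Adding watermark text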
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.red.cgColor
titleLayer.string = text
titleLayer.font = "Helvetica" as CFTypeRef
titleLayer.fontSize = 15
titleLayer.alignmentMode = kCAAlignmentCenter
titleLayer.bounds = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(titleLayer)
} else if name != nil {
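// Adding watermark image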
let watermarkImage = UIImage(named: name)
let imageLayer = CALayer()
imageLayer.contents = watermarkImage?.cgImage
var xPosition : CGFloat = 0.0
var yPosition : CGFloat = 0.0
let imageSize : CGFloat = 57.0
switch (position) {
case .TopLeft:
xPosition = 0
yPosition = 0
break
case .TopRight:
xPosition = videoSize.width - imageSize
yPosition = 0
break
case .BottomLeft:
xPosition = 0
yPosition = videoSize.height - imageSize
break
case .BottomRight, .Default:
xPosition = videoSize.width - imageSize
yPosition = videoSize.height - imageSize
break
}
imageLayer.frame = CGRect(x: xPosition, y: yPosition, width: imageSize, height: imageSize)
imageLayer.opacity = 0.65
parentLayer.addSublayer(imageLayer)
}
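// 3 - Create the video composition and hook the watermark layers in through the Core Animation tool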
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, 30)
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
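// 4 - Get the output path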
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: Date())
let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("watermarkVideo-\(date).mov")
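// 5 - Create the exporter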
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = url
exporter?.outputFileType = AVFileTypeQuickTimeMovie
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = videoComp
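// 6 - Perform the export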
exporter?.exportAsynchronously() {
DispatchQueue.main.async {
if exporter?.status == AVAssetExportSessionStatus.completed {
let outputURL = exporter?.outputURL
if flag {
// Save to library
// let library = ALAssetsLibrary()
if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
}) { saved, error in
if saved {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
}
}
// if library.videoAtPathIs(compatibleWithSavedPhotosAlbum: outputURL) {
// library.writeVideoAtPathToSavedPhotosAlbum(outputURL,
// completionBlock: { (assetURL:NSURL!, error:NSError!) -> Void in
//
// completion!(AVAssetExportSessionStatus.Completed, exporter, outputURL)
// })
// }
} else {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
} else {
// Error
completion!(exporter?.status, exporter, nil)
}
}
}
}
}
private func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
var assetOrientation = UIImageOrientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
private func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaTypeVideo)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform: transform)
var scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.width
if assetInfo.isPortrait {
scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor),
at: kCMTimeZero)
} else {
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(CGAffineTransform(translationX: 0, y: UIScreen.main.bounds.width / 2))
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
let windowBounds = UIScreen.main.bounds
let yFix = assetTrack.naturalSize.height + windowBounds.height
let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: yFix)
concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
}
instruction.setTransform(concat, at: kCMTimeZero)
}
return instruction
}
}
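The composition built above contains only a video track, so the exported movie still has no sound. To keep the original soundtrack, the asset's audio also has to be copied into mixComposition before exporting. A minimal sketch of that step, assuming the source asset actually contains an audio track (Swift 3, same constants as the listing above):

// Copy the source audio into the composition as well.
// Insert this right after the video track has been added to mixComposition.
let audioTracks = videoAsset.tracks(withMediaType: AVMediaTypeAudio)
if let clipAudioTrack = audioTracks.first {
let compositionAudioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipAudioTrack, at: kCMTimeZero)
} catch {
print(error.localizedDescription)
}
}

The AVMutableVideoComposition only affects the video track, so no separate audio mix is needed; the exporter writes the copied audio track as-is alongside the watermarked video.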
