AVAssetExportSession export takes a lot of time - iOS

My goal is to let the user select a video from Photos and then let them add labels over it.
Here is what I've got:
let audioAsset = AVURLAsset(url: selectedVideoURL)
let videoAsset = AVURLAsset(url: selectedVideoURL)
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let compositionAudioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
let clipAudioTrack = audioAsset.tracks(withMediaType: AVMediaTypeAudio)[0]
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, audioAsset.duration), of: clipAudioTrack, at: kCMTimeZero)
compositionVideoTrack.preferredTransform = clipVideoTrack.preferredTransform
} catch {
print(error)
}
var videoSize = clipVideoTrack.naturalSize
if isVideoPortrait(asset: videoAsset) {
videoSize = CGSize(width: videoSize.height, height: videoSize.width)
}
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(videoLayer)
// adding label
let helloLabelLayer = CATextLayer()
helloLabelLayer.string = "Hello"
helloLabelLayer.font = "Signika-Semibold" as CFTypeRef?
helloLabelLayer.fontSize = 30.0
helloLabelLayer.contentsScale = UIScreen.main.scale
helloLabelLayer.alignmentMode = kCAAlignmentNatural
helloLabelLayer.frame = CGRect(x: 0.0, y: 0.0, width: 100.0, height: 50.0)
parentLayer.addSublayer(helloLabelLayer)
// creating composition
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, 30)
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
let layerInstruction = videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
if let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset640x480) {
let filename = NSTemporaryDirectory().appending("video.mov")
if FileManager.default.fileExists(atPath: filename) {
do {
try FileManager.default.removeItem(atPath: filename)
} catch {
print(error)
}
}
let url = URL(fileURLWithPath: filename)
assetExport.outputURL = url
assetExport.outputFileType = AVFileTypeMPEG4
assetExport.videoComposition = videoComp
print(NSDate().timeIntervalSince1970)
assetExport.exportAsynchronously {
print(NSDate().timeIntervalSince1970)
let library = ALAssetsLibrary()
library.writeVideoAtPath(toSavedPhotosAlbum: url, completionBlock: {
(url, error) in
switch assetExport.status {
case AVAssetExportSessionStatus.failed:
print("failed \(assetExport.error)")
case AVAssetExportSessionStatus.cancelled:
print("cancelled \(assetExport.error)")
default:
print("complete")
print(NSDate().timeIntervalSince1970)
if FileManager.default.fileExists(atPath: filename) {
do {
try FileManager.default.removeItem(atPath: filename)
} catch {
print(error)
}
}
print("Exported")
}
})
}
}
Implementation of isVideoPortrait function:
func isVideoPortrait(asset: AVAsset) -> Bool {
var isPortrait = false
let tracks = asset.tracks(withMediaType: AVMediaTypeVideo)
if tracks.count > 0 {
let videoTrack = tracks[0]
let t = videoTrack.preferredTransform
if t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0 {
isPortrait = true
}
if t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0 {
isPortrait = true
}
if t.a == 1.0 && t.b == 0 && t.c == 0 && t.d == 1.0 {
isPortrait = false
}
if t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0 {
isPortrait = false
}
}
return isPortrait
}
And the last function for video composition layer instruction:
func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaTypeVideo)[0]
let transform = assetTrack.preferredTransform
instruction.setTransform(transform, at: kCMTimeZero)
return instruction
}
The code works well and the output video has the label, but if I select a 1-minute video, the export takes 28 seconds.
I've searched around and tried removing the layerInstruction transform, but that had no effect.
I also tried adding:
assetExport.shouldOptimizeForNetworkUse = false
That had no effect either.
I also tried setting AVAssetExportPresetPassthrough for the AVAssetExportSession; in that case the video exports in 1 second, but the labels are gone.
Any help would be appreciated, because I'm stuck. Thanks for your time.

The only way I can think of is to reduce the quality via the bit rate and resolution.
This is done through a dictionary applied to the videoSettings of the asset exporter; for this to work I had to use a framework called SDAVAssetExportSession.
Then, by changing the videoSettings, I could play with the quality to get an optimal quality/speed trade-off.
let compression = [AVVideoAverageBitRateKey : 2097152, AVVideoProfileLevelKey : AVVideoProfileLevelH264BaselineAutoLevel] as [String : Any] // 2097152 is the desired bit rate
let videoSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : maxWidth, AVVideoHeightKey : maxHeight, AVVideoCompressionPropertiesKey : compression] as [String : Any]
This was the only way I could speed things up.
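For reference, here is roughly how that could be wired up, assuming the SDAVAssetExportSession API from that GitHub project (an initializer taking the asset, videoSettings/audioSettings dictionaries, and exportAsynchronously(completionHandler:)), and reusing mixComposition, videoComp, url and the videoSettings dictionary from above:
// Third-party exporter; property names as in the SDAVAssetExportSession project.
let encoder = SDAVAssetExportSession(asset: mixComposition)
encoder.videoComposition = videoComp
encoder.outputFileType = AVFileTypeMPEG4
encoder.outputURL = url
encoder.videoSettings = videoSettings // codec, width, height and bit rate from the dictionary above
encoder.audioSettings = [AVFormatIDKey : kAudioFormatMPEG4AAC, AVNumberOfChannelsKey : 2, AVSampleRateKey : 44100, AVEncoderBitRateKey : 128000]
encoder.exportAsynchronously(completionHandler: {
    print("SDAV export finished with status \(encoder.status.rawValue)")
})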

This is not directly relevant to your question, but your code here is backwards:
assetExport.exportAsynchronously {
let library = ALAssetsLibrary()
library.writeVideoAtPath(toSavedPhotosAlbum: url, completionBlock: {
switch assetExport.status {
No no no. First you complete the asset export. Then you can copy again to somewhere else if that's what you want to do. So this needs to go like this:
assetExport.exportAsynchronously {
switch assetExport.status {
case .completed:
let library = ALAssetsLibrary()
library.writeVideoAtPath...
Other comments:
ALAssetsLibrary is dead. This is not the way to copy into the user's photo library. Use the Photos framework.
Your original code is very odd, because there are a lot of other cases you are not testing for. You are just assuming that default means .completed. That's dangerous.
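Concretely, the completion handler could be restructured roughly like this (a sketch using the question's assetExport and url; PHPhotoLibrary requires photo-library permission):
import Photos

assetExport.exportAsynchronously {
    switch assetExport.status {
    case .completed:
        // Only now is the exported file on disk; hand it to the Photos framework.
        PHPhotoLibrary.shared().performChanges({
            PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: url)
        }) { saved, error in
            print("saved to Photos: \(saved), error: \(String(describing: error))")
        }
    case .failed:
        print("failed: \(String(describing: assetExport.error))")
    case .cancelled:
        print("cancelled: \(String(describing: assetExport.error))")
    default:
        print("status: \(assetExport.status.rawValue)")
    }
}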

Related

Swift how to crop video view size by selected area? [duplicate]

I am recording a video in a square UIView, but when I export, the video is full screen 1080x1920. Now I am wondering how I can reduce the video from being full screen to a square 1:1 ratio...
Here is how I am setting my Video Camera up :
session = AVCaptureSession()
for device in AVCaptureDevice.devices() {
if let device = device as? AVCaptureDevice , device.position == AVCaptureDevicePosition.back {
self.device = device
}
}
for device in AVCaptureDevice.devices(withMediaType: AVMediaTypeAudio) {
let device = device as? AVCaptureDevice
let audioInput = try! AVCaptureDeviceInput(device: device)
session?.addInput(audioInput)
}
do {
if let session = session {
videoInput = try AVCaptureDeviceInput(device: device)
session.addInput(videoInput)
videoOutput = AVCaptureMovieFileOutput()
let totalSeconds = 60.0 //Total Seconds of capture time
let timeScale: Int32 = 30 //FPS
let maxDuration = CMTimeMakeWithSeconds(totalSeconds, timeScale)
videoOutput?.maxRecordedDuration = maxDuration
videoOutput?.minFreeDiskSpaceLimit = 1024 * 1024//SET MIN FREE SPACE IN BYTES FOR RECORDING TO CONTINUE ON A VOLUME
if session.canAddOutput(videoOutput) {
session.addOutput(videoOutput)
}
let videoLayer = AVCaptureVideoPreviewLayer(session: session)
videoLayer?.frame = self.videoPreview.bounds
videoLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
self.videoPreview.layer.addSublayer(videoLayer!)
session.startRunning()
I have seen several other posts but not found them very helpful, and most of them are in Obj-C...
If anyone can help me or point me in the right direction, it's much appreciated!
Firstly you need to make use of the AVCaptureFileOutputRecordingDelegate.
You specifically use the func capture( _ captureOutput: AVCaptureFileOutput!, didFinishRecordingToOutputFileAt outputFileURL: URL!, fromConnections connections: [Any]!, error: Error! ) method to perform the cropping process, once the video has finished recording.
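For context, wiring that delegate callback up might look roughly like this (a sketch; CameraViewController is a placeholder name for whatever class owns the capture session, and cropVideo is the function shown below):
extension CameraViewController: AVCaptureFileOutputRecordingDelegate {
    // Swift 3-era delegate signature; called when the movie file output stops recording.
    func capture(_ captureOutput: AVCaptureFileOutput!,
                 didFinishRecordingToOutputFileAt outputFileURL: URL!,
                 fromConnections connections: [Any]!,
                 error: Error!) {
        cropVideo(outputFileURL) { croppedUrl in
            // Use the square video here, e.g. play it or upload it.
            print("Cropped video written to \(croppedUrl)")
        }
    }
}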
Here is an example of a cropping function I once implemented. You need to pass in the URL of the video that was recorded and a callback that is used to return the new URL of the cropped video once the cropping process is finished.
func cropVideo( _ outputFileUrl: URL, callback: @escaping ( _ newUrl: URL ) -> () )
{
// Get input clip
let videoAsset: AVAsset = AVAsset( url: outputFileUrl )
let clipVideoTrack = videoAsset.tracks( withMediaType: AVMediaTypeVideo ).first! as AVAssetTrack
// Make video to square
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = CGSize( width: clipVideoTrack.naturalSize.height, height: clipVideoTrack.naturalSize.height )
videoComposition.frameDuration = CMTimeMake( 1, self.framesPerSecond )
// Rotate to portrait
let transformer = AVMutableVideoCompositionLayerInstruction( assetTrack: clipVideoTrack )
let transform1 = CGAffineTransform( translationX: clipVideoTrack.naturalSize.height, y: -( clipVideoTrack.naturalSize.width - clipVideoTrack.naturalSize.height ) / 2 )
let transform2 = transform1.rotated(by: CGFloat( M_PI_2 ) )
transformer.setTransform( transform2, at: kCMTimeZero)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeMakeWithSeconds( self.intendedVideoLength, self.framesPerSecond ) )
instruction.layerInstructions = [transformer]
videoComposition.instructions = [instruction]
// Export
let croppedOutputFileUrl = URL( fileURLWithPath: FileManager.getOutputPath( String.random() ) )
let exporter = AVAssetExportSession(asset: videoAsset, presetName: AVAssetExportPresetHighestQuality)!
exporter.videoComposition = videoComposition
exporter.outputURL = croppedOutputFileUrl
exporter.outputFileType = AVFileTypeQuickTimeMovie
exporter.exportAsynchronously( completionHandler: { () -> Void in
DispatchQueue.main.async(execute: {
callback( croppedOutputFileUrl )
})
})
}
Also, here is the implementation of my getOutputPath method:
func getOutputPath( _ name: String ) -> String
{
let documentPath = NSSearchPathForDirectoriesInDomains( .documentDirectory, .userDomainMask, true )[ 0 ] as NSString
let outputPath = "\(documentPath)/\(name).mov"
return outputPath
}
Hope this helps.
func cropFrame(videoAsset:AVAsset, animation:Bool) -> Void {
var insertTime = kCMTimeZero
var arrayLayerInstructions:[AVMutableVideoCompositionLayerInstruction] = []
var outputSize = CGSize.init(width: 0, height: 0)
// Determine video output size
let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
if videoSize.height > outputSize.height {
outputSize = videoSize
}
let defaultSize = CGSize(width: 1920, height: 1080) // Default video size
if outputSize.width == 0 || outputSize.height == 0 {
outputSize = defaultSize
}
// Silence sound (in case of video has no sound track)
let silenceURL = Bundle.main.url(forResource: "silence", withExtension: "mp3")
let silenceAsset = AVAsset(url:silenceURL!)
let silenceSoundTrack = silenceAsset.tracks(withMediaType: AVMediaType.audio).first
// Init composition
let mixComposition = AVMutableComposition.init()
// Get audio track
var audioTrack:AVAssetTrack?
if videoAsset.tracks(withMediaType: AVMediaType.audio).count > 0 {
audioTrack = videoAsset.tracks(withMediaType: AVMediaType.audio).first
}
else {
audioTrack = silenceSoundTrack
}
// Init video & audio composition track
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
let startTime = kCMTimeZero
let duration = videoAsset.duration
// Add video track to video composition at specific time
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, duration),
of: videoTrack,
at: insertTime)
// Add audio track to audio composition at specific time
if let audioTrack = audioTrack {
try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, duration),
of: audioTrack,
at: insertTime)
}
// Add instruction for video track
let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!,
asset: videoAsset,
standardSize: outputSize,
atTime: insertTime)
// Hide video track before changing to new track
let endTime = CMTimeAdd(insertTime, duration)
//let finalTimer = CMTimeAdd(CMTime(seconds: 5, preferredTimescale: videoAsset.duration.timescale), CMTime(seconds: 5, preferredTimescale: videoAsset.duration.timescale))
//Kalpesh crop video frames
if animation {
let timeScale = videoAsset.duration.timescale
let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
//**********======== CROP YOUR VIDEO FRAME HERE MANUALLY ========**********
layerInstruction.setCropRectangle(CGRect(x: 0, y: 0, width: videoTrack.naturalSize.width, height: 300.0), at: startTime)
} else {
layerInstruction.setOpacity(0, at: endTime)
}
arrayLayerInstructions.append(layerInstruction)
// Increase the insert time
insertTime = CMTimeAdd(insertTime, duration)
}
catch {
print("Load track error")
}
// Main video composition instruction
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, insertTime)
mainInstruction.layerInstructions = arrayLayerInstructions
// Main video composition
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(1, 30)
mainComposition.renderSize = outputSize
// Export to file
let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
let exportURL = URL.init(fileURLWithPath: path)
// Init exporter
let exporter = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = exportURL
exporter?.outputFileType = AVFileType.mp4
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = mainComposition
// Do export
exporter?.exportAsynchronously(completionHandler: {
})
}
func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset, standardSize:CGSize, atTime: CMTime) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform: transform)
var aspectFillRatio:CGFloat = 1
if assetTrack.naturalSize.height < assetTrack.naturalSize.width {
aspectFillRatio = standardSize.height / assetTrack.naturalSize.height
}
else {
aspectFillRatio = standardSize.width / assetTrack.naturalSize.width
}
if assetInfo.isPortrait {
let scaleFactor = CGAffineTransform(scaleX: aspectFillRatio, y: aspectFillRatio)
let posX = standardSize.width/2 - (assetTrack.naturalSize.height * aspectFillRatio)/2
let posY = standardSize.height/2 - (assetTrack.naturalSize.width * aspectFillRatio)/2
let moveFactor = CGAffineTransform(translationX: posX, y: posY)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(moveFactor), at: atTime)
} else {
let scaleFactor = CGAffineTransform(scaleX: aspectFillRatio, y: aspectFillRatio)
let posX = standardSize.width/2 - (assetTrack.naturalSize.width * aspectFillRatio)/2
let posY = standardSize.height/2 - (assetTrack.naturalSize.height * aspectFillRatio)/2
let moveFactor = CGAffineTransform(translationX: posX, y: posY)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(moveFactor)
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
concat = fixUpsideDown.concatenating(scaleFactor).concatenating(moveFactor)
}
instruction.setTransform(concat, at: atTime)
}
return instruction
}
func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
var assetOrientation = UIImageOrientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
Call this way:
let asset = AVAsset(url: VideoURL)
self.cropFrame(videoAsset: asset, animation: true)

Merging video with original orientation

I have a link I can DM for a minimum working example!
Recording Videos
For recording, on the AVCaptureConnection of the AVCaptureSession I set isVideoMirrored to true when using the front camera and false when using the back camera, all in portrait orientation.
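Roughly, that flag gets set on the movie output's video connection like this (a sketch; videoOutput and usingFrontCamera are placeholder names):
if let connection = videoOutput.connection(with: .video) {
    if connection.isVideoOrientationSupported {
        connection.videoOrientation = .portrait
    }
    if connection.isVideoMirroringSupported {
        connection.automaticallyAdjustsVideoMirroring = false
        connection.isVideoMirrored = usingFrontCamera // true for the front camera, false for the back
    }
}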
Saving Videos
When I save videos, I perform an AVAssetExportSession. If I used the front camera, I want to maintain the isVideoMirrored = true, so I create an AVMutableComposition to set the AVAsset video track's preferredTransform to CGAffineTransform(scaleX: -1.0, y: 1.0).rotated(by: CGFloat(Double.pi/2)). For the back camera, I export the AVAsset as outputted.
Part of my saving code:
if didCaptureWithFrontCamera {
let composition = AVMutableComposition()
let assetVideoTrack = asset.tracks(withMediaType: .video).last!
let assetAudioTrack = asset.tracks(withMediaType: .audio).last!
let compositionVideoTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
let compositionAudioTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
try? compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: asset.duration), of: assetVideoTrack, at: CMTime.zero)
try? compositionAudioTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: asset.duration), of: assetAudioTrack, at: CMTime.zero)
compositionVideoTrack?.preferredTransform = CGAffineTransform(scaleX: -1.0, y: 1.0).rotated(by: CGFloat(Double.pi/2))
guard let exportSession = AVAssetExportSession(asset: composition, presetName: AVAssetExportPreset1280x720) else {
handler(nil)
return
}
exportSession.outputURL = outputURL
exportSession.outputFileType = .mp4
exportSession.shouldOptimizeForNetworkUse = true
exportSession.exportAsynchronously { handler(exportSession) }
} else {
guard let exportSession = AVAssetExportSession(asset: asset, presetName: AVAssetExportPreset1280x720) else {
handler(nil)
return
}
exportSession.outputURL = outputURL
exportSession.outputFileType = .mp4
exportSession.shouldOptimizeForNetworkUse = true
exportSession.exportAsynchronously { handler(exportSession) }
}
Merging Videos
Later, to view the saved videos, I want to merge them together as a single video and keep each one's original orientation via AVMutableComposition.
What has partially worked is setting the video track of the AVMutableComposition to the preferredTransform property of an individual AVAsset's video track. The only problem is that a single orientation is then applied to all the videos (i.e. mirroring isn't applied to a back-camera recording, but that same non-mirrored transform also ends up applied to the front-camera video).
From solutions I've come across it appears I need to apply AVMutableVideoCompositionInstruction, but in trying to do so, the AVAssetExportSession doesn't seem to factor in the videoComposition instructions at all.
Any guidance would be extremely appreciated as I haven't been able to solve it for the life of me...
My attempted merge code:
func merge(videos: [AVURLAsset], for date: Date, completion: @escaping (_ url: URL, _ asset: AVAssetExportSession)->()) {
let videoComposition = AVMutableComposition()
var lastTime: CMTime = .zero
var count = 0
var instructions = [AVMutableVideoCompositionInstruction]()
let renderSize = CGSize(width: 720, height: 1280)
guard let videoCompositionTrack = videoComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
guard let audioCompositionTrack = videoComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
for video in videos {
if let videoTrack = video.tracks(withMediaType: .video)[safe: 0] {
// this is the only thing that seems to work, but not in the way I'd hoped, where each video keeps its original orientation
//videoCompositionTrack.preferredTransform = videoTrack.preferredTransform
if let audioTrack = video.tracks(withMediaType: .audio)[safe: 0] {
do {
try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration), of: videoTrack, at: lastTime)
try audioCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration), of: audioTrack, at: lastTime)
let layerInstruction = videoCompositionInstruction(videoTrack, asset: video, count: count)
let videoCompositionInstruction = AVMutableVideoCompositionInstruction()
videoCompositionInstruction.timeRange = CMTimeRangeMake(start: lastTime, duration: video.duration)
videoCompositionInstruction.layerInstructions = [layerInstruction]
instructions.append(videoCompositionInstruction)
} catch {
return
}
lastTime = CMTimeAdd(lastTime, video.duration)
count += 1
} else {
do {
try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration), of: videoTrack, at: lastTime)
let layerInstruction = videoCompositionInstruction(videoTrack, asset: video, count: count)
let videoCompositionInstruction = AVMutableVideoCompositionInstruction()
videoCompositionInstruction.timeRange = CMTimeRangeMake(start: lastTime, duration: video.duration)
videoCompositionInstruction.layerInstructions = [layerInstruction]
instructions.append(videoCompositionInstruction)
} catch {
return
}
lastTime = CMTimeAdd(lastTime, video.duration)
count += 1
}
}
}
let mutableVideoComposition = AVMutableVideoComposition()
mutableVideoComposition.instructions = instructions
mutableVideoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mutableVideoComposition.renderSize = renderSize
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: date)
let mergedURL = NSURL.fileURL(withPath: NSTemporaryDirectory() + "merged-\(date)" + ".mp4")
guard let exporter = AVAssetExportSession(asset: videoComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
exporter.outputURL = mergedURL
exporter.outputFileType = .mp4
exporter.videoComposition = mutableVideoComposition
exporter.shouldOptimizeForNetworkUse = true
completion(mergedURL, exporter)
}
func videoCompositionInstruction(_ firstTrack: AVAssetTrack, asset: AVAsset, count: Int) -> AVMutableVideoCompositionLayerInstruction {
let renderSize = CGSize(width: 720, height: 1280)
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstTrack)
let assetTrack = asset.tracks(withMediaType: .video)[0]
let t = assetTrack.fixedPreferredTransform // new transform fix
let assetInfo = orientationFromTransform(t)
if assetInfo.isPortrait {
let scaleToFitRatio = renderSize.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var finalTransform = assetTrack.fixedPreferredTransform.concatenating(scaleFactor)
if assetInfo.orientation == .rightMirrored || assetInfo.orientation == .leftMirrored {
finalTransform = finalTransform.translatedBy(x: -t.ty, y: 0)
}
instruction.setTransform(t, at: CMTime.zero)
} else {
let renderRect = CGRect(x: 0, y: 0, width: renderSize.width, height: renderSize.height)
let videoRect = CGRect(origin: .zero, size: assetTrack.naturalSize).applying(assetTrack.fixedPreferredTransform)
let scale = renderRect.width / videoRect.width
let transform = CGAffineTransform(scaleX: renderRect.width / videoRect.width, y: (videoRect.height * scale) / assetTrack.naturalSize.height)
let translate = CGAffineTransform(translationX: .zero, y: ((renderSize.height - (videoRect.height * scale))) / 2)
instruction.setTransform(assetTrack.fixedPreferredTransform.concatenating(transform).concatenating(translate), at: .zero)
}
if count == 0 {
instruction.setOpacity(0.0, at: asset.duration)
}
return instruction
}
func orientationFromTransform(_ transform: CGAffineTransform) -> (orientation: UIImage.Orientation, isPortrait: Bool) {
var assetOrientation = UIImage.Orientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == 1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .rightMirrored
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .leftMirrored
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
extension AVAssetTrack {
var fixedPreferredTransform: CGAffineTransform {
var t = preferredTransform
switch(t.a, t.b, t.c, t.d) {
case (1, 0, 0, 1):
t.tx = 0
t.ty = 0
case (1, 0, 0, -1):
t.tx = 0
t.ty = naturalSize.height
case (-1, 0, 0, 1):
t.tx = naturalSize.width
t.ty = 0
case (-1, 0, 0, -1):
t.tx = naturalSize.width
t.ty = naturalSize.height
case (0, -1, 1, 0):
t.tx = 0
t.ty = naturalSize.width
case (0, 1, -1, 0):
t.tx = naturalSize.height
t.ty = 0
case (0, 1, 1, 0):
t.tx = 0
t.ty = 0
case (0, -1, -1, 0):
t.tx = naturalSize.height
t.ty = naturalSize.width
default:
break
}
return t
}
}
Assuming your transformations are correct, I updated your merge function.
The main change is using a single AVMutableVideoCompositionInstruction with multiple AVMutableVideoCompositionLayerInstructions, and passing the correct CMTime value for each layer instruction to be executed at.
func merge(videos: [AVURLAsset],
for date: Date,
completion: @escaping (_ url: URL, _ asset: AVAssetExportSession)->()) {
let videoComposition = AVMutableComposition()
guard let videoCompositionTrack = videoComposition.addMutableTrack(withMediaType: .video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
let audioCompositionTrack = videoComposition.addMutableTrack(withMediaType: .audio,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
else { return }
var lastTime: CMTime = .zero
var layerInstructions = [AVMutableVideoCompositionLayerInstruction]()
for video in videos {
guard let videoTrack = video.tracks(withMediaType: .video)[safe: 0] else { return }
// add audio track if available
if let audioTrack = video.tracks(withMediaType: .audio)[safe: 0] {
do {
try audioCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration),
of: audioTrack,
at: lastTime)
} catch {
return
}
}
// add video track
do {
try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration),
of: videoTrack,
at: lastTime)
let layerInstruction = makeVideoCompositionInstruction(videoTrack,
asset: video,
atTime: lastTime)
layerInstructions.append(layerInstruction)
} catch {
return
}
lastTime = CMTimeAdd(lastTime, video.duration)
} // end for..in videos
let renderSize = CGSize(width: 720, height: 1280)
let videoInstruction = AVMutableVideoCompositionInstruction()
videoInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: lastTime)
videoInstruction.layerInstructions = layerInstructions
let mutableVideoComposition = AVMutableVideoComposition()
mutableVideoComposition.instructions = [videoInstruction]
mutableVideoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mutableVideoComposition.renderSize = renderSize
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: date)
let mergedURL = NSURL.fileURL(withPath: NSTemporaryDirectory() + "merged-\(date)" + ".mp4")
guard let exporter = AVAssetExportSession(asset: videoComposition,
presetName: AVAssetExportPresetHighestQuality) else { return }
exporter.outputURL = mergedURL
exporter.outputFileType = .mp4
exporter.videoComposition = mutableVideoComposition
exporter.shouldOptimizeForNetworkUse = true
completion(mergedURL, exporter)
}
func makeVideoCompositionInstruction(_ videoTrack: AVAssetTrack,
asset: AVAsset,
atTime: CMTime) -> AVMutableVideoCompositionLayerInstruction {
let renderSize = CGSize(width: 720, height: 1280)
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let assetTrack = asset.tracks(withMediaType: .video)[0]
let t = assetTrack.fixedPreferredTransform // new transform fix
let assetInfo = orientationFromTransform(t)
if assetInfo.isPortrait {
let scaleToFitRatio = renderSize.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var finalTransform = assetTrack.fixedPreferredTransform.concatenating(scaleFactor)
if assetInfo.orientation == .rightMirrored || assetInfo.orientation == .leftMirrored {
finalTransform = finalTransform.translatedBy(x: -t.ty, y: 0)
}
instruction.setTransform(t, at: atTime)
} else {
let renderRect = CGRect(x: 0, y: 0, width: renderSize.width, height: renderSize.height)
let videoRect = CGRect(origin: .zero, size: assetTrack.naturalSize).applying(assetTrack.fixedPreferredTransform)
let scale = renderRect.width / videoRect.width
let transform = CGAffineTransform(scaleX: renderRect.width / videoRect.width,
y: (videoRect.height * scale) / assetTrack.naturalSize.height)
let translate = CGAffineTransform(translationX: .zero,
y: ((renderSize.height - (videoRect.height * scale))) / 2)
instruction.setTransform(assetTrack.fixedPreferredTransform.concatenating(transform).concatenating(translate),
at: atTime)
}
// if atTime = 0, we can assume this is the first track being added
if atTime == .zero {
instruction.setOpacity(0.0,
at: asset.duration)
}
return instruction
}

Overlay two videos swift

I wanted to dig this question back up. I am having an issue overlaying two videos. I believe it has something to do with the transparency of the first AVMutableVideoCompositionLayerInstruction, but I have played around with it extensively with no luck. Any suggestions would be greatly appreciated!
func overlay(video firstAsset: AVURLAsset, withSecondVideo secondAsset: AVURLAsset) {
let mixComposition = AVMutableComposition()
let firstTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
let secondTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
guard let firstMediaTrack = firstAsset.tracks(withMediaType: AVMediaType.video).first else { return }
guard let secondMediaTrack = secondAsset.tracks(withMediaType: AVMediaType.video).first else { return }
do {
try firstTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, firstAsset.duration), of: firstMediaTrack, at: kCMTimeZero)
try secondTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, secondAsset.duration), of: secondMediaTrack, at: kCMTimeZero)
} catch (let error) {
print(error)
}
self.width = max(firstMediaTrack.naturalSize.width, secondMediaTrack.naturalSize.width)
self.height = max(firstMediaTrack.naturalSize.height, secondMediaTrack.naturalSize.height)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = CGSize(width: width!, height: height!)
videoComposition.frameDuration = firstMediaTrack.minFrameDuration
let firstLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstMediaTrack)
let scale = CGAffineTransform(scaleX: 0.3, y: 0.3)
let move = CGAffineTransform(translationX: self.width! - ((self.width! * 0.3) + 10), y: 10)
firstLayerInstruction.setTransform(scale.concatenating(move), at: kCMTimeZero)
firstLayerInstruction.setOpacity(1.0, at: kCMTimeZero)
let secondlayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: secondMediaTrack)
secondlayerInstruction.setTransform((secondTrack?.preferredTransform)!, at: kCMTimeZero)
secondlayerInstruction.setOpacity(1.0, at: kCMTimeZero)
let combined = AVMutableVideoCompositionInstruction()
combined.timeRange = CMTimeRangeMake(kCMTimeZero, max(firstAsset.duration, secondAsset.duration))
combined.backgroundColor = UIColor.clear.cgColor
combined.layerInstructions = [firstLayerInstruction, secondlayerInstruction]
videoComposition.instructions = [combined]
let outputUrl = self.getPathForTempFileNamed(filename: "output.mov")
self.exportCompositedVideo(compiledVideo: mixComposition, toURL: outputUrl, withVideoComposition: videoComposition)
self.removeTempFileAtPath(path: outputUrl.absoluteString)
}
The expected result is one video with two videos overlaid inside of it: the first layer being a full-screen video and the second layer being a smaller video positioned in the upper right-hand corner. Oddly enough, when I use one AVMutableVideoCompositionInstruction and put both AVMutableVideoCompositionLayerInstructions inside its layerInstructions, it works! But the video used for firstMediaTrack is used for both layers? I played around with that for a while and then tried to implement the approach detailed here, which has individual instructions for both layers, but that approach results in just the first layer showing a full-screen video, with the second layer completely invisible.
Here is the code that is working for me; I based it on this tutorial. I found the key was setting the backgrounds as clear (found on this thread). It also has a scale in there, as I was experimenting with making one video smaller.
import AVFoundation
import AVKit
import Photos
var myurl: URL?
func newoverlay(video firstAsset: AVURLAsset, withSecondVideo secondAsset: AVURLAsset) {
// 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
let mixComposition = AVMutableComposition()
// 2 - Create two video tracks
guard let firstTrack = mixComposition.addMutableTrack(withMediaType: .video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
do {
try firstTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: firstAsset.duration),
of: firstAsset.tracks(withMediaType: .video)[0],
at: CMTime.zero)
} catch {
print("Failed to load first track")
return
}
guard let secondTrack = mixComposition.addMutableTrack(withMediaType: .video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
do {
try secondTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: secondAsset.duration),
of: secondAsset.tracks(withMediaType: .video)[0],
at: CMTime.zero)
} catch {
print("Failed to load second track")
return
}
// 2.1
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: CMTimeAdd(firstAsset.duration, secondAsset.duration))
// 2.2
let firstInstruction = ViewController.videoCompositionInstruction(firstTrack, asset: firstAsset)
let scale = CGAffineTransform(scaleX: 0.3, y: 0.3)
let move = CGAffineTransform(translationX: 10, y: 10)
firstInstruction.setTransform(scale.concatenating(move), at: CMTime.zero)
let secondInstruction = ViewController.videoCompositionInstruction(secondTrack, asset: secondAsset)
// 2.3
mainInstruction.layerInstructions = [firstInstruction, secondInstruction]
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
let width = max(firstTrack.naturalSize.width, secondTrack.naturalSize.width)
let height = max(firstTrack.naturalSize.height, secondTrack.naturalSize.height)
mainComposition.renderSize = CGSize(width: width, height: height)
mainInstruction.backgroundColor = UIColor.clear.cgColor
// 4 - Get path
guard let documentDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else { return }
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: Date())
let url = documentDirectory.appendingPathComponent("mergeVideo-\(date).mov")
// Check exists and remove old file
FileManager.default.removeItemIfExisted(url as URL)
// 5 - Create Exporter
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
exporter.outputURL = url
exporter.outputFileType = AVFileType.mov
exporter.shouldOptimizeForNetworkUse = true
exporter.videoComposition = mainComposition
// 6 - Perform the Export
exporter.exportAsynchronously() {
DispatchQueue.main.async {
print("Movie complete")
self.myurl = url as URL
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: url as URL)
}) { saved, error in
if saved {
print("Saved")
}
}
self.playVideo()
}
}
}
func playVideo() {
let player = AVPlayer(url: myurl!)
let playerLayer = AVPlayerLayer(player: player)
playerLayer.frame = self.view.bounds
self.view.layer.addSublayer(playerLayer)
player.play()
print("playing...")
}
static func videoCompositionInstruction(_ track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform)
var scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.width
if assetInfo.isPortrait {
scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor), at: CMTime.zero)
} else {
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor)
.concatenating(CGAffineTransform(translationX: 0, y: UIScreen.main.bounds.width / 2))
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
let windowBounds = UIScreen.main.bounds
let yFix = assetTrack.naturalSize.height + windowBounds.height
let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: yFix)
concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
}
instruction.setTransform(concat, at: CMTime.zero)
}
return instruction
}
static func orientationFromTransform(_ transform: CGAffineTransform) -> (orientation: UIImage.Orientation, isPortrait: Bool) {
var assetOrientation = UIImage.Orientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
}
extension FileManager {
func removeItemIfExisted(_ url:URL) -> Void {
if FileManager.default.fileExists(atPath: url.path) {
do {
try FileManager.default.removeItem(atPath: url.path)
}
catch {
print("Failed to delete file")
}
}
}
}
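Calling it is then just a matter of handing in the two assets (a usage sketch; overlayURL and backgroundURL are placeholder names):
let pipAsset = AVURLAsset(url: overlayURL)          // first asset: scaled to 0.3 and offset by (10, 10)
let backgroundAsset = AVURLAsset(url: backgroundURL) // second asset: shown at full size
newoverlay(video: pipAsset, withSecondVideo: backgroundAsset)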

Swift 3 : How to export video with text using AVVideoComposition

I am trying to use AVVideoComposition to add some text on top of a video and save the video.
This is the code I use:
I. Create an AVMutableComposition and AVVideoComposition
var mutableComp = AVMutableComposition()
var mutableVidComp = AVMutableVideoComposition()
var compositionSize : CGSize?
func configureAsset(){
let options = [AVURLAssetPreferPreciseDurationAndTimingKey : "true"]
let videoAsset = AVURLAsset(url: Bundle.main.url(forResource: "Car", withExtension: "mp4")! , options : options)
let videoAssetSourceTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo).first! as AVAssetTrack
compositionSize = videoAssetSourceTrack.naturalSize
let mutableVidTrack = mutableComp.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
let trackRange = CMTimeRangeMake(kCMTimeZero, videoAsset.duration)
do {
try mutableVidTrack.insertTimeRange( trackRange, of: videoAssetSourceTrack, at: kCMTimeZero)
mutableVidTrack.preferredTransform = videoAssetSourceTrack.preferredTransform
}catch { print(error) }
snapshot = mutableComp
mutableVidComp = AVMutableVideoComposition(propertiesOf: videoAsset)
}
II. Set up the layers
func applyVideoEffectsToComposition() {
// 1 - Set up the text layer
let subTitle1Text = CATextLayer()
subTitle1Text.font = "Helvetica-Bold" as CFTypeRef
subTitle1Text.frame = CGRect(x: self.view.frame.midX - 60 , y: self.view.frame.midY - 50, width: 120, height: 100)
subTitle1Text.string = "Bench"
subTitle1Text.foregroundColor = UIColor.black.cgColor
subTitle1Text.alignmentMode = kCAAlignmentCenter
// 2 - The usual overlay
let overlayLayer = CALayer()
overlayLayer.addSublayer(subTitle1Text)
overlayLayer.frame = CGRect(x: 0, y: 0, width: compositionSize!.width, height: compositionSize!.height)
overlayLayer.masksToBounds = true
// 3 - set up the parent layer
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: compositionSize!.width, height: compositionSize!.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: compositionSize!.width, height: compositionSize!.height)
parentLayer.addSublayer(videoLayer)
parentLayer.addSublayer(overlayLayer)
mutableVidComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
}
III. Save the video with AVMutableVideoComposition
func saveAsset (){
func deleteFile(_ filePath:URL) {
guard FileManager.default.fileExists(atPath: filePath.path) else { return }
do {
try FileManager.default.removeItem(atPath: filePath.path) }
catch {fatalError("Unable to delete file: \(error) : \(#function).")} }
let documentsDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0] as URL
let filePath = documentsDirectory.appendingPathComponent("rendered-audio.mp4")
deleteFile(filePath)
if let exportSession = AVAssetExportSession(asset: mutableComp , presetName: AVAssetExportPresetHighestQuality){
exportSession.videoComposition = mutableVidComp
// exportSession.canPerformMultiplePassesOverSourceMediaData = true
exportSession.outputURL = filePath
exportSession.shouldOptimizeForNetworkUse = true
exportSession.timeRange = CMTimeRangeMake(kCMTimeZero, mutableComp.duration)
exportSession.outputFileType = AVFileTypeQuickTimeMovie
exportSession.exportAsynchronously {
print("finished: \(filePath) : \(exportSession.status.rawValue) ")
if exportSession.status == .failed {
print("Export failed -> Reason: \(exportSession.error!.localizedDescription))")
print(exportSession.error!)
}
}
}
}
Then I run all three methods in the viewDidLoad method for a quick test. The problem is that when I run the app, the result of the export is the original video without the title on it.
What am I missing here?
UPDATE
I noticed that adding a subTitle1Text.backgroundColor property in part II of the code makes a colored CGRect corresponding to subTitle1Text.frame appear on top of the video when exported.
When this code is modified for playback using AVSynchronizedLayer, the desired layer can be seen on top of the video with the text on it.
So perhaps this is a bug in AVFoundation itself.
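For reference, the playback path mentioned above looks roughly like this (a sketch reusing mutableComp, compositionSize and the overlayLayer from part II; note that a video composition carrying the Core Animation animationTool is export-only and should not be handed to an AVPlayerItem):
let playerItem = AVPlayerItem(asset: mutableComp)
let player = AVPlayer(playerItem: playerItem)

// The video itself is shown by an AVPlayerLayer...
let playerLayer = AVPlayerLayer(player: player)
playerLayer.frame = view.bounds
view.layer.addSublayer(playerLayer)

// ...and the overlay rides on an AVSynchronizedLayer tied to the same player item.
let syncLayer = AVSynchronizedLayer(playerItem: playerItem)
syncLayer.frame = CGRect(x: 0, y: 0, width: compositionSize!.width, height: compositionSize!.height)
syncLayer.addSublayer(overlayLayer)
view.layer.addSublayer(syncLayer)
player.play()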
I suppose I am only left with the option of using a customVideoCompositorClass. The problem with that is that it takes a lot of time to render the video . Here is an example that uses AVVideoCompositing
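The linked example isn't reproduced here, but for orientation the basic shape of an AVVideoCompositing implementation is roughly the following (my sketch of a pass-through compositor; PassthroughCompositor is a placeholder name, and the actual text drawing would happen in startRequest):
import AVFoundation
import CoreVideo

class PassthroughCompositor: NSObject, AVVideoCompositing {
    var sourcePixelBufferAttributes: [String : Any]? =
        [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
    var requiredPixelBufferAttributesForRenderContext: [String : Any] =
        [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]

    func renderContextChanged(_ newRenderContext: AVVideoCompositionRenderContext) {
        // React to render size / transform changes here if needed.
    }

    func startRequest(_ request: AVAsynchronousVideoCompositionRequest) {
        // Grab the first source frame and hand it back unmodified; text or other
        // drawing would be composited onto it here (e.g. with Core Graphics / Core Image).
        guard let trackID = request.sourceTrackIDs.first?.int32Value,
              let frame = request.sourceFrame(byTrackID: trackID) else {
            request.finish(with: NSError(domain: "Compositor", code: -1, userInfo: nil))
            return
        }
        request.finish(withComposedVideoFrame: frame)
    }
}
You would then set mutableVidComp.customVideoCompositorClass = PassthroughCompositor.self and do the drawing inside startRequest.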
Here is the full working code which I used in my project. It shows a CATextLayer at the bottom (0,0), and when the export session finishes it swaps the newly exported file into the player item. I used a model from Objective-C code to get the orientation. Please test on a device; AVPlayer will not show the text layer properly in the simulator.
let composition = AVMutableComposition.init()
let videoComposition = AVMutableVideoComposition()
videoComposition.frameDuration = CMTimeMake(1, 30)
videoComposition.renderScale = 1.0
let compositionCommentaryTrack: AVMutableCompositionTrack? = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
let compositionVideoTrack: AVMutableCompositionTrack? = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
let clipVideoTrack:AVAssetTrack = self.currentAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
let audioTrack: AVAssetTrack? = self.currentAsset.tracks(withMediaType: AVMediaTypeAudio)[0]
try? compositionCommentaryTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, self.currentAsset.duration), of: audioTrack!, at: kCMTimeZero)
try? compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, self.currentAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
let orientation = VideoModel.videoOrientation(self.currentAsset)
var isPortrait = false
switch orientation {
case .landscapeRight:
isPortrait = false
case .landscapeLeft:
isPortrait = false
case .portrait:
isPortrait = true
case .portraitUpsideDown:
isPortrait = true
}
var naturalSize = clipVideoTrack.naturalSize
if isPortrait
{
naturalSize = CGSize.init(width: naturalSize.height, height: naturalSize.width)
}
videoComposition.renderSize = naturalSize
let scale = CGFloat(1.0)
var transform = CGAffineTransform.init(scaleX: CGFloat(scale), y: CGFloat(scale))
switch orientation {
case .landscapeRight: break
// isPortrait = false
case .landscapeLeft:
transform = transform.translatedBy(x: naturalSize.width, y: naturalSize.height)
transform = transform.rotated(by: .pi)
case .portrait:
transform = transform.translatedBy(x: naturalSize.width, y: 0)
transform = transform.rotated(by: CGFloat(M_PI_2))
case .portraitUpsideDown:break
}
let frontLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionVideoTrack!)
frontLayerInstruction.setTransform(transform, at: kCMTimeZero)
let MainInstruction = AVMutableVideoCompositionInstruction()
MainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
MainInstruction.layerInstructions = [frontLayerInstruction]
videoComposition.instructions = [MainInstruction]
let parentLayer = CALayer.init()
parentLayer.frame = CGRect.init(x: 0, y: 0, width: naturalSize.width, height: naturalSize.height)
let videoLayer = CALayer.init()
videoLayer.frame = parentLayer.frame
let layer = CATextLayer()
layer.string = "HELLO ALL"
layer.foregroundColor = UIColor.white.cgColor
layer.backgroundColor = UIColor.orange.cgColor
layer.fontSize = 32
layer.frame = CGRect.init(x: 0, y: 0, width: 300, height: 100)
var rct = layer.frame;
let widthScale = self.playerView.frame.size.width/naturalSize.width
rct.size.width /= widthScale
rct.size.height /= widthScale
rct.origin.x /= widthScale
rct.origin.y /= widthScale
parentLayer.addSublayer(videoLayer)
parentLayer.addSublayer(layer)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool.init(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let videoPath = documentsPath+"/cropEditVideo.mov"
let fileManager = FileManager.default
if fileManager.fileExists(atPath: videoPath)
{
try! fileManager.removeItem(atPath: videoPath)
}
print("video path \(videoPath)")
var exportSession = AVAssetExportSession.init(asset: composition, presetName: AVAssetExportPresetHighestQuality)
exportSession?.videoComposition = videoComposition
exportSession?.outputFileType = AVFileTypeQuickTimeMovie
exportSession?.outputURL = URL.init(fileURLWithPath: videoPath)
exportSession?.videoComposition = videoComposition
var exportProgress: Float = 0
let queue = DispatchQueue(label: "Export Progress Queue")
queue.async(execute: {() -> Void in
while exportSession != nil {
// int prevProgress = exportProgress;
exportProgress = (exportSession?.progress)!
print("current progress == \(exportProgress)")
sleep(1)
}
})
exportSession?.exportAsynchronously(completionHandler: {
if exportSession?.status == AVAssetExportSessionStatus.failed
{
print("Failed \(exportSession?.error)")
}else if exportSession?.status == AVAssetExportSessionStatus.completed
{
exportSession = nil
let asset = AVAsset.init(url: URL.init(fileURLWithPath: videoPath))
DispatchQueue.main.async {
let item = AVPlayerItem.init(asset: asset)
self.player.replaceCurrentItem(with: item)
let assetDuration = CMTimeGetSeconds(composition.duration)
self.progressSlider.maximumValue = Float(assetDuration)
self.syncLayer.removeFromSuperlayer()
self.lblIntro.isHidden = true
self.player.play()
// let url = URL.init(fileURLWithPath: videoPath)
// let activityVC = UIActivityViewController(activityItems: [url], applicationActivities: [])
// self.present(activityVC, animated: true, completion: nil)
}
}
})
Below is the code of my VideoModel class (Objective-C):
-(AVCaptureVideoOrientation)videoOrientation:(AVAsset *)asset
{
AVCaptureVideoOrientation result = 0;
NSArray *tracks = [asset tracksWithMediaType:AVMediaTypeVideo];
if([tracks count] > 0) {
AVAssetTrack *videoTrack = [tracks objectAtIndex:0];
CGAffineTransform t = videoTrack.preferredTransform;
// Portrait
if(t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0)
{
result = AVCaptureVideoOrientationPortrait;
}
// PortraitUpsideDown
if(t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0) {
result = AVCaptureVideoOrientationPortraitUpsideDown;
}
// LandscapeRight
if(t.a == 1.0 && t.b == 0 && t.c == 0 && t.d == 1.0)
{
result = AVCaptureVideoOrientationLandscapeRight;
}
// LandscapeLeft
if(t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0)
{
result = AVCaptureVideoOrientationLandscapeLeft;
}
}
return result;
}
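If you would rather keep everything in Swift, a direct translation of that helper might look like this (my sketch; it defaults to .portrait where the Objective-C version started from 0):
func videoOrientation(for asset: AVAsset) -> AVCaptureVideoOrientation {
    // Defaulting to .portrait is an assumption; the Obj-C version returns 0 if no track matches.
    var result = AVCaptureVideoOrientation.portrait
    guard let videoTrack = asset.tracks(withMediaType: AVMediaTypeVideo).first else { return result }
    let t = videoTrack.preferredTransform
    if t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0 {
        result = .portrait
    } else if t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0 {
        result = .portraitUpsideDown
    } else if t.a == 1.0 && t.b == 0 && t.c == 0 && t.d == 1.0 {
        result = .landscapeRight
    } else if t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0 {
        result = .landscapeLeft
    }
    return result
}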
Let me know if you need any more help in this.

