AVFoundation - Merge multiple videos - add animation in between videos - iOS

I am trying to merge images and video clips together. I provide an option to add an animation in between videos and images; there are a few choices such as fade-in, fade-out, rotate, slide-up, slide-down, slide-left, slide-right, etc. For images I am able to add the animation, but how do I add an animation for video? Specifically, when one video clip finishes and the next clip is about to start, that is where I want the transition animation. My merging functionality already works well; all that is left is adding the animation in between the videos.
I have tried with:
instruction.setOpacityRamp(fromStartOpacity:toEndOpacity:timeRange:)
but this only gives a fade-in/fade-out effect. Where and how do I add the other custom animation options (slide, rotate, etc.)?
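From the documentation it looks like the layer instruction only exposes ramp-style APIs (setOpacityRamp, setTransformRamp(fromStart:toEnd:timeRange:), setCropRectangleRamp), so I am guessing a slide transition would mean overlapping the two clips in the composition and ramping both layer instructions over the overlap. A rough sketch of what I mean (outgoingInstruction and incomingInstruction are placeholders, insertTime and outputSize come from my merge code below; this is not working code):
let transitionDuration = CMTime(seconds: 1, preferredTimescale: 600)
// The next clip would be inserted at (insertTime - transitionDuration) so the two clips overlap.
let overlapRange = CMTimeRange(start: CMTimeSubtract(insertTime, transitionDuration), duration: transitionDuration)
// The outgoing clip slides out to the left while fading...
outgoingInstruction.setTransformRamp(fromStart: .identity,
                                     toEnd: CGAffineTransform(translationX: -outputSize.width, y: 0),
                                     timeRange: overlapRange)
outgoingInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: overlapRange)
// ...while the incoming clip slides in from the right.
incomingInstruction.setTransformRamp(fromStart: CGAffineTransform(translationX: outputSize.width, y: 0),
                                     toEnd: .identity,
                                     timeRange: overlapRange)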
Here is my source code for merging. The code relies on many dependent functions, but I have posted only the merging functionality. I have marked the spot with //HERE TO ADD THE ANIMATION so that you can jump directly to the point where I am trying to add the animations.
func merge(allAssets: [MovieAssetPresentable], isHDR: Bool, success: @escaping (URL?) -> (Void), progress: @escaping (CGFloat) -> (Void), failed: @escaping (String?) -> (Void)) {
cancelExport()
let defaultSize = isHDR ? self.videoOutputResolution.HD : self.videoOutputResolution.lowQuality
let videoPresetName = self.getPresetName(resolution: defaultSize)
self.mergeSuccess = success
self.mergeError = failed
self.mergeProgress = progress
let mixComposition = AVMutableComposition()
let mainInstruction = AVMutableVideoCompositionInstruction()
var layerInstructions = [AVMutableVideoCompositionLayerInstruction]()
guard let urlVideoForBackground = Bundle.main.url(forResource: "black", withExtension: "mov") else {
self.mergeError("Need black background video !")
return
}
let assetForBackground = AVAsset(url: urlVideoForBackground)
let trackForBackground = assetForBackground.tracks(withMediaType: AVMediaType.video).first
//Set output size
var outputSize = CGSize.zero
for asset in allAssets.filter({$0.assetType! == .video}) {
guard let videoAsset = asset.asset else { continue }
// Get video track
guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else { continue }
let assetInfo = self.orientationFromTransform(videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
if videoSize.height > outputSize.height {
outputSize = CGSize(width: defaultSize.width, height: ((videoSize.height / videoSize.width) * defaultSize.width))
}
}
if outputSize == CGSize.zero {
outputSize = defaultSize
}
debugPrint("OUTPUT SIZE: \(outputSize)")
let layerContentsGravity = VideoSettings.shared.fetchVideoFitClips()
var layerImages = [CALayer]()
var insertTime = CMTime.zero
var audioMixInputParameters = [AVMutableAudioMixInputParameters]()
// Init Video layer
let videoLayer = CALayer()
videoLayer.frame = CGRect.init(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
videoLayer.contentsGravity = layerContentsGravity
let parentlayer = CALayer()
parentlayer.frame = CGRect.init(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
parentlayer.addSublayer(videoLayer)
for asset in allAssets.filter({$0.assetType! == .image || $0.assetType! == .video}) {
//Video speed level
let videoSpeed = Double(asset.videoSpeedLevel!)
if asset.assetType! == .video {
//Video asset
let ast = asset.asset!
let duration = asset.endTime! - asset.beginTime! //ast.duration
//Create AVMutableCompositionTrack object
guard let track = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
self.mergeError("Unable to create track.")
continue
}
//Add original video sound track
let originalSoundTrack: AVMutableCompositionTrack?
if asset.asset!.tracks(withMediaType: .audio).count > 0 {
originalSoundTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try originalSoundTrack?.insertTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), of: ast.tracks(withMediaType: AVMediaType.audio)[0], at: insertTime)
} catch {
self.mergeError("Unable to create original audio track.")
continue
}
//Set video original sound track speed
originalSoundTrack?.scaleTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), toDuration: CMTime(value: CMTimeValue(Double(duration.value) * videoSpeed), timescale: duration.timescale))
let audioInputParams = AVMutableAudioMixInputParameters(track: originalSoundTrack)
audioInputParams.setVolume(asset.videoOriginalVolume!, at: CMTime.zero)
audioInputParams.trackID = originalSoundTrack?.trackID ?? kCMPersistentTrackID_Invalid
audioMixInputParameters.append(audioInputParams)
}
//Set time range
do {
try track.insertTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration),
of: ast.tracks(withMediaType: AVMediaType.video)[0],
at: insertTime)
} catch let err {
self.mergeError("Failed to load track: \(err.localizedDescription)")
continue
}
//Set video speed
track.scaleTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), toDuration: CMTime(value: CMTimeValue(Double(duration.value) * videoSpeed), timescale: duration.timescale))
insertTime = CMTimeAdd(insertTime, duration)
let instruction = self.videoCompositionInstruction(track, asset: ast, outputSize: outputSize)
// let instruction = videoCompositionInstructionForTrack(track: t, asset: ast, standardSize: outputSize, atTime: insertTime)
instruction.setOpacity(0.0, at: insertTime)
//HERE TO ADD THE ANIMATION
layerInstructions.append(instruction)
} else {
//Image data
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let defaultImageTime = CMTimeGetSeconds(asset.endTime!) - CMTimeGetSeconds(asset.beginTime!)
let duration = CMTime.init(seconds:defaultImageTime, preferredTimescale: assetForBackground.duration.timescale)
do {
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: duration),
of: trackForBackground!,
at: insertTime)
}
catch {
self.mergeError("Background time range error")
}
guard let image = UIImage(data: asset.imageData!) else { continue }
// Create Image layer
let imageLayer = CALayer()
imageLayer.frame = CGRect.init(origin: CGPoint.zero, size: outputSize)
imageLayer.contents = image.cgImage
imageLayer.opacity = 0
imageLayer.contentsGravity = layerContentsGravity
self.setOrientation(image: image, onLayer: imageLayer)
// Add Fade in & Fade out animation
let fadeInAnimation = CABasicAnimation.init(keyPath: "opacity")
fadeInAnimation.duration = 1
fadeInAnimation.fromValue = NSNumber(value: 0)
fadeInAnimation.toValue = NSNumber(value: 1)
fadeInAnimation.isRemovedOnCompletion = false
fadeInAnimation.beginTime = CMTimeGetSeconds(insertTime) == 0 ? 0.05: CMTimeGetSeconds(insertTime)
fadeInAnimation.fillMode = CAMediaTimingFillMode.forwards
imageLayer.add(fadeInAnimation, forKey: "opacityIN")
let fadeOutAnimation = CABasicAnimation.init(keyPath: "opacity")
fadeOutAnimation.duration = 1
fadeOutAnimation.fromValue = NSNumber(value: 1)
fadeOutAnimation.toValue = NSNumber(value: 0)
fadeOutAnimation.isRemovedOnCompletion = false
fadeOutAnimation.beginTime = CMTimeGetSeconds(CMTimeAdd(insertTime, duration))
fadeOutAnimation.fillMode = CAMediaTimingFillMode.forwards
imageLayer.add(fadeOutAnimation, forKey: "opacityOUT")
layerImages.append(imageLayer)
// Increase the insert time
insertTime = CMTimeAdd(insertTime, duration)
}
}
// Add Image layers
for layer in layerImages {
parentlayer.addSublayer(layer)
}
//Add Water mark if Subscription not activated
if !AddManager.shared.hasActiveSubscription {
let imglogo = UIImage(named: "watermark")
let waterMarklayer = CALayer()
waterMarklayer.contents = imglogo?.cgImage
let sizeOfWaterMark = Utility.getWaterMarkSizeWithVideoSize(videoSize: outputSize, defaultSize: waterMarkSize)
debugPrint("sizeOfWaterMark=\(sizeOfWaterMark)")
waterMarklayer.frame = CGRect(x: outputSize.width - (sizeOfWaterMark.width+10), y: 5, width: sizeOfWaterMark.width, height: sizeOfWaterMark.height)
waterMarklayer.contentsGravity = .resizeAspect
waterMarklayer.opacity = 1.0
parentlayer.addSublayer(waterMarklayer)
}
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: insertTime)
mainInstruction.layerInstructions = layerInstructions
mainInstruction.backgroundColor = VideoSettings.shared.fetchVideoBackgroundColor().color.cgColor
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mainComposition.renderSize = outputSize
mainComposition.renderScale = 1.0
mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentlayer)
for audioAsset in allAssets.filter({$0.assetType! == .audio}) {
//NOTE: If you need to increase/decrease the audio fade-in/fade-out duration, modify the fadeInFadeOutEffectTiming value (in seconds)
let fadeInFadeOutEffectTiming = Double(3) //seconds
let volumeLevel = audioAsset.audioVolumeLevel!
let isFadeIn = audioAsset.audioFadeInEffect!
let isFadeOut = audioAsset.audioFadeOutEffect!
var audioBeginTime = audioAsset.beginTime!
var audioEndTime = audioAsset.endTime!
var audioTrackTime = audioAsset.audioTrackStartTime!
var trimmedAudioDuration = CMTimeSubtract(audioEndTime, audioBeginTime)
//If the audio start position (seconds) is greater than or equal to zero (relative to the video timeline)
if CMTimeGetSeconds(CMTimeAdd(audioTrackTime, audioBeginTime)) >= 0 {
//If the audio start position (seconds) is beyond the video length (e.g. the video is 20 seconds long but the audio starts at 24 seconds), we should not add the audio
if CMTimeCompare(CMTimeAdd(audioTrackTime, audioBeginTime), insertTime) == 1 {
trimmedAudioDuration = CMTime.zero
} else {
//If the audio start position (seconds) plus the crop length exceeds the total video length, we should add only the part that falls within the video
if CMTimeCompare(CMTimeAdd(CMTimeAdd(audioTrackTime, audioBeginTime), trimmedAudioDuration), insertTime) == 1 {
audioTrackTime = CMTimeAdd(audioTrackTime, audioBeginTime)
trimmedAudioDuration = CMTimeSubtract(insertTime, audioTrackTime)
} else {
audioTrackTime = CMTimeAdd(audioTrackTime, audioBeginTime)
}
}
}
//If the audio start time is negative (seconds)
else {
//If the audio crop length is negative (seconds)
if CMTimeCompare(CMTimeAdd(CMTimeAdd(audioTrackTime, audioBeginTime), trimmedAudioDuration), CMTime.zero) == -1 {
trimmedAudioDuration = CMTime.zero
} else {
audioBeginTime = CMTime(seconds: abs(CMTimeGetSeconds(audioTrackTime)), preferredTimescale: audioTrackTime.timescale)
audioTrackTime = CMTime.zero
trimmedAudioDuration = CMTimeSubtract(audioEndTime, audioBeginTime)
if CMTimeCompare(trimmedAudioDuration, insertTime) == 1 {
trimmedAudioDuration = insertTime
}
}
}
if trimmedAudioDuration != CMTime.zero {
audioEndTime = CMTimeAdd(audioTrackTime, trimmedAudioDuration)
let audioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try audioTrack?.insertTimeRange(CMTimeRangeMake(start: audioBeginTime , duration: trimmedAudioDuration),
of: audioAsset.asset!.tracks(withMediaType: AVMediaType.audio)[0] ,
at: audioTrackTime)
let audioInputParams = AVMutableAudioMixInputParameters(track: audioTrack)
var effectTime = CMTime(seconds: fadeInFadeOutEffectTiming, preferredTimescale: 600)
if CMTimeCompare(trimmedAudioDuration, CMTimeMultiply(effectTime, multiplier: 2)) == -1 {
effectTime = CMTime(seconds: CMTimeGetSeconds(trimmedAudioDuration) / 2, preferredTimescale: 600)
}
//Fade in effect
audioInputParams.setVolumeRamp(fromStartVolume: isFadeIn ? 0 : volumeLevel, toEndVolume: volumeLevel, timeRange: CMTimeRange(start: audioTrackTime, duration: effectTime))
//Fade out effect
audioInputParams.setVolumeRamp(fromStartVolume: volumeLevel, toEndVolume: isFadeOut ? 0 : volumeLevel, timeRange: CMTimeRange(start: CMTimeSubtract(audioEndTime, effectTime), duration: effectTime))
audioInputParams.trackID = audioTrack?.trackID ?? kCMPersistentTrackID_Invalid
audioMixInputParameters.append(audioInputParams)
} catch {
print("Failed to load Audio track")
}
}
}
// 4 - Get path
guard let url = Utility.createFileAtDocumentDirectory(name: "mergeVideo-\(Date().timeIntervalSince1970).mp4") else {
debugPrint("Unable to file at document directory")
return
}
// 5 - Create Exporter
self.exporter = AVAssetExportSession(asset: mixComposition, presetName: videoPresetName)
guard let exp = self.exporter else {
debugPrint("Unable to export.")
return
}
let audioMix = AVMutableAudioMix()
audioMix.inputParameters = audioMixInputParameters
exp.outputURL = url
exp.outputFileType = AVFileType.mp4
exp.shouldOptimizeForNetworkUse = true
exp.videoComposition = mainComposition
exp.audioMix = audioMix
//self.viewPieProgress.setProgress(0.0, animated: false)
//viewPieProgress.isHidden = isHDR
//timer for progress
self.timer = Timer.scheduledTimer(timeInterval: 1.0, target: self, selector: #selector(self.updateExportingProgress(timer:)), userInfo: exp, repeats: true)
// 6 - Perform the Export
exp.exportAsynchronously() {
DispatchQueue.main.async {
self.exportDidFinish(exp)
}
}
}
I have tried many options, but nothing meets my requirements. Please help me out.
If you require any other information from me, please feel free to comment on this post.
Thanks in advance.

Related

Swift: how to crop video view size by selected area? [duplicate]

I am recording a video in a square UIView, but when I export it the video is full screen (1080x1920). I am wondering how I can reduce the video from full screen to a square 1:1 ratio.
Here is how I am setting my video camera up:
session = AVCaptureSession()
for device in AVCaptureDevice.devices() {
if let device = device as? AVCaptureDevice , device.position == AVCaptureDevicePosition.back {
self.device = device
}
}
for device in AVCaptureDevice.devices(withMediaType: AVMediaTypeAudio) {
let device = device as? AVCaptureDevice
let audioInput = try! AVCaptureDeviceInput(device: device)
session?.addInput(audioInput)
}
do {
if let session = session {
videoInput = try AVCaptureDeviceInput(device: device)
session.addInput(videoInput)
videoOutput = AVCaptureMovieFileOutput()
let totalSeconds = 60.0 //Total Seconds of capture time
let timeScale: Int32 = 30 //FPS
let maxDuration = CMTimeMakeWithSeconds(totalSeconds, timeScale)
videoOutput?.maxRecordedDuration = maxDuration
videoOutput?.minFreeDiskSpaceLimit = 1024 * 1024//SET MIN FREE SPACE IN BYTES FOR RECORDING TO CONTINUE ON A VOLUME
if session.canAddOutput(videoOutput) {
session.addOutput(videoOutput)
}
let videoLayer = AVCaptureVideoPreviewLayer(session: session)
videoLayer?.frame = self.videoPreview.bounds
videoLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
self.videoPreview.layer.addSublayer(videoLayer!)
session.startRunning()
I have seen several other posts but have not found them very helpful, and most of them are in Objective-C.
If anyone can help me or put me in the correct direction it's much appreciated!
Firstly you need to make use of the AVCaptureFileOutputRecordingDelegate.
You specifically use the func capture( _ captureOutput: AVCaptureFileOutput!, didFinishRecordingToOutputFileAt outputFileURL: URL!, fromConnections connections: [Any]!, error: Error! ) method to perform the cropping process, once the video has finished recording.
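For context, once recording stops, the delegate callback simply hands the recorded file to the cropping function shown below (a sketch; the conformance goes on whatever object owns the capture session):
func capture(_ captureOutput: AVCaptureFileOutput!,
             didFinishRecordingToOutputFileAt outputFileURL: URL!,
             fromConnections connections: [Any]!,
             error: Error!) {
    // Crop to a square once the file has been written, then use the new URL.
    cropVideo(outputFileURL) { croppedUrl in
        print("Cropped video written to \(croppedUrl)")
    }
}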
Here is an example of a cropping function I once implemented. You need to pass in the URL of the video that was recorded and a callback that is used to return the new URL of the cropped video once the cropping process is finished.
func cropVideo( _ outputFileUrl: URL, callback: @escaping ( _ newUrl: URL ) -> () )
{
// Get input clip
let videoAsset: AVAsset = AVAsset( url: outputFileUrl )
let clipVideoTrack = videoAsset.tracks( withMediaType: AVMediaTypeVideo ).first! as AVAssetTrack
// Make video to square
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = CGSize( width: clipVideoTrack.naturalSize.height, height: clipVideoTrack.naturalSize.height )
videoComposition.frameDuration = CMTimeMake( 1, self.framesPerSecond )
// Rotate to portrait
let transformer = AVMutableVideoCompositionLayerInstruction( assetTrack: clipVideoTrack )
let transform1 = CGAffineTransform( translationX: clipVideoTrack.naturalSize.height, y: -( clipVideoTrack.naturalSize.width - clipVideoTrack.naturalSize.height ) / 2 )
let transform2 = transform1.rotated(by: CGFloat( M_PI_2 ) )
transformer.setTransform( transform2, at: kCMTimeZero)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeMakeWithSeconds( self.intendedVideoLength, self.framesPerSecond ) )
instruction.layerInstructions = [transformer]
videoComposition.instructions = [instruction]
// Export
let croppedOutputFileUrl = URL( fileURLWithPath: FileManager.getOutputPath( String.random() ) )
let exporter = AVAssetExportSession(asset: videoAsset, presetName: AVAssetExportPresetHighestQuality)!
exporter.videoComposition = videoComposition
exporter.outputURL = croppedOutputFileUrl
exporter.outputFileType = AVFileTypeQuickTimeMovie
exporter.exportAsynchronously( completionHandler: { () -> Void in
DispatchQueue.main.async(execute: {
callback( croppedOutputFileUrl )
})
})
}
Also, here is the implementation of my getOutputPath method:
func getOutputPath( _ name: String ) -> String
{
let documentPath = NSSearchPathForDirectoriesInDomains( .documentDirectory, .userDomainMask, true )[ 0 ] as NSString
let outputPath = "\(documentPath)/\(name).mov"
return outputPath
}
Hope this helps.
func cropFrame(videoAsset:AVAsset, animation:Bool) -> Void {
var insertTime = kCMTimeZero
var arrayLayerInstructions:[AVMutableVideoCompositionLayerInstruction] = []
var outputSize = CGSize.init(width: 0, height: 0)
// Determine video output size
let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
if videoSize.height > outputSize.height {
outputSize = videoSize
}
let defaultSize = CGSize(width: 1920, height: 1080) // Default video size
if outputSize.width == 0 || outputSize.height == 0 {
outputSize = defaultSize
}
// Silence sound (in case of video has no sound track)
let silenceURL = Bundle.main.url(forResource: "silence", withExtension: "mp3")
let silenceAsset = AVAsset(url:silenceURL!)
let silenceSoundTrack = silenceAsset.tracks(withMediaType: AVMediaType.audio).first
// Init composition
let mixComposition = AVMutableComposition.init()
// Get audio track
var audioTrack:AVAssetTrack?
if videoAsset.tracks(withMediaType: AVMediaType.audio).count > 0 {
audioTrack = videoAsset.tracks(withMediaType: AVMediaType.audio).first
}
else {
audioTrack = silenceSoundTrack
}
// Init video & audio composition track
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
let startTime = kCMTimeZero
let duration = videoAsset.duration
// Add video track to video composition at specific time
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, duration),
of: videoTrack,
at: insertTime)
// Add audio track to audio composition at specific time
if let audioTrack = audioTrack {
try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, duration),
of: audioTrack,
at: insertTime)
}
// Add instruction for video track
let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!,
asset: videoAsset,
standardSize: outputSize,
atTime: insertTime)
// Hide video track before changing to new track
let endTime = CMTimeAdd(insertTime, duration)
//let finalTimer = CMTimeAdd(CMTime(seconds: 5, preferredTimescale: videoAsset.duration.timescale), CMTime(seconds: 5, preferredTimescale: videoAsset.duration.timescale))
//Kalpesh crop video frames
if animation {
let timeScale = videoAsset.duration.timescale
let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
//**********======== CROP YOUR VIDEO FRAME HERE MANUALLY ========**********
layerInstruction.setCropRectangle(CGRect(x: 0, y: 0, width: videoTrack.naturalSize.width, height: 300.0), at: startTime)
} else {
layerInstruction.setOpacity(0, at: endTime)
}
arrayLayerInstructions.append(layerInstruction)
// Increase the insert time
insertTime = CMTimeAdd(insertTime, duration)
}
catch {
print("Load track error")
}
// Main video composition instruction
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, insertTime)
mainInstruction.layerInstructions = arrayLayerInstructions
// Main video composition
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(1, 30)
mainComposition.renderSize = outputSize
// Export to file
let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
let exportURL = URL.init(fileURLWithPath: path)
// Init exporter
let exporter = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = exportURL
exporter?.outputFileType = AVFileType.mp4
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = mainComposition
// Do export
exporter?.exportAsynchronously(completionHandler: {
})
}
func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset, standardSize:CGSize, atTime: CMTime) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform: transform)
var aspectFillRatio:CGFloat = 1
if assetTrack.naturalSize.height < assetTrack.naturalSize.width {
aspectFillRatio = standardSize.height / assetTrack.naturalSize.height
}
else {
aspectFillRatio = standardSize.width / assetTrack.naturalSize.width
}
if assetInfo.isPortrait {
let scaleFactor = CGAffineTransform(scaleX: aspectFillRatio, y: aspectFillRatio)
let posX = standardSize.width/2 - (assetTrack.naturalSize.height * aspectFillRatio)/2
let posY = standardSize.height/2 - (assetTrack.naturalSize.width * aspectFillRatio)/2
let moveFactor = CGAffineTransform(translationX: posX, y: posY)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(moveFactor), at: atTime)
} else {
let scaleFactor = CGAffineTransform(scaleX: aspectFillRatio, y: aspectFillRatio)
let posX = standardSize.width/2 - (assetTrack.naturalSize.width * aspectFillRatio)/2
let posY = standardSize.height/2 - (assetTrack.naturalSize.height * aspectFillRatio)/2
let moveFactor = CGAffineTransform(translationX: posX, y: posY)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(moveFactor)
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
concat = fixUpsideDown.concatenating(scaleFactor).concatenating(moveFactor)
}
instruction.setTransform(concat, at: atTime)
}
return instruction
}
func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
var assetOrientation = UIImageOrientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
Call it this way:
let asset = AVAsset(url: VideoURL)
self.cropFrame(videoAsset: asset, animation: true)

Merge video with images according to the resolution of the natural size

I am trying to merge stickers onto a video. It mostly works with landscape right now, but there are lots of issues with portrait videos.
With landscape, the issue is on merge: I am unable to maintain the sticker ratio and origin according to the video's natural size. With portrait, it's a total mess. If anyone wants to look at the code and test it, here is the GitHub link.
Here are the main two methods that matter.
The first gets the resolution of the video and, based on that, sizes the video container that holds the images:
private func setupVideoPlayer() {
let currentFrameSize = currentVideoFrameSize()
videoeHeightConstraint.constant = currentFrameSize.height
videoWidthConstraint.constant = currentFrameSize.width
videoPlayer = AVPlayer(playerItem: playerItem)
let playerLayer = AVPlayerLayer(player: videoPlayer)
playerLayer.frame = CGRect(x: 0, y: 0, width: currentFrameSize.width, height: currentFrameSize.height)
playerLayer.videoGravity = .resizeAspect
videoContentView.layer.addSublayer(playerLayer)
videoContentView.bringSubviewToFront(stickersContentView)
videoPlayer?.play()
}
private func currentVideoFrameSize() -> CGSize {
guard let asset = playerItem?.asset as? AVURLAsset, let track = asset.tracks(withMediaType: AVMediaType.video).first else { return .zero }
let trackSize = track.naturalSize
let videoViewSize = videoContentView.superview!.bounds.size
let trackRatio = trackSize.width / trackSize.height
let videoViewRatio = videoViewSize.width / videoViewSize.height
var newSize: CGSize
if videoViewRatio > trackRatio {
newSize = CGSize(width: trackSize.width * videoViewSize.height / trackSize.height, height: videoViewSize.height)
} else {
newSize = CGSize(width: videoViewSize.width, height: trackSize.height * videoViewSize.width / trackSize.width)
}
let assetInfo = VideoManager.shared.orientationFromTransform(transform: track.preferredTransform)
if assetInfo.isPortrait {
let tempSize = newSize
newSize.width = tempSize.height
newSize.height = tempSize.width
}
return newSize
}
For merge:
func makeVideoFrom(video: VideoData, images: [VideoOverlayImage], completion: @escaping Completion) -> Void {
var outputSize: CGSize = .zero
var insertTime: CMTime = .zero
var arrayLayerInstructions: [AVMutableVideoCompositionLayerInstruction] = []
var arrayLayerImages: [CALayer] = []
// Init composition
let mixComposition = AVMutableComposition()
// Get video track
guard let videoTrack = video.asset.tracks(withMediaType: AVMediaType.video).first else { return }
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
if videoSize.height > outputSize.height {
outputSize = videoSize
}
if outputSize.width == 0 || outputSize.height == 0 {
outputSize = defaultSize
}
// Get audio track
var audioTrack: AVAssetTrack?
if video.asset.tracks(withMediaType: AVMediaType.audio).count > 0 {
audioTrack = video.asset.tracks(withMediaType: AVMediaType.audio).first
}
// Init video & audio composition track
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
let startTime = CMTime.zero
let duration = video.asset.duration
// Add video track to video composition at specific time
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration),
of: videoTrack,
at: insertTime)
// Add audio track to audio composition at specific time
if let audioTrack = audioTrack {
try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration),
of: audioTrack,
at: insertTime)
}
// Add instruction for video track
let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!,
asset: video.asset,
standardSize: outputSize,
atTime: insertTime)
// Hide video track before changing to new track
let endTime = CMTimeAdd(insertTime, duration)
let timeScale = video.asset.duration.timescale
let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
arrayLayerInstructions.append(layerInstruction)
// Increase the insert time
insertTime = CMTimeAdd(insertTime, duration)
} catch {
print("Load track error")
}
// Merge
for image in images {
let animatedImageLayer = CALayer()
let aspectWidth = assetInfo.isPortrait ? outputSize.width/video.frame.height : outputSize.width/video.frame.width
let aspectHeight = assetInfo.isPortrait ? outputSize.height/video.frame.width : outputSize.height/video.frame.height
let aspectRatio = min(aspectWidth, aspectHeight)
let scaledWidth = image.frame.width * aspectRatio
let scaledHeight = image.frame.height * aspectRatio
let cx = (image.frame.minX * aspectRatio) + (scaledWidth / 2)
let cy = (image.frame.minY * aspectRatio) + (scaledHeight / 2)
var iFrame = image.frame
iFrame.size.width = scaledWidth
iFrame.size.height = scaledWidth
animatedImageLayer.frame = iFrame
animatedImageLayer.position = CGPoint(x: assetInfo.isPortrait ? cy : cx, y: assetInfo.isPortrait ? cx : cy)
if let animatedURL = URL(string: image.url), let animation = animatedImage(with: animatedURL) {
animatedImageLayer.add(animation, forKey: "contents")
}
arrayLayerImages.append(animatedImageLayer)
}
// Init Video layer
let videoLayer = CALayer()
videoLayer.frame = CGRect(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
parentlayer.addSublayer(videoLayer)
// Add Image layers
arrayLayerImages.forEach { parentlayer.addSublayer($0) }
// Main video composition instruction
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: insertTime)
mainInstruction.layerInstructions = arrayLayerInstructions
// Main video composition
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.renderSize = outputSize
mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentlayer)
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
// Export to file
let path = NSTemporaryDirectory().appending("stickers_video_merge.mov")
let exportURL = URL(fileURLWithPath: path)
// Remove file if existed
FileManager.default.removeItemIfExisted(exportURL)
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = exportURL
exporter?.outputFileType = .mov
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = mainComposition
// Do export
exporter?.exportAsynchronously() {
DispatchQueue.main.async {
self.exportDidFinish(exporter: exporter, videoURL: exportURL, completion: completion)
}
}
}
Major issues:
In landscape videos, the image origin is not maintained, and the image size relative to the natural size looks bigger than expected (see the coordinate-mapping sketch after this list).
In portrait videos, I am unable to get the video layer in the background: although it is there and playing, the end result shows a black view. Also, the image origin and size are not maintained relative to the original video size.
You can also check the dev branch, where you don't need to maintain any ratio, but there is still an issue with the image frame and portrait video. Anyone can run it on a device and see the actual problem.
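For reference, this is the kind of mapping I believe is needed when converting a sticker frame from the on-screen preview into the composition's coordinate space (a sketch; compositionFrame is a hypothetical helper, viewSize is what currentVideoFrameSize() returns, renderSize is the composition's renderSize, and as far as I understand the composition's layer space uses a bottom-left origin):
func compositionFrame(for stickerFrame: CGRect, viewSize: CGSize, renderSize: CGSize) -> CGRect {
    // The preview is aspect-fitted, so a single scale factor maps view points to render pixels.
    let scale = renderSize.width / viewSize.width
    let width = stickerFrame.width * scale
    let height = stickerFrame.height * scale
    let x = stickerFrame.minX * scale
    // Flip Y: UIKit uses a top-left origin, the exported composition layers use a bottom-left origin.
    let y = renderSize.height - (stickerFrame.minY * scale) - height
    return CGRect(x: x, y: y, width: width, height: height)
}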

Only the first video appears after merging different videos

I am creating a video collage app in which I merge multiple videos into different frames to make a single video. But all frames show the first video; the other videos do not appear after the merge. Please give me a suggestion as soon as possible. My code is below.
func newoverlay() {
// 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
let mixComposition = AVMutableComposition()
var arrayOfComposition = Array<AVMutableCompositionTrack>()
var trackInstruction = Array<AVVideoCompositionLayerInstruction>()
var videolayer = Array<CALayer>()
var i:Int = 0
let mainInstruction = AVMutableVideoCompositionInstruction()
var assetDuration:CMTime = CMTime.zero
var box = Array<CALayer>()
var arrOfIns = Array<AVMutableVideoCompositionInstruction>()
var atTimeM : CMTime = CMTimeMake(value: 0, timescale: 0)
var lastAsset: AVURLAsset!
// 2 - Create two video tracks
for videoAssetss in firstAsset {
guard var firstTrack = mixComposition.addMutableTrack(withMediaType: .video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
do {
try firstTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: (videoAssetss as? AVURLAsset)!.duration),
of: (videoAssetss as? AVURLAsset)!.tracks(withMediaType: .video)[0],
at: CMTime.zero)
var firstInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstTrack)
guard let cor = photoFrameCordinate[i] as? CGRect else{return}
if videoAssetss as! AVURLAsset != firstAsset.last as! AVURLAsset{
firstInstruction.setOpacity(0, at: assetDuration) // asseteDuration
}
let transform = CGAffineTransform(scaleX: 0.4, y:1).concatenating(CGAffineTransform(translationX: trackInstruction[i-1]., y: -cor.origin.y))
firstInstruction.setTransform(transform, at: CMTime.zero)
assetDuration = CMTimeAdd(assetDuration, (videoAssetss as! AVURLAsset).duration)
lastAsset = videoAssetss as? AVURLAsset
trackInstruction.append(firstInstruction)
i += 1
// arrayOfComposition.append(firstTrack)
} catch {
print("Failed to load first track")
return
}
}
// Watermark Effect
let width: CGFloat = widthConstraintViewForImage.constant
let height = heightConstraintViewForImage.constant
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
//Mark: Frame layer
let bglayer = CALayer()
bglayer.contents = imgViewForAdminImage.image?.cgImage
bglayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
bglayer.backgroundColor = UIColor.clear.cgColor
for index in 0..<videoURLS.count{
var videoBox = CALayer()
guard let cor = photoFrameCordinate[index] as? CGRect else{return}
videoBox.frame = CGRect(x: cor.origin.x, y: parentlayer.frame.maxY-(cor.origin.y+cor.size.height), width: cor.size.width, height: cor.size.height)
videoBox.backgroundColor = UIColor.green.cgColor
videoBox.masksToBounds = true
var vlayer = CALayer()
vlayer.contentsScale = 1.0
vlayer.contentsGravity = CALayerContentsGravity.center
vlayer.frame = CGRect(x: 0, y: 0, width:cor.size.width, height: cor.size.height)
vlayer.backgroundColor = UIColor.yellow.cgColor
videolayer.append(vlayer)
videoBox.addSublayer(vlayer)
box.append(videoBox)
bglayer.addSublayer(videoBox)
}
parentlayer.addSublayer(bglayer)
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayers: videolayer, in: parentlayer)
// 2.1
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: assetDuration)
mainInstruction.layerInstructions = trackInstruction
mainInstruction.backgroundColor = UIColor.red.cgColor
layercomposition.instructions = [mainInstruction]
// layercomposition.renderSize = CGSizeMake(videoSize.width * scale, videoSize.height * scale)
layercomposition.renderScale = 1.0
layercomposition.renderSize = CGSize(width: width, height: height)
// create new file to receive data
let dirPaths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
let docsDir = dirPaths[0] as NSString
let movieFilePath = docsDir.appendingPathComponent("result.mp4")
let movieDestinationUrl = NSURL(fileURLWithPath: movieFilePath)
// use AVAssetExportSession to export video
let assetExport = AVAssetExportSession(asset: mixComposition, presetName:AVAssetExportPresetMediumQuality)
assetExport?.outputFileType = AVFileType.mp4
assetExport?.videoComposition = layercomposition
// Check exist and remove old file
FileManager.default.removeItemIfExisted(movieDestinationUrl as URL)
assetExport?.outputURL = movieDestinationUrl as URL
assetExport?.exportAsynchronously(completionHandler: {
switch assetExport!.status {
case AVAssetExportSession.Status.failed:
print("failed")
print(assetExport?.error ?? "unknown error")
case AVAssetExportSession.Status.cancelled:
print("cancelled")
print(assetExport?.error ?? "unknown error")
default:
print("Movie complete")
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: movieDestinationUrl as URL)
}) { saved, error in
if saved {
print("Saved")
}else{
print(error!)
}
}
self.playVideo()
}
})
}
When I set the opacity of the first video to 0, the second video shows in all the frames. I think all the videos are there but sit behind the first video, which is why only the first video is visible in all the frames.
I used
let videolayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: asset)
videolayerInstruction.setCropRectangle(CGRect(), at: CMTime())
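What I suspect is actually needed is a transform on each layer instruction, so that every track is scaled and translated into its own frame instead of all the tracks sitting on top of each other at the origin. Something roughly like this (a sketch; compositionTracks is a placeholder for the tracks created in the loop above):
for (index, compTrack) in compositionTracks.enumerated() {
    guard let frame = photoFrameCordinate[index] as? CGRect else { continue }
    let naturalSize = compTrack.naturalSize
    // Scale the track into its frame, then move it to the frame's origin.
    let scale = CGAffineTransform(scaleX: frame.width / naturalSize.width,
                                  y: frame.height / naturalSize.height)
    let move = CGAffineTransform(translationX: frame.origin.x, y: frame.origin.y)
    let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compTrack)
    layerInstruction.setTransform(scale.concatenating(move), at: CMTime.zero)
    // Do not set opacity to 0 here; every track should stay visible for the whole duration.
    trackInstruction.append(layerInstruction)
}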

How to add a Fade In and Fade Out effect on Video AVAsset in Swift3 iOS

I am developing a video application in Swift 3 where I have to convert text to a video, then add a fade-in and fade-out effect, and post the faded video to a server. I must not use any third-party library for the fade effect.
I am able to convert my text to a video; my problem is how to add the fade-in and fade-out to the video AVAsset.
Can anyone suggest how to achieve this? I cannot find any recent answers to this problem. Thanks for any help!
Fade Out effect
let parentLayer = CALayer()
let fadeOut = CABasicAnimation(keyPath: "opacity")
fadeOut.fromValue = 1.0
fadeOut.toValue = 0.0
fadeOut.duration = 5.0 // This should be the video duration
fadeOut.setValue("video", forKey:"fadeOut")
fadeOut.isRemovedOnCompletion = false
fadeOut.fillMode = CAMediaTimingFillMode.forwards
parentLayer.add(fadeOut, forKey: "opacity")
Fade in effect (same setup as the fade-out above, with the values reversed)
let fadeIn = CABasicAnimation(keyPath: "opacity")
fadeIn.fromValue = 0.0
fadeIn.toValue = 1.0
Add to your player
self.playerView?.playerLayer?.add(fadeOut, forKey: nil)
Add to your assets
var startTime = CMTime.zero
var timeDuration = CMTimeMake(value: 3, timescale: 1)
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
//MARK: Fade in effect
layerInstruction.setOpacityRamp(fromStartOpacity: 0.0, toEndOpacity: 1.0, timeRange: CMTimeRange(start: startTime, duration: timeDuration))
//MARK: Fade out effect
startTime = CMTimeSubtract(mutableComposition.duration, CMTimeMake(value: 3, timescale: 1))
timeDuration = CMTimeMake(value: 3, timescale: 1)
layerInstruction.setOpacityRamp(
fromStartOpacity: 1.0,
toEndOpacity: 0.0,
timeRange: CMTimeRangeMake(start: startTime, duration: timeDuration)
)
AVVideoCompositionLayerInstruction
An array of instances of AVVideoCompositionLayerInstruction that specify how video frames from source tracks should be layered and composed.
AVMutableVideoCompositionInstruction
An AVVideoComposition object maintains an array of instructions to perform its composition.
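Putting those pieces together, the wiring is just this (a minimal sketch, assuming you already have a mixComposition with a videoCompositionTrack and know the output size):
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoCompositionTrack)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(start: .zero, duration: mixComposition.duration)
instruction.layerInstructions = [layerInstruction]
let videoComposition = AVMutableVideoComposition()
videoComposition.instructions = [instruction]
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
videoComposition.renderSize = outputSize
// Assign videoComposition to the AVAssetExportSession (or AVPlayerItem) so the ramps take effect.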
Example (Swift 4):
I merged videos with a fade-in/fade-out effect and chained the audio tracks in sequence:
func doMerge(arrayVideos:[AVAsset], arrayAudios:[AVAsset], animation:Bool, completion: @escaping Completion) -> Void {
var insertTime = kCMTimeZero
var audioInsertTime = kCMTimeZero
var arrayLayerInstructions:[AVMutableVideoCompositionLayerInstruction] = []
var outputSize = CGSize.init(width: 0, height: 0)
// Determine video output size
for videoAsset in arrayVideos {
let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
outputSize = videoSize
}
// Init composition
let mixComposition = AVMutableComposition.init()
for index in 0..<arrayVideos.count {
// Get video track
guard let videoTrack = arrayVideos[index].tracks(withMediaType: AVMediaType.video).first else { continue }
// Get audio track
var audioTrack:AVAssetTrack?
if index < arrayAudios.count {
if arrayAudios[index].tracks(withMediaType: AVMediaType.audio).count > 0 {
audioTrack = arrayAudios[index].tracks(withMediaType: AVMediaType.audio).first
}
}
// Init video & audio composition track
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
let startTime = kCMTimeZero
let duration = arrayVideos[index].duration
// Add video track to video composition at specific time
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, duration), of: videoTrack, at: insertTime)
// Add audio track to audio composition at specific time
var audioDuration = kCMTimeZero
if index < arrayAudios.count {
audioDuration = arrayAudios[index].duration
}
if let audioTrack = audioTrack {
do {
try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, audioDuration), of: audioTrack, at: audioInsertTime)
}
catch {
print(error.localizedDescription)
}
}
// Add instruction for video track
let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!, asset: arrayVideos[index], standardSize: outputSize, atTime: insertTime)
// Hide video track before changing to new track
let endTime = CMTimeAdd(insertTime, duration)
if animation {
let timeScale = arrayVideos[index].duration.timescale
let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
layerInstruction.setOpacityRamp (fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
}
else {
layerInstruction.setOpacity(0, at: endTime)
}
arrayLayerInstructions.append(layerInstruction)
// Increase the insert time
audioInsertTime = CMTimeAdd(audioInsertTime, audioDuration)
insertTime = CMTimeAdd(insertTime, duration)
}
catch {
print("Load track error")
}
}
// Main video composition instruction
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, insertTime)
mainInstruction.layerInstructions = arrayLayerInstructions
// Main video composition
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(1, 30)
mainComposition.renderSize = outputSize
// Export to file
let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
let exportURL = URL.init(fileURLWithPath: path)
// Remove file if existed
FileManager.default.removeItemIfExisted(exportURL)
// Init exporter
let exporter = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = exportURL
exporter?.outputFileType = AVFileType.mp4
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = mainComposition
// Do export
exporter?.exportAsynchronously(completionHandler: {
DispatchQueue.main.async {
self.exportDidFinish(exporter: exporter, videoURL: exportURL, completion: completion)
}
})
}

Merge two videos in iOS

I can merge two videos, but in the final result, although the total duration is correct, only the first video plays; for the duration of the second video the picture stays frozen on a static image.
For example:
two 6-second videos produce a 12-second video; it plays correctly up to 6 seconds, and after that the image freezes.
func mergeVideos(videoMergedUrl:URL) {
let mainComposition = AVMutableVideoComposition()
var startDuration:CMTime = kCMTimeZero
let mainInstruction = AVMutableVideoCompositionInstruction()
let mixComposition = AVMutableComposition()
var allVideoInstruction = [AVMutableVideoCompositionLayerInstruction]()
for i:Int in 0 ..< listSegment.count {
let currentAsset = listSegment[i]
let currentTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
try currentTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, currentAsset.duration), of: currentAsset.tracks(withMediaType: AVMediaType.video)[0], at: startDuration)
let currentInstruction:AVMutableVideoCompositionLayerInstruction = videoCompositionInstructionForTrack(currentTrack!, asset: currentAsset)
//currentInstruction.setOpacityRamp(fromStartOpacity: 0.0, toEndOpacity: 1.0, timeRange:CMTimeRangeMake(startDuration, CMTimeMake(1, 1)))
/*if i != assets.count - 1 {
//Sets Fade out effect at the end of the video.
currentInstruction.setOpacityRamp(fromStartOpacity: 1.0,
toEndOpacity: 0.0,
timeRange:CMTimeRangeMake(
CMTimeSubtract(
CMTimeAdd(currentAsset.duration, startDuration),
CMTimeMake(1, 1)),
CMTimeMake(2, 1)))
}*/
/*let transform:CGAffineTransform = currentTrack!.preferredTransform
if orientationFromTransform(transform).isPortrait {
let outputSize:CGSize = CGSize(width: 640, height: 480)
let horizontalRatio = CGFloat(outputSize.width) / (currentTrack?.naturalSize.width)!
let verticalRatio = CGFloat(outputSize.height) / (currentTrack?.naturalSize.height)!
let scaleToFitRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
let FirstAssetScaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
if currentAsset.g_orientation == .landscapeLeft {
let rotation = CGAffineTransform(rotationAngle: .pi)
let translateToCenter = CGAffineTransform(translationX: 640, y: 480)
let mixedTransform = rotation.concatenating(translateToCenter)
currentInstruction.setTransform((currentTrack?.preferredTransform.concatenating(FirstAssetScaleFactor).concatenating(mixedTransform))!, at: kCMTimeZero)
} else {
currentInstruction.setTransform((currentTrack?.preferredTransform.concatenating(FirstAssetScaleFactor))!, at: kCMTimeZero)
}
}*/
allVideoInstruction.append(currentInstruction) //Add video instruction in Instructions Array.
startDuration = CMTimeAdd(startDuration, currentAsset.duration)
} catch _ {
print("ERROR_LOADING_VIDEO")
}
}
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, startDuration)
mainInstruction.layerInstructions = allVideoInstruction
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(1, 30)
mainComposition.renderSize = CGSize(width: 640, height: 480)
let manager = FileManager.default
_ = try? manager.removeItem(at: videoMergedUrl)
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset640x480) else { return }
exporter.outputURL = videoMergedUrl
exporter.outputFileType = AVFileType.mp4
exporter.shouldOptimizeForNetworkUse = false
exporter.videoComposition = mainComposition
// Perform the Export
exporter.exportAsynchronously() {
DispatchQueue.main.async {
self.exportDidFinish(exporter)
}
}
}
I had the same problem after following this tutorial. I fixed it by inserting the clips into the composition with AVMutableComposition.insertTimeRange(_:of:at:) instead of creating a separate track per clip with addMutableTrack.
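For reference, that approach looks roughly like this (a sketch; listSegment stands in for the array of assets being merged):
let mixComposition = AVMutableComposition()
var cursor = kCMTimeZero
for asset in listSegment {
    // insertTimeRange(_:of:at:) copies every track of the asset into matching
    // composition tracks, so the clips genuinely play back to back.
    try? mixComposition.insertTimeRange(CMTimeRangeMake(kCMTimeZero, asset.duration),
                                        of: asset,
                                        at: cursor)
    cursor = CMTimeAdd(cursor, asset.duration)
}
// Export mixComposition as before; a videoComposition with transforms or opacity ramps
// can still be applied on top if needed.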
