I am trying to understand the animation added to the videos here. What animation is happening? I understand how it adds an animation to the video as a whole, but I want to know how we can add a different animation to each image in the video.
I have used the following code to pick images and merge the video.
class ImageVideoMakerController: UIViewController {
override func viewDidLoad() {
super.viewDidLoad()
playPauseBtn.isHidden = true
fromPlayVideo = true
fromSave = false
setUpInitialView()
}
func setUpInitialView(){
setUpArrays()
buildVideoFromImageArray()
transitionScrollViewCreation()
filterScrollContents()
}
@objc func filterActionTapped(sender:UIButton){
fromFilter = true
fromTransition = false
if(sender.tag==0){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CISepiaTone"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}else if(sender.tag==1){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectChrome"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}else if(sender.tag==2){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectTransfer"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}else if(sender.tag==3){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectTonal"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}else if(sender.tag==4){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectProcess"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}else if(sender.tag==5){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectNoir"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}else if(sender.tag==6){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectInstant"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}
else if(sender.tag==7){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectFade"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}
}
func applyFilter(globalFilterToBeApplied:String){
let filter = CIFilter(name: globalFilterToBeApplied)!
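// Note: AVVideoComposition(asset:applyingCIFiltersWithHandler:) below invokes the
// handler once per video frame at playback/export time, so this single CIFilter
// instance gets applied to every frame.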
let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
let source = request.sourceImage.clampedToExtent()
filter.setValue(source, forKey: kCIInputImageKey)
let output = filter.outputImage!.cropped(to: request.sourceImage.extent)
request.finish(with: output, context: nil)
})
globalrVideoComposition = composition
self.playVideoInPlayer(animatedVideoURL: self.globalVideoURL as URL)
}
func playVideoInPlayer(animatedVideoURL:URL){
if(globalFilterName != nil){
self.asset = AVAsset.init(url:animatedVideoURL as URL)
let newPlayerItem = AVPlayerItem.init(asset:self.asset);
newPlayerItem.videoComposition=globalrVideoComposition
self.player = AVPlayer.init(playerItem:newPlayerItem)
}else{
let newPlayerItem = AVPlayerItem.init(url:animatedVideoURL)
self.player = AVPlayer.init(playerItem:newPlayerItem)
}
NotificationCenter.default.addObserver(self, selector: #selector(self.finishedPlaying(_:)), name: NSNotification.Name.AVPlayerItemDidPlayToEndTime, object:nil)
self.playerLayer = AVPlayerLayer.init(player:self.player)
let width: CGFloat = self.videoContainerView.frame.size.width
let height: CGFloat = self.videoContainerView.frame.size.height
self.playerLayer.frame = CGRect(x: 0.0, y:0, width: width, height: height)
self.playerLayer.backgroundColor = UIColor.black.cgColor
self.playerLayer.videoGravity = .resizeAspectFill
self.videoContainerView.layer.addSublayer( self.playerLayer)
self.playPauseBtn.isHidden = false
self.playPauseBtn.setImage(UIImage.init(named:"pause"), for:.normal)
DispatchQueue.main.async {
MBProgressHUD.hideAllHUDs(for:self.view, animated:true)
self.player.play()
}
}
func exportVideoWithAnimation() {
let composition = AVMutableComposition()
let track = self.asset?.tracks(withMediaType: AVMediaType.video)
let videoTrack:AVAssetTrack = track![0] as AVAssetTrack
let timerange = CMTimeRangeMake(kCMTimeZero, (self.asset?.duration)!)
let compositionVideoTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID())!
do {
try compositionVideoTrack.insertTimeRange(timerange, of: videoTrack, at: kCMTimeZero)
compositionVideoTrack.preferredTransform = videoTrack.preferredTransform
} catch {
print(error)
}
//if your video has sound, you don’t need to check this
if self.audioIsEnabled {
let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID())!
for audioTrack in (self.asset?.tracks(withMediaType: AVMediaType.audio))! {
do {
try compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
} catch {
print(error)
}
}
}
let size = videoTrack.naturalSize
let videolayer = CALayer()
videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
parentlayer.addSublayer(videolayer)
var time = [0.00001, 3, 6, 9, 12] //I used this time array to determine the start time of each frame's animation. Each frame stays for 3 secs; that's why their difference is 3
var imgarray = self.selectedImageArray
for image in 0..<self.selectedImageArray.count {
let nextPhoto = imgarray[image]
let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
let aspectRatio = min(horizontalRatio, verticalRatio)
let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
let blackLayer = CALayer()
///#7. opacity(1->0)(top->bottom)///
//#3. top->bottom///
//MARK:- Animations==================================
///#1. left->right///
if(self.globalSelectedTransitionTag == 0){
blackLayer.frame = CGRect(x: -videoTrack.naturalSize.width, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
blackLayer.backgroundColor = UIColor.black.cgColor
let imageLayer = CALayer()
imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
imageLayer.contents = imgarray[image].cgImage
blackLayer.addSublayer(imageLayer)
let animation = CABasicAnimation()
animation.keyPath = "position.x"
animation.fromValue = -videoTrack.naturalSize.width
animation.toValue = 5 * (videoTrack.naturalSize.width)
animation.duration = 5
animation.beginTime = CFTimeInterval(time[image])
animation.fillMode = kCAFillModeForwards
animation.isRemovedOnCompletion = false
blackLayer.add(animation, forKey: "opacity")
}
parentlayer.addSublayer(blackLayer)
}
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(1, 30)
layercomposition.renderSize = size
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
let videotrack = composition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
instruction.layerInstructions = [layerinstruction]
layercomposition.instructions = [instruction]
if(fromTransition){
self.globalrVideoComposition = layercomposition
}
let animatedVideoURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/video2.mp4")
self.removeFileAtURLIfExists(url: animatedVideoURL)
guard let assetExport = AVAssetExportSession(asset: composition, presetName:AVAssetExportPresetHighestQuality) else {return}
assetExport.videoComposition = self.globalrVideoComposition
assetExport.outputFileType = AVFileType.mp4
assetExport.outputURL = animatedVideoURL as URL
print("****** animatedVideoURL *****",animatedVideoURL)
assetExport.exportAsynchronously(completionHandler: {
switch assetExport.status{
case AVAssetExportSessionStatus.failed:
print("failed \(String(describing: assetExport.error))")
case AVAssetExportSessionStatus.cancelled:
print("cancelled \(String(describing: assetExport.error))")
default:
print("Exported")
if(self.fromPlayVideo){
DispatchQueue.main.async {
self.globalVideoURL = animatedVideoURL; self.playVideoInPlayer(animatedVideoURL: animatedVideoURL as URL)
}
}else if(self.fromSave){
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: animatedVideoURL as URL)
}) { saved, error in
}
if saved {
}else{
}
}
}
}
})
}
//MARK:- Make ScrollViews
@objc func transitionTapped(sender:UIButton){
self.fromSave = false
self.fromPlayVideo = true
self.playPauseBtn.isHidden = true
self.playerLayer.removeFromSuperlayer()
globalSelectedTransitionTag = sender.tag
exportVideoWithAnimation()
}
}
If I'm not mistaken, the animation there is nothing more than an opacity animation.
let animation = CABasicAnimation(keyPath: "opacity")
It "fades in" over a few seconds.
It looks like there's also a scale animation, which just scales it up in size.
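For reference, a bare-bones version of those two animations on one image layer might look roughly like this; imageLayer and startTime are placeholder names from my side, not identifiers in the posted code. Giving each image's layer its own start time is what makes each image animate differently.
// Sketch only: fade in and scale up a single per-image layer.
let fade = CABasicAnimation(keyPath: "opacity")
fade.fromValue = 0
fade.toValue = 1
fade.duration = 1
fade.beginTime = startTime          // stagger this value per image
fade.fillMode = kCAFillModeForwards
fade.isRemovedOnCompletion = false
imageLayer.add(fade, forKey: "fadeIn")
let scale = CABasicAnimation(keyPath: "transform.scale")
scale.fromValue = 0.8
scale.toValue = 1.0
scale.duration = 1
scale.beginTime = startTime
scale.fillMode = kCAFillModeForwards
scale.isRemovedOnCompletion = false
imageLayer.add(scale, forKey: "scaleUp")
One caveat: in a layer tree handed to AVVideoCompositionCoreAnimationTool, a beginTime of exactly 0 means "now", which is presumably why the posted code starts its time array at 0.00001 (AVCoreAnimationBeginTimeAtZero exists for the same reason).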
The code you posted is badly written and messy, so as a beginner I would set it aside rather than study it.
As a beginner, I would not jump straight into "video .. and animations too!" at first.
Just try making some "simple" animations in your app. A good thing to start with is something that slides on and off the screen, or that simply fades in and out. (So, try doing those things to a button or the like, as in the sketch below.)
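For example, something as small as this already covers the "slide on, then fade out" idea; it assumes nothing more than a button outlet and its containing view:
// Slide a button in from off-screen, then fade it out again.
button.transform = CGAffineTransform(translationX: -view.bounds.width, y: 0)
UIView.animate(withDuration: 0.4, animations: {
    button.transform = .identity                // slide on screen
}) { _ in
    UIView.animate(withDuration: 0.4, delay: 1.0, options: [], animations: {
        button.alpha = 0                        // then fade back out
    })
}
Once that feels comfortable, the same two ideas (position and opacity, staggered over time) are what the video layer code is doing with CABasicAnimation.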
Related
I am recording 3 videos and merging them into one. I need text on all the videos; I am trying to add it but with no success: the video goes black while the audio comes out fine. When I do it without the CALayer everything is good. I need help.
Is there something wrong with the code, or am I doing it the wrong way? Please guide me.
private func doMerge(arrayVideos:[AVAsset], animation:Bool, completion:@escaping Completion) -> Void {
var insertTime = CMTime.zero
var arrayLayerInstructions:[AVMutableVideoCompositionLayerInstruction] = []
var outputSize = CGSize.init(width: 0, height: 0)
// Determine video output size
for videoAsset in arrayVideos {
let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
if videoSize.height > outputSize.height {
outputSize = videoSize
}
}
if outputSize.width == 0 || outputSize.height == 0 {
outputSize = defaultSize
}
// Silence sound (in case of video has no sound track)
// let silenceURL = Bundle.main.url(forResource: "silence", withExtension: "mp3")
// let silenceAsset = AVAsset(url:silenceURL!)
// let silenceSoundTrack = silenceAsset.tracks(withMediaType: AVMediaType.audio).first
// Init composition
let mixComposition = AVMutableComposition.init()
for videoAsset in arrayVideos {
// Get video track
guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else { continue }
// Get audio track
var audioTrack:AVAssetTrack?
if videoAsset.tracks(withMediaType: AVMediaType.audio).count > 0 {
audioTrack = videoAsset.tracks(withMediaType: AVMediaType.audio).first
}
else {
// audioTrack = silenceSoundTrack
}
// Init video & audio composition track
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
let startTime = CMTime.zero
let duration = videoAsset.duration
// Add video track to video composition at specific time
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration),
of: videoTrack,
at: insertTime)
// Add audio track to audio composition at specific time
if let audioTrack = audioTrack {
try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration),
of: audioTrack,
at: insertTime)
}
// Add instruction for video track
let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!,
asset: videoAsset,
standardSize: outputSize,
atTime: insertTime)
// Hide video track before changing to new track
let endTime = CMTimeAdd(insertTime, duration)
if animation {
let timeScale = videoAsset.duration.timescale
let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
}
else {
layerInstruction.setOpacity(0, at: endTime)
}
arrayLayerInstructions.append(layerInstruction)
// Increase the insert time
insertTime = CMTimeAdd(insertTime, duration)
}
catch {
print("Load track error")
}
// Watermark Effect
let size = videoTrack.naturalSize
// create text Layer
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.clear.cgColor
titleLayer.contentsScale = UIScreen.main.scale
titleLayer.string = "Dummy text"
titleLayer.foregroundColor = UIColor.white.cgColor
titleLayer.font = UIFont(name: "Helvetica", size: 28)
titleLayer.shadowOpacity = 0.5
titleLayer.alignmentMode = CATextLayerAlignmentMode.center
titleLayer.frame = CGRect(x: 0, y: 50, width: size.width, height: size.height)
let videolayer = CALayer()
videolayer.backgroundColor = UIColor.clear.cgColor
// videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
// let layercomposition = AVMutableVideoComposition()
// layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
// layercomposition.renderSize = size
mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: titleLayer)
}
// Main video composition instruction
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: insertTime)
mainInstruction.layerInstructions = arrayLayerInstructions
// Main video composition
// mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mainComposition.renderSize = outputSize
// Export to file
let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
let exportURL = URL.init(fileURLWithPath: path)
// Remove file if existed
FileManager.default.removeItemIfExisted(exportURL)
// Init exporter
let exporter = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = exportURL
exporter?.outputFileType = AVFileType.mp4
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = mainComposition
// Do export
exporter?.exportAsynchronously(completionHandler: {
DispatchQueue.main.async {
self.exportDidFinish(exporter: exporter, videoURL: exportURL, completion: completion)
}
})
}
Just Change This Part
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
// let size = videoTrack.naturalSize
// create text Layer
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.clear.cgColor
titleLayer.contentsScale = UIScreen.main.scale
titleLayer.string = questions[counter]
counter = counter + 1
titleLayer.foregroundColor = UIColor.black.cgColor
titleLayer.font = UIFont(name: "Helvetica", size: 28)
titleLayer.shadowOpacity = 0.5
titleLayer.alignmentMode = CATextLayerAlignmentMode.center
titleLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
let videolayer = CALayer()
videolayer.backgroundColor = UIColor.clear.cgColor
videolayer.backgroundColor = UIColor.red.cgColor
videolayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentlayer.addSublayer(videolayer)
parentlayer.addSublayer(titleLayer)
// let layercomposition = AVMutableVideoComposition()
// layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
// layercomposition.renderSize = size
mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
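If I read that change correctly, the key point is that the video layer and the text layer both get explicit frames and are added as sublayers of a single parent layer, and it is that parent layer that gets passed to AVVideoCompositionCoreAnimationTool. In the original version the video layer had no frame and was not part of the layer tree handed to the tool, which would explain the black picture with working audio.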
I am creating a video collage app in which I merge multiple videos into different frames to make a single video. But all frames are showing the first video; the other videos are not showing after the merge. Please give me a suggestion as soon as possible. My code is below.
func newoverlay() {
// 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
let mixComposition = AVMutableComposition()
var arrayOfComposition = Array<AVMutableCompositionTrack>()
var trackInstruction = Array<AVVideoCompositionLayerInstruction>()
var videolayer = Array<CALayer>()
var i:Int = 0
let mainInstruction = AVMutableVideoCompositionInstruction()
var assetDuration:CMTime = CMTime.zero
var box = Array<CALayer>()
var arrOfIns = Array<AVMutableVideoCompositionInstruction>()
var atTimeM : CMTime = CMTimeMake(value: 0, timescale: 0)
var lastAsset: AVURLAsset!
// 2 - Create two video tracks
for videoAssetss in firstAsset {
guard var firstTrack = mixComposition.addMutableTrack(withMediaType: .video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
do {
try firstTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: (videoAssetss as? AVURLAsset)!.duration),
of: (videoAssetss as? AVURLAsset)!.tracks(withMediaType: .video)[0],
at: CMTime.zero)
var firstInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstTrack)
guard let cor = photoFrameCordinate[i] as? CGRect else{return}
if videoAssetss as! AVURLAsset != firstAsset.last as! AVURLAsset{
firstInstruction.setOpacity(0, at: assetDuration) // asseteDuration
}
let transform = CGAffineTransform(scaleX: 0.4, y:1).concatenating(CGAffineTransform(translationX: trackInstruction[i-1]., y: -cor.origin.y))
firstInstruction.setTransform(transform, at: CMTime.zero)
assetDuration = CMTimeAdd(assetDuration, (videoAssetss as! AVURLAsset).duration)
lastAsset = videoAssetss as? AVURLAsset
trackInstruction.append(firstInstruction)
i += 1
// arrayOfComposition.append(firstTrack)
} catch {
print("Failed to load first track")
return
}
}
// Watermark Effect
let width: CGFloat = widthConstraintViewForImage.constant
let height = heightConstraintViewForImage.constant
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
//Mark: Frame layer
let bglayer = CALayer()
bglayer.contents = imgViewForAdminImage.image?.cgImage
bglayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
bglayer.backgroundColor = UIColor.clear.cgColor
for index in 0..<videoURLS.count{
var videoBox = CALayer()
guard let cor = photoFrameCordinate[index] as? CGRect else{return}
videoBox.frame = CGRect(x: cor.origin.x, y: parentlayer.frame.maxY-(cor.origin.y+cor.size.height), width: cor.size.width, height: cor.size.height)
videoBox.backgroundColor = UIColor.green.cgColor
videoBox.masksToBounds = true
var vlayer = CALayer()
vlayer.contentsScale = 1.0
vlayer.contentsGravity = CALayerContentsGravity.center
vlayer.frame = CGRect(x: 0, y: 0, width:cor.size.width, height: cor.size.height)
vlayer.backgroundColor = UIColor.yellow.cgColor
videolayer.append(vlayer)
videoBox.addSublayer(vlayer)
box.append(videoBox)
bglayer.addSublayer(videoBox)
}
parentlayer.addSublayer(bglayer)
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayers: videolayer, in: parentlayer)
// 2.1
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: assetDuration)
mainInstruction.layerInstructions = trackInstruction
mainInstruction.backgroundColor = UIColor.red.cgColor
layercomposition.instructions = [mainInstruction]
// layercomposition.renderSize = CGSizeMake(videoSize.width * scale, videoSize.height * scale)
layercomposition.renderScale = 1.0
layercomposition.renderSize = CGSize(width: width, height: height)
// create new file to receive data
let dirPaths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
let docsDir = dirPaths[0] as NSString
let movieFilePath = docsDir.appendingPathComponent("result.mp4")
let movieDestinationUrl = NSURL(fileURLWithPath: movieFilePath)
// use AVAssetExportSession to export video
let assetExport = AVAssetExportSession(asset: mixComposition, presetName:AVAssetExportPresetMediumQuality)
assetExport?.outputFileType = AVFileType.mp4
assetExport?.videoComposition = layercomposition
// Check exist and remove old file
FileManager.default.removeItemIfExisted(movieDestinationUrl as URL)
assetExport?.outputURL = movieDestinationUrl as URL
assetExport?.exportAsynchronously(completionHandler: {
switch assetExport!.status {
case AVAssetExportSession.Status.failed:
print("failed")
print(assetExport?.error ?? "unknown error")
case AVAssetExportSession.Status.cancelled:
print("cancelled")
print(assetExport?.error ?? "unknown error")
default:
print("Movie complete")
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: movieDestinationUrl as URL)
}) { saved, error in
if saved {
print("Saved")
}else{
print(error!)
}
}
self.playVideo()
}
})
}
When I set the opacity of the first video to 0, the second video shows in all the frames. I think all the videos are there but sitting behind the first one; that's why only the first video shows in all the frames.
I used
let videolayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: asset)
videolayerInstruction.setCropRectangle(CGRect(), at: CMTime())
I am trying to merge images and video clips together. I kept an option to add an animation in between videos and images. There are a few options like fade-in, fade-out, rotate, slide-up, slide-down, left, right, etc. For images I am able to add the animation, but how do I add an animation for video? Specifically, when one video clip is completed and another video clip is about to start, that is where I want to add the animations. My merging functionality is working well; only the animation in between the videos is missing.
I have tried:
instruction.setOpacityRamp(fromStartOpacity: <#T##Float#>, toEndOpacity: <#T##Float#>, timeRange: <#T##CMTimeRange#>)
but this option only gives a fade-in/fade-out effect. Where, and how, do I add the other custom animation options?
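For what it's worth, the same layer-instruction API also exposes a transform ramp, which is one way a slide-style transition could be expressed. The following is only a sketch; layerInstruction, outputSize and clipEndTime are placeholder names, not identifiers from the code below.
// Slide the outgoing clip off to the left over its final second.
let slideDuration = CMTime(seconds: 1, preferredTimescale: 600)
let rampRange = CMTimeRange(start: CMTimeSubtract(clipEndTime, slideDuration),
                            duration: slideDuration)
let offscreen = CGAffineTransform(translationX: -outputSize.width, y: 0)
layerInstruction.setTransformRamp(fromStart: .identity,
                                  toEnd: offscreen,
                                  timeRange: rampRange)
Rotations and slides that need easing or key-framing generally have to go through CALayer animations on the animation tool instead, since the layer instruction only supports linear opacity, transform, and crop ramps.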
Here is my source code for the merge. There are many dependent functions in the code, but I have posted only the merging functionality. I have commented the spot with //HERE TO ADD THE ANIMATION, so you can jump directly to the point where I am trying to add the animations.
func merge(allAssets: [MovieAssetPresentable], isHDR: Bool, success: @escaping (URL?) -> (Void), progress: @escaping (CGFloat) -> (Void), failed: @escaping (String?) -> (Void)) {
cancelExport()
let defaultSize = isHDR ? self.videoOutputResolution.HD : self.videoOutputResolution.lowQuality
let videoPresetName = self.getPresetName(resolution: defaultSize)
self.mergeSuccess = success
self.mergeError = failed
self.mergeProgress = progress
let mixComposition = AVMutableComposition()
let mainInstruction = AVMutableVideoCompositionInstruction()
var layerInstructions = [AVMutableVideoCompositionLayerInstruction]()
guard let urlVideoForBackground = Bundle.main.url(forResource: "black", withExtension: "mov") else {
self.mergeError("Need black background video !")
return
}
let assetForBackground = AVAsset(url: urlVideoForBackground)
let trackForBackground = assetForBackground.tracks(withMediaType: AVMediaType.video).first
//Set output size
var outputSize = CGSize.zero
for asset in allAssets.filter({$0.assetType! == .video}) {
guard let videoAsset = asset.asset else { continue }
// Get video track
guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else { continue }
let assetInfo = self.orientationFromTransform(videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
if videoSize.height > outputSize.height {
outputSize = CGSize(width: defaultSize.width, height: ((videoSize.height / videoSize.width) * defaultSize.width))
}
}
if outputSize == CGSize.zero {
outputSize = defaultSize
}
debugPrint("OUTPUT SIZE: \(outputSize)")
let layerContentsGravity = VideoSettings.shared.fetchVideoFitClips()
var layerImages = [CALayer]()
var insertTime = CMTime.zero
var audioMixInputParameters = [AVMutableAudioMixInputParameters]()
// Init Video layer
let videoLayer = CALayer()
videoLayer.frame = CGRect.init(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
videoLayer.contentsGravity = layerContentsGravity
let parentlayer = CALayer()
parentlayer.frame = CGRect.init(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
parentlayer.addSublayer(videoLayer)
for asset in allAssets.filter({$0.assetType! == .image || $0.assetType! == .video}) {
//Video speed level
let videoSpeed = Double(asset.videoSpeedLevel!)
if asset.assetType! == .video {
//Video asset
let ast = asset.asset!
let duration = asset.endTime! - asset.beginTime! //ast.duration
//Create AVMutableCompositionTrack object
guard let track = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
self.mergeError("Unable to create track.")
continue
}
//Add original video sound track
let originalSoundTrack: AVMutableCompositionTrack?
if asset.asset!.tracks(withMediaType: .audio).count > 0 {
originalSoundTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try originalSoundTrack?.insertTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), of: ast.tracks(withMediaType: AVMediaType.audio)[0], at: insertTime)
} catch {
self.mergeError("Unable to create original audio track.")
continue
}
//Set video original sound track speed
originalSoundTrack?.scaleTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), toDuration: CMTime(value: CMTimeValue(Double(duration.value) * videoSpeed), timescale: duration.timescale))
let audioInputParams = AVMutableAudioMixInputParameters(track: originalSoundTrack)
audioInputParams.setVolume(asset.videoOriginalVolume!, at: CMTime.zero)
audioInputParams.trackID = originalSoundTrack?.trackID ?? kCMPersistentTrackID_Invalid
audioMixInputParameters.append(audioInputParams)
}
//Set time range
do {
try track.insertTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration),
of: ast.tracks(withMediaType: AVMediaType.video)[0],
at: insertTime)
} catch let err {
self.mergeError("Failed to load track: \(err.localizedDescription)")
continue
}
//Set video speed
track.scaleTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), toDuration: CMTime(value: CMTimeValue(Double(duration.value) * videoSpeed), timescale: duration.timescale))
insertTime = CMTimeAdd(insertTime, duration)
let instruction = self.videoCompositionInstruction(track, asset: ast, outputSize: outputSize)
// let instruction = videoCompositionInstructionForTrack(track: t, asset: ast, standardSize: outputSize, atTime: insertTime)
instruction.setOpacity(0.0, at: insertTime)
//HERE TO ADD THE ANIMATION
layerInstructions.append(instruction)
} else {
//Image data
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let defaultImageTime = CMTimeGetSeconds(asset.endTime!) - CMTimeGetSeconds(asset.beginTime!)
let duration = CMTime.init(seconds:defaultImageTime, preferredTimescale: assetForBackground.duration.timescale)
do {
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: duration),
of: trackForBackground!,
at: insertTime)
}
catch {
self.mergeError("Background time range error")
}
guard let image = UIImage(data: asset.imageData!) else { continue }
// Create Image layer
let imageLayer = CALayer()
imageLayer.frame = CGRect.init(origin: CGPoint.zero, size: outputSize)
imageLayer.contents = image.cgImage
imageLayer.opacity = 0
imageLayer.contentsGravity = layerContentsGravity
self.setOrientation(image: image, onLayer: imageLayer)
// Add Fade in & Fade out animation
let fadeInAnimation = CABasicAnimation.init(keyPath: "opacity")
fadeInAnimation.duration = 1
fadeInAnimation.fromValue = NSNumber(value: 0)
fadeInAnimation.toValue = NSNumber(value: 1)
fadeInAnimation.isRemovedOnCompletion = false
fadeInAnimation.beginTime = CMTimeGetSeconds(insertTime) == 0 ? 0.05: CMTimeGetSeconds(insertTime)
fadeInAnimation.fillMode = CAMediaTimingFillMode.forwards
imageLayer.add(fadeInAnimation, forKey: "opacityIN")
let fadeOutAnimation = CABasicAnimation.init(keyPath: "opacity")
fadeOutAnimation.duration = 1
fadeOutAnimation.fromValue = NSNumber(value: 1)
fadeOutAnimation.toValue = NSNumber(value: 0)
fadeOutAnimation.isRemovedOnCompletion = false
fadeOutAnimation.beginTime = CMTimeGetSeconds(CMTimeAdd(insertTime, duration))
fadeOutAnimation.fillMode = CAMediaTimingFillMode.forwards
imageLayer.add(fadeOutAnimation, forKey: "opacityOUT")
layerImages.append(imageLayer)
// Increase the insert time
insertTime = CMTimeAdd(insertTime, duration)
}
}
// Add Image layers
for layer in layerImages {
parentlayer.addSublayer(layer)
}
//Add Water mark if Subscription not activated
if !AddManager.shared.hasActiveSubscription {
let imglogo = UIImage(named: "watermark")
let waterMarklayer = CALayer()
waterMarklayer.contents = imglogo?.cgImage
let sizeOfWaterMark = Utility.getWaterMarkSizeWithVideoSize(videoSize: outputSize, defaultSize: waterMarkSize)
debugPrint("sizeOfWaterMark=\(sizeOfWaterMark)")
waterMarklayer.frame = CGRect(x: outputSize.width - (sizeOfWaterMark.width+10), y: 5, width: sizeOfWaterMark.width, height: sizeOfWaterMark.height)
waterMarklayer.contentsGravity = .resizeAspect
waterMarklayer.opacity = 1.0
parentlayer.addSublayer(waterMarklayer)
}
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: insertTime)
mainInstruction.layerInstructions = layerInstructions
mainInstruction.backgroundColor = VideoSettings.shared.fetchVideoBackgroundColor().color.cgColor
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mainComposition.renderSize = outputSize
mainComposition.renderScale = 1.0
mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentlayer)
for audioAsset in allAssets.filter({$0.assetType! == .audio}) {
//NOTE: If you have requirement to increase/ decrease audio fade-in fade-out effect time, please modify fadeInFadeOutEffectTiming variable as second
let fadeInFadeOutEffectTiming = Double(3) //seconds
let volumeLevel = audioAsset.audioVolumeLevel!
let isFadeIn = audioAsset.audioFadeInEffect!
let isFadeOut = audioAsset.audioFadeOutEffect!
var audioBeginTime = audioAsset.beginTime!
var audioEndTime = audioAsset.endTime!
var audioTrackTime = audioAsset.audioTrackStartTime!
var trimmedAudioDuration = CMTimeSubtract(audioEndTime, audioBeginTime)
//If audio starting position (second) is greater than equals to zero (in order to video length)
if CMTimeGetSeconds(CMTimeAdd(audioTrackTime, audioBeginTime)) >= 0 {
//If the audio starting position (seconds) is beyond the video length, e.g. the total video length is 20 seconds but the audio starts at 24 seconds, we should not add the audio
if CMTimeCompare(CMTimeAdd(audioTrackTime, audioBeginTime), insertTime) == 1 {
trimmedAudioDuration = CMTime.zero
} else {
//If audio start position (seconds) + crop length is exceed total video length, we should add only the part within the video
if CMTimeCompare(CMTimeAdd(CMTimeAdd(audioTrackTime, audioBeginTime), trimmedAudioDuration), insertTime) == 1 {
audioTrackTime = CMTimeAdd(audioTrackTime, audioBeginTime)
trimmedAudioDuration = CMTimeSubtract(insertTime, audioTrackTime)
} else {
audioTrackTime = CMTimeAdd(audioTrackTime, audioBeginTime)
}
}
}
//If audio start time is in negative (second)
else {
//If audio crop length is in negative (second)
if CMTimeCompare(CMTimeAdd(CMTimeAdd(audioTrackTime, audioBeginTime), trimmedAudioDuration), CMTime.zero) == -1 {
trimmedAudioDuration = CMTime.zero
} else {
audioBeginTime = CMTime(seconds: abs(CMTimeGetSeconds(audioTrackTime)), preferredTimescale: audioTrackTime.timescale)
audioTrackTime = CMTime.zero
trimmedAudioDuration = CMTimeSubtract(audioEndTime, audioBeginTime)
if CMTimeCompare(trimmedAudioDuration, insertTime) == 1 {
trimmedAudioDuration = insertTime
}
}
}
if trimmedAudioDuration != CMTime.zero {
audioEndTime = CMTimeAdd(audioTrackTime, trimmedAudioDuration)
let audioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try audioTrack?.insertTimeRange(CMTimeRangeMake(start: audioBeginTime , duration: trimmedAudioDuration),
of: audioAsset.asset!.tracks(withMediaType: AVMediaType.audio)[0] ,
at: audioTrackTime)
let audioInputParams = AVMutableAudioMixInputParameters(track: audioTrack)
var effectTime = CMTime(seconds: fadeInFadeOutEffectTiming, preferredTimescale: 600)
if CMTimeCompare(trimmedAudioDuration, CMTimeMultiply(effectTime, multiplier: 2)) == -1 {
effectTime = CMTime(seconds: CMTimeGetSeconds(trimmedAudioDuration) / 2, preferredTimescale: 600)
}
//Fade in effect
audioInputParams.setVolumeRamp(fromStartVolume: isFadeIn ? 0 : volumeLevel, toEndVolume: volumeLevel, timeRange: CMTimeRange(start: audioTrackTime, duration: effectTime))
//Fade out effect
audioInputParams.setVolumeRamp(fromStartVolume: volumeLevel, toEndVolume: isFadeOut ? 0 : volumeLevel, timeRange: CMTimeRange(start: CMTimeSubtract(audioEndTime, effectTime), duration: effectTime))
audioInputParams.trackID = audioTrack?.trackID ?? kCMPersistentTrackID_Invalid
audioMixInputParameters.append(audioInputParams)
} catch {
print("Failed to load Audio track")
}
}
}
// 4 - Get path
guard let url = Utility.createFileAtDocumentDirectory(name: "mergeVideo-\(Date().timeIntervalSince1970).mp4") else {
debugPrint("Unable to file at document directory")
return
}
// 5 - Create Exporter
self.exporter = AVAssetExportSession(asset: mixComposition, presetName: videoPresetName)
guard let exp = self.exporter else {
debugPrint("Unable to export.")
return
}
let audioMix = AVMutableAudioMix()
audioMix.inputParameters = audioMixInputParameters
exp.outputURL = url
exp.outputFileType = AVFileType.mp4
exp.shouldOptimizeForNetworkUse = true
exp.videoComposition = mainComposition
exp.audioMix = audioMix
//self.viewPieProgress.setProgress(0.0, animated: false)
//viewPieProgress.isHidden = isHDR
//timer for progress
self.timer = Timer.scheduledTimer(timeInterval: 1.0, target: self, selector: #selector(self.updateExportingProgress(timer:)), userInfo: exp, repeats: true)
// 6 - Perform the Export
exp.exportAsynchronously() {
DispatchQueue.main.async {
self.exportDidFinish(exp)
}
}
}
I have tried many options, but nothing meets my requirement. Please help me out.
If you need any other information from me, please feel free to comment on this post.
Thanks in advance.
I am trying to make a video editor in which I have done the following:
1) Collected photos from the user's gallery.
2) Converted that array of photos into a video.
3) Added animations to the video.
4) Played the video.
The code I have written for each step is as follows:
1) Collected photos from the user's gallery.
func openImagePicker(){
let customColor = UIColor.init(red: 64.0/255.0, green: 0.0, blue: 144.0/255.0, alpha: 1.0)
let customCameraColor = UIColor.init(red: 86.0/255.0, green: 1.0/255.0, blue: 236.0/255.0, alpha: 1.0)
pickerViewController.numberOfPhotoToSelect = 5
pickerViewController.theme.titleLabelTextColor = UIColor.white
pickerViewController.theme.navigationBarBackgroundColor = customColor
pickerViewController.theme.tintColor = UIColor.white
pickerViewController.theme.orderTintColor = customCameraColor
pickerViewController.theme.cameraVeilColor = customCameraColor
pickerViewController.theme.cameraIconColor = UIColor.white
pickerViewController.theme.statusBarStyle = .lightContent
self.yms_presentCustomAlbumPhotoView(pickerViewController, delegate: self)
}
func photoPickerViewController(_ picker: YMSPhotoPickerViewController!, didFinishPickingImages photoAssets: [PHAsset]!) {
picker.dismiss(animated: true) {
self.selectedImageArray = NSMutableArray()
let imageManager = PHImageManager.init()
let options = PHImageRequestOptions.init()
options.deliveryMode = .highQualityFormat
options.resizeMode = .exact
options.isSynchronous = true
for asset: PHAsset in photoAssets
{
let targetSize = CGSize(width:self.view.frame.size.width
, height:self.view.frame.size.width)
imageManager.requestImage(for: asset, targetSize:targetSize, contentMode: .aspectFill, options: options, resultHandler: { (image, info) in
self.selectedImageArray.add(image!)
})
}
let imageVideaMakerController = self.storyboard?.instantiateViewController(withIdentifier: "VideoEditorController") as! VideoEditorController
imageVideaMakerController.selectedImageArray = self.selectedImageArray as! [UIImage]
self.navigationController!.pushViewController(imageVideaMakerController, animated: true)
}
}
2) Converted that array of photos into a video.
override func viewDidAppear(_ animated: Bool) {
self.navigationController?.navigationBar.isHidden = false
setUpInitialView()
collectionView.reloadData()
}
//MARK:- Custom Methods
func setUpInitialView(){
let loadingNotification = MBProgressHUD.showAdded(to: view, animated: true)
loadingNotification.mode = MBProgressHUDMode.indeterminate
loadingNotification.label.text = "Loading"
buildVideoFromImageArray()
//filterScrollContents()
}
func buildVideoFromImageArray() {
imageArrayToVideoURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/video1.MP4")
removeFileAtURLIfExists(url: imageArrayToVideoURL)
guard let videoWriter = try? AVAssetWriter(outputURL: imageArrayToVideoURL as URL, fileType: AVFileType.mp4) else {
fatalError("AVAssetWriter error")
}
let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width)), AVVideoHeightKey : NSNumber(value: Float(outputSize.height))] as [String : Any]
guard videoWriter.canApply(outputSettings: outputSettings, forMediaType: AVMediaType.video) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: outputSettings)
let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(value: Float(outputSize.width)), kCVPixelBufferHeightKey as String: NSNumber(value: Float(outputSize.height))]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if videoWriter.canAdd(videoWriterInput) {
videoWriter.add(videoWriterInput)
}
if videoWriter.startWriting() {
let zeroTime = CMTimeMake(Int64(imagesPerSecond),Int32(1))
videoWriter.startSession(atSourceTime: zeroTime)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = DispatchQueue(label: "mediaInputQueue")
videoWriterInput.requestMediaDataWhenReady(on: media_queue, using: { () -> Void in
let fps: Int32 = 1
let framePerSecond: Int64 = Int64(self.imagesPerSecond)
let frameDuration = CMTimeMake(Int64(self.imagesPerSecond), fps)
var frameCount: Int64 = 0
var appendSucceeded = true
var newImageArr = self.selectedImageArray
while (!newImageArr.isEmpty) {
if (videoWriterInput.isReadyForMoreMediaData) {
let nextPhoto = newImageArr.remove(at: 0)
let lastFrameTime = CMTimeMake(frameCount * framePerSecond, fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer, status == 0 {
let managedPixelBuffer = pixelBuffer
CVPixelBufferLockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: data, width: Int(self.outputSize.width), height: Int(self.outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(managedPixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
context!.clear(CGRect(x: 0, y: 0, width: CGFloat(self.outputSize.width), height: CGFloat(self.outputSize.height)))
let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
//let aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
context?.draw(nextPhoto.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
CVPixelBufferUnlockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
} else {
print("Failed to allocate pixel buffer")
appendSucceeded = false
}
}
if !appendSucceeded {
break
}
frameCount += 1
}
videoWriterInput.markAsFinished()
videoWriter.finishWriting { () -> Void in
print("-----video1 url = \(self.imageArrayToVideoURL)")
self.globalVideoURL = self.imageArrayToVideoURL
self.asset = AVAsset.init(url:self.imageArrayToVideoURL as URL)
self.exportVideoWithAnimation()
}
})
}
}
3) Added animations to the video.
func exportVideoWithAnimation() {
let composition = AVMutableComposition()
let track = self.asset?.tracks(withMediaType: AVMediaType.video)
let videoTrack:AVAssetTrack = track![0] as AVAssetTrack
let timerange = CMTimeRangeMake(kCMTimeZero, (self.asset?.duration)!)
let compositionVideoTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID())!
do {
try compositionVideoTrack.insertTimeRange(timerange, of: videoTrack, at: kCMTimeZero)
compositionVideoTrack.preferredTransform = videoTrack.preferredTransform
} catch {
print(error)
}
//if your video has sound, you don’t need to check this
if self.audioIsEnabled {
let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID())!
for audioTrack in (self.asset?.tracks(withMediaType: AVMediaType.audio))! {
do {
try compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
} catch {
print(error)
}
}
}
let size = videoTrack.naturalSize
let videolayer = CALayer()
videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
parentlayer.addSublayer(videolayer)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//this is the animation part
var time = [0.00001, 3, 6, 9, 12] //I used this time array to determine the start time of each frame's animation. Each frame stays for 3 secs; that's why their difference is 3
var imgarray = self.selectedImageArray
for image in 0..<self.selectedImageArray.count {
let nextPhoto = imgarray[image]
let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
let aspectRatio = min(horizontalRatio, verticalRatio)
let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
let blackLayer = CALayer()
///#7. opacity(1->0)(top->bottom)///
//#3. top->bottom///
//MARK:- Animations==================================
///#1. left->right///
if(self.globalSelectedTransitionTag == 0){
blackLayer.frame = CGRect(x: -videoTrack.naturalSize.width, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
blackLayer.backgroundColor = UIColor.black.cgColor
let imageLayer = CALayer()
imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
imageLayer.contents = imgarray[image].cgImage
blackLayer.addSublayer(imageLayer)
let animation = CABasicAnimation()
animation.keyPath = "position.x"
animation.fromValue = -videoTrack.naturalSize.width
animation.toValue = 5 * (videoTrack.naturalSize.width)
animation.duration = 5
animation.beginTime = CFTimeInterval(time[image])
animation.fillMode = kCAFillModeForwards
animation.isRemovedOnCompletion = false
blackLayer.add(animation, forKey: "opacity")
}
parentlayer.addSublayer(blackLayer)
}
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(1, 30)
layercomposition.renderSize = size
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
let videotrack = composition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
instruction.layerInstructions = [layerinstruction]
layercomposition.instructions = [instruction]
if(fromTransition){
self.globalrVideoComposition = layercomposition
}
let animatedVideoURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/video2.mp4")
self.removeFileAtURLIfExists(url: animatedVideoURL)
guard let assetExport = AVAssetExportSession(asset: composition, presetName:AVAssetExportPresetHighestQuality) else {return}
assetExport.videoComposition = self.globalrVideoComposition
assetExport.outputFileType = AVFileType.mp4
assetExport.outputURL = animatedVideoURL as URL
print("****** animatedVideoURL *****",animatedVideoURL)
assetExport.exportAsynchronously(completionHandler: {
switch assetExport.status{
case AVAssetExportSessionStatus.failed:
print("failed \(String(describing: assetExport.error))")
case AVAssetExportSessionStatus.cancelled:
print("cancelled \(String(describing: assetExport.error))")
default:
print("Exported")
if(self.fromPlayVideo){
DispatchQueue.main.async {
self.globalVideoURL = animatedVideoURL; self.playVideoInPlayer(animatedVideoURL: animatedVideoURL as URL)
}
}else if(self.fromSave){
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: animatedVideoURL as URL)
print("222222 animatedVideoURL",animatedVideoURL)
}) { saved, error in
DispatchQueue.main.async {
MBProgressHUD.hideAllHUDs(for: self.view, animated: true)
}
if saved {
let alertController = UIAlertController(title: "Your video was successfully saved", message: nil, preferredStyle: .alert)
let defaultAction = UIAlertAction(title: "OK", style: .default, handler: nil)
alertController.addAction(defaultAction)
print("The task is done,enjoy now!")
self.present(alertController, animated: true, completion: nil)
}else{
}
}
}
}
})
}
4) Played the video.
func playVideoInPlayer(animatedVideoURL:URL){
if(globalFilterName != nil){
self.asset = AVAsset.init(url:animatedVideoURL as URL)
let newPlayerItem = AVPlayerItem.init(asset:self.asset);
newPlayerItem.videoComposition=globalrVideoComposition
self.player = AVPlayer.init(playerItem:newPlayerItem)
}else{
let newPlayerItem = AVPlayerItem.init(url:animatedVideoURL)
self.player = AVPlayer.init(playerItem:newPlayerItem)
}
NotificationCenter.default.addObserver(self, selector: #selector(self.finishedPlaying(_:)), name: NSNotification.Name.AVPlayerItemDidPlayToEndTime, object:nil)
self.playerLayer = AVPlayerLayer.init(player:self.player)
let width: CGFloat = self.videoContainerView.frame.size.width
let height: CGFloat = self.videoContainerView.frame.size.height
self.playerLayer.frame = CGRect(x: 0.0, y:0, width: width, height: height)
self.playerLayer.backgroundColor = UIColor.black.cgColor
self.playerLayer.videoGravity = .resizeAspectFill
self.videoContainerView.layer.addSublayer( self.playerLayer)
self.playPauseBtn.isHidden = false
self.playPauseBtn.setImage(UIImage.init(named:"pause"), for:.normal)
DispatchQueue.main.async {
MBProgressHUD.hideAllHUDs(for:self.view, animated:true)
self.player.play()
}
}
This whole flow is working fine; the only problem is that it takes a lot of time before the video plays, because of all the setup (converting the images to a video and adding the animations).
Please help me reduce this time, so that users don't have to wait so long between picking their images and seeing the video play.
Any help or guidance would be highly appreciated. Thanks in advance!
I can merge two videos, but when I watch the final result, the duration of the video is correct yet it only plays the first video; for the duration of the second video it shows a static image.
For example:
Two videos of 6 seconds each make a 12-second video. I can watch it correctly up to 6 seconds; after that the picture freezes.
func mergeVideos(videoMergedUrl:URL) {
let mainComposition = AVMutableVideoComposition()
var startDuration:CMTime = kCMTimeZero
let mainInstruction = AVMutableVideoCompositionInstruction()
let mixComposition = AVMutableComposition()
var allVideoInstruction = [AVMutableVideoCompositionLayerInstruction]()
for i:Int in 0 ..< listSegment.count {
let currentAsset = listSegment[i]
let currentTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
try currentTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, currentAsset.duration), of: currentAsset.tracks(withMediaType: AVMediaType.video)[0], at: startDuration)
let currentInstruction:AVMutableVideoCompositionLayerInstruction = videoCompositionInstructionForTrack(currentTrack!, asset: currentAsset)
//currentInstruction.setOpacityRamp(fromStartOpacity: 0.0, toEndOpacity: 1.0, timeRange:CMTimeRangeMake(startDuration, CMTimeMake(1, 1)))
/*if i != assets.count - 1 {
//Sets Fade out effect at the end of the video.
currentInstruction.setOpacityRamp(fromStartOpacity: 1.0,
toEndOpacity: 0.0,
timeRange:CMTimeRangeMake(
CMTimeSubtract(
CMTimeAdd(currentAsset.duration, startDuration),
CMTimeMake(1, 1)),
CMTimeMake(2, 1)))
}*/
/*let transform:CGAffineTransform = currentTrack!.preferredTransform
if orientationFromTransform(transform).isPortrait {
let outputSize:CGSize = CGSize(width: 640, height: 480)
let horizontalRatio = CGFloat(outputSize.width) / (currentTrack?.naturalSize.width)!
let verticalRatio = CGFloat(outputSize.height) / (currentTrack?.naturalSize.height)!
let scaleToFitRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
let FirstAssetScaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
if currentAsset.g_orientation == .landscapeLeft {
let rotation = CGAffineTransform(rotationAngle: .pi)
let translateToCenter = CGAffineTransform(translationX: 640, y: 480)
let mixedTransform = rotation.concatenating(translateToCenter)
currentInstruction.setTransform((currentTrack?.preferredTransform.concatenating(FirstAssetScaleFactor).concatenating(mixedTransform))!, at: kCMTimeZero)
} else {
currentInstruction.setTransform((currentTrack?.preferredTransform.concatenating(FirstAssetScaleFactor))!, at: kCMTimeZero)
}
}*/
allVideoInstruction.append(currentInstruction) //Add video instruction in Instructions Array.
startDuration = CMTimeAdd(startDuration, currentAsset.duration)
} catch _ {
print("ERROR_LOADING_VIDEO")
}
}
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, startDuration)
mainInstruction.layerInstructions = allVideoInstruction
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(1, 30)
mainComposition.renderSize = CGSize(width: 640, height: 480)
let manager = FileManager.default
_ = try? manager.removeItem(at: videoMergedUrl)
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset640x480) else { return }
exporter.outputURL = videoMergedUrl
exporter.outputFileType = AVFileType.mp4
exporter.shouldOptimizeForNetworkUse = false
exporter.videoComposition = mainComposition
// Perform the Export
exporter.exportAsynchronously() {
DispatchQueue.main.async {
self.exportDidFinish(exporter)
}
}
}
I had the same problem after following this tutorial. I fixed it by adding clips to the composition using AVMutableComposition.insertTimeRange instead of addMutableTrack.
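A rough sketch of that approach, assuming a hypothetical clipURLs array of file URLs:
import AVFoundation

func makeMergedComposition(from clipURLs: [URL]) throws -> AVMutableComposition {
    let composition = AVMutableComposition()
    var cursor = CMTime.zero
    for url in clipURLs {
        let asset = AVAsset(url: url)
        // insertTimeRange(_:of:at:) copies every track of the asset into the
        // composition at `cursor`, so each clip actually plays back in sequence.
        try composition.insertTimeRange(CMTimeRange(start: .zero, duration: asset.duration),
                                        of: asset,
                                        at: cursor)
        cursor = CMTimeAdd(cursor, asset.duration)
    }
    return composition
}
With the composition built this way the exporter can be pointed at it directly, and the per-clip layer instructions are only needed if you want transforms or opacity ramps on top (assuming the clips share the same orientation and render size).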