let asset = AVURLAsset(url: videoURL)
let composition = AVMutableComposition()
guard
let compositionTrack = composition.addMutableTrack(
withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
let assetTrack = asset.tracks(withMediaType: .video).first
else {
print("Something is wrong with the asset.")
onComplete(nil)
return
}
do {
let timeRange = CMTimeRange(start: .zero, duration: asset.duration)
try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
if let audioAssetTrack = asset.tracks(withMediaType: .audio).first,
let compositionAudioTrack = composition.addMutableTrack(
withMediaType: .audio,
preferredTrackID: kCMPersistentTrackID_Invalid) {
try compositionAudioTrack.insertTimeRange(
timeRange,
of: audioAssetTrack,
at: .zero)
}
} catch {
print(error)
onComplete(nil)
return
}
compositionTrack.preferredTransform = assetTrack.preferredTransform
let videoInfo = orientation(from: assetTrack.preferredTransform)
let videoSize: CGSize
if videoInfo.isPortrait {
videoSize = CGSize(
width: assetTrack.naturalSize.height,
height: assetTrack.naturalSize.width)
} else {
videoSize = assetTrack.naturalSize
}
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: videoSize)
let textLayer = CATextLayer()
textLayer.string = mainTextview.text
textLayer.shouldRasterize = true
textLayer.rasterizationScale = UIScreen.main.scale
//textLayer.backgroundColor = UIColor.black.cgColor
switch textAlignment {
case .center:
textLayer.alignmentMode = .center
case .justified:
textLayer.alignmentMode = .justified
case .left:
textLayer.alignmentMode = .left
case .right:
textLayer.alignmentMode = .right
default:
textLayer.alignmentMode = .center
}
textLayer.isWrapped = true
textLayer.foregroundColor = mainTextview.textColor?.cgColor
textLayer.fontSize = 70
switch textWeight {
case .bold:
textLayer.font = UIFont.systemFont(ofSize: 0, weight: .bold)
case .light:
textLayer.font = UIFont.systemFont(ofSize: 0, weight: .light)
case .regular:
textLayer.font = UIFont.systemFont(ofSize: 0, weight: .regular)
case .medium:
textLayer.font = UIFont.systemFont(ofSize: 0, weight: .medium)
}
switch textviewPosition {
// Note: the layer tree here is rendered with a bottom-left origin,
// so larger y values sit higher in the output video.
case .Center:
textLayer.frame = CGRect(x: 0, y: videoSize.height * 0.30, width: videoSize.width, height: 500)
case .Top:
textLayer.frame = CGRect(x: 0, y: videoSize.height * 0.60, width: videoSize.width, height: 500)
case .Bottom:
textLayer.frame = CGRect(x: 10, y: 0, width: videoSize.width, height: 500)
}
textLayer.displayIfNeeded()
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(textLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = videoSize
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
// The tool draws each video frame into videoLayer, then renders the whole
// outputLayer tree (video plus text) as the composed output.
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
postProcessingAsVideoLayer: videoLayer,
in: outputLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(
start: .zero,
duration: composition.duration)
videoComposition.instructions = [instruction]
let layerInstruction = compositionLayerInstruction(
for: compositionTrack,
assetTrack: assetTrack)
instruction.layerInstructions = [layerInstruction]
guard let export = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPresetHighestQuality)
else {
//print("Cannot create export session.")
onComplete(nil)
return
}
let videoName = UUID().uuidString
let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
.appendingPathComponent(videoName)
.appendingPathExtension(".mp4")
export.videoComposition = videoComposition
export.outputFileType = .mp4
export.outputURL = exportURL
export.exportAsynchronously {
DispatchQueue.main.async {
switch export.status {
case .completed:
onComplete(exportURL)
default:
print("Something went wrong during export.")
print(export.error ?? "unknown error")
onComplete(nil)
}
}
}
}
This is the function I use to overlay the text on the video (videoURL). I pass in the URL of the video, which is saved in the Documents directory, and use a text view the user has already filled in to retrieve the text, weight, font, and alignment for the overlay. The majority of the time this process takes around 15 seconds, which is a long time for users to wait. Thanks.
Exporting video is labor-intensive and takes significant time, and there's nothing you can do about that.
However, the process is asynchronous, so there is no need to make the user "wait". Also, the exporter exposes a progress value, so you can easily show progress as a psychological trick (it is amazing how time flies when you're watching a progress indicator).
Thanks for your response. Under where I initiate the AVAssetExportSession, I added a timer:
self.exportTimer = Timer.scheduledTimer(timeInterval: 0.1, target: self, selector: #selector(displayData), userInfo: nil, repeats: true)
Now I can correctly update the display using this method:
@objc private func displayData() {
    self.progressBar.progress = exportSession.progress
    if self.progressBar.progress > 0.99 {
        self.exportTimer.invalidate()
    }
}
I also added invalidate() to the export.status switch statement for when there is an error.
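For completeness, a minimal sketch of how the polling timer and the export completion fit together, assuming exportSession, exportTimer, and progressBar are properties on the view controller (as in the snippets above):

// Sketch: drive a UIProgressView from AVAssetExportSession.progress.
exportTimer = Timer.scheduledTimer(withTimeInterval: 0.1, repeats: true) { [weak self] _ in
    guard let self = self else { return }
    self.progressBar.progress = self.exportSession.progress
}
exportSession.exportAsynchronously {
    DispatchQueue.main.async {
        self.exportTimer.invalidate() // stop polling on success and failure alike
        // inspect self.exportSession.status here
    }
}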
Related
I have been trying to get AVAssetExportSession to save a video with animations overlaid, but am running into a problem where the exported video has a runtime of 0:00.
As I understand it, the AVAssetExportSession APIs need a base video, which can be a black video that runs for a few seconds. You can then write over and cover up as much of that base video as you'd like using CALayers. If the animations run over the runtime of the base video, the exported video will extend itself to contain the runtime of the animations.
The base video is 5 seconds long, yet the exported video is 0:00. Interestingly, the exported video does contain the black background from the source video and the very first frame of the animation (the layers).
Has anyone run into this before and know of a good solution/what I'm missing?
Code for context...
@objc func saveMovie() {
print("save movie")
self.selectedFrame = CGRect(x: 0.0, y: 0.0, width: 1080.0, height: 1920.0)
self.selectedBounds = CGRect(x: 0.0, y: 0.0, width: 1080.0, height: 1920.0)
let mainLayer = CALayer()
mainLayer.frame = CGRect(x: 0.0, y: 0.0, width: 1080.0, height: 1920.0)
let videoLayer = CALayer()
videoLayer.frame = CGRect(x: 0.0, y: 0.0, width: 1080.0, height: 1920.0)
let animationLayer = CALayer()
animationLayer.frame = CGRect(x: 0.0, y: 0.0, width: 1080.0, height: 1920.0)
animationLayer.addSublayer(makeBackground())
animationLayer.addSublayer(makeHeadingTextLayer())
mainLayer.addSublayer(videoLayer)
mainLayer.addSublayer(animationLayer)
if let sourceVideoUrl = Bundle.main.url(
forResource: "SourceVideo",
withExtension: "mp4"
) {
// Load Video Asset to Use As Base
print(sourceVideoUrl.absoluteString)
let baseVideoAsset = AVURLAsset(url: sourceVideoUrl)
// Create Composition for the video to live in
let composition = AVMutableComposition()
composition.naturalSize = CGSize(width: 1080.0, height: 1920.0)
guard
let compositionTrack = composition.addMutableTrack(
withMediaType: AVMediaType.video,
preferredTrackID: kCMPersistentTrackID_Invalid
),
let assetTrack = baseVideoAsset.tracks(
withMediaType: .video
).first
else {
print("something is wrong with the asset")
return
}
do {
// this crashes, so just hard coding 5 seconds right now
//let baseVideoDuration = try await baseVideoAsset.load(.duration)
//print("\(baseVideoDuration)")
let timeRange = CMTimeRangeMake(
start: .zero,
duration: CMTime(value: 5, timescale: 30)
)
try compositionTrack.insertTimeRange(
timeRange,
of: assetTrack,
at: .zero
)
} catch {
print("issue with video track insert time range")
}
compositionTrack.preferredTransform = assetTrack.preferredTransform
let videoSize = CGSize(width: 1080.0, height: 1920.0)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = videoSize
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
postProcessingAsVideoLayer: videoLayer,
in: mainLayer
)
let videoCompositionInstruction = AVMutableVideoCompositionInstruction()
videoCompositionInstruction.timeRange = CMTimeRangeMake(
start: CMTime.zero,
duration: CMTimeMake(value: 10, timescale: 30)
)
videoComposition.instructions = [videoCompositionInstruction]
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: assetTrack)
let transform = assetTrack.preferredTransform
layerInstruction.setTransform(transform, at: .zero)
videoCompositionInstruction.layerInstructions = [layerInstruction]
guard let exporter = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPreset1920x1080
) else {
print("failed to create exporter")
return
}
let videoName = UUID().uuidString
let exportUrl = URL(fileURLWithPath: NSTemporaryDirectory())
.appendingPathComponent(videoName)
.appendingPathExtension("mov")
exporter.videoComposition = videoComposition
exporter.outputFileType = .mov
exporter.outputURL = exportUrl
exporter.timeRange = CMTimeRangeMake(start: .zero, duration: CMTimeMake(value: 10, timescale: 30))
NSLog("Composition Duration: %ld seconds", lround(CMTimeGetSeconds(composition.duration)));
exporter.exportAsynchronously {
DispatchQueue.main.async {
switch exporter.status {
case .failed:
print("failed to export")
print(exporter.error ?? "no error")
case .cancelled:
print("canceled")
case .completed:
print("completed")
UISaveVideoAtPathToSavedPhotosAlbum(
exportUrl.relativePath,
self,
nil,
nil
)
case .unknown:
print("unknown status")
default:
break
}
}
}
}
}
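This question never got an answer in the thread, but for what it's worth, one likely culprit: CMTime(value:timescale:) measures value in units of 1/timescale seconds, so CMTime(value: 5, timescale: 30) is 5/30 of a second (about 0.17 s), not 5 seconds, and the CMTimeMake(value: 10, timescale: 30) used for the instruction and export time ranges is a third of a second, short enough to read as 0:00 and to show only the first frame of the animation. A sketch of times that actually span 5 and 10 seconds:

// CMTime counts `value` in 1/timescale-second units.
let fiveSeconds = CMTime(value: 150, timescale: 30)           // 150/30 = 5 s
let tenSeconds  = CMTime(seconds: 10, preferredTimescale: 30) // clearer alternative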
I am recording 3 videos and merging them into one, and I need text on all of the videos. I am trying to do that but with no success: the video goes black, while the audio comes out fine. When I do it without the CALayer, everything is good. I need help.
Is there something wrong with the code, or am I doing it the wrong way? Please guide me.
private func doMerge(arrayVideos: [AVAsset], animation: Bool, completion: @escaping Completion) -> Void {
var insertTime = CMTime.zero
var arrayLayerInstructions:[AVMutableVideoCompositionLayerInstruction] = []
var outputSize = CGSize.init(width: 0, height: 0)
// Determine video output size
for videoAsset in arrayVideos {
let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
if videoSize.height > outputSize.height {
outputSize = videoSize
}
}
if outputSize.width == 0 || outputSize.height == 0 {
outputSize = defaultSize
}
// Silence sound (in case the video has no sound track)
// let silenceURL = Bundle.main.url(forResource: "silence", withExtension: "mp3")
// let silenceAsset = AVAsset(url:silenceURL!)
// let silenceSoundTrack = silenceAsset.tracks(withMediaType: AVMediaType.audio).first
// Init composition
let mixComposition = AVMutableComposition.init()
for videoAsset in arrayVideos {
// Get video track
guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else { continue }
// Get audio track
var audioTrack:AVAssetTrack?
if videoAsset.tracks(withMediaType: AVMediaType.audio).count > 0 {
audioTrack = videoAsset.tracks(withMediaType: AVMediaType.audio).first
}
else {
// audioTrack = silenceSoundTrack
}
// Init video & audio composition track
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
let startTime = CMTime.zero
let duration = videoAsset.duration
// Add video track to video composition at specific time
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration),
of: videoTrack,
at: insertTime)
// Add audio track to audio composition at specific time
if let audioTrack = audioTrack {
try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration),
of: audioTrack,
at: insertTime)
}
// Add instruction for video track
let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!,
asset: videoAsset,
standardSize: outputSize,
atTime: insertTime)
// Hide video track before changing to new track
let endTime = CMTimeAdd(insertTime, duration)
if animation {
let timeScale = videoAsset.duration.timescale
let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
}
else {
layerInstruction.setOpacity(0, at: endTime)
}
arrayLayerInstructions.append(layerInstruction)
// Increase the insert time
insertTime = CMTimeAdd(insertTime, duration)
}
catch {
print("Load track error")
}
// Watermark Effect
let size = videoTrack.naturalSize
// create text Layer
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.clear.cgColor
titleLayer.contentsScale = UIScreen.main.scale
titleLayer.string = "Dummy text"
titleLayer.foregroundColor = UIColor.white.cgColor
titleLayer.font = UIFont(name: "Helvetica", size: 28)
titleLayer.shadowOpacity = 0.5
titleLayer.alignmentMode = CATextLayerAlignmentMode.center
titleLayer.frame = CGRect(x: 0, y: 50, width: size.width, height: size.height)
let videolayer = CALayer()
videolayer.backgroundColor = UIColor.clear.cgColor
// videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
// let layercomposition = AVMutableVideoComposition()
// layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
// layercomposition.renderSize = size
mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: titleLayer)
}
// Main video composition instruction
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: insertTime)
mainInstruction.layerInstructions = arrayLayerInstructions
// Main video composition
// mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mainComposition.renderSize = outputSize
// Export to file
let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
let exportURL = URL.init(fileURLWithPath: path)
// Remove file if existed
FileManager.default.removeItemIfExisted(exportURL)
// Init exporter
let exporter = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = exportURL
exporter?.outputFileType = AVFileType.mp4
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = mainComposition
// Do export
exporter?.exportAsynchronously(completionHandler: {
DispatchQueue.main.async {
self.exportDidFinish(exporter: exporter, videoURL: exportURL, completion: completion)
}
})
}
Just change this part. The key differences: the video layer gets a real frame, and both the video layer and the title layer are added to a parent layer, which is what gets passed to AVVideoCompositionCoreAnimationTool. In the original code the video layer had no frame and the title layer was passed as the parent, which is why the video rendered black:
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
// let size = videoTrack.naturalSize
// create text Layer
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.clear.cgColor
titleLayer.contentsScale = UIScreen.main.scale
titleLayer.string = questions[counter]
counter = counter + 1
titleLayer.foregroundColor = UIColor.black.cgColor
titleLayer.font = UIFont(name: "Helvetica", size: 28)
titleLayer.shadowOpacity = 0.5
titleLayer.alignmentMode = CATextLayerAlignmentMode.center
titleLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
let videolayer = CALayer()
videolayer.backgroundColor = UIColor.clear.cgColor
videolayer.backgroundColor = UIColor.red.cgColor // presumably a debug color; this second assignment wins, so use .clear in production
videolayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentlayer.addSublayer(videolayer)
parentlayer.addSublayer(titleLayer)
// let layercomposition = AVMutableVideoComposition()
// layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
// layercomposition.renderSize = size
mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
I am attempting to make a new video from an image that will always be CGSize(width: 375, height: 667) and a video that can be of different sizes, with a content mode of .aspectFit. The problem is that I cannot figure out how to make the whole video composition the correct size (i.e. the image's size); instead it ends up at the video's natural size, with a bunch of weird outcomes. (Edit note: the video should be centered in the frame, as aspectFit would do for a UIImageView, for example.)
Here is an example of what I am trying to achieve. Note that I already have the image and the video; all I need to do is make the new video from them. This is how it should look (in the image):
[desired result image]
Here is the code I am currently attempting, with a placeholder image named "background" (a random 375×667 image in Assets). I think I may be doing the stuff around the comment "important stuff" improperly, but I cannot figure it out:
func makeVideo(fromVideoAt videoURL: URL, forName name: String, onComplete: @escaping (URL?) -> Void) {
let asset = AVURLAsset(url: videoURL)
let composition = AVMutableComposition()
guard
let compositionTrack = composition.addMutableTrack(
withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
let assetTrack = asset.tracks(withMediaType: .video).first
else {
print("Something is wrong with the asset.")
onComplete(nil)
return
}
do {
let timeRange = CMTimeRange(start: .zero, duration: asset.duration)
try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
if let audioAssetTrack = asset.tracks(withMediaType: .audio).first,
let compositionAudioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
}
} catch {
print(error)
onComplete(nil)
return
}
compositionTrack.preferredTransform = assetTrack.preferredTransform
let videoInfo = orientation(from: assetTrack.preferredTransform)
//Important stuff potentially? general below:
let videoSize: CGSize
if videoInfo.isPortrait {
videoSize = CGSize(width: 720, height: 1280)
} else {
videoSize = CGSize(width: 720, height: 1280) //720.0, 1280 tiktok default..?
}
//the Background image:
let backgroundLayer = CALayer()
backgroundLayer.frame = CGRect(origin: .zero, size: videoSize) //videosize
backgroundLayer.contents = UIImage(named: "background")?.cgImage
backgroundLayer.contentsGravity = .resizeAspectFill
backgroundLayer.backgroundColor = UIColor.red.cgColor
//Video layer:
let videoLayer = CALayer()
// videoLayer.frame = CGRect(origin: .zero, size: CGSize(width: composition.naturalSize.width, height: composition.naturalSize.height)) //videosize
videoLayer.backgroundColor = UIColor.yellow.cgColor
print(composition.naturalSize, "<-- composition.naturalSize")
videoLayer.frame = CGRect(origin: .zero, size: CGSize(width: videoSize.width, height: composition.naturalSize.height))//CGRect(x: 0, y: 0, width: videoSize.width, height: composition.naturalSize.height)
//OutPutlayer putting the together?
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: CGSize(width: 720, height: 1280)) //videosize
outputLayer.backgroundColor = UIColor.white.cgColor
outputLayer.addSublayer(backgroundLayer)
outputLayer.addSublayer(videoLayer)
// outputLayer.addSublayer(overlayLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = videoSize
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: outputLayer)
//Setting Up Instructions
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(start: .zero, duration: composition.duration)
videoComposition.instructions = [instruction]
let layerInstruction = compositionLayerInstruction(for: compositionTrack, assetTrack: assetTrack)
instruction.layerInstructions = [layerInstruction]
//EXPORTING
guard let export = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else {
print("Cannot create export session.")
onComplete(nil)
return
}
let videoName = UUID().uuidString
let exportURL = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent(videoName).appendingPathExtension("mp4")
export.videoComposition = videoComposition
export.outputFileType = .mp4 // must match the "mp4" extension used for exportURL above
export.outputURL = exportURL
export.exportAsynchronously {
DispatchQueue.main.async {
switch export.status {
case .completed:
onComplete(exportURL)
default:
print("Something went wrong during export.")
print(export.error ?? "unknown error")
onComplete(nil)
break
}
}
}
}
Try this code: https://github.com/vabe1337/VBVideoEditor. It renders video like TikTok and Instagram do.
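If you would rather stay with plain AVFoundation, here is a hedged sketch of the sizing logic: AVMakeRect(aspectRatio:insideRect:) computes exactly the aspect-fit rectangle, so it can letterbox the video layer inside the fixed render size (assetTrack and videoLayer are the objects from the question; the 720×1280 render size is the one hard-coded there):

// Aspect-fit the video, centered, inside the fixed canvas.
let renderSize = CGSize(width: 720, height: 1280)
// Apply the preferred transform so rotated (portrait) clips report the right size.
let transformed = assetTrack.naturalSize.applying(assetTrack.preferredTransform)
let displaySize = CGSize(width: abs(transformed.width), height: abs(transformed.height))
videoLayer.frame = AVMakeRect(aspectRatio: displaySize,
                              insideRect: CGRect(origin: .zero, size: renderSize))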
I am trying to make a video editor in which I have done the following:
1) Collected photos from the user's gallery.
2) Converted that array of photos into a video.
3) Added animations to the video.
4) Played the video.
The code I have written for each step is as follows.
1) Collecting photos from the user's gallery:
func openImagePicker(){
let customColor = UIColor.init(red: 64.0/255.0, green: 0.0, blue: 144.0/255.0, alpha: 1.0)
let customCameraColor = UIColor.init(red: 86.0/255.0, green: 1.0/255.0, blue: 236.0/255.0, alpha: 1.0)
pickerViewController.numberOfPhotoToSelect = 5
pickerViewController.theme.titleLabelTextColor = UIColor.white
pickerViewController.theme.navigationBarBackgroundColor = customColor
pickerViewController.theme.tintColor = UIColor.white
pickerViewController.theme.orderTintColor = customCameraColor
pickerViewController.theme.cameraVeilColor = customCameraColor
pickerViewController.theme.cameraIconColor = UIColor.white
pickerViewController.theme.statusBarStyle = .lightContent
self.yms_presentCustomAlbumPhotoView(pickerViewController, delegate: self)
}
func photoPickerViewController(_ picker: YMSPhotoPickerViewController!, didFinishPickingImages photoAssets: [PHAsset]!) {
picker.dismiss(animated: true) {
self.selectedImageArray = NSMutableArray()
let imageManager = PHImageManager.init()
let options = PHImageRequestOptions.init()
options.deliveryMode = .highQualityFormat
options.resizeMode = .exact
options.isSynchronous = true
for asset: PHAsset in photoAssets
{
let targetSize = CGSize(width:self.view.frame.size.width
, height:self.view.frame.size.width)
imageManager.requestImage(for: asset, targetSize:targetSize, contentMode: .aspectFill, options: options, resultHandler: { (image, info) in
self.selectedImageArray.add(image!)
})
}
let imageVideaMakerController = self.storyboard?.instantiateViewController(withIdentifier: "VideoEditorController") as! VideoEditorController
imageVideaMakerController.selectedImageArray = self.selectedImageArray as! [UIImage]
self.navigationController!.pushViewController(imageVideaMakerController, animated: true)
}
}
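One hedged side note on this step: options.isSynchronous = true makes each requestImage call block the thread it runs on, which here is the main thread inside the picker's dismiss completion, so the UI freezes while full-quality images load. A sketch of moving that work to a background queue (imageManager, options, and photoAssets are the values from the code above; targetSize is computed before leaving the main thread because it reads the view's frame):

// Sketch: perform the synchronous PHImageManager requests off the main thread.
let targetSize = CGSize(width: view.frame.size.width, height: view.frame.size.width)
DispatchQueue.global(qos: .userInitiated).async {
    let images = NSMutableArray()
    for asset in photoAssets {
        imageManager.requestImage(for: asset, targetSize: targetSize,
                                  contentMode: .aspectFill, options: options) { image, _ in
            if let image = image { images.add(image) } // called inline: isSynchronous = true
        }
    }
    DispatchQueue.main.async {
        self.selectedImageArray = images
        // ...instantiate and push VideoEditorController as before
    }
}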
2) Converting that array of photos into a video:
override func viewDidAppear(_ animated: Bool) {
self.navigationController?.navigationBar.isHidden = false
setUpInitialView()
collectionView.reloadData()
}
//MARK:- Custom Methods
func setUpInitialView(){
let loadingNotification = MBProgressHUD.showAdded(to: view, animated: true)
loadingNotification.mode = MBProgressHUDMode.indeterminate
loadingNotification.label.text = "Loading"
buildVideoFromImageArray()
//filterScrollContents()
}
func buildVideoFromImageArray() {
imageArrayToVideoURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/video1.MP4")
removeFileAtURLIfExists(url: imageArrayToVideoURL)
guard let videoWriter = try? AVAssetWriter(outputURL: imageArrayToVideoURL as URL, fileType: AVFileType.mp4) else {
fatalError("AVAssetWriter error")
}
let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width)), AVVideoHeightKey : NSNumber(value: Float(outputSize.height))] as [String : Any]
guard videoWriter.canApply(outputSettings: outputSettings, forMediaType: AVMediaType.video) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: outputSettings)
let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(value: Float(outputSize.width)), kCVPixelBufferHeightKey as String: NSNumber(value: Float(outputSize.height))]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if videoWriter.canAdd(videoWriterInput) {
videoWriter.add(videoWriterInput)
}
if videoWriter.startWriting() {
let zeroTime = CMTimeMake(Int64(imagesPerSecond),Int32(1))
videoWriter.startSession(atSourceTime: zeroTime)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = DispatchQueue(label: "mediaInputQueue")
videoWriterInput.requestMediaDataWhenReady(on: media_queue, using: { () -> Void in
let fps: Int32 = 1
let framePerSecond: Int64 = Int64(self.imagesPerSecond)
let frameDuration = CMTimeMake(Int64(self.imagesPerSecond), fps)
var frameCount: Int64 = 0
var appendSucceeded = true
var newImageArr = self.selectedImageArray
while (!newImageArr.isEmpty) {
if (videoWriterInput.isReadyForMoreMediaData) {
let nextPhoto = newImageArr.remove(at: 0)
let lastFrameTime = CMTimeMake(frameCount * framePerSecond, fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer, status == 0 {
let managedPixelBuffer = pixelBuffer
CVPixelBufferLockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: data, width: Int(self.outputSize.width), height: Int(self.outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(managedPixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
context!.clear(CGRect(x: 0, y: 0, width: CGFloat(self.outputSize.width), height: CGFloat(self.outputSize.height)))
let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
//let aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
context?.draw(nextPhoto.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
CVPixelBufferUnlockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
} else {
print("Failed to allocate pixel buffer")
appendSucceeded = false
}
}
if !appendSucceeded {
break
}
frameCount += 1
}
videoWriterInput.markAsFinished()
videoWriter.finishWriting { () -> Void in
print("-----video1 url = \(self.imageArrayToVideoURL)")
self.globalVideoURL = self.imageArrayToVideoURL
self.asset = AVAsset.init(url:self.imageArrayToVideoURL as URL)
self.exportVideoWithAnimation()
}
})
}
}
3) Adding animations to the video:
func exportVideoWithAnimation() {
let composition = AVMutableComposition()
let track = self.asset?.tracks(withMediaType: AVMediaType.video)
let videoTrack:AVAssetTrack = track![0] as AVAssetTrack
let timerange = CMTimeRangeMake(kCMTimeZero, (self.asset?.duration)!)
let compositionVideoTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID())!
do {
try compositionVideoTrack.insertTimeRange(timerange, of: videoTrack, at: kCMTimeZero)
compositionVideoTrack.preferredTransform = videoTrack.preferredTransform
} catch {
print(error)
}
//if your video has sound, you don’t need to check this
if self.audioIsEnabled {
let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID())!
for audioTrack in (self.asset?.tracks(withMediaType: AVMediaType.audio))! {
do {
try compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
} catch {
print(error)
}
}
}
let size = videoTrack.naturalSize
let videolayer = CALayer()
videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
parentlayer.addSublayer(videolayer)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//this is the animation part
var time = [0.00001, 3, 6, 9, 12] // I used this time array to determine the start time of each frame's animation. Each frame will stay for 3 secs, that's why their difference is 3
var imgarray = self.selectedImageArray
for image in 0..<self.selectedImageArray.count {
let nextPhoto = imgarray[image]
let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
let aspectRatio = min(horizontalRatio, verticalRatio)
let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
let blackLayer = CALayer()
///#7. opacity(1->0)(top->bottom)///
//#3. top->bottom///
//MARK:- Animations==================================
///#1. left->right///
if(self.globalSelectedTransitionTag == 0){
blackLayer.frame = CGRect(x: -videoTrack.naturalSize.width, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
blackLayer.backgroundColor = UIColor.black.cgColor
let imageLayer = CALayer()
imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
imageLayer.contents = imgarray[image].cgImage
blackLayer.addSublayer(imageLayer)
let animation = CABasicAnimation()
animation.keyPath = "position.x"
animation.fromValue = -videoTrack.naturalSize.width
animation.toValue = 5 * (videoTrack.naturalSize.width)
animation.duration = 5
animation.beginTime = CFTimeInterval(time[image])
animation.fillMode = kCAFillModeForwards
animation.isRemovedOnCompletion = false
blackLayer.add(animation, forKey: "opacity") // note: forKey is just an identifier string; the keyPath above ("position.x") is what actually animates
}
parentlayer.addSublayer(blackLayer)
}
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(1, 30)
layercomposition.renderSize = size
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
let videotrack = composition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
instruction.layerInstructions = [layerinstruction]
layercomposition.instructions = [instruction]
if(fromTransition){
self.globalrVideoComposition = layercomposition
}
let animatedVideoURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/video2.mp4")
self.removeFileAtURLIfExists(url: animatedVideoURL)
guard let assetExport = AVAssetExportSession(asset: composition, presetName:AVAssetExportPresetHighestQuality) else {return}
assetExport.videoComposition = self.globalrVideoComposition
assetExport.outputFileType = AVFileType.mp4
assetExport.outputURL = animatedVideoURL as URL
print("****** animatedVideoURL *****",animatedVideoURL)
assetExport.exportAsynchronously(completionHandler: {
switch assetExport.status{
case AVAssetExportSessionStatus.failed:
print("failed \(String(describing: assetExport.error))")
case AVAssetExportSessionStatus.cancelled:
print("cancelled \(String(describing: assetExport.error))")
default:
print("Exported")
if(self.fromPlayVideo){
DispatchQueue.main.async {
self.globalVideoURL = animatedVideoURL
self.playVideoInPlayer(animatedVideoURL: animatedVideoURL as URL)
}
}else if(self.fromSave){
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: animatedVideoURL as URL)
print("222222 animatedVideoURL",animatedVideoURL)
}) { saved, error in
DispatchQueue.main.async {
MBProgressHUD.hideAllHUDs(for: self.view, animated: true)
}
if saved {
let alertController = UIAlertController(title: "Your video was successfully saved", message: nil, preferredStyle: .alert)
let defaultAction = UIAlertAction(title: "OK", style: .default, handler: nil)
alertController.addAction(defaultAction)
print("The task is done,enjoy now!")
self.present(alertController, animated: true, completion: nil)
}else{
}
}
}
}
})
}
4) Playing the video:
func playVideoInPlayer(animatedVideoURL:URL){
if(globalFilterName != nil){
self.asset = AVAsset.init(url:animatedVideoURL as URL)
let newPlayerItem = AVPlayerItem.init(asset:self.asset);
newPlayerItem.videoComposition=globalrVideoComposition
self.player = AVPlayer.init(playerItem:newPlayerItem)
}else{
let newPlayerItem = AVPlayerItem.init(url:animatedVideoURL)
self.player = AVPlayer.init(playerItem:newPlayerItem)
}
NotificationCenter.default.addObserver(self, selector: #selector(self.finishedPlaying(_:)), name: NSNotification.Name.AVPlayerItemDidPlayToEndTime, object:nil)
self.playerLayer = AVPlayerLayer.init(player:self.player)
let width: CGFloat = self.videoContainerView.frame.size.width
let height: CGFloat = self.videoContainerView.frame.size.height
self.playerLayer.frame = CGRect(x: 0.0, y:0, width: width, height: height)
self.playerLayer.backgroundColor = UIColor.black.cgColor
self.playerLayer.videoGravity = .resizeAspectFill
self.videoContainerView.layer.addSublayer( self.playerLayer)
self.playPauseBtn.isHidden = false
self.playPauseBtn.setImage(UIImage.init(named:"pause"), for:.normal)
DispatchQueue.main.async {
MBProgressHUD.hideAllHUDs(for:self.view, animated:true)
self.player.play()
}
}
This whole flow is working; the only problem is that it takes a long time before the video can play, with all the setup (converting the images to a video and then adding the animations).
Please help me reduce the time, so that users don't have to wait long between picking their images in the image picker and seeing the video play.
Any help or guidance would be highly appreciated. Thanks in advance!
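One structural observation, offered as a hedged suggestion since the thread shows no accepted answer: the pipeline renders everything twice, once through AVAssetWriter and then again through the export, and the export exists only so the result can be played. A video composition that uses AVVideoCompositionCoreAnimationTool cannot be attached to an AVPlayerItem (the tool is for offline rendering only), but AVSynchronizedLayer can run the same CALayer animations against the player's timeline, so the export could be deferred until the user actually taps save. A sketch, where playerItem plays the plain image video and overlayLayer is a hypothetical layer tree holding the CABasicAnimations:

// Preview Core Animation overlays without exporting: an AVSynchronizedLayer
// schedules its sublayers' animations against the player item's timeline.
let syncLayer = AVSynchronizedLayer(playerItem: playerItem)
syncLayer.frame = videoContainerView.bounds
syncLayer.addSublayer(overlayLayer) // hypothetical: the layer tree with the animations
videoContainerView.layer.addSublayer(syncLayer) // above the AVPlayerLayer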
I am trying to understand the animation being added to the videos here. What animation is happening? I understand how the code adds an animation to the video as a whole, but I want to know how we can add a different animation to each image of the video.
I have used the following code to pick an image and merge videos.
class ImageVideoMakerController: UIViewController {
override func viewDidLoad() {
super.viewDidLoad()
playPauseBtn.isHidden = true
fromPlayVideo = true
fromSave = false
setUpInitialView()
}
func setUpInitialView(){
setUpArrays()
buildVideoFromImageArray()
transitionScrollViewCreation()
filterScrollContents()
}
@objc func filterActionTapped(sender: UIButton) {
    fromFilter = true
    fromTransition = false
    // Tags 0 through 7 map directly onto Core Image filter names.
    let filterNames = ["CISepiaTone", "CIPhotoEffectChrome", "CIPhotoEffectTransfer",
                       "CIPhotoEffectTonal", "CIPhotoEffectProcess", "CIPhotoEffectNoir",
                       "CIPhotoEffectInstant", "CIPhotoEffectFade"]
    guard sender.tag >= 0 && sender.tag < filterNames.count else { return }
    player.pause()
    player.seek(to: kCMTimeZero)
    globalFilterName = filterNames[sender.tag]
    applyFilter(globalFilterToBeApplied: globalFilterName!)
}
func applyFilter(globalFilterToBeApplied:String){
let filter = CIFilter(name: globalFilterToBeApplied)!
let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
let source = request.sourceImage.clampedToExtent()
filter.setValue(source, forKey: kCIInputImageKey)
let output = filter.outputImage!.cropped(to: request.sourceImage.extent)
request.finish(with: output, context: nil)
})
globalrVideoComposition = composition
self.playVideoInPlayer(animatedVideoURL: self.globalVideoURL as URL)
}
func playVideoInPlayer(animatedVideoURL:URL){
if(globalFilterName != nil){
self.asset = AVAsset.init(url:animatedVideoURL as URL)
let newPlayerItem = AVPlayerItem.init(asset:self.asset);
newPlayerItem.videoComposition=globalrVideoComposition
self.player = AVPlayer.init(playerItem:newPlayerItem)
}else{
let newPlayerItem = AVPlayerItem.init(url:animatedVideoURL)
self.player = AVPlayer.init(playerItem:newPlayerItem)
}
NotificationCenter.default.addObserver(self, selector: #selector(self.finishedPlaying(_:)), name: NSNotification.Name.AVPlayerItemDidPlayToEndTime, object:nil)
self.playerLayer = AVPlayerLayer.init(player:self.player)
let width: CGFloat = self.videoContainerView.frame.size.width
let height: CGFloat = self.videoContainerView.frame.size.height
self.playerLayer.frame = CGRect(x: 0.0, y:0, width: width, height: height)
self.playerLayer.backgroundColor = UIColor.black.cgColor
self.playerLayer.videoGravity = .resizeAspectFill
self.videoContainerView.layer.addSublayer( self.playerLayer)
self.playPauseBtn.isHidden = false
self.playPauseBtn.setImage(UIImage.init(named:"pause"), for:.normal)
DispatchQueue.main.async {
MBProgressHUD.hideAllHUDs(for:self.view, animated:true)
self.player.play()
}
}
func exportVideoWithAnimation() {
let composition = AVMutableComposition()
let track = self.asset?.tracks(withMediaType: AVMediaType.video)
let videoTrack:AVAssetTrack = track![0] as AVAssetTrack
let timerange = CMTimeRangeMake(kCMTimeZero, (self.asset?.duration)!)
let compositionVideoTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID())!
do {
try compositionVideoTrack.insertTimeRange(timerange, of: videoTrack, at: kCMTimeZero)
compositionVideoTrack.preferredTransform = videoTrack.preferredTransform
} catch {
print(error)
}
//if your video has sound, you don’t need to check this
if self.audioIsEnabled {
let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID())!
for audioTrack in (self.asset?.tracks(withMediaType: AVMediaType.audio))! {
do {
try compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
} catch {
print(error)
}
}
}
let size = videoTrack.naturalSize
let videolayer = CALayer()
videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
parentlayer.addSublayer(videolayer)
var time = [0.00001, 3, 6, 9, 12] // I used this time array to determine the start time of each frame's animation. Each frame will stay for 3 secs, that's why their difference is 3
var imgarray = self.selectedImageArray
for image in 0..<self.selectedImageArray.count {
let nextPhoto = imgarray[image]
let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
let aspectRatio = min(horizontalRatio, verticalRatio)
let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
let blackLayer = CALayer()
///#7. opacity(1->0)(top->bottom)///
//#3. top->bottom///
//MARK:- Animations==================================
///#1. left->right///
if(self.globalSelectedTransitionTag == 0){
blackLayer.frame = CGRect(x: -videoTrack.naturalSize.width, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
blackLayer.backgroundColor = UIColor.black.cgColor
let imageLayer = CALayer()
imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
imageLayer.contents = imgarray[image].cgImage
blackLayer.addSublayer(imageLayer)
let animation = CABasicAnimation()
animation.keyPath = "position.x"
animation.fromValue = -videoTrack.naturalSize.width
animation.toValue = 5 * (videoTrack.naturalSize.width)
animation.duration = 5
animation.beginTime = CFTimeInterval(time[image])
animation.fillMode = kCAFillModeForwards
animation.isRemovedOnCompletion = false
blackLayer.add(animation, forKey: "opacity") // note: forKey is just an identifier string; the keyPath above ("position.x") is what actually animates
}
parentlayer.addSublayer(blackLayer)
}
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(1, 30)
layercomposition.renderSize = size
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
let videotrack = composition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
instruction.layerInstructions = [layerinstruction]
layercomposition.instructions = [instruction]
if(fromTransition){
self.globalrVideoComposition = layercomposition
}
let animatedVideoURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/video2.mp4")
self.removeFileAtURLIfExists(url: animatedVideoURL)
guard let assetExport = AVAssetExportSession(asset: composition, presetName:AVAssetExportPresetHighestQuality) else {return}
assetExport.videoComposition = self.globalrVideoComposition
assetExport.outputFileType = AVFileType.mp4
assetExport.outputURL = animatedVideoURL as URL
print("****** animatedVideoURL *****",animatedVideoURL)
assetExport.exportAsynchronously(completionHandler: {
switch assetExport.status{
case AVAssetExportSessionStatus.failed:
print("failed \(String(describing: assetExport.error))")
case AVAssetExportSessionStatus.cancelled:
print("cancelled \(String(describing: assetExport.error))")
default:
print("Exported")
if(self.fromPlayVideo){
DispatchQueue.main.async {
self.globalVideoURL = animatedVideoURL
self.playVideoInPlayer(animatedVideoURL: animatedVideoURL as URL)
}
}else if(self.fromSave){
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: animatedVideoURL as URL)
}) { saved, error in
if saved {
// optionally confirm the save to the user, as in the earlier version of this method
}
}
}
}
}
})
}
//MARK:- Make ScrollViews
@objc func transitionTapped(sender: UIButton) {
self.fromSave = false
self.fromPlayVideo = true
self.playPauseBtn.isHidden = true
self.playerLayer.removeFromSuperlayer()
globalSelectedTransitionTag = sender.tag
exportVideoWithAnimation()
}
}
If I'm not mistaken, the animation there is a single CABasicAnimation per image layer. Note that the "opacity" string passed to add(_:forKey:) is only a dictionary key, not the property being animated; the key path is what counts:
let animation = CABasicAnimation(keyPath: "position.x")
so each image layer slides horizontally across the frame over a few seconds (the "left->right" transition the comments describe). You could just as easily animate "opacity" to fade a layer in, or "transform.scale" to scale it up in size.
The code you posted is badly written and messy, so as a beginner I would set it aside rather than study it.
As a beginner, I would also not jump straight into "video .. and animations too!" at first.
Just try making some simple animations in your app. A good thing to start with is something that slides on and off the screen, or perhaps just fades in and out. (So, try doing those things to a button or the like.)
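For instance, a first exercise along those lines, as a minimal sketch where myButton stands in for any view you want to animate:

// Fade a button out and back in: a simple first animation.
UIView.animate(withDuration: 0.5, animations: {
    myButton.alpha = 0 // fade out
}) { _ in
    UIView.animate(withDuration: 0.5) {
        myButton.alpha = 1 // fade back in
    }
}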