I am trying to make a video editor in which I have done the following:
1) Collected photos from the user's gallery.
2) Converted that array of photos into a video.
3) Added animations to the video.
4) Played the video.
The code I have written for each step is as follows:
1) Collected photos from the user's gallery.
func openImagePicker(){
let customColor = UIColor.init(red: 64.0/255.0, green: 0.0, blue: 144.0/255.0, alpha: 1.0)
let customCameraColor = UIColor.init(red: 86.0/255.0, green: 1.0/255.0, blue: 236.0/255.0, alpha: 1.0)
pickerViewController.numberOfPhotoToSelect = 5
pickerViewController.theme.titleLabelTextColor = UIColor.white
pickerViewController.theme.navigationBarBackgroundColor = customColor
pickerViewController.theme.tintColor = UIColor.white
pickerViewController.theme.orderTintColor = customCameraColor
pickerViewController.theme.cameraVeilColor = customCameraColor
pickerViewController.theme.cameraIconColor = UIColor.white
pickerViewController.theme.statusBarStyle = .lightContent
self.yms_presentCustomAlbumPhotoView(pickerViewController, delegate: self)
}
func photoPickerViewController(_ picker: YMSPhotoPickerViewController!, didFinishPickingImages photoAssets: [PHAsset]!) {
picker.dismiss(animated: true) {
self.selectedImageArray = NSMutableArray()
let imageManager = PHImageManager.init()
let options = PHImageRequestOptions.init()
options.deliveryMode = .highQualityFormat
options.resizeMode = .exact
options.isSynchronous = true
for asset: PHAsset in photoAssets
{
let targetSize = CGSize(width: self.view.frame.size.width, height: self.view.frame.size.width)
imageManager.requestImage(for: asset, targetSize:targetSize, contentMode: .aspectFill, options: options, resultHandler: { (image, info) in
self.selectedImageArray.add(image!)
})
}
let imageVideaMakerController = self.storyboard?.instantiateViewController(withIdentifier: "VideoEditorController") as! VideoEditorController
imageVideaMakerController.selectedImageArray = self.selectedImageArray as! [UIImage]
self.navigationController!.pushViewController(imageVideaMakerController, animated: true)
}
}
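A note on the loop above: with options.isSynchronous = true every request blocks until the full-quality image has been loaded, and all of that happens before VideoEditorController is pushed. An asynchronous variant of the same PHImageManager call could look roughly like this (a sketch only, reusing the names from the code above):
let asyncOptions = PHImageRequestOptions()
asyncOptions.deliveryMode = .highQualityFormat // the handler is called once, with the final image
asyncOptions.resizeMode = .exact
asyncOptions.isSynchronous = false // do not block the calling thread

let targetSize = CGSize(width: self.view.frame.size.width, height: self.view.frame.size.width)
let group = DispatchGroup()
for asset in photoAssets {
    group.enter()
    imageManager.requestImage(for: asset, targetSize: targetSize, contentMode: .aspectFill, options: asyncOptions) { image, _ in
        if let image = image { self.selectedImageArray.add(image) }
        group.leave()
    }
}
group.notify(queue: .main) {
    // push VideoEditorController here, once every image has arrived
}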
2) Converted that array of photos into a video.
override func viewDidAppear(_ animated: Bool) {
self.navigationController?.navigationBar.isHidden = false
setUpInitialView()
collectionView.reloadData()
}
//MARK:- Custom Methods
func setUpInitialView(){
let loadingNotification = MBProgressHUD.showAdded(to: view, animated: true)
loadingNotification.mode = MBProgressHUDMode.indeterminate
loadingNotification.label.text = "Loading"
buildVideoFromImageArray()
//filterScrollContents()
}
func buildVideoFromImageArray() {
imageArrayToVideoURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/video1.MP4")
removeFileAtURLIfExists(url: imageArrayToVideoURL)
guard let videoWriter = try? AVAssetWriter(outputURL: imageArrayToVideoURL as URL, fileType: AVFileType.mp4) else {
fatalError("AVAssetWriter error")
}
let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width)), AVVideoHeightKey : NSNumber(value: Float(outputSize.height))] as [String : Any]
guard videoWriter.canApply(outputSettings: outputSettings, forMediaType: AVMediaType.video) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: outputSettings)
let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(value: Float(outputSize.width)), kCVPixelBufferHeightKey as String: NSNumber(value: Float(outputSize.height))]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if videoWriter.canAdd(videoWriterInput) {
videoWriter.add(videoWriterInput)
}
if videoWriter.startWriting() {
let zeroTime = CMTimeMake(Int64(imagesPerSecond),Int32(1))
videoWriter.startSession(atSourceTime: zeroTime)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = DispatchQueue(label: "mediaInputQueue")
videoWriterInput.requestMediaDataWhenReady(on: media_queue, using: { () -> Void in
let fps: Int32 = 1
let framePerSecond: Int64 = Int64(self.imagesPerSecond)
let frameDuration = CMTimeMake(Int64(self.imagesPerSecond), fps)
var frameCount: Int64 = 0
var appendSucceeded = true
var newImageArr = self.selectedImageArray
while (!newImageArr.isEmpty) {
if (videoWriterInput.isReadyForMoreMediaData) {
let nextPhoto = newImageArr.remove(at: 0)
let lastFrameTime = CMTimeMake(frameCount * framePerSecond, fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer, status == 0 {
let managedPixelBuffer = pixelBuffer
CVPixelBufferLockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: data, width: Int(self.outputSize.width), height: Int(self.outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(managedPixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
context!.clear(CGRect(x: 0, y: 0, width: CGFloat(self.outputSize.width), height: CGFloat(self.outputSize.height)))
let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
//let aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
context?.draw(nextPhoto.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
CVPixelBufferUnlockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
} else {
print("Failed to allocate pixel buffer")
appendSucceeded = false
}
}
if !appendSucceeded {
break
}
frameCount += 1
}
videoWriterInput.markAsFinished()
videoWriter.finishWriting { () -> Void in
print("-----video1 url = \(self.imageArrayToVideoURL)")
self.globalVideoURL = self.imageArrayToVideoURL
self.asset = AVAsset.init(url:self.imageArrayToVideoURL as URL)
self.exportVideoWithAnimation()
}
})
}
}
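For reference, this is the timing arithmetic the loop above performs, pulled out into a standalone sketch (imagesPerSecond = 3 is just an assumed value, fps is 1 as in the code, and the CMTimeMake spelling mirrors the code above):
let imagesPerSecond: Int64 = 3 // assumed: each image should be held for 3 seconds
let fps: Int32 = 1
let frameDuration = CMTimeMake(imagesPerSecond, fps) // 3/1 = 3 seconds per image

for frameCount in 0..<4 {
    let lastFrameTime = CMTimeMake(Int64(frameCount) * imagesPerSecond, fps)
    let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
    print(frameCount, CMTimeGetSeconds(presentationTime)) // prints 0.0, 6.0, 9.0, 12.0
}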
3) Added animations to the video.
func exportVideoWithAnimation() {
let composition = AVMutableComposition()
let track = self.asset?.tracks(withMediaType: AVMediaType.video)
let videoTrack:AVAssetTrack = track![0] as AVAssetTrack
let timerange = CMTimeRangeMake(kCMTimeZero, (self.asset?.duration)!)
let compositionVideoTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID())!
do {
try compositionVideoTrack.insertTimeRange(timerange, of: videoTrack, at: kCMTimeZero)
compositionVideoTrack.preferredTransform = videoTrack.preferredTransform
} catch {
print(error)
}
//if your video has sound, you don’t need to check this
if self.audioIsEnabled {
let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID())!
for audioTrack in (self.asset?.tracks(withMediaType: AVMediaType.audio))! {
do {
try compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
} catch {
print(error)
}
}
}
let size = videoTrack.naturalSize
let videolayer = CALayer()
videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
parentlayer.addSublayer(videolayer)
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//this is the animation part
var time = [0.00001, 3, 6, 9, 12] //I used this time array to determine the start time of each frame's animation. Each frame stays for 3 secs, that's why their difference is 3 (the first value is a tiny epsilon rather than 0, since Core Animation treats a beginTime of exactly 0 as "start now")
var imgarray = self.selectedImageArray
for image in 0..<self.selectedImageArray.count {
let nextPhoto = imgarray[image]
let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
let aspectRatio = min(horizontalRatio, verticalRatio)
let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
let blackLayer = CALayer()
///#7. opacity(1->0)(top->bottom)///
//#3. top->bottom///
//MARK:- Animations==================================
///#1. left->right///
if(self.globalSelectedTransitionTag == 0){
blackLayer.frame = CGRect(x: -videoTrack.naturalSize.width, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
blackLayer.backgroundColor = UIColor.black.cgColor
let imageLayer = CALayer()
imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
imageLayer.contents = imgarray[image].cgImage
blackLayer.addSublayer(imageLayer)
let animation = CABasicAnimation()
animation.keyPath = "position.x"
animation.fromValue = -videoTrack.naturalSize.width
animation.toValue = 5 * (videoTrack.naturalSize.width)
animation.duration = 5
animation.beginTime = CFTimeInterval(time[image])
animation.fillMode = kCAFillModeForwards
animation.isRemovedOnCompletion = false
blackLayer.add(animation, forKey: "opacity")
}
parentlayer.addSublayer(blackLayer)
}
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(1, 30)
layercomposition.renderSize = size
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
let videotrack = composition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
instruction.layerInstructions = [layerinstruction]
layercomposition.instructions = [instruction]
if(fromTransition){
self.globalrVideoComposition = layercomposition
}
let animatedVideoURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/video2.mp4")
self.removeFileAtURLIfExists(url: animatedVideoURL)
guard let assetExport = AVAssetExportSession(asset: composition, presetName:AVAssetExportPresetHighestQuality) else {return}
assetExport.videoComposition = self.globalrVideoComposition
assetExport.outputFileType = AVFileType.mp4
assetExport.outputURL = animatedVideoURL as URL
print("****** animatedVideoURL *****",animatedVideoURL)
assetExport.exportAsynchronously(completionHandler: {
switch assetExport.status{
case AVAssetExportSessionStatus.failed:
print("failed \(String(describing: assetExport.error))")
case AVAssetExportSessionStatus.cancelled:
print("cancelled \(String(describing: assetExport.error))")
default:
print("Exported")
if(self.fromPlayVideo){
DispatchQueue.main.async {
self.globalVideoURL = animatedVideoURL; self.playVideoInPlayer(animatedVideoURL: animatedVideoURL as URL)
}
}else if(self.fromSave){
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: animatedVideoURL as URL)
print("222222 animatedVideoURL",animatedVideoURL)
}) { saved, error in
DispatchQueue.main.async {
MBProgressHUD.hideAllHUDs(for: self.view, animated: true)
}
if saved {
let alertController = UIAlertController(title: "Your video was successfully saved", message: nil, preferredStyle: .alert)
let defaultAction = UIAlertAction(title: "OK", style: .default, handler: nil)
alertController.addAction(defaultAction)
print("The task is done,enjoy now!")
self.present(alertController, animated: true, completion: nil)
}else{
}
}
}
}
})
}
4) Played the video.
func playVideoInPlayer(animatedVideoURL:URL){
if(globalFilterName != nil){
self.asset = AVAsset.init(url:animatedVideoURL as URL)
let newPlayerItem = AVPlayerItem.init(asset:self.asset);
newPlayerItem.videoComposition=globalrVideoComposition
self.player = AVPlayer.init(playerItem:newPlayerItem)
}else{
let newPlayerItem = AVPlayerItem.init(url:animatedVideoURL)
self.player = AVPlayer.init(playerItem:newPlayerItem)
}
NotificationCenter.default.addObserver(self, selector: #selector(self.finishedPlaying(_:)), name: NSNotification.Name.AVPlayerItemDidPlayToEndTime, object:nil)
self.playerLayer = AVPlayerLayer.init(player:self.player)
let width: CGFloat = self.videoContainerView.frame.size.width
let height: CGFloat = self.videoContainerView.frame.size.height
self.playerLayer.frame = CGRect(x: 0.0, y:0, width: width, height: height)
self.playerLayer.backgroundColor = UIColor.black.cgColor
self.playerLayer.videoGravity = .resizeAspectFill
self.videoContainerView.layer.addSublayer( self.playerLayer)
self.playPauseBtn.isHidden = false
self.playPauseBtn.setImage(UIImage.init(named:"pause"), for:.normal)
DispatchQueue.main.async {
MBProgressHUD.hideAllHUDs(for:self.view, animated:true)
self.player.play()
}
}
This whole flow is working fine; the only problem is that it takes a lot of time before the video plays, because of all the setup (converting the images into a video and adding the animations).
Please help me reduce this time so that users do not have to wait long between picking their images and seeing the video play.
Any help or guidance would be highly appreciated. Thanks in advance!
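On the waiting time itself, one idea (a sketch only, not part of the code above) is to preview the slideshow with an AVSynchronizedLayer, which drives Core Animation from the player item's timeline, so the AVAssetExportSession only has to run when the user actually saves. Here parentOverlay stands for a layer tree built like parentlayer in exportVideoWithAnimation, but without the video layer, since AVVideoCompositionCoreAnimationTool itself is intended for export rather than real-time playback:
// Sketch: play the composition right away and overlay the animated layers in real time.
let item = AVPlayerItem(asset: composition) // the AVMutableComposition built in exportVideoWithAnimation()
self.player = AVPlayer(playerItem: item)
self.playerLayer = AVPlayerLayer(player: self.player)
self.playerLayer.frame = self.videoContainerView.bounds
self.videoContainerView.layer.addSublayer(self.playerLayer)

let syncLayer = AVSynchronizedLayer(playerItem: item) // animations in this subtree follow the item's time, not wall-clock time
syncLayer.frame = self.videoContainerView.bounds
syncLayer.addSublayer(parentOverlay) // assumed: same layers/animations as parentlayer, minus the video layer
self.videoContainerView.layer.addSublayer(syncLayer)
self.player.play()
// Run the AVAssetExportSession only when the user taps save.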
Related
I am creating a video collage app in which I merge multiple videos into different frames to make a single video. But every frame shows the first video; the other videos do not appear after the merge. Please give me a suggestion as soon as possible. My code is below.
func newoverlay() {
// 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
let mixComposition = AVMutableComposition()
var arrayOfComposition = Array<AVMutableCompositionTrack>()
var trackInstruction = Array<AVVideoCompositionLayerInstruction>()
var videolayer = Array<CALayer>()
var i:Int = 0
let mainInstruction = AVMutableVideoCompositionInstruction()
var assetDuration:CMTime = CMTime.zero
var box = Array<CALayer>()
var arrOfIns = Array<AVMutableVideoCompositionInstruction>()
var atTimeM : CMTime = CMTimeMake(value: 0, timescale: 0)
var lastAsset: AVURLAsset!
// 2 - Create two video tracks
for videoAssetss in firstAsset {
guard var firstTrack = mixComposition.addMutableTrack(withMediaType: .video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
do {
try firstTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: (videoAssetss as? AVURLAsset)!.duration),
of: (videoAssetss as? AVURLAsset)!.tracks(withMediaType: .video)[0],
at: CMTime.zero)
var firstInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstTrack)
guard let cor = photoFrameCordinate[i] as? CGRect else{return}
if videoAssetss as! AVURLAsset != firstAsset.last as! AVURLAsset{
firstInstruction.setOpacity(0, at: assetDuration) // asseteDuration
}
let transform = CGAffineTransform(scaleX: 0.4, y:1).concatenating(CGAffineTransform(translationX: trackInstruction[i-1]., y: -cor.origin.y))
firstInstruction.setTransform(transform, at: CMTime.zero)
assetDuration = CMTimeAdd(assetDuration, (videoAssetss as! AVURLAsset).duration)
lastAsset = videoAssetss as? AVURLAsset
trackInstruction.append(firstInstruction)
i += 1
// arrayOfComposition.append(firstTrack)
} catch {
print("Failed to load first track")
return
}
}
// Watermark Effect
let width: CGFloat = widthConstraintViewForImage.constant
let height = heightConstraintViewForImage.constant
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
//Mark: Frame layer
let bglayer = CALayer()
bglayer.contents = imgViewForAdminImage.image?.cgImage
bglayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
bglayer.backgroundColor = UIColor.clear.cgColor
for index in 0..<videoURLS.count{
var videoBox = CALayer()
guard let cor = photoFrameCordinate[index] as? CGRect else{return}
videoBox.frame = CGRect(x: cor.origin.x, y: parentlayer.frame.maxY-(cor.origin.y+cor.size.height), width: cor.size.width, height: cor.size.height)
videoBox.backgroundColor = UIColor.green.cgColor
videoBox.masksToBounds = true
var vlayer = CALayer()
vlayer.contentsScale = 1.0
vlayer.contentsGravity = CALayerContentsGravity.center
vlayer.frame = CGRect(x: 0, y: 0, width:cor.size.width, height: cor.size.height)
vlayer.backgroundColor = UIColor.yellow.cgColor
videolayer.append(vlayer)
videoBox.addSublayer(vlayer)
box.append(videoBox)
bglayer.addSublayer(videoBox)
}
parentlayer.addSublayer(bglayer)
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayers: videolayer, in: parentlayer)
// 2.1
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: assetDuration)
mainInstruction.layerInstructions = trackInstruction
mainInstruction.backgroundColor = UIColor.red.cgColor
layercomposition.instructions = [mainInstruction]
// layercomposition.renderSize = CGSizeMake(videoSize.width * scale, videoSize.height * scale)
layercomposition.renderScale = 1.0
layercomposition.renderSize = CGSize(width: width, height: height)
// create new file to receive data
let dirPaths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
let docsDir = dirPaths[0] as NSString
let movieFilePath = docsDir.appendingPathComponent("result.mp4")
let movieDestinationUrl = NSURL(fileURLWithPath: movieFilePath)
// use AVAssetExportSession to export video
let assetExport = AVAssetExportSession(asset: mixComposition, presetName:AVAssetExportPresetMediumQuality)
assetExport?.outputFileType = AVFileType.mp4
assetExport?.videoComposition = layercomposition
// Check exist and remove old file
FileManager.default.removeItemIfExisted(movieDestinationUrl as URL)
assetExport?.outputURL = movieDestinationUrl as URL
assetExport?.exportAsynchronously(completionHandler: {
switch assetExport!.status {
case AVAssetExportSession.Status.failed:
print("failed")
print(assetExport?.error ?? "unknown error")
case AVAssetExportSession.Status.cancelled:
print("cancelled")
print(assetExport?.error ?? "unknown error")
default:
print("Movie complete")
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: movieDestinationUrl as URL)
}) { saved, error in
if saved {
print("Saved")
}else{
print(error!)
}
}
self.playVideo()
}
})
}
When I set the opacity of the first video to 0, the second video shows in all the frames. I think all the videos are there but sitting behind the first one, which is why only the first video shows in all the frames.
I also tried:
let videolayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: asset)
videolayerInstruction.setCropRectangle(CGRect(), at: CMTime())
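For reference, placing each track into its own frame with a single transform on its layer instruction would look something like this sketch (an illustration only; frameRect stands for the CGRect taken from photoFrameCordinate):
// Sketch: scale one source track into its collage frame and move it into place,
// so every video stays visible at the same time instead of stacking behind the first one.
func layerInstruction(for compositionTrack: AVMutableCompositionTrack,
                      assetTrack: AVAssetTrack,
                      frameRect: CGRect) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionTrack)
    let naturalSize = assetTrack.naturalSize
    let scale = CGAffineTransform(scaleX: frameRect.width / naturalSize.width,
                                  y: frameRect.height / naturalSize.height)
    let move = CGAffineTransform(translationX: frameRect.origin.x, y: frameRect.origin.y)
    instruction.setTransform(scale.concatenating(move), at: .zero)
    return instruction
}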
I am building an app where the user can make a video out of a few images.
I took the code from this post: Making video from UIImage array with different transition animations
Everything is working fine, but I have one big problem and I don't know how to solve it.
When the video is created, the image does not fill the whole screen, and sometimes it is turned to one side or cut in half.
The first image is the original image and the second image is the created video with the problems I have.
I really need some help.
var outputSize = CGSize(width: 1920 , height: 1280)
var imagesPerSecond: TimeInterval = 0
var fps: Int32 = 0
var selectedPhotosArray = [UIImage]()
var imageArrayToVideoURL = NSURL()
var asset: AVAsset!
var imageCount = 1
My Code
func buildVideoFromImageArray() {
for image in arrayOfImages {
selectedPhotosArray.append(image)
}
imageArrayToVideoURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/video1.MP4")
removeFileAtURLIfExists(url: imageArrayToVideoURL)
guard let videoWriter = try? AVAssetWriter(outputURL: imageArrayToVideoURL as URL, fileType: AVFileType.mp4) else {
fatalError("AVAssetWriter error")
}
let outputSettings = [AVVideoCodecKey : AVVideoCodecType.h264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width)), AVVideoHeightKey : NSNumber(value: Float(outputSize.height))] as [String : Any]
guard videoWriter.canApply(outputSettings: outputSettings, forMediaType: AVMediaType.video) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: outputSettings)
let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(value: Float(outputSize.width)), kCVPixelBufferHeightKey as String: NSNumber(value: Float(outputSize.height))]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if videoWriter.canAdd(videoWriterInput) {
videoWriter.add(videoWriterInput)
}
if videoWriter.startWriting() {
let zeroTime = CMTimeMake(value: Int64(imagesPerSecond),timescale: self.fps)
videoWriter.startSession(atSourceTime: zeroTime)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = DispatchQueue(label: "mediaInputQueue")
videoWriterInput.requestMediaDataWhenReady(on: media_queue, using: { () -> Void in
//let fps: Int32 = 1
let framePerSecond: Int64 = Int64(self.imagesPerSecond)
let frameDuration = CMTimeMake(value: Int64(self.imagesPerSecond), timescale: self.fps)
var frameCount: Int64 = 0
var appendSucceeded = true
while (!self.selectedPhotosArray.isEmpty) { // runs as long as there is still something in the array
if (videoWriterInput.isReadyForMoreMediaData) {
let nextPhoto = self.selectedPhotosArray.remove(at: 0) // the photo is removed from selectedPhotosArray
let lastFrameTime = CMTimeMake(value: frameCount * framePerSecond, timescale: self.fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer, status == 0 {
let managedPixelBuffer = pixelBuffer
CVPixelBufferLockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: data, width: Int(self.outputSize.width), height: Int(self.outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(managedPixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
context!.clear(CGRect(x: 0, y: 0, width: CGFloat(self.outputSize.width), height: CGFloat(self.outputSize.height)))
let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
//let aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
context?.draw(nextPhoto.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
CVPixelBufferUnlockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
} else {
print("Failed to allocate pixel buffer")
appendSucceeded = false
}
}
if !appendSucceeded {
break
}
frameCount += 1
}
videoWriterInput.markAsFinished()
videoWriter.finishWriting { () -> Void in
print("-----video1 url = \(self.imageArrayToVideoURL)")
//self.asset = AVAsset(url: self.imageArrayToVideoURL as URL)
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: self.imageArrayToVideoURL as URL)
}) { saved, error in
if saved {
let fetchOptions = PHFetchOptions()
fetchOptions.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)]
let fetchResult = PHAsset.fetchAssets(with: .video, options: fetchOptions).firstObject
// fetchResult is your latest video PHAsset
// To fetch latest image replace .video with .image
}
}
}
})
}
}
func removeFileAtURLIfExists(url: NSURL) {
if let filePath = url.path {
let fileManager = FileManager.default
if fileManager.fileExists(atPath: filePath) {
do{
try fileManager.removeItem(atPath: filePath)
} catch let error as NSError {
print("Couldn't remove existing destination file: \(error)")
}
}
}
}
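One thing worth checking against the symptoms above: drawing nextPhoto.cgImage directly ignores the UIImage's imageOrientation, which is a common reason the frames come out turned on their side; and the min(...) aspect ratio is ScaleAspectFit, so switching to the commented-out max(...) line makes the image fill the output instead of leaving bars. A small sketch of normalizing the orientation first (an illustration, not part of the original code):
// Redraw the image so its pixel data matches its display orientation.
func normalizedImage(_ image: UIImage) -> UIImage {
    guard image.imageOrientation != .up else { return image }
    UIGraphicsBeginImageContextWithOptions(image.size, false, image.scale)
    image.draw(in: CGRect(origin: .zero, size: image.size))
    let result = UIGraphicsGetImageFromCurrentImageContext() ?? image
    UIGraphicsEndImageContext()
    return result
}

// Inside the writer loop above, one could then use:
// let nextPhoto = normalizedImage(self.selectedPhotosArray.remove(at: 0))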
I am developing a video-based application in Swift, in which I export a video clip with a watermark logo and a fade in/out effect. Here is my code:
func watermark(video videoAsset:AVAsset, videoModal:VideoModel, watermarkText text : String!, imageName name : String!, saveToLibrary flag : Bool, watermarkPosition position : PDWatermarkPosition, withMode mode: SpeedoVideoMode, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
var arrayLayerInstructions:[AVMutableVideoCompositionLayerInstruction] = []
let servicemodel = ServiceModel()
DispatchQueue.global(qos: DispatchQoS.QoSClass.default).sync {
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
if videoAsset.tracks(withMediaType: AVMediaTypeVideo).count == 0
{
completion!(nil, nil, nil)
return
}
let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
self.addAudioTrack(composition: mixComposition, videoAsset: videoAsset as! AVURLAsset, withMode: mode, videoModal:videoModal)
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
}
catch {
print(error.localizedDescription)
}
let videoSize = clipVideoTrack.naturalSize //CGSize(width: 375, height: 300)
//to add Watermark
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
//videoLayer.backgroundColor = UIColor.red.cgColor
parentLayer.addSublayer(videoLayer)
if name != nil {
let watermarkImage = UIImage(named: name)
let imageLayer = CALayer()
//imageLayer.backgroundColor = UIColor.purple.cgColor
imageLayer.contents = watermarkImage?.cgImage
var xPosition : CGFloat = 0.0
var yPosition : CGFloat = 0.0
let imageSize : CGFloat = 150
switch (position) {
case .TopLeft:
xPosition = 0
yPosition = 0
break
case .TopRight:
xPosition = videoSize.width - imageSize - 100
yPosition = 80
break
case .BottomLeft:
xPosition = 0
yPosition = videoSize.height - imageSize
break
case .BottomRight, .Default:
xPosition = videoSize.width - imageSize
yPosition = videoSize.height - imageSize
break
}
imageLayer.frame = CGRect(x: xPosition, y: yPosition, width: imageSize, height: imageSize)
imageLayer.opacity = 0.75
parentLayer.addSublayer(imageLayer)
if text != nil {
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.clear.cgColor
titleLayer.string = text
titleLayer.font = "Helvetica" as CFTypeRef
titleLayer.fontSize = 20
titleLayer.alignmentMode = kCAAlignmentRight
titleLayer.frame = CGRect(x: 0, y: yPosition - imageSize, width: videoSize.width - imageSize/2 - 4, height: 57)
titleLayer.foregroundColor = UIColor.lightGray.cgColor
parentLayer.addSublayer(titleLayer)
}
}
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, 30)
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
_ = mixComposition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
//Add Fade In Out effects
let startTime = CMTime(seconds: Double(0), preferredTimescale: 1000)
let endTime = CMTime(seconds: Double(1), preferredTimescale: 1000)
let timeRange = CMTimeRange(start: startTime, end: endTime)
layerInstruction.setOpacityRamp(fromStartOpacity: 0.1, toEndOpacity: 1.0, timeRange: timeRange)
let startTime1 = CMTime(seconds: videoAsset.duration.seconds-1, preferredTimescale: 1000)
let endTime1 = CMTime(seconds: videoAsset.duration.seconds, preferredTimescale: 1000)
let timeRange1 = CMTimeRange(start: startTime1, end: endTime1)
layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.1, timeRange: timeRange1)
arrayLayerInstructions.append(layerInstruction)
instruction.layerInstructions = arrayLayerInstructions
videoComp.instructions = [instruction]
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("\(videoModal.fileID).mov")
let filePath = url.path
let fileManager = FileManager.default
do {
if fileManager.fileExists(atPath: filePath) {
print("FILE AVAILABLE")
try fileManager.removeItem(atPath:filePath)
} else {
print("FILE NOT AVAILABLE")
}
} catch _ {
}
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = url
exporter?.outputFileType = AVFileTypeQuickTimeMovie
let timeRangetoTrim = CMTimeRange(start: CMTime(seconds: Double(videoModal.leftRangeValue), preferredTimescale: 1000),
end: CMTime(seconds: Double(videoModal.rightRangeValue), preferredTimescale: 1000))
exporter?.timeRange = timeRangetoTrim
exporter?.shouldOptimizeForNetworkUse = false
exporter?.videoComposition = videoComp
exporter?.exportAsynchronously() {
DispatchQueue.main.async {
if exporter?.status == AVAssetExportSessionStatus.completed {
let outputURL = exporter?.outputURL
if flag {
if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
}) { saved, error in
if saved {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
}
}
} else {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
} else {
// Error
completion!(exporter?.status, exporter, nil)// Getting error here
}
}
}
}
}
func addAudioTrack(composition: AVMutableComposition, videoAsset: AVURLAsset, withMode mode: SpeedoVideoMode, videoModal:VideoFileModel) {
let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())
let audioTracks = videoAsset.tracks(withMediaType: AVMediaTypeAudio)
for audioTrack in audioTracks {
try! compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
}
}
func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaTypeVideo)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform: transform)
var scaleToFitRatio = UIScreen.main.bounds.width / 375
if assetInfo.isPortrait {
scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor),
at: kCMTimeZero)
} else {
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor).concatenating(CGAffineTransform(translationX: 0, y: 0))
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
let windowBounds = UIScreen.main.bounds
let yFix = 375 + windowBounds.height
let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: CGFloat(yFix))
concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
}
instruction.setTransform(concat, at: kCMTimeZero)
}
return instruction
}
private func orientationFromTransform(transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
var assetOrientation = UIImageOrientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
My code works fine for some videos, but for others it does not, and I get the error below because AVAssetExportSessionStatus is failed:
Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could not be completed"
UserInfo={NSLocalizedFailureReason=An unknown error occurred (-12780),
NSLocalizedDescription=The operation could not be completed,
NSUnderlyingError=0x28262c240 {Error Domain=NSOSStatusErrorDomain Code=-12780 "(null)"}}
Can anyone help me on this? Thank you in advance.
The method func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction is wrong, because you need to provide an AVAssetTrack, which holds the actual video.
Instead you are passing an AVCompositionTrack, which still needs to be composed, so change the signature to func videoCompositionInstructionForTrack(track: AVAssetTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction.
Then, when you call the method, pass clipVideoTrack, i.e., let layerInstruction = self.videoCompositionInstructionForTrack(track: clipVideoTrack, asset: videoAsset).
Let me know if you are still facing the error!
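Put together, the change described above would look roughly like this (a sketch based on the code in the question; orientationFromTransform is the helper the question already defines):
// Corrected signature: take the source AVAssetTrack instead of the not-yet-composed composition track.
func videoCompositionInstructionForTrack(track: AVAssetTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    let assetInfo = orientationFromTransform(transform: track.preferredTransform)
    var scaleToFitRatio = UIScreen.main.bounds.width / 375
    if assetInfo.isPortrait {
        scaleToFitRatio = UIScreen.main.bounds.width / track.naturalSize.height
    }
    let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
    instruction.setTransform(track.preferredTransform.concatenating(scaleFactor), at: kCMTimeZero)
    return instruction
}
// Call site, inside watermark(...): pass the clip's own video track.
let layerInstruction = self.videoCompositionInstructionForTrack(track: clipVideoTrack, asset: videoAsset)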
I am trying to understand the animation added to the videos here. What animation is happening? I have understood how the animation is added to the video as a whole, but I want to know how we can animate each image of the video differently.
I have used the following code to pick images and merge the videos.
class ImageVideoMakerController: UIViewController {
override func viewDidLoad() {
super.viewDidLoad()
playPauseBtn.isHidden = true
fromPlayVideo = true
fromSave = false
setUpInitialView()
}
func setUpInitialView(){
setUpArrays()
buildVideoFromImageArray()
transitionScrollViewCreation()
filterScrollContents()
}
@objc func filterActionTapped(sender:UIButton){
fromFilter = true
fromTransition = false
if(sender.tag==0){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CISepiaTone"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}else if(sender.tag==1){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectChrome"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}else if(sender.tag==2){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectTransfer"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}else if(sender.tag==3){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectTonal"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}else if(sender.tag==4){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectProcess"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}else if(sender.tag==5){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectNoir"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}else if(sender.tag==6){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectInstant"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}
else if(sender.tag==7){
player.pause()
player.seek(to: kCMTimeZero)
globalFilterName = "CIPhotoEffectFade"
applyFilter(globalFilterToBeApplied: globalFilterName!)
}
}
func applyFilter(globalFilterToBeApplied:String){
let filter = CIFilter(name: globalFilterToBeApplied)!
let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
let source = request.sourceImage.clampedToExtent()
filter.setValue(source, forKey: kCIInputImageKey)
let output = filter.outputImage!.cropped(to: request.sourceImage.extent)
request.finish(with: output, context: nil)
})
globalrVideoComposition = composition
self.playVideoInPlayer(animatedVideoURL: self.globalVideoURL as URL)
}
func playVideoInPlayer(animatedVideoURL:URL){
if(globalFilterName != nil){
self.asset = AVAsset.init(url:animatedVideoURL as URL)
let newPlayerItem = AVPlayerItem.init(asset:self.asset);
newPlayerItem.videoComposition=globalrVideoComposition
self.player = AVPlayer.init(playerItem:newPlayerItem)
}else{
let newPlayerItem = AVPlayerItem.init(url:animatedVideoURL)
self.player = AVPlayer.init(playerItem:newPlayerItem)
}
NotificationCenter.default.addObserver(self, selector: #selector(self.finishedPlaying(_:)), name: NSNotification.Name.AVPlayerItemDidPlayToEndTime, object:nil)
self.playerLayer = AVPlayerLayer.init(player:self.player)
let width: CGFloat = self.videoContainerView.frame.size.width
let height: CGFloat = self.videoContainerView.frame.size.height
self.playerLayer.frame = CGRect(x: 0.0, y:0, width: width, height: height)
self.playerLayer.backgroundColor = UIColor.black.cgColor
self.playerLayer.videoGravity = .resizeAspectFill
self.videoContainerView.layer.addSublayer( self.playerLayer)
self.playPauseBtn.isHidden = false
self.playPauseBtn.setImage(UIImage.init(named:"pause"), for:.normal)
DispatchQueue.main.async {
MBProgressHUD.hideAllHUDs(for:self.view, animated:true)
self.player.play()
}
}
func exportVideoWithAnimation() {
let composition = AVMutableComposition()
let track = self.asset?.tracks(withMediaType: AVMediaType.video)
let videoTrack:AVAssetTrack = track![0] as AVAssetTrack
let timerange = CMTimeRangeMake(kCMTimeZero, (self.asset?.duration)!)
let compositionVideoTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID())!
do {
try compositionVideoTrack.insertTimeRange(timerange, of: videoTrack, at: kCMTimeZero)
compositionVideoTrack.preferredTransform = videoTrack.preferredTransform
} catch {
print(error)
}
//if your video has sound, you don’t need to check this
if self.audioIsEnabled {
let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID())!
for audioTrack in (self.asset?.tracks(withMediaType: AVMediaType.audio))! {
do {
try compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
} catch {
print(error)
}
}
}
let size = videoTrack.naturalSize
let videolayer = CALayer()
videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
parentlayer.addSublayer(videolayer)
var time = [0.00001, 3, 6, 9, 12] //I used this time array to determine the start time of each frame's animation. Each frame stays for 3 secs, that's why their difference is 3
var imgarray = self.selectedImageArray
for image in 0..<self.selectedImageArray.count {
let nextPhoto = imgarray[image]
let horizontalRatio = CGFloat(self.outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(self.outputSize.height) / nextPhoto.size.height
let aspectRatio = min(horizontalRatio, verticalRatio)
let newSize: CGSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
let blackLayer = CALayer()
///#7. opacity(1->0)(top->bottom)///
//#3. top->bottom///
//MARK:- Animations==================================
///#1. left->right///
if(self.globalSelectedTransitionTag == 0){
blackLayer.frame = CGRect(x: -videoTrack.naturalSize.width, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
blackLayer.backgroundColor = UIColor.black.cgColor
let imageLayer = CALayer()
imageLayer.frame = CGRect(x: x, y: y, width: newSize.width, height: newSize.height)
imageLayer.contents = imgarray[image].cgImage
blackLayer.addSublayer(imageLayer)
let animation = CABasicAnimation()
animation.keyPath = "position.x"
animation.fromValue = -videoTrack.naturalSize.width
animation.toValue = 5 * (videoTrack.naturalSize.width)
animation.duration = 5
animation.beginTime = CFTimeInterval(time[image])
animation.fillMode = kCAFillModeForwards
animation.isRemovedOnCompletion = false
blackLayer.add(animation, forKey: "opacity")
}
parentlayer.addSublayer(blackLayer)
}
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(1, 30)
layercomposition.renderSize = size
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
let videotrack = composition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
instruction.layerInstructions = [layerinstruction]
layercomposition.instructions = [instruction]
if(fromTransition){
self.globalrVideoComposition = layercomposition
}
let animatedVideoURL = NSURL(fileURLWithPath: NSHomeDirectory() + "/Documents/video2.mp4")
self.removeFileAtURLIfExists(url: animatedVideoURL)
guard let assetExport = AVAssetExportSession(asset: composition, presetName:AVAssetExportPresetHighestQuality) else {return}
assetExport.videoComposition = self.globalrVideoComposition
assetExport.outputFileType = AVFileType.mp4
assetExport.outputURL = animatedVideoURL as URL
print("****** animatedVideoURL *****",animatedVideoURL)
assetExport.exportAsynchronously(completionHandler: {
switch assetExport.status{
case AVAssetExportSessionStatus.failed:
print("failed \(String(describing: assetExport.error))")
case AVAssetExportSessionStatus.cancelled:
print("cancelled \(String(describing: assetExport.error))")
default:
print("Exported")
if(self.fromPlayVideo){
DispatchQueue.main.async {
self.globalVideoURL = animatedVideoURL; self.playVideoInPlayer(animatedVideoURL: animatedVideoURL as URL)
}
}else if(self.fromSave){
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: animatedVideoURL as URL)
}) { saved, error in
if saved {
}else{
}
}
}
}
}
})
}
//MARK:- Make ScrollViews
@objc func transitionTapped(sender:UIButton){
self.fromSave = false
self.fromPlayVideo = true
self.playPauseBtn.isHidden = true
self.playerLayer.removeFromSuperlayer()
globalSelectedTransitionTag = sender.tag
exportVideoWithAnimation()
}
}
If I'm not mistaken the animation there is nothing more than an opacity animation.
let animation = CABasicAnimation(keyPath: "opacity")
it "fades in" over a few seconds.
It looks like there's also a "scale animation" which just scales it up in size.
The code you give is badly written and messy, so I would forget about it and not look at it, as a beginner.
As a beginner, I would not jump in to "video .. and animations too!" at first.
Just try making some "simple" animations in your app. A good thing to start with is something that "slides on and off the screen" or perhaps just fades in and out. (So, try doing those things to a button or the like.)
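For example, a minimal sketch of those two ideas applied to an ordinary view's layer (myButton is just a placeholder name for whatever view you animate):
// Fade in: animate the layer's opacity from 0 to 1 over two seconds.
let fade = CABasicAnimation(keyPath: "opacity")
fade.fromValue = 0.0
fade.toValue = 1.0
fade.duration = 2.0
myButton.layer.add(fade, forKey: "fadeIn")

// Slide on from the left: animate position.x from off screen to where the view sits.
let slide = CABasicAnimation(keyPath: "position.x")
slide.fromValue = -myButton.bounds.width
slide.toValue = myButton.layer.position.x
slide.duration = 2.0
myButton.layer.add(slide, forKey: "slideOn")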
I'm a newbie to Swift. I'm trying to add a watermark with reference to code from SO. My original video resolution is 1280 X 720, but the output video is a shrunk version.
Here are the before and after pictures
Here is my function to create a watermark.
private func watermark(video videoAsset:AVAsset, watermarkText text : String!, image : CGImage!, saveToLibrary flag : Bool, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
DispatchQueue.global(qos: DispatchQoS.QoSClass.default).async {
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
}
catch {
print(error.localizedDescription)
}
let videoSize = clipVideoTrack.naturalSize
print("Video size", videoSize.height) //720
print("Video size", videoSize.width) //1280
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0.0,
y: 0.0,
width: videoSize.width,
height: videoSize.height)
videoLayer.frame = CGRect(x: 0.0,
y: 0.0,
width: videoSize.width,
height: videoSize.height)
parentLayer.addSublayer(videoLayer)
if text != nil {
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.red.cgColor
titleLayer.string = text
titleLayer.font = "Helvetica" as CFTypeRef
titleLayer.fontSize = 15
titleLayer.alignmentMode = kCAAlignmentCenter
titleLayer.bounds = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(titleLayer)
} else if image != nil {
let imageLayer = CALayer()
imageLayer.contents = image
let width: CGFloat = (self.imageView.image?.size.width)!
let height: CGFloat = (self.imageView.image?.size.height)!
print("Video size", height) //720
print("Video size", width) //1280
imageLayer.frame = CGRect(x: 0.0, y: 0.0, width: width, height: height)
imageLayer.opacity = 0.65
parentLayer.addSublayer(imageLayer)
}
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, Int32(clipVideoTrack.nominalFrameRate))
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
_ = mixComposition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: Date())
let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("watermarkVideo-\(date).mov")
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = url
exporter?.outputFileType = AVFileTypeQuickTimeMovie
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = videoComp
exporter?.exportAsynchronously() {
DispatchQueue.main.async {
if exporter?.status == AVAssetExportSessionStatus.completed {
let outputURL = exporter?.outputURL
if flag {
if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
}) { saved, error in
if saved {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
}
}
} else {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
} else {
// Error
completion!(exporter?.status, exporter, nil)
}
}
}
}
}
While the size of the watermark image is correct, the video is shrunk.
Can you try this function?
private func watermark(video videoAsset: AVAsset, watermarkText text : String!, image : CGImage!, saveToLibrary flag : Bool, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
DispatchQueue.global(qos: DispatchQoS.QoSClass.default).async {
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
}
catch {
print(error.localizedDescription)
}
let videoSize = clipVideoTrack.naturalSize
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0.0,
y: 0.0,
width: videoSize.width,
height: videoSize.height)
videoLayer.frame = CGRect(x: 0.0,
y: 0.0,
width: videoSize.width,
height: videoSize.height)
parentLayer.addSublayer(videoLayer)
// if text != nil {
// let titleLayer = CATextLayer()
// titleLayer.backgroundColor = UIColor.red.cgColor
// titleLayer.string = text
// titleLayer.font = "Helvetica" as CFTypeRef
// titleLayer.fontSize = 15
// titleLayer.alignmentMode = kCAAlignmentCenter
// titleLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
// parentLayer.addSublayer(titleLayer)
// } else
if image != nil {
let imageLayer = CALayer()
imageLayer.contents = image
let width: CGFloat = (self.imageView.image?.size.width)!
let height: CGFloat = (self.imageView.image?.size.height)!
//
print("Video size", height)
print("Video size", width)
imageLayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
// imageLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
imageLayer.opacity = 1
parentLayer.addSublayer(imageLayer)
}
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, Int32(clipVideoTrack.nominalFrameRate))
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
let videotrack = mixComposition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
// let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: Date())
let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("watermarkVideo-\(date).mp4")
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return}
exporter.videoComposition = videoComp
exporter.outputFileType = AVFileTypeMPEG4
exporter.outputURL = url
exporter.exportAsynchronously() {
DispatchQueue.main.async {
if exporter.status == AVAssetExportSessionStatus.completed {
let outputURL = exporter.outputURL
if flag {
// Save to library
// let library = ALAssetsLibrary()
if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
}) {
saved, error in
if saved {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
}
}
// if library.videoAtPathIs(compatibleWithSavedPhotosAlbum: outputURL) {
// library.writeVideoAtPathToSavedPhotosAlbum(outputURL,
// completionBlock: { (assetURL:NSURL!, error:NSError!) -> Void in
//
// completion!(AVAssetExportSessionStatus.Completed, exporter, outputURL)
// })
// }
} else {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
} else {
// Error
completion!(exporter.status, exporter, nil)
}
}
}
}
}
The code above for creating the watermarked video does not seem to be the reason for the smaller output resolution.
Problem
The resolution depends on what kind of AVAsset is put into the watermark method.
Example:
Frequently a UIImagePickerController is used. It provides the delegate method
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any])
There, one can often see something like this:
let url = info[UIImagePickerControllerMediaURL] as? URL
let videoAsset = AVAsset(url: url!)
self.watermark(video: videoAsset, watermarkText: nil, image: self.imageView.image?.cgImage ...
But with the lines above a downsized input video is used, e.g. instead of a video at 1920x1080 one ends up with a reduced size of 1280x720.
Solution
A method for determining the AVAsset from the PHAsset could look like this:
private func videoAsset(for asset: PHAsset, completion: @escaping (AVAsset?) -> Void) {
let requestOptions = PHVideoRequestOptions()
requestOptions.version = .original
PHImageManager.default().requestAVAsset(forVideo: asset, options: requestOptions, resultHandler: {
(avAsset, avAudioMix, info) in
completion(avAsset)
})
}
And where to get the PHAsset from? It can also be determined in the didFinishPickingMediaWithInfo method by using UIImagePickerControllerPHAsset:
let asset = info[UIImagePickerControllerPHAsset] as? PHAsset
Quick Test
For a quick test one could use:
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
if let asset = info[UIImagePickerControllerPHAsset] as? PHAsset {
picker.dismiss(animated: true, completion: { [weak self] in
self?.videoAsset(for: asset, completion: { (avAsset) in
if let videoAsset = avAsset {
DispatchQueue.main.async {
self?.watermark(video: videoAsset, watermarkText: nil, image: self?.imageView.image?.cgImage, saveToLibrary: true) { (exportStat: AVAssetExportSessionStatus? , session: AVAssetExportSession?, url: URL?) in
print("url: \(String(describing: url?.debugDescription))")
}
}
}
})
})
}
}
The result is a video in the original resolution with a watermark on the lower left, see screenshot of resulting video: