After applying an AVVideoComposition to my AVPlayerItem, the filter I apply does work, but the video gets rotated in the AVPlayerLayer.
I know for a fact that the problem is not with the filtered frame because if I show the frame in a UIImageView, the frame is rendered 100% correctly.
The video shows correctly until I apply a videoComposition. Setting the videoGravity on the AVPlayerLayer does not help.
The video gets rotated 90º clockwise and gets stretched in the layer.
Essentially, the video is displayed perfectly in the AVPlayerLayer before the AVPlayerItem is fed through the AVMutableVideoComposition. Once that happens, the video is rotated -90º and then scaled to fit the same dimensions as the video before filtering. This suggests to me that the composition does not realize the track's transform is already correct, and so it reapplies that transform on top of itself.
Why is this happening, and how can I fix it?
Here is some code:
private func filterVideo(with filter: Filter?) {
    if let player = player, let playerItem = player.currentItem {
        let composition = AVMutableComposition()
        let videoAssetTrack = playerItem.asset.tracks(withMediaType: .video).first
        let videoCompositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)

        try? videoCompositionTrack?.insertTimeRange(CMTimeRange(start: kCMTimeZero, duration: playerItem.asset.duration), of: videoAssetTrack!, at: kCMTimeZero)
        videoCompositionTrack?.preferredTransform = videoAssetTrack!.preferredTransform

        let videoComposition = AVMutableVideoComposition(asset: composition, applyingCIFiltersWithHandler: { (request) in
            let filteredImage = <...>
            request.finish(with: filteredImage, context: nil)
        })

        playerItem.videoComposition = videoComposition
    }
}
The problem is with the render size of your AVVideoComposition. You should apply a transform (i.e. a rotate and translate transform) through an AVMutableVideoCompositionLayerInstruction attached to the AVMutableVideoCompositionInstruction.
I have done this in Objective-C and am posting my code below; you can convert the syntax to Swift.
Objective-c
//------------------------------------
// FIXING ORIENTATION
//------------------------------------
AVMutableVideoCompositionInstruction * MainInstruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction];
MainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeAdd(firstAsset.duration, secondAsset.duration));
AVMutableVideoCompositionLayerInstruction *FirstlayerInstruction = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:secondTrack]; // second
AVAssetTrack *FirstAssetTrack = [[firstAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];
UIImageOrientation FirstAssetOrientation_ = UIImageOrientationUp;
BOOL isFirstAssetPortrait_ = NO;
CGAffineTransform firstTransform = FirstAssetTrack.preferredTransform;
if(firstTransform.a == 0 && firstTransform.b == 1.0 && firstTransform.c == -1.0 && firstTransform.d == 0) {FirstAssetOrientation_= UIImageOrientationRight; isFirstAssetPortrait_ = YES;}
if(firstTransform.a == 0 && firstTransform.b == -1.0 && firstTransform.c == 1.0 && firstTransform.d == 0) {FirstAssetOrientation_ = UIImageOrientationLeft; isFirstAssetPortrait_ = YES;}
if(firstTransform.a == 1.0 && firstTransform.b == 0 && firstTransform.c == 0 && firstTransform.d == 1.0) {FirstAssetOrientation_ = UIImageOrientationUp;}
if(firstTransform.a == -1.0 && firstTransform.b == 0 && firstTransform.c == 0 && firstTransform.d == -1.0) {FirstAssetOrientation_ = UIImageOrientationDown;}
CGFloat FirstAssetScaleToFitRatio = 320.0/FirstAssetTrack.naturalSize.width;
if(isFirstAssetPortrait_){
FirstAssetScaleToFitRatio = 320.0/FirstAssetTrack.naturalSize.height;
CGAffineTransform FirstAssetScaleFactor = CGAffineTransformMakeScale(FirstAssetScaleToFitRatio,FirstAssetScaleToFitRatio);
[FirstlayerInstruction setTransform:CGAffineTransformConcat(FirstAssetTrack.preferredTransform, FirstAssetScaleFactor) atTime:kCMTimeZero];
}else{
CGAffineTransform FirstAssetScaleFactor = CGAffineTransformMakeScale(FirstAssetScaleToFitRatio,FirstAssetScaleToFitRatio);
[FirstlayerInstruction setTransform:CGAffineTransformConcat(CGAffineTransformConcat(FirstAssetTrack.preferredTransform, FirstAssetScaleFactor),CGAffineTransformMakeTranslation(0, 160)) atTime:kCMTimeZero];
}
[FirstlayerInstruction setOpacity:0.0 atTime:firstAsset.duration];
AVMutableVideoCompositionLayerInstruction *SecondlayerInstruction = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:firstTrack];
AVAssetTrack *SecondAssetTrack = [[secondAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];
UIImageOrientation SecondAssetOrientation_ = UIImageOrientationUp;
BOOL isSecondAssetPortrait_ = NO;
CGAffineTransform secondTransform = SecondAssetTrack.preferredTransform;
if(secondTransform.a == 0 && secondTransform.b == 1.0 && secondTransform.c == -1.0 && secondTransform.d == 0) {SecondAssetOrientation_= UIImageOrientationRight; isSecondAssetPortrait_ = YES;}
if(secondTransform.a == 0 && secondTransform.b == -1.0 && secondTransform.c == 1.0 && secondTransform.d == 0) {SecondAssetOrientation_ = UIImageOrientationLeft; isSecondAssetPortrait_ = YES;}
if(secondTransform.a == 1.0 && secondTransform.b == 0 && secondTransform.c == 0 && secondTransform.d == 1.0) {SecondAssetOrientation_ = UIImageOrientationUp;}
if(secondTransform.a == -1.0 && secondTransform.b == 0 && secondTransform.c == 0 && secondTransform.d == -1.0) {SecondAssetOrientation_ = UIImageOrientationDown;}
CGFloat SecondAssetScaleToFitRatio = 320.0/SecondAssetTrack.naturalSize.width;
if(isSecondAssetPortrait_){
SecondAssetScaleToFitRatio = 320.0/SecondAssetTrack.naturalSize.height;
CGAffineTransform SecondAssetScaleFactor = CGAffineTransformMakeScale(SecondAssetScaleToFitRatio,SecondAssetScaleToFitRatio);
[SecondlayerInstruction setTransform:CGAffineTransformConcat(SecondAssetTrack.preferredTransform, SecondAssetScaleFactor) atTime:firstAsset.duration];
}else{
CGAffineTransform SecondAssetScaleFactor = CGAffineTransformMakeScale(SecondAssetScaleToFitRatio,SecondAssetScaleToFitRatio);
[SecondlayerInstruction setTransform:CGAffineTransformConcat(CGAffineTransformConcat(SecondAssetTrack.preferredTransform, SecondAssetScaleFactor),CGAffineTransformMakeTranslation(0, 160)) atTime:secondAsset.duration];
}
MainInstruction.layerInstructions = [NSArray arrayWithObjects:FirstlayerInstruction, SecondlayerInstruction, nil];
AVMutableVideoComposition *MainCompositionInst = [AVMutableVideoComposition videoComposition];
MainCompositionInst.instructions = [NSArray arrayWithObject:MainInstruction];
MainCompositionInst.frameDuration = CMTimeMake(1, 30);
MainCompositionInst.renderSize = CGSizeMake(320.0, 480.0);
// Now , you have Orientation Fixed Instrucation layer
// add this composition to your video 😀
// If you want to export Video than you can do like below
NSString *documentsDirectory = [NSHomeDirectory()
stringByAppendingPathComponent:@"Documents"];
NSString *myPathDocs = [documentsDirectory stringByAppendingPathComponent:[NSString stringWithFormat:@"final_merged_video-%d.mp4",arc4random() % 1000]];
NSURL *url = [NSURL fileURLWithPath:myPathDocs];
// 5 - Create exporter
AVAssetExportSession *exporter = [[AVAssetExportSession alloc] initWithAsset:mixComposition
presetName:AVAssetExportPreset640x480];
exporter.outputURL=url;
exporter.videoComposition=MainCompositionInst;
exporter.outputFileType = AVFileTypeQuickTimeMovie;
exporter.shouldOptimizeForNetworkUse = YES;
[exporter exportAsynchronouslyWithCompletionHandler:^{
dispatch_async(dispatch_get_main_queue(), ^
{
[[AppDelegate Getdelegate] hideIndicator];
[self exportDidFinish:exporter];
});
}];
For a Swift version, see the linked answer.
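For reference, here is a minimal Swift sketch of the same idea: apply the track's preferredTransform (concatenated with a scale) through a layer instruction and set the render size to match. It assumes a plain AVAsset, and the 320-point target width is only an example mirroring the Objective-C code above.
func orientationFixedComposition(for asset: AVAsset) -> AVMutableVideoComposition? {
    guard let track = asset.tracks(withMediaType: .video).first else { return nil }

    let transform = track.preferredTransform
    // Portrait if the preferredTransform swaps the x and y axes.
    let isPortrait = abs(transform.b) == 1.0 && abs(transform.c) == 1.0

    let targetWidth: CGFloat = 320.0 // example value, as in the Objective-C code
    let scale = isPortrait ? targetWidth / track.naturalSize.height
                           : targetWidth / track.naturalSize.width
    let scaleTransform = CGAffineTransform(scaleX: scale, y: scale)

    // Rotate (via preferredTransform) first, then scale to the target width.
    let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    layerInstruction.setTransform(transform.concatenating(scaleTransform), at: kCMTimeZero)

    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRange(start: kCMTimeZero, duration: asset.duration)
    instruction.layerInstructions = [layerInstruction]

    let composition = AVMutableVideoComposition()
    composition.instructions = [instruction]
    composition.frameDuration = CMTimeMake(1, 30)
    composition.renderSize = isPortrait
        ? CGSize(width: targetWidth, height: targetWidth * track.naturalSize.width / track.naturalSize.height)
        : CGSize(width: targetWidth, height: targetWidth * track.naturalSize.height / track.naturalSize.width)
    return composition
}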
In addition, you can also try rotating your player layer itself by applying a rotation transform to it:
#define degreeToRadian(x) (M_PI * x / 180.0)
[_playerLayer setAffineTransform:CGAffineTransformMakeRotation(degreeToRadian(degree))]
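A rough Swift equivalent, assuming playerLayer is your AVPlayerLayer and you want a 90-degree rotation:
let degrees: CGFloat = 90
playerLayer.setAffineTransform(CGAffineTransform(rotationAngle: degrees * .pi / 180))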
If you are trying to play an AVMutableComposition, you should set the AVMutableCompositionTrack's preferredTransform to the AVAssetTrack's preferredTransform.
let asset = AVAsset(url: url!)
let composition = AVMutableComposition()
let compositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
let videoTrack = asset.tracks(withMediaType: AVMediaTypeVideo).first

try? compositionTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, asset.duration), of: videoTrack!, at: kCMTimeZero)
compositionTrack.preferredTransform = (videoTrack?.preferredTransform)!

let playerItem = AVPlayerItem(asset: composition)
let filter = CIFilter(name: "CIColorInvert")
playerItem.videoComposition = AVVideoComposition(asset: composition, applyingCIFiltersWithHandler: { (request: AVAsynchronousCIImageFilteringRequest) in
    filter?.setValue(request.sourceImage, forKey: kCIInputImageKey)
    request.finish(with: (filter?.outputImage)!, context: nil)
})
// ... the rest of the code
Instead of assuming that the image will be filtered, first check whether filteredImage is nil. If it isn't, call request.finish(with: filteredImage, context: nil).
However, if it is nil you must call request.finish(with: SomeError).
This is as per the docs.
What worked for me at the end:
private func filterVideo(with filter: Filter?) {
    guard let player = playerLayer?.player, let playerItem = player.currentItem else { return }

    let videoComposition = AVVideoComposition(asset: playerItem.asset, applyingCIFiltersWithHandler: { (request) in
        if let filter = filter {
            if let filteredImage = filter.filterImage(request.sourceImage) {
                let output = filteredImage.cropping(to: request.sourceImage.extent)
                request.finish(with: output, context: nil)
            } else {
                printError("Image not filtered")
                request.finish(with: RenderError.couldNotFilter)
            }
        } else {
            let output = request.sourceImage.cropping(to: request.sourceImage.extent)
            request.finish(with: output, context: nil)
        }
    })

    playerItem.videoComposition = videoComposition
}
This is the filterImage function of Filter, which is just a nice little wrapper for CIFilter:
func filterImage(_ ciImage: CIImage) -> CIImage? {
    guard let filter = ciFilter else { return nil }
    filter.setDefaults()
    filter.setValue(ciImage, forKey: kCIInputImageKey)
    guard let filteredImageData = filter.value(forKey: kCIOutputImageKey) as? CIImage else { return nil }
    return filteredImageData
}
Try the code below, which worked for me:
// Grab the source track from an AVURLAsset, for example.
let assetV = YourAVASSET.tracks(withMediaType: AVMediaTypeVideo).last

// Grab the composition video track from the AVMutableComposition you already made.
let compositionV = YourComposition.tracks(withMediaType: AVMediaTypeVideo).last

// Apply the original transform.
if assetV != nil && compositionV != nil {
    compositionV?.preferredTransform = (assetV?.preferredTransform)!
}
And then go ahead and export your video...
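For completeness, a minimal export sketch; YourComposition is the composition from above and myVideoComposition is whatever video composition you configured (both are placeholder names):
let outputURL = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent("export.mov")
try? FileManager.default.removeItem(at: outputURL) // remove any previous export

if let exporter = AVAssetExportSession(asset: YourComposition, presetName: AVAssetExportPresetHighestQuality) {
    exporter.outputURL = outputURL
    exporter.outputFileType = AVFileType.mov
    exporter.videoComposition = myVideoComposition // only if you built one
    exporter.exportAsynchronously {
        print("Export finished with status: \(exporter.status.rawValue)")
    }
}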
Related
Objective: I have a video with a UIView on top of it that contains animated GIFs (not stored locally, fetched via the Giphy API), text, or hand drawings. I want to export the video together with this overlay as a single video.
What I did :
I created a UIView that holds the animations, converted it to a CALayer, and added it to the video with an AVMutableVideoComposition.
Problem: the UIView with the animations is being rendered as a still image instead of as a video. How can I solve this?
Below is the Program for my export session. Any pointers will be really helpful.
func convertVideoAndSaveTophotoLibrary(videoURL: URL) {
let file = FileManager.shared.getDocumentDirectory(path: currentFilename)
FileManager.shared.clearPreviousFiles(withPath: file.path)
// File to composit
let asset = AVURLAsset(url: videoURL as URL)
let composition = AVMutableComposition.init()
composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
let clipVideoTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
// Rotate to potrait
let transformer = AVMutableVideoCompositionLayerInstruction(assetTrack: clipVideoTrack)
let videoTransform:CGAffineTransform = clipVideoTrack.preferredTransform
//fix orientation
var videoAssetOrientation_ = UIImage.Orientation.up
var isVideoAssetPortrait_ = false
if videoTransform.a == 0 && videoTransform.b == 1.0 && videoTransform.c == -1.0 && videoTransform.d == 0 {
videoAssetOrientation_ = UIImage.Orientation.right
isVideoAssetPortrait_ = true
}
if videoTransform.a == 0 && videoTransform.b == -1.0 && videoTransform.c == 1.0 && videoTransform.d == 0 {
videoAssetOrientation_ = UIImage.Orientation.left
isVideoAssetPortrait_ = true
}
if videoTransform.a == 1.0 && videoTransform.b == 0 && videoTransform.c == 0 && videoTransform.d == 1.0 {
videoAssetOrientation_ = UIImage.Orientation.up
}
if videoTransform.a == -1.0 && videoTransform.b == 0 && videoTransform.c == 0 && videoTransform.d == -1.0 {
videoAssetOrientation_ = UIImage.Orientation.down;
}
transformer.setTransform(clipVideoTrack.preferredTransform, at: CMTime.zero)
transformer.setOpacity(0.0, at: asset.duration)
//adjust the render size if neccessary
var naturalSize: CGSize
if(isVideoAssetPortrait_){
naturalSize = CGSize(width: clipVideoTrack.naturalSize.height, height: clipVideoTrack.naturalSize.width)
} else {
naturalSize = clipVideoTrack.naturalSize;
}
var renderWidth: CGFloat!
var renderHeight: CGFloat!
renderWidth = naturalSize.width
renderHeight = naturalSize.height
let parentlayer = CALayer()
let videoLayer = CALayer()
let watermarkLayer = CALayer()
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = CGSize(width: renderWidth, height: renderHeight)
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
videoComposition.renderScale = 1.0
//---------------------->>>>>> converting uiview to uiimage
watermarkLayer.contents = canvasView.asImage().cgImage
parentlayer.frame = CGRect(origin: CGPoint(x: 0, y: 0), size: naturalSize)
videoLayer.frame = CGRect(origin: CGPoint(x: 0, y: 0), size: naturalSize)
watermarkLayer.frame = CGRect(origin: CGPoint(x: 0, y: 0), size: naturalSize)
parentlayer.addSublayer(videoLayer)
parentlayer.addSublayer(watermarkLayer)
//---------------------->>>>>> Add view to video
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayers: [videoLayer], in: parentlayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: CMTimeMakeWithSeconds(60, preferredTimescale: 30))
instruction.layerInstructions = [transformer]
videoComposition.instructions = [instruction]
let exporter = AVAssetExportSession.init(asset: asset, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputFileType = AVFileType.mp4
exporter?.outputURL = file
exporter?.videoComposition = videoComposition
exporter?.shouldOptimizeForNetworkUse = true
exporter!.exportAsynchronously(completionHandler: {() -> Void in
if exporter?.status == .completed {
let outputURL: URL? = exporter?.outputURL
self.saveToPhotoLibrary(url: outputURL!)
}
})
}
Converting UIView to UIImage
extension UIView {
func asImage() -> UIImage {
let renderer = UIGraphicsImageRenderer(bounds: bounds)
return renderer.image { rendererContext in
layer.render(in: rendererContext.cgContext)
}
}
}
Code for adding the GIF (I am using the Giphy API here), so the GIF is downloaded and then added:
func didSelectMedia(giphyViewController: GiphyViewController, media: GPHMedia) {
addMedia(media: media)
giphyViewController.dismiss(animated: true) { [weak self] in
self?.giphy = nil
}
}
// GPHMediaView is a subclass of UIImageView
func addMedia(media: GPHMedia) {
let mediaView = GPHMediaView()
mediaView.media = media
mediaView.contentMode = .scaleAspectFill
mediaView.frame.size = CGSize(width: 150, height: 150)
mediaView.center = canvasView.center
canvasView.addSubview(mediaView)
print(mediaView.frame)
self.addGesturesTo(mediaView)
}
What I am getting: the cat over the video is a GIF, but sadly all I get is a single frame. I know that is because I am converting the view to an image, but that is exactly the part I need help with: how do I get the GIF merged into the video?
You have two ways to achieve this. First, you can convert the GIF to a video and add it to the composition, but you lose the alpha channel. The second and more relevant way is to add a CAKeyframeAnimation on the GIF layer. To do this, get all the image frames from the GIF, put them into CAKeyframeAnimation.values, and set the duration to the frame count multiplied by the per-frame duration (see the frame-extraction sketch after the snippet below).
class func makeContentAnimation(beginTime: Double, values: [Any], frameRate: Double) -> CAKeyframeAnimation {
    let animation = CAKeyframeAnimation(keyPath: "contents")
    animation.values = values
    animation.beginTime = beginTime.isZero ? AVCoreAnimationBeginTimeAtZero : beginTime
    animation.duration = frameRate * Double(values.count)
    animation.isRemovedOnCompletion = false
    animation.repeatCount = .infinity
    return animation
}
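To feed that animation you need the individual GIF frames as CGImages. Below is a minimal sketch using ImageIO; gifData is assumed to be the downloaded GIF bytes (e.g. fetched from the Giphy media URL), and the per-frame delay is read from the GIF properties. The commented usage at the end shows how it could plug into makeContentAnimation and the watermarkLayer from the question (YourClass is a placeholder for whatever type declares makeContentAnimation).
import ImageIO
import UIKit

// Extract the frames and an approximate per-frame duration from GIF data.
func gifFrames(from gifData: Data) -> (images: [CGImage], frameDuration: Double)? {
    guard let source = CGImageSourceCreateWithData(gifData as CFData, nil) else { return nil }
    let count = CGImageSourceGetCount(source)
    guard count > 0 else { return nil }

    var images: [CGImage] = []
    var delays: [Double] = []
    for index in 0..<count {
        guard let image = CGImageSourceCreateImageAtIndex(source, index, nil) else { continue }
        images.append(image)

        // Read the frame delay from the GIF properties (defaults to 0.1 s).
        var delay = 0.1
        if let properties = CGImageSourceCopyPropertiesAtIndex(source, index, nil) as? [String: Any],
           let gifInfo = properties[kCGImagePropertyGIFDictionary as String] as? [String: Any],
           let unclamped = gifInfo[kCGImagePropertyGIFUnclampedDelayTime as String] as? Double,
           unclamped > 0 {
            delay = unclamped
        }
        delays.append(delay)
    }
    guard !images.isEmpty else { return nil }
    let averageDelay = delays.reduce(0, +) / Double(delays.count)
    return (images, averageDelay)
}

// Usage: attach the animation to the overlay layer instead of a static image.
// if let (frames, frameDuration) = gifFrames(from: gifData) {
//     let animation = YourClass.makeContentAnimation(beginTime: 0,
//                                                    values: frames,
//                                                    frameRate: frameDuration)
//     watermarkLayer.add(animation, forKey: "contents")
// }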
I am trying to simply speed up the export of my merged video.
Here is the code. (From my extensive research online and on SO, it seems the AVAssetExportPresetPassthrough preset makes it super fast; however, as noted in a comment in the code, my merging code does not seem to work with that preset.)
static func videoCompositionInstruction(_ track: AVCompositionTrack, asset: AVAsset)
-> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: .video)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform)
var scaleToFitRatio = 1080 / assetTrack.naturalSize.width
if assetInfo.isPortrait {
scaleToFitRatio = 1080 / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var finalTransform = assetTrack.preferredTransform.concatenating(scaleFactor)
//was needed in my case (if video not taking entire screen and leaving some parts black - don't know when actually needed so you'll have to try and see when it's needed)
if assetInfo.orientation == .rightMirrored || assetInfo.orientation == .leftMirrored {
finalTransform = finalTransform.translatedBy(x: -transform.ty, y: 0)
}
instruction.setTransform(finalTransform, at: CMTime.zero)
} else {
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor)
.concatenating(CGAffineTransform(translationX: 0, y: UIScreen.main.bounds.width / 2))
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
let windowBounds = UIScreen.main.bounds
let yFix = assetTrack.naturalSize.height + windowBounds.height
let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: yFix)
concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
}
instruction.setTransform(concat, at: CMTime.zero)
}
return instruction
}
static func orientationFromTransform(_ transform: CGAffineTransform)
-> (orientation: UIImage.Orientation, isPortrait: Bool) {
var assetOrientation = UIImage.Orientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == 1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .rightMirrored
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .leftMirrored
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
func mergeVideosTestSQ(arrayVideos:[AVAsset], completion:@escaping (URL?, Error?) -> ()) {
let mixComposition = AVMutableComposition()
var instructions: [AVMutableVideoCompositionLayerInstruction] = []
var insertTime = CMTime(seconds: 0, preferredTimescale: 1)
/// for each URL add the video and audio tracks and their duration to the composition
for sourceAsset in arrayVideos {
let frameRange = CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1), duration: sourceAsset.duration)
guard
let nthVideoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
let nthAudioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)), //0 used to be kCMPersistentTrackID_Invalid
let assetVideoTrack = sourceAsset.tracks(withMediaType: .video).first
else {
print("didnt work")
return
}
var assetAudioTrack: AVAssetTrack?
assetAudioTrack = sourceAsset.tracks(withMediaType: .audio).first
print(assetAudioTrack, ",-- assetAudioTrack???", assetAudioTrack?.asset, "<-- hes", sourceAsset)
do {
try nthVideoTrack.insertTimeRange(frameRange, of: assetVideoTrack, at: insertTime)
try nthAudioTrack.insertTimeRange(frameRange, of: assetAudioTrack!, at: insertTime)
//instructions:
let nthInstruction = MainCamVC.videoCompositionInstruction(nthVideoTrack, asset: sourceAsset)
nthInstruction.setOpacity(0.0, at: CMTimeAdd(insertTime, sourceAsset.duration)) //sourceasset.duration
instructions.append(nthInstruction)
insertTime = insertTime + sourceAsset.duration //sourceAsset.duration
} catch {
DispatchQueue.main.async {
print("didnt wor2k")
}
}
}
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1), duration: insertTime)
mainInstruction.layerInstructions = instructions
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mainComposition.renderSize = CGSize(width: 1080, height: 1920)
let outputFileURL = URL(fileURLWithPath: NSTemporaryDirectory() + "merge.mp4")
//below to clear the video form docuent folder for new vid...
let fileManager = FileManager()
try? fileManager.removeItem(at: outputFileURL)
/// try to start an export session and set the path and file type
if let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) { //DOES NOT WORK WITH AVAssetExportPresetPassthrough
exportSession.outputFileType = .mov
exportSession.outputURL = outputFileURL
exportSession.videoComposition = mainComposition
exportSession.shouldOptimizeForNetworkUse = true
/// try to export the file and handle the status cases
exportSession.exportAsynchronously {
if let url = exportSession.outputURL{
completion(url, nil)
}
if let error = exportSession.error {
completion(nil, error)
}
}
}
}
Note that I include the layer instructions in order to preserve correct orientations.
Thanks for any help! I just need it to be faster; it currently takes roughly half the video's duration to export.
After implementing your code in my project, it seems what is making your export slow is the way you handle the renderSize, as well as the resolution of the video. On top of that, using a lower quality preset may make it faster.
Specifically, I would note this part:
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1), duration: insertTime)
mainInstruction.layerInstructions = instructions
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mainComposition.renderSize = CGSize(width: 1080, height: 1920)
Changing the renderSize to match the video composition's actual size (the property name may differ in your project) does the trick.
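In other words, derive the render size from the source track rather than hard-coding 1080x1920. A minimal sketch, assuming the first asset's video track defines the target size:
if let track = arrayVideos.first?.tracks(withMediaType: .video).first {
    // naturalSize combined with preferredTransform gives the rotation-aware display size.
    let transformed = track.naturalSize.applying(track.preferredTransform)
    mainComposition.renderSize = CGSize(width: abs(transformed.width), height: abs(transformed.height))
}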
Then in the exporting place, I suggest changing this part:
/// try to start an export session and set the path and file type
if let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) { //DOES NOT WORK WITH AVAssetExportPresetPassthrough
exportSession.outputFileType = .mov
exportSession.outputURL = outputFileURL
exportSession.videoComposition = mainComposition
exportSession.shouldOptimizeForNetworkUse = true
/// try to export the file and handle the status cases
exportSession.exportAsynchronously {
if let url = exportSession.outputURL{
completion(url, nil)
}
if let error = exportSession.error {
completion(nil, error)
}
}
}
As I said before, change the preset to a quality level one step lower or so; it will vastly improve the speed! Some transformations were also duplicated, which is something to look out for.
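For example, a lower-resolution preset when creating the export session (pick whichever quality trade-off suits you):
// Typically exports noticeably faster than AVAssetExportPresetHighestQuality.
if let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset1280x720) {
    // ... configure outputURL, outputFileType, videoComposition and export as before
}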
I need to be able to merge videos taken with the time lapse function in the Camera app on iOS and export as a single video.
However, even if I try to export a single, unchanged time lapse video to the Photo Library, it saves as a completely black video (with the correct duration). Here is the sample code I wrote to just export a single, unchanged video (most of which is adapted from a Ray Wenderlich tutorial):
@IBAction func saveVideo(_ sender: UIBarButtonItem) {
// 1 - Early exit if there's no video file selected
guard let videoAsset = self.avAsset else {
let alert = UIAlertController(title: "Error", message: "Failed to load video asset.", preferredStyle: .alert)
let cancelAction = UIAlertAction(title: "OK", style: .cancel, handler: nil)
alert.addAction(cancelAction)
self.present(alert, animated: true, completion: nil)
return
}
// 2 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
print("Preparing AVMutableComposition...")
let mixComposition = AVMutableComposition()
// 3 - Video track
let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
if let videoAssetTrack = videoAsset.tracks(withMediaType: .video).first {
try videoTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: videoAssetTrack, at: kCMTimeZero)
}
if let audioAssetTrack = videoAsset.tracks(withMediaType: .audio).first {
let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
try audioTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: audioAssetTrack, at: kCMTimeZero)
}
} catch let error as NSError {
self.presentAlert(title: "Export Error", message: "Unable to complete export due to the following error: \(error). Please try again.", block: nil)
print("error: \(error)")
}
// 3.1 - Create AVMutableVideoCompositionInstruction
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, videoAsset.duration)
// 3.2 - Create an AVMutableVideoCompositionLayerInstruction for the video track and fix the orientation.
let videoLayerInstruction: AVMutableVideoCompositionLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack!)
let videoAssetTrack = videoAsset.tracks(withMediaType: .video).first
var assetOrientation: UIImageOrientation = .up
var isPortrait = false
let t = videoAssetTrack!.preferredTransform
if t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0 {
assetOrientation = .right
isPortrait = true
} else if t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0 {
assetOrientation = .left
isPortrait = true
} else if t.a == 1.0 && t.b == 0 && t.c == 0 && t.d == 1.0 {
assetOrientation = .up
} else if t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0 {
assetOrientation = .down
}
videoLayerInstruction.setTransform(videoAssetTrack!.preferredTransform, at: kCMTimeZero)
videoLayerInstruction.setOpacity(0.0, at: videoAsset.duration)
// 3.3 - Add instructions
mainInstruction.layerInstructions = [videoLayerInstruction]
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(1, 30)
var naturalSize: CGSize
if isPortrait {
naturalSize = CGSize(width: videoAssetTrack!.naturalSize.height, height: videoAssetTrack!.naturalSize.width)
} else {
naturalSize = videoAssetTrack!.naturalSize
}
mainComposition.renderSize = CGSize(width: naturalSize.width, height: naturalSize.height)
// set up file destination
let tempName = "temp-thread.mov"
let tempURL = URL(fileURLWithPath: (NSTemporaryDirectory() as NSString).appendingPathComponent(tempName))
do {
if FileManager.default.fileExists(atPath: tempURL.path) {
try FileManager.default.removeItem(at: tempURL)
}
} catch {
print("Error removing temp file.")
}
// create final video using export session
guard let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
exportSession.outputURL = tempURL
exportSession.outputFileType = AVFileType.mov
exportSession.shouldOptimizeForNetworkUse = true
exportSession.videoComposition = mainComposition
print("Exporting video...")
exportSession.exportAsynchronously {
DispatchQueue.main.async {
switch exportSession.status {
// Success
case .completed:
print("Saving to Photos Library...")
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: exportSession.outputURL!)
}) { success, error in
if success {
print("Added video to library - success: \(success), error: \(String(describing: error?.localizedDescription))")
} else {
print("Added video to library - success: \(success), error: \(String(describing: error!.localizedDescription))")
}
let _ = try? FileManager.default.removeItem(at: tempURL)
}
print("Export session completed")
// Status other than success
case .cancelled, .exporting, .failed, .unknown, .waiting:
print("Export status: \(exportSession.status.rawValue)")
print("Reason: \(String(describing: exportSession.error))")
}
}
}
}
Why would the resulting video show up completely black? I can't seem to find much documentation on Apple's time lapse videos, so I'm not sure why they might be different than a regular video file. They seem to have a frame rate of 30fps and if I inspect one on my Mac, it's just a regular QuickTime movie file without an audio channel. Any ideas? Exporting any other video with this code (even ones without audio) works flawlessly.
The problem code is:
videoLayerInstruction.setTransform(videoAssetTrack!.preferredTransform, at: kCMTimeZero)
This transform is only correct for the "up" (default) orientation; for the other orientations it produces a completely black video. You should build a correct transform for each orientation, e.g.:
var transform = videoAssetTrack.preferredTransform

// Right
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
    isPortrait = true
    let rotate = CGAffineTransform.identity.translatedBy(x: videoAssetTrack.naturalSize.height - videoAssetTrack.preferredTransform.tx, y: -videoAssetTrack.preferredTransform.ty)
    transform = videoAssetTrack.preferredTransform.concatenating(rotate)
}
// Left
else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
    isPortrait = true
    let rotate = CGAffineTransform.identity.translatedBy(x: -videoAssetTrack.preferredTransform.tx, y: videoAssetTrack.naturalSize.width - videoAssetTrack.preferredTransform.ty)
    transform = videoAssetTrack.preferredTransform.concatenating(rotate)
}
// Up
else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
    transform = videoAssetTrack.preferredTransform
}
// Down
else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
    let rotate = CGAffineTransform.identity.translatedBy(x: videoAssetTrack.naturalSize.width - videoAssetTrack.preferredTransform.tx, y: videoAssetTrack.naturalSize.height - videoAssetTrack.preferredTransform.ty)
    transform = videoAssetTrack.preferredTransform.concatenating(rotate)
}

videoLayerInstruction.setTransform(transform, at: .zero)
I took a PNG image and a video for watermarking; both are portrait. I watermarked the image onto the video.
After watermarking, the merged video comes out in landscape mode, rotated 90 degrees anticlockwise.
I cannot figure out why the video is flipped from portrait to landscape, while the image shows up as a stretched portrait.
Please help. Thanks in advance.
I used the code below:
- (void)addWatermarkAtVideoFile:(NSURL *)videoURL image:(UIImage *)image withConvertedVideoUUID:(NSString *)convertedVideoUUID response:(void(^)(BOOL success, NSString *videoUUID, NSURL *videoPath))responseBlock {
AVURLAsset* videoAsset = [[AVURLAsset alloc]initWithURL:videoURL options:nil];
AVMutableComposition* mixComposition = [AVMutableComposition composition];
AVAssetTrack *clipVideoTrack = [[videoAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];
if(clipVideoTrack) {
AVMutableCompositionTrack *compositionVideoTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeVideo preferredTrackID:kCMPersistentTrackID_Invalid];
[compositionVideoTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, videoAsset.duration) ofTrack:clipVideoTrack atTime:kCMTimeZero error:nil];
[compositionVideoTrack setPreferredTransform:[[[videoAsset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0] preferredTransform]];
}
AVAssetTrack *clipAudioTrack = [[videoAsset tracksWithMediaType:AVMediaTypeAudio] objectAtIndex:0];
if(clipAudioTrack) {
AVMutableCompositionTrack *compositionAudioTrack = [mixComposition addMutableTrackWithMediaType:AVMediaTypeAudio preferredTrackID:kCMPersistentTrackID_Invalid];
[compositionAudioTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, videoAsset.duration) ofTrack:clipAudioTrack atTime:kCMTimeZero error:nil];
}
CGSize sizeOfVideo=[videoAsset naturalSize];
//Image of watermark
UIImage *myImage = image;
CALayer *layerCa = [CALayer layer];
layerCa.contents = (id)myImage.CGImage;
layerCa.frame = CGRectMake(0, 0, sizeOfVideo.width, sizeOfVideo.height);
layerCa.opacity = 1.0;
CALayer *parentLayer=[CALayer layer];
CALayer *videoLayer=[CALayer layer];
parentLayer.frame=CGRectMake(0, 0, sizeOfVideo.width, sizeOfVideo.height);
videoLayer.frame=CGRectMake(0, 0, sizeOfVideo.width, sizeOfVideo.height);
[parentLayer addSublayer:videoLayer];
[parentLayer addSublayer:layerCa];
AVMutableVideoComposition *videoComposition=[AVMutableVideoComposition videoComposition] ;
videoComposition.frameDuration=CMTimeMake(1, 30);
videoComposition.renderSize=sizeOfVideo;
videoComposition.animationTool=[AVVideoCompositionCoreAnimationTool videoCompositionCoreAnimationToolWithPostProcessingAsVideoLayer:videoLayer inLayer:parentLayer];
AVMutableVideoCompositionInstruction *instruction = [AVMutableVideoCompositionInstruction videoCompositionInstruction];
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, [mixComposition duration]);
AVAssetTrack *videoTrack = [[mixComposition tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];
AVMutableVideoCompositionLayerInstruction* layerInstruction = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:videoTrack];
instruction.layerInstructions = [NSArray arrayWithObject:layerInstruction];
videoComposition.instructions = [NSArray arrayWithObject: instruction];
//Creating temp path to save the converted video
NSString* myDocumentPath = [self getDocumentDirectoryPathWithFileName:convertedVideoUUID];
NSURL *outputFileURL = [[NSURL alloc] initFileURLWithPath:myDocumentPath];
//Check if the file already exists then remove the previous file
[self removeFileIfExistAtPAth:myDocumentPath];
AVAssetExportSession *exportSession = [[AVAssetExportSession alloc] initWithAsset:mixComposition presetName:AVAssetExportPresetHighestQuality];
exportSession.videoComposition=videoComposition;
exportSession.outputURL = outputFileURL;
exportSession.outputFileType = AVFileTypeQuickTimeMovie;
[exportSession exportAsynchronouslyWithCompletionHandler:^{
switch (exportSession.status)
{
case AVAssetExportSessionStatusCompleted:
NSLog(#"Export OK");
[self saveInPhotoAlbum:myDocumentPath];
break;
case AVAssetExportSessionStatusFailed:
NSLog (#"AVAssetExportSessionStatusFailed: %#", exportSession.error);
break;
case AVAssetExportSessionStatusCancelled:
NSLog(#"Export Cancelled");
break;
}
BOOL statusSuccess = [exportSession status] == AVAssetExportSessionStatusCompleted;
responseBlock(statusSuccess ? YES : NO, statusSuccess ? convertedVideoUUID : nil, statusSuccess ? outputFileURL : nil);
}];
}
I think this is the default behavior of AVFoundation; it's probably not related to the watermark feature.
You could use a CGAffineTransform:
// height and width here are presumably the source video's dimensions.
if (height > width) {
    CGAffineTransform rotationTransform = CGAffineTransformMakeRotation(M_PI_2);
    [layerInstruction setTransform:rotationTransform atTime:kCMTimeZero];
}
The naturalSize of an AVAsset does not take any rotation of the video into account.
If you want to place your watermark correctly, consider using the renderSize of the AVMutableVideoComposition or applying some transformations.
This snippet gives you the actual orientation for an asset:
func orientationForAsset(_ asset: AVAsset) -> (orientation: UIImageOrientation, isPortrait: Bool) {
    let videoTrack = asset.tracks(withMediaType: AVMediaTypeVideo).first!
    let transformMatrix = videoTrack.preferredTransform
    return orientationFromTransform(transformMatrix)
}

func orientationFromTransform(_ transform: CGAffineTransform) -> (orientation: UIImageOrientation, isPortrait: Bool) {
    var assetOrientation = UIImageOrientation.up
    var isPortrait = false
    if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
        assetOrientation = .right
        isPortrait = true
    } else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
        assetOrientation = .left
        isPortrait = true
    } else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
        assetOrientation = .up
    } else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
        assetOrientation = .down
    }
    return (assetOrientation, isPortrait)
}
With this one you can get the actual size considering the rotation.
func resolutionSizeForAsset(_ asset: AVAsset) -> CGSize? {
    guard let track = asset.tracks(withMediaType: AVMediaTypeVideo).first else { return nil }
    let size = track.naturalSize.applying(track.preferredTransform)
    return CGSize(width: fabs(size.width), height: fabs(size.height))
}
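Putting it together for the watermark case, the render size and the layer frames would be based on this rotation-aware size instead of naturalSize. A sketch in Swift; the variable names mirror the Objective-C code in the question:
if let displaySize = resolutionSizeForAsset(videoAsset) {
    videoComposition.renderSize = displaySize
    parentLayer.frame = CGRect(origin: .zero, size: displaySize)
    videoLayer.frame = CGRect(origin: .zero, size: displaySize)
    layerCa.frame = CGRect(origin: .zero, size: displaySize)
}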
I want to crop a square video from a video selected from gallery with its orientation fixed. I have searched many stackoverflow posts and raywenderlich post.
My code works for some videos but not all. For example, I selected a portrait video from the gallery with a resolution of 352x640. During debugging, the natural size of the asset is 640x352, and with the preferredTransform it is detected as portrait, but the crop size is set to 640x640 instead of 352x352 for a center square crop.
This is my code
let asset = AVAsset(URL: url)
let composition = AVMutableComposition()
let compositionVideoTrack = composition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let compositionAudioTrack = composition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let videoComposition: AVMutableVideoComposition?
let audioMix: AVMutableAudioMix?
let timeRange = self.timeRange(asset)
if let videoAssetTrack = asset.tracksWithMediaType(AVMediaTypeVideo).first {
videoComposition = AVMutableVideoComposition()
var error: NSError?
do {
try compositionVideoTrack.insertTimeRange(timeRange, ofTrack: videoAssetTrack, atTime: kCMTimeZero)
} catch var error1 as NSError {
error = error1
} catch {
fatalError()
}
let naturalSize = videoAssetTrack.naturalSize
let videoSize: CGSize
var transform = videoAssetTrack.preferredTransform
var isFirstAssetPortrait_ = false
if(transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0) {
isFirstAssetPortrait_ = true}
if(transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0) {
isFirstAssetPortrait_ = true
}
var FirstAssetScaleToFitRatio = naturalSize.height/naturalSize.width
if(isFirstAssetPortrait_){
videoSize = CGSizeMake(naturalSize.width, naturalSize.width)
FirstAssetScaleToFitRatio = naturalSize.width/naturalSize.height
var FirstAssetScaleFactor = CGAffineTransformMakeScale(FirstAssetScaleToFitRatio,FirstAssetScaleToFitRatio)
FirstAssetScaleFactor = CGAffineTransformTranslate(FirstAssetScaleFactor, 0, (naturalSize.height - naturalSize.width) / 2.0 )
transform = CGAffineTransformConcat(transform, FirstAssetScaleFactor)
}else{
videoSize = CGSizeMake(naturalSize.height, naturalSize.height)
if transform.a >= 0 {
transform = CGAffineTransformTranslate(transform, -(naturalSize.width - naturalSize.height) / 2.0, 0.0)
} else {
transform = CGAffineTransformTranslate(transform, (naturalSize.width - naturalSize.height) / 2.0, 0.0)
}
}
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: compositionVideoTrack)
layerInstruction.setTransform(transform, atTime: kCMTimeZero)
let videoInstructions = AVMutableVideoCompositionInstruction()
videoInstructions.timeRange = CMTimeRangeMake(kCMTimeZero, asset.duration)
videoInstructions.layerInstructions = [layerInstruction]
videoComposition?.renderSize = videoSize
videoComposition?.frameDuration = CMTimeMake(1, 30)
videoComposition?.renderScale = 1.0
videoComposition?.instructions = [videoInstructions]
} else {
videoComposition = nil
}
if let audioTrack = asset.tracksWithMediaType(AVMediaTypeAudio).first {
var error: NSError?
do {
try compositionAudioTrack.insertTimeRange(timeRange, ofTrack: audioTrack, atTime: kCMTimeZero)
} catch var error1 as NSError {
error = error1
} catch {
fatalError()
}
let mixParameters = AVMutableAudioMixInputParameters(track: compositionAudioTrack)
mixParameters.setVolume(1.0, atTime: kCMTimeZero)
audioMix = AVMutableAudioMix()
audioMix?.inputParameters = [mixParameters]
} else {
audioMix = nil
}
self.exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality)
self.exporter?.videoComposition = videoComposition
self.exporter?.audioMix = audioMix
self.exporter?.outputURL = NSURL.tempFileURL("mp4")
self.exporter?.outputFileType = AVFileTypeQuickTimeMovie
self.exporter?.exportAsynchronouslyWithCompletionHandler() {
if let exporter = self.exporter {
switch exporter.status {
case .Failed:
break
case .Completed:
break
default:
break
}
}
}
What am I doing wrong? Is there a better way to fix the video orientation and center-crop (using the minimum of the width and height)?
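One thing to check, building on the resolutionSizeForAsset idea shown earlier: derive the square side from the rotation-aware display size, not from naturalSize directly. A sketch in current Swift syntax (the code above uses Swift 2 APIs):
let displaySize = videoAssetTrack.naturalSize.applying(videoAssetTrack.preferredTransform)
let squareSide = min(abs(displaySize.width), abs(displaySize.height)) // 352 for a 352x640 portrait source
videoComposition?.renderSize = CGSize(width: squareSide, height: squareSide)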