AVMutableVideoComposition output video shrunk - iOS

I'm a newbie to Swift. I'm trying to add a watermark with reference to code from SO. My original video resolution is 1280 X 720, but the output video is a shrunk version.
Here are the before and after pictures.
Here is my function to create a watermark.
private func watermark(video videoAsset:AVAsset, watermarkText text : String!, image : CGImage!, saveToLibrary flag : Bool, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
DispatchQueue.global(qos: DispatchQoS.QoSClass.default).async {
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
}
catch {
print(error.localizedDescription)
}
let videoSize = clipVideoTrack.naturalSize
print("Video size", videoSize.height) //720
print("Video size", videoSize.width) //1280
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0.0,
y: 0.0,
width: videoSize.width,
height: videoSize.height)
videoLayer.frame = CGRect(x: 0.0,
y: 0.0,
width: videoSize.width,
height: videoSize.height)
parentLayer.addSublayer(videoLayer)
if text != nil {
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.red.cgColor
titleLayer.string = text
titleLayer.font = "Helvetica" as CFTypeRef
titleLayer.fontSize = 15
titleLayer.alignmentMode = kCAAlignmentCenter
titleLayer.bounds = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(titleLayer)
} else if image != nil {
let imageLayer = CALayer()
imageLayer.contents = image
let width: CGFloat = (self.imageView.image?.size.width)!
let height: CGFloat = (self.imageView.image?.size.height)!
print("Video size", height) //720
print("Video size", width) //1280
imageLayer.frame = CGRect(x: 0.0, y: 0.0, width: width, height: height)
imageLayer.opacity = 0.65
parentLayer.addSublayer(imageLayer)
}
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, Int32(clipVideoTrack.nominalFrameRate))
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
_ = mixComposition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: Date())
let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("watermarkVideo-\(date).mov")
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = url
exporter?.outputFileType = AVFileTypeQuickTimeMovie
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = videoComp
exporter?.exportAsynchronously() {
DispatchQueue.main.async {
if exporter?.status == AVAssetExportSessionStatus.completed {
let outputURL = exporter?.outputURL
if flag {
if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
}) { saved, error in
if saved {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
}
}
} else {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
} else {
// Error
completion!(exporter?.status, exporter, nil)
}
}
}
}
}
While the size of the watermark image is correct, the video is shrunk.

Can you try this function?
private func watermark(video videoAsset: AVAsset, watermarkText text : String!, image : CGImage!, saveToLibrary flag : Bool, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
DispatchQueue.global(qos: DispatchQoS.QoSClass.default).async {
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
}
catch {
print(error.localizedDescription)
}
let videoSize = clipVideoTrack.naturalSize
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0.0,
y: 0.0,
width: videoSize.width,
height: videoSize.height)
videoLayer.frame = CGRect(x: 0.0,
y: 0.0,
width: videoSize.width,
height: videoSize.height)
parentLayer.addSublayer(videoLayer)
// if text != nil {
// let titleLayer = CATextLayer()
// titleLayer.backgroundColor = UIColor.red.cgColor
// titleLayer.string = text
// titleLayer.font = "Helvetica" as CFTypeRef
// titleLayer.fontSize = 15
// titleLayer.alignmentMode = kCAAlignmentCenter
// titleLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
// parentLayer.addSublayer(titleLayer)
// } else
if image != nil {
let imageLayer = CALayer()
imageLayer.contents = image
let width: CGFloat = (self.imageView.image?.size.width)!
let height: CGFloat = (self.imageView.image?.size.height)!
//
print("Video size", height)
print("Video size", width)
imageLayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
// imageLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
imageLayer.opacity = 1
parentLayer.addSublayer(imageLayer)
}
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, Int32(clipVideoTrack.nominalFrameRate))
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
let videotrack = mixComposition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
// let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: Date())
let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("watermarkVideo-\(date).mp4")
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return}
exporter.videoComposition = videoComp
exporter.outputFileType = AVFileTypeMPEG4
exporter.outputURL = url
exporter.exportAsynchronously() {
DispatchQueue.main.async {
if exporter.status == AVAssetExportSessionStatus.completed {
let outputURL = exporter.outputURL
if flag {
// Save to library
// let library = ALAssetsLibrary()
if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
}) {
saved, error in
if saved {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
}
}
// if library.videoAtPathIs(compatibleWithSavedPhotosAlbum: outputURL) {
// library.writeVideoAtPathToSavedPhotosAlbum(outputURL,
// completionBlock: { (assetURL:NSURL!, error:NSError!) -> Void in
//
// completion!(AVAssetExportSessionStatus.Completed, exporter, outputURL)
// })
// }
} else {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
} else {
// Error
completion!(exporter.status, exporter, nil)
}
}
}
}
}

The watermarking code shown above does not seem to be the reason for the smaller output resolution.
Problem
The resolution depends on what kind of AVAsset is put into the watermark method.
Example:
Frequently a UIImagePickerController is used. There is the delegate method
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any])
In that delegate method one often sees something like this:
let url = info[UIImagePickerControllerMediaURL] as? URL
let videoAsset = AVAsset(url: url!)
self.watermark(video: videoAsset, watermarkText: nil, image: self.imageView.image?.cgImage ...
But with the lines above a downsized version of the video is used, e.g. instead of a 1920x1080 video one gets a reduced 1280x720 version.
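To see which version of the clip actually arrives, you can print the video track's size right before building the composition. A minimal sketch (assuming videoAsset is whatever gets passed into the watermark method):
if let track = videoAsset.tracks(withMediaType: AVMediaTypeVideo).first {
    // Applying preferredTransform makes rotated (portrait) clips report the right size.
    let size = track.naturalSize.applying(track.preferredTransform)
    print("Actual track size:", abs(size.width), "x", abs(size.height))
}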
Solution
A method for determining the AVAsset from the PHAsset could look like this:
private func videoAsset(for asset: PHAsset, completion: @escaping (AVAsset?) -> Void) {
let requestOptions = PHVideoRequestOptions()
requestOptions.version = .original
PHImageManager.default().requestAVAsset(forVideo: asset, options: requestOptions, resultHandler: {
(avAsset, avAudioMix, info) in
completion(avAsset)
})
}
And where to get the PHAsset from? It can also be determined in the didFinishPickingMediaWithInfo method by using UIImagePickerControllerPHAsset:
let asset = info[UIImagePickerControllerPHAsset] as? PHAsset
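One caveat (my addition, not part of the original answer): as far as I know, the UIImagePickerControllerPHAsset key is only populated when the app has photo library access, so it can be worth requesting authorization before presenting the picker. A minimal sketch:
PHPhotoLibrary.requestAuthorization { status in
    // Without authorization, the picker's PHAsset key tends to come back nil.
    print("Photo library authorization:", status.rawValue)
}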
Quick Test
For a quick test one could use:
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
if let asset = info[UIImagePickerControllerPHAsset] as? PHAsset {
picker.dismiss(animated: true, completion: { [weak self] in
self?.videoAsset(for: asset, completion: { (avAsset) in
if let videoAsset = avAsset {
DispatchQueue.main.async {
self?.watermark(video: videoAsset, watermarkText: nil, image: self?.imageView.image?.cgImage, saveToLibrary: true) { (exportStat: AVAssetExportSessionStatus? , session: AVAssetExportSession?, url: URL?) in
print("url: \(String(describing: url?.debugDescription))")
}
}
}
})
})
}
}
The result is a video in the original resolution with a watermark in the lower left; see the screenshot of the resulting video:

Related

Trying to overlay an image onto a CALayer and retain the position from the subview (UITextView)

First time poster, looooooong time peruser. I'm using SwiftUI for the layout and UIRepresentables for the camera work (Xcode 11.7), and I'm trying to overlay an image onto a CALayer (for eventual export to video). The image was converted from a UITextView so the user is free to edit, pinch/zoom, and drag the text to their heart's content. After scouring SO for days and reading Ray Wenderlich tutorials, I've hit a wall. Screenshots below.
Before: freeform text 'coffee' added to the view
After: exported movie still, 'coffee' text position is incorrect
Below is the export function. I suspect I'm doing something wrong with relativePosition.
Thank you for any suggestions; this is my first foray into writing an iOS app.
static func exportLayersToVideo(_ fileUrl:String, _ textView:UITextView){
let fileURL = NSURL(fileURLWithPath: fileUrl)
let composition = AVMutableComposition()
let vidAsset = AVURLAsset(url: fileURL as URL, options: nil)
// get video track
let vtrack = vidAsset.tracks(withMediaType: AVMediaType.video)
let videoTrack: AVAssetTrack = vtrack[0]
let vid_timerange = CMTimeRangeMake(start: CMTime.zero, duration: vidAsset.duration)
let tr: CMTimeRange = CMTimeRange(start: CMTime.zero, duration: CMTime(seconds: 10.0, preferredTimescale: 600))
composition.insertEmptyTimeRange(tr)
let trackID:CMPersistentTrackID = CMPersistentTrackID(kCMPersistentTrackID_Invalid)
if let compositionvideoTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: trackID) {
do {
try compositionvideoTrack.insertTimeRange(vid_timerange, of: videoTrack, at: CMTime.zero)
} catch {
print("error")
}
compositionvideoTrack.preferredTransform = videoTrack.preferredTransform
} else {
print("unable to add video track")
return
}
let size = videoTrack.naturalSize
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
let videolayer = CALayer()
videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
// Convert UITextView to Image
let renderer = UIGraphicsImageRenderer(size: textView.bounds.size)
let image = renderer.image { ctx in
textView.drawHierarchy(in: textView.bounds, afterScreenUpdates: true)
}
let imglayer = CALayer()
let scaledAspect: CGFloat = image.size.width / image.size.height
let scaledWidth = size.width
let scaledHeight = scaledWidth / scaledAspect
let relativePosition = parentlayer.convert(textView.frame.origin, from: textView.layer)
imglayer.frame = CGRect(x: relativePosition.x, y: relativePosition.y, width: scaledWidth,height: scaledHeight)
imglayer.contents = image.cgImage
// Adding videolayer and imglayer
parentlayer.addSublayer(videolayer)
parentlayer.addSublayer(imglayer)
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
layercomposition.renderSize = size
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
// instruction for overlay
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: composition.duration)
let videotrack = composition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
instruction.layerInstructions = [layerinstruction]
layercomposition.instructions = [instruction]
// create new file to receive data
let dirPaths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
let docsDir = dirPaths[0] as NSString
let movieFilePath = docsDir.appendingPathComponent("result.mov")
let movieDestinationUrl = NSURL(fileURLWithPath: movieFilePath)
// use AVAssetExportSession to export video
let assetExport = AVAssetExportSession(asset: composition, presetName:AVAssetExportPresetHighestQuality)
assetExport?.outputFileType = AVFileType.mov
assetExport?.videoComposition = layercomposition
// Check exist and remove old files
do { // delete old video
try FileManager.default.removeItem(at: movieDestinationUrl as URL)
} catch { print("Error Removing Existing File: \(error.localizedDescription).") }
do { // delete old video
try FileManager.default.removeItem(at: fileURL as URL)
} catch { print("Error Removing Existing File: \(error.localizedDescription).") }
assetExport?.outputURL = movieDestinationUrl as URL
assetExport?.exportAsynchronously(completionHandler: {
switch assetExport!.status {
case AVAssetExportSession.Status.failed:
print("failed")
print(assetExport?.error ?? "unknown error")
case AVAssetExportSession.Status.cancelled:
print("cancelled")
print(assetExport?.error ?? "unknown error")
default:
print("Movie complete")
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: movieDestinationUrl as URL)
}) { saved, error in
if saved {
print("Saved")
}
}
}
})
}
}
It looks like the x position is correct, but the y is off. I think this is because the origin is at the bottom-left instead of the top-left. Try this:
var relativePosition = parentlayer.convert(textView.frame.origin, from: textView.layer)
relativePosition.y = size.height - relativePosition.y
imglayer.frame = CGRect(x: relativePosition.x, y: relativePosition.y, width: scaledWidth,height: scaledHeight)
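If the text view lives in a view whose size differs from the video's render size, the frame may also need scaling from view points to video pixels before the flip. A sketch of such a mapping (viewSize here is my naming for the on-screen container's size, not something from the question):
import UIKit

// Maps a subview frame (UIKit points, top-left origin) into the video's
// coordinate space (pixels, bottom-left origin used by the composition layers).
func mapToVideoSpace(_ frame: CGRect, viewSize: CGSize, videoSize: CGSize) -> CGRect {
    let scaleX = videoSize.width / viewSize.width
    let scaleY = videoSize.height / viewSize.height
    let width = frame.size.width * scaleX
    let height = frame.size.height * scaleY
    let x = frame.origin.x * scaleX
    // Flip y: UIKit measures from the top edge, the video layer from the bottom.
    let y = videoSize.height - (frame.origin.y * scaleY) - height
    return CGRect(x: x, y: y, width: width, height: height)
}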

Only the first video shows after merging different videos

I am creating a video collage app in which I merge multiple videos into different frames to make a single video. But all the frames show the first video; the other videos do not appear after the merge. Please give me suggestions. My code is below.
func newoverlay() {
// 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
let mixComposition = AVMutableComposition()
var arrayOfComposition = Array<AVMutableCompositionTrack>()
var trackInstruction = Array<AVVideoCompositionLayerInstruction>()
var videolayer = Array<CALayer>()
var i:Int = 0
let mainInstruction = AVMutableVideoCompositionInstruction()
var assetDuration:CMTime = CMTime.zero
var box = Array<CALayer>()
var arrOfIns = Array<AVMutableVideoCompositionInstruction>()
var atTimeM : CMTime = CMTimeMake(value: 0, timescale: 0)
var lastAsset: AVURLAsset!
// 2 - Create two video tracks
for videoAssetss in firstAsset {
guard var firstTrack = mixComposition.addMutableTrack(withMediaType: .video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
do {
try firstTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: (videoAssetss as? AVURLAsset)!.duration),
of: (videoAssetss as? AVURLAsset)!.tracks(withMediaType: .video)[0],
at: CMTime.zero)
var firstInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstTrack)
guard let cor = photoFrameCordinate[i] as? CGRect else{return}
if videoAssetss as! AVURLAsset != firstAsset.last as! AVURLAsset{
firstInstruction.setOpacity(0, at: assetDuration) // asseteDuration
}
let transform = CGAffineTransform(scaleX: 0.4, y:1).concatenating(CGAffineTransform(translationX: trackInstruction[i-1]., y: -cor.origin.y))
firstInstruction.setTransform(transform, at: CMTime.zero)
assetDuration = CMTimeAdd(assetDuration, (videoAssetss as! AVURLAsset).duration)
lastAsset = videoAssetss as? AVURLAsset
trackInstruction.append(firstInstruction)
i += 1
// arrayOfComposition.append(firstTrack)
} catch {
print("Failed to load first track")
return
}
}
// Watermark Effect
let width: CGFloat = widthConstraintViewForImage.constant
let height = heightConstraintViewForImage.constant
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
//Mark: Frame layer
let bglayer = CALayer()
bglayer.contents = imgViewForAdminImage.image?.cgImage
bglayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
bglayer.backgroundColor = UIColor.clear.cgColor
for index in 0..<videoURLS.count{
var videoBox = CALayer()
guard let cor = photoFrameCordinate[index] as? CGRect else{return}
videoBox.frame = CGRect(x: cor.origin.x, y: parentlayer.frame.maxY-(cor.origin.y+cor.size.height), width: cor.size.width, height: cor.size.height)
videoBox.backgroundColor = UIColor.green.cgColor
videoBox.masksToBounds = true
var vlayer = CALayer()
vlayer.contentsScale = 1.0
vlayer.contentsGravity = CALayerContentsGravity.center
vlayer.frame = CGRect(x: 0, y: 0, width:cor.size.width, height: cor.size.height)
vlayer.backgroundColor = UIColor.yellow.cgColor
videolayer.append(vlayer)
videoBox.addSublayer(vlayer)
box.append(videoBox)
bglayer.addSublayer(videoBox)
}
parentlayer.addSublayer(bglayer)
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayers: videolayer, in: parentlayer)
// 2.1
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: assetDuration)
mainInstruction.layerInstructions = trackInstruction
mainInstruction.backgroundColor = UIColor.red.cgColor
layercomposition.instructions = [mainInstruction]
// layercomposition.renderSize = CGSizeMake(videoSize.width * scale, videoSize.height * scale)
layercomposition.renderScale = 1.0
layercomposition.renderSize = CGSize(width: width, height: height)
// create new file to receive data
let dirPaths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
let docsDir = dirPaths[0] as NSString
let movieFilePath = docsDir.appendingPathComponent("result.mp4")
let movieDestinationUrl = NSURL(fileURLWithPath: movieFilePath)
// use AVAssetExportSession to export video
let assetExport = AVAssetExportSession(asset: mixComposition, presetName:AVAssetExportPresetMediumQuality)
assetExport?.outputFileType = AVFileType.mp4
assetExport?.videoComposition = layercomposition
// Check exist and remove old file
FileManager.default.removeItemIfExisted(movieDestinationUrl as URL)
assetExport?.outputURL = movieDestinationUrl as URL
assetExport?.exportAsynchronously(completionHandler: {
switch assetExport!.status {
case AVAssetExportSession.Status.failed:
print("failed")
print(assetExport?.error ?? "unknown error")
case AVAssetExportSession.Status.cancelled:
print("cancelled")
print(assetExport?.error ?? "unknown error")
default:
print("Movie complete")
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: movieDestinationUrl as URL)
}) { saved, error in
if saved {
print("Saved")
}else{
print(error!)
}
}
self.playVideo()
}
})
}
When I set the opacity of the first video to 0, the second video shows in all the frames. I think all the videos are there, but they sit behind the first video, which is why only the first video shows in all the frames.
I tried using:
let videolayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: asset)
videolayerInstruction.setCropRectangle(CGRect(), at: CMTime())
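Those two calls take a concrete CGRect and CMTime. A minimal sketch of a per-track instruction that crops a video to its frame and then moves it into place (the helper name and frameRect are mine, not from the question):
import AVFoundation

// Builds a layer instruction that crops a track to a frame's size and then
// translates it into the frame's position within the render area.
func layerInstruction(for track: AVAssetTrack, frameRect: CGRect) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    // The crop rectangle is specified in the track's own coordinate space.
    instruction.setCropRectangle(CGRect(origin: .zero, size: frameRect.size), at: .zero)
    // Then translate the cropped video into the frame's position.
    instruction.setTransform(CGAffineTransform(translationX: frameRect.origin.x, y: frameRect.origin.y), at: .zero)
    return instruction
}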

How to position the CALayer in a video?

I have a UIView (size: W: 375 H: 667) with an image that can be placed anywhere inside it. Later this image will be overlaid onto a video and saved. My problem is that when I view the video, the image is not in the same position chosen in my UIView, because my video has a size of 720 x 1280. How can I reflect the position of the image chosen in my UIView inside a video of 720 x 1280?
This is the code I'm using:
private func watermark(video videoAsset:AVAsset,modelView:MyViewModel, watermarkText text : String!, imageName name : String!, saveToLibrary flag : Bool, watermarkPosition position : QUWatermarkPosition, completion : ((_ status : AVAssetExportSession.Status?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
DispatchQueue.global(qos: DispatchQoS.QoSClass.default).async {
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let clipVideoTrack:AVAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
do {
try compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: videoAsset.duration), of: clipVideoTrack, at: CMTime.zero)
}
catch {
print(error.localizedDescription)
}
let videoSize = self.resolutionSizeForLocalVideo(asset: clipVideoTrack)
print("DIMENSIONE DEL VIDEO W: \(videoSize.width) H: \(videoSize.height)")
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(videoLayer)
//My layer image
let layerTest = CALayer()
layerTest.frame = modelView.frame
layerTest.contents = modelView.image.cgImage
print("A: \(modelView.frame.origin.y) - \(modelView.frame.origin.x)")
print("B: \(layerTest.frame.origin.y) - \(layerTest.frame.origin.x)")
parentLayer.addSublayer(layerTest)
print("PARENT: \(parentLayer.frame.origin.y) - \(parentLayer.frame.origin.x)")
//------------------------
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(value: 1, timescale: 30)
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: mixComposition.duration)
let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack!, asset: videoAsset)
layerInstruction.setTransform((clipVideoTrack.preferredTransform), at: CMTime.zero)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: Date())
let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("watermarkVideo-\(date).mp4")
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = url
exporter?.outputFileType = AVFileType.mp4
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = videoComp
exporter?.exportAsynchronously() {
DispatchQueue.main.async {
if exporter?.status == AVAssetExportSession.Status.completed {
let outputURL = exporter?.outputURL
if flag {
// Save to library
// let library = ALAssetsLibrary()
if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
}) { saved, error in
if saved {
completion!(AVAssetExportSession.Status.completed, exporter, outputURL)
}
}
}
// if library.videoAtPathIs(compatibleWithSavedPhotosAlbum: outputURL) {
// library.writeVideoAtPathToSavedPhotosAlbum(outputURL,
// completionBlock: { (assetURL:NSURL!, error:NSError!) -> Void in
//
// completion!(AVAssetExportSessionStatus.Completed, exporter, outputURL)
// })
// }
} else {
completion!(AVAssetExportSession.Status.completed, exporter, outputURL)
}
} else {
// Error
completion!(exporter?.status, exporter, nil)
}
}
}
}
}
private func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
let scale : CGAffineTransform = CGAffineTransform(scaleX: 1, y:1)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scale), at: CMTime.zero)
return instruction
}
This is what I would like to get:
The answers to this question may be helpful. I faced a similar problem when attempting to position user-generated text over a video. This is what worked for me:
First, I added a helper method to convert a CGPoint from one rect to another:
func convertPoint(point: CGPoint, fromRect: CGRect, toRect: CGRect) -> CGPoint {
return CGPoint(x: (toRect.size.width / fromRect.size.width) * point.x, y: (toRect.size.height / fromRect.size.height) * point.y)
}
I positioned my text view (in your case, an image view) using its center point. Here's how you could calculate an adjusted center point using the helper method:
let adjustedCenter = convertPoint(point: imageView.center, fromRect: view.frame, toRect: CGRect(x: 0, y: 0, width: 720.0, height: 1280.0))
I had to do some extra positioning after that because the coordinate system for CALayers is flipped, so this is what the final point might look like:
let finalCenter = CGPoint(x: adjustedCenter.x, y: (1280.0 - adjustedCenter.y) - (imageView.bounds.height / 2.0))
Then you would set your CALayer's position property to that point.
layerTest.position = finalCenter
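Putting those steps together (reusing the question's layerTest layer and the 720 x 1280 render size from the example above), the whole adjustment might read:
let videoSize = CGSize(width: 720.0, height: 1280.0)
// Scale the on-screen center into video coordinates, flip the y axis,
// then offset by half the image height so the layer sits where expected.
let adjustedCenter = convertPoint(point: imageView.center, fromRect: view.frame, toRect: CGRect(origin: .zero, size: videoSize))
let finalCenter = CGPoint(x: adjustedCenter.x,
                          y: (videoSize.height - adjustedCenter.y) - (imageView.bounds.height / 2.0))
layerTest.position = finalCenter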
Hope that helps!

iOS Swift Implementation of Instagram Story Editor

I'm working on a project similar to editing photos/videos on Instagram Story (with the functionality of adding stickers, etc). My initial approach was to use
videoCompositionInstructions!.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: containerLayer)
but I realized that there are many challenges with this method. First, if the input is a landscape video, I cannot recover the background gradient color - it becomes all black (https://imgur.com/a/wYpknE4). Not to mention the cropping issues - if the user moves the video out of bounds, the video should be clipped, but with my current approach, this would be difficult. Also, if I add stickers, I have to scale the x and y to fit the render size of the video.
What really would be the best approach to this? Surely there would be an easier way? Intuitively, it would make sense to start off with a container view and the user can add stickers, video, etc to it and it would be the easiest to simply export the container view with clipsToBounds = true (no need to scale x/y, crop the video, landscape issues, etc).
If anyone has worked on a similar project, or has any inputs, it would be appreciated.
class AVFoundationClient {
var selectedVideoURL: URL?
var mutableComposition: AVMutableComposition?
var videoCompositionInstructions: AVMutableVideoComposition?
var videoTrack: AVMutableCompositionTrack?
var sourceAsset: AVURLAsset?
var insertTime = CMTime.zero
var sourceVideoAsset: AVAsset?
var sourceVideoTrack: AVAssetTrack?
var sourceRange: CMTimeRange?
var renderWidth: CGFloat?
var renderHeight: CGFloat?
var endTime: CMTime?
var videoBounds: CGRect?
var stickerLayers = [CALayer]()
func exportVideoFileFromStickersAndOriginalVideo(_ stickers: [Int:Sticker], sourceURL: URL) {
createNewMutableCompositionAndTrack()
getSourceAssetFromURL(sourceURL)
getVideoParamsAndAppendTracks()
createVideoCompositionInstructions()
for (_, sticker) in stickers {
createStickerLayer(sticker.image!, x: sticker.x!, y: sticker.y!, width: sticker.width!, height: sticker.height!, scale: sticker.scale!)
}
mergeStickerLayersAndFinalizeInstructions()
export(mutableComposition!)
}
func createStickerLayer(_ image: UIImage, x: CGFloat, y: CGFloat, width: CGFloat, height: CGFloat, scale: CGFloat) {
let scaleRatio = renderWidth!/UIScreen.main.bounds.width
let stickerX = x*scaleRatio
let stickerY = y*scaleRatio
let imageLayer = CALayer()
imageLayer.frame = CGRect(x: stickerX, y: stickerY, width: width*scaleRatio, height: height*scaleRatio)
imageLayer.contents = image.cgImage
imageLayer.contentsGravity = CALayerContentsGravity.resize
imageLayer.masksToBounds = true
stickerLayers.append(imageLayer)
}
func mergeStickerLayersAndFinalizeInstructions() {
let videoLayer = CALayer()
videoLayer.frame = CGRect(x: 0, y: 0, width: renderWidth!, height: renderWidth!*16/9)
videoLayer.contentsGravity = .resizeAspectFill
let containerLayer = CALayer()
containerLayer.backgroundColor = UIColor.mainBlue().cgColor
containerLayer.isGeometryFlipped = true
containerLayer.frame = CGRect(x: 0, y: 0, width: renderWidth!, height: renderWidth!*16/9)
containerLayer.addSublayer(videoLayer)
for stickerLayer in stickerLayers {
containerLayer.addSublayer(stickerLayer)
}
videoCompositionInstructions!.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: containerLayer)
}
func createNewMutableCompositionAndTrack() {
mutableComposition = AVMutableComposition()
videoTrack = mutableComposition!.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID())
}
func getSourceAssetFromURL(_ fileURL: URL) {
sourceAsset = AVURLAsset(url: fileURL, options: nil)
}
func getVideoParamsAndAppendTracks() {
let sourceDuration = CMTimeRangeMake(start: CMTime.zero, duration: sourceAsset!.duration)
sourceVideoTrack = sourceAsset!.tracks(withMediaType: AVMediaType.video)[0]
renderWidth = sourceVideoTrack!.renderSize().width
renderHeight = sourceVideoTrack!.renderSize().height
endTime = sourceAsset!.duration
sourceRange = sourceDuration
do {
try videoTrack!.insertTimeRange(sourceDuration, of: sourceVideoTrack!, at: insertTime)
}catch {
print("error inserting time range")
}
}
func createVideoCompositionInstructions() {
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = sourceRange!
let videolayerInstruction = videoCompositionInstruction(videoTrack!, asset: sourceAsset!)
videolayerInstruction.setOpacity(0.0, at: endTime!)
//Add instructions
mainInstruction.layerInstructions = [videolayerInstruction]
videoCompositionInstructions = AVMutableVideoComposition()
videoCompositionInstructions!.renderScale = 1.0
videoCompositionInstructions!.renderSize = CGSize(width: renderWidth!, height: renderWidth!*16/9)
videoCompositionInstructions!.frameDuration = CMTimeMake(value: 1, timescale: 30)
videoCompositionInstructions!.instructions = [mainInstruction]
}
func videoCompositionInstruction(_ track: AVCompositionTrack, asset: AVAsset)
-> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: .video)[0]
instruction.setTransform(assetTrack.preferredTransform.concatenating(CGAffineTransform(translationX: 0, y: -(renderHeight! - renderWidth!*16/9)/2)), at: CMTime.zero)
return instruction
}
}
extension AVFoundationClient {
//Export the AV Mutable Composition
func export(_ mutableComposition: AVMutableComposition) {
// Set up exporter
guard let exporter = AVAssetExportSession(asset: mutableComposition, presetName: AVAssetExportPreset1920x1080) else { return }
exporter.outputURL = generateExportUrl()
exporter.outputFileType = AVFileType.mov
exporter.shouldOptimizeForNetworkUse = false
exporter.videoComposition = videoCompositionInstructions
exporter.exportAsynchronously() {
DispatchQueue.main.async {
self.exportDidComplete(exportURL: exporter.outputURL!, doneEditing: false)
}
}
}
func generateExportUrl() -> URL {
// Create a custom URL using the current date-time to prevent conflicting URLs in the future.
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormat = DateFormatter()
dateFormat.dateStyle = .long
dateFormat.timeStyle = .short
let dateString = dateFormat.string(from: Date())
let exportPath = (documentDirectory as NSString).strings(byAppendingPaths: ["edited-video-\(dateString).mp4"])[0]
//erase old
let fileManager = FileManager.default
do {
try fileManager.removeItem(at: URL(fileURLWithPath: exportPath))
} catch {
print("Unable to remove item at \(URL(fileURLWithPath: exportPath))")
}
return URL(fileURLWithPath: exportPath)
}
//Export Finish Handler
func exportDidComplete(exportURL: URL, doneEditing: Bool) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: exportURL)
}) { saved, error in
if saved {print("successful saving")}
else {
print("error saving")
}
}
}
}

AVAssetExportSession wrong orientation in front camera

I'm encountering a wrong orientation in video exported using AVAssetExportSession, but only with the front camera. I followed this tutorial https://stackoverflow.com/a/35368649/3764365 but got this scenario. I think it's not really a wrong orientation; the image is cut in half. I tried changing the video layer and the render layer but had no luck. My code looks like this.
let composition = AVMutableComposition()
let vidAsset = AVURLAsset(url: path)
// get video track
let vtrack = vidAsset.tracks(withMediaType: AVMediaTypeVideo)
// get audio track
let videoTrack:AVAssetTrack = vtrack[0]
_ = videoTrack.timeRange.duration
let vid_timerange = CMTimeRangeMake(kCMTimeZero, vidAsset.duration)
var _: NSError?
let compositionvideoTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())
do {
try compositionvideoTrack.insertTimeRange(vid_timerange, of: videoTrack, at: kCMTimeZero)
} catch let error {
print(error.localizedDescription)
}
let compositionVideoTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
let audioTrack = vidAsset.tracks(withMediaType: AVMediaTypeAudio)[0]
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, vidAsset.duration), of: audioTrack, at: kCMTimeZero)
} catch {
print("error")
}
let size = videoTrack.naturalSize
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: size.height, height: size.width)
let videolayer = CALayer()
videolayer.frame = CGRect(x: 0, y: 0, width: size.height, height: size.width)
parentlayer.addSublayer(videolayer)
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(1, 30)
layercomposition.renderSize = CGSize(width: size.height, height: size.width)
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
// instruction for watermark
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
let videotrack = composition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
instruction.layerInstructions = [layerinstruction]
layercomposition.instructions = [instruction]
layerinstruction.setTransform(videoTrack.preferredTransform, at: kCMTimeZero)
// create new file to receive data
let movieDestinationUrl = UIImage.outPut()
// use AVAssetExportSession to export video
let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPreset1280x720)!
assetExport.videoComposition = layercomposition
assetExport.outputFileType = AVFileTypeQuickTimeMovie
assetExport.outputURL = movieDestinationUrl
Setting movieFileOutputConnection?.isVideoMirrored from true to false fixed the issue for me. It's a weird bug in my opinion.
if self.currentCamera == .front {
movieFileOutputConnection?.isVideoMirrored = false
}
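For context, that flag lives on the capture output's video connection. A sketch of where it would typically be set, assuming an AVCaptureMovieFileOutput-based recorder (the function and parameter names are mine, not from the question):
import AVFoundation

// Call this after the movie output has been added to a running AVCaptureSession
// that already has a video input, otherwise the connection will be nil.
func disableFrontCameraMirroring(on output: AVCaptureMovieFileOutput, usingFrontCamera: Bool) {
    guard let connection = output.connection(with: .video) else { return }
    if usingFrontCamera && connection.isVideoMirroringSupported {
        // Automatic adjustment must be off before isVideoMirrored can be set manually.
        connection.automaticallyAdjustsVideoMirroring = false
        connection.isVideoMirrored = false
    }
}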
I will share my code on how I solved this issue.
func addImagesToVideo(path: URL, labelImageViews: [LabelImageView]) {
SVProgressHUD.show()
let composition = AVMutableComposition()
let vidAsset = AVURLAsset(url: path)
// get video track
let vtrack = vidAsset.tracks(withMediaType: AVMediaTypeVideo)
// get audio track
let videoTrack:AVAssetTrack = vtrack[0]
_ = videoTrack.timeRange.duration
let vid_timerange = CMTimeRangeMake(kCMTimeZero, vidAsset.duration)
var _: NSError?
let compositionvideoTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())
do {
try compositionvideoTrack.insertTimeRange(vid_timerange, of: videoTrack, at: kCMTimeZero)
} catch let error {
print(error.localizedDescription)
}
let compositionVideoTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
let audioTrack = vidAsset.tracks(withMediaType: AVMediaTypeAudio)[0]
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, vidAsset.duration), of: audioTrack, at: kCMTimeZero)
} catch {
print("error")
}
let size = videoTrack.naturalSize
let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: size.height, height: size.width)
let videolayer = CALayer()
videolayer.frame = CGRect(x: 0, y: 0, width: size.height, height: size.width)
parentlayer.addSublayer(videolayer)
if labelImageViews.count != 0 {
let blankImage = self.clearImage(size: videolayer.frame.size)
let image = self.saveImage(imageOne: blankImage, labelImageViews: labelImageViews)
let imglayer = CALayer()
imglayer.contents = image.cgImage
imglayer.frame = CGRect(origin: CGPoint.zero, size: videolayer.frame.size)
imglayer.opacity = 1
parentlayer.addSublayer(imglayer)
}
let layercomposition = AVMutableVideoComposition()
layercomposition.frameDuration = CMTimeMake(1, 30)
layercomposition.renderSize = CGSize(width: size.height, height: size.width)
layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)
// instruction for watermark
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
let videotrack = composition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
instruction.layerInstructions = [layerinstruction]
layercomposition.instructions = [instruction]
var isVideoAssetPortrait = false
let videoTransform = videoTrack.preferredTransform
if(videoTransform.a == 0 && videoTransform.b == 1.0 && videoTransform.c == -1.0 && videoTransform.d == 0) {
isVideoAssetPortrait = true
}
if(videoTransform.a == 0 && videoTransform.b == -1.0 && videoTransform.c == 1.0 && videoTransform.d == 0) {
isVideoAssetPortrait = true
}
if isVideoAssetPortrait {
let FirstAssetScaleFactor = CGAffineTransform(scaleX: 1, y: 1)
layerinstruction.setTransform(videoTrack.preferredTransform.concatenating(FirstAssetScaleFactor), at: kCMTimeZero)
} else {
let FirstAssetScaleFactor = CGAffineTransform(scaleX: 1, y: 1)
layerinstruction.setTransform(videoTrack.preferredTransform.concatenating(FirstAssetScaleFactor).concatenating(CGAffineTransform(translationX: 0, y: 560)), at: kCMTimeZero)
}
// create new file to receive data
let movieDestinationUrl = UIImage.outPut()
// use AVAssetExportSession to export video
let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPreset1280x720)!
assetExport.videoComposition = layercomposition
assetExport.outputFileType = AVFileTypeQuickTimeMovie
assetExport.outputURL = movieDestinationUrl
assetExport.exportAsynchronously(completionHandler: {
switch assetExport.status{
case AVAssetExportSessionStatus.failed:
print("failed \(assetExport.error!)")
case AVAssetExportSessionStatus.cancelled:
print("cancelled \(assetExport.error!)")
default:
print("Movie complete")
// play video
OperationQueue.main.addOperation({ () -> Void in
let output = UIImage.outPut()
UIImage.compress(inputURL: movieDestinationUrl as NSURL, outputURL: output as NSURL) {
UISaveVideoAtPathToSavedPhotosAlbum(output.relativePath, nil, nil, nil)
print("Done Converting")
DispatchQueue.main.async {
SVProgressHUD.dismiss()
}
}
})
}
})
}
