Exporting Core Animations to Videos - iOS

I have been trying to get AVAssetExportSession to save a video with animations overlaid, but I'm running into a problem where the exported video has a runtime of 0:00.
As I understand it, the AVAssetExportSession APIs need a base video, which can be a black video that runs for a few seconds; you can then cover up as much of that base video as you'd like using CALayers. If the animations run past the runtime of the base video, the exported video extends itself to contain the runtime of the animations.
The base video is 5 seconds long, but the exported video is still 0:00. Interestingly, the exported video does contain the black background from the source video and the very first frame of the animation (the layers).
Has anyone run into this before and know of a good solution/what I'm missing?
Code for context...
@objc func saveMovie() {
    print("save movie")
    self.selectedFrame = CGRect(x: 0.0, y: 0.0, width: 1080.0, height: 1920.0)
    self.selectedBounds = CGRect(x: 0.0, y: 0.0, width: 1080.0, height: 1920.0)

    let mainLayer = CALayer()
    mainLayer.frame = CGRect(x: 0.0, y: 0.0, width: 1080.0, height: 1920.0)
    let videoLayer = CALayer()
    videoLayer.frame = CGRect(x: 0.0, y: 0.0, width: 1080.0, height: 1920.0)
    let animationLayer = CALayer()
    animationLayer.frame = CGRect(x: 0.0, y: 0.0, width: 1080.0, height: 1920.0)
    animationLayer.addSublayer(makeBackground())
    animationLayer.addSublayer(makeHeadingTextLayer())
    mainLayer.addSublayer(videoLayer)
    mainLayer.addSublayer(animationLayer)

    if let sourceVideoUrl = Bundle.main.url(
        forResource: "SourceVideo",
        withExtension: "mp4"
    ) {
        // Load video asset to use as the base
        print(sourceVideoUrl.absoluteString)
        let baseVideoAsset = AVURLAsset(url: sourceVideoUrl)

        // Create a composition for the video to live in
        let composition = AVMutableComposition()
        composition.naturalSize = CGSize(width: 1080.0, height: 1920.0)
        guard
            let compositionTrack = composition.addMutableTrack(
                withMediaType: AVMediaType.video,
                preferredTrackID: kCMPersistentTrackID_Invalid
            ),
            let assetTrack = baseVideoAsset.tracks(
                withMediaType: .video
            ).first
        else {
            print("something is wrong with the asset")
            return
        }

        do {
            // this crashes, so just hard coding 5 seconds right now
            //let baseVideoDuration = try await baseVideoAsset.load(.duration)
            //print("\(baseVideoDuration)")
            let timeRange = CMTimeRangeMake(
                start: .zero,
                duration: CMTime(value: 5, timescale: 30)
            )
            try compositionTrack.insertTimeRange(
                timeRange,
                of: assetTrack,
                at: .zero
            )
        } catch {
            print("issue with video track insert time range")
        }
        compositionTrack.preferredTransform = assetTrack.preferredTransform

        let videoSize = CGSize(width: 1080.0, height: 1920.0)
        let videoComposition = AVMutableVideoComposition()
        videoComposition.renderSize = videoSize
        videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
        videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
            postProcessingAsVideoLayer: videoLayer,
            in: mainLayer
        )

        let videoCompositionInstruction = AVMutableVideoCompositionInstruction()
        videoCompositionInstruction.timeRange = CMTimeRangeMake(
            start: CMTime.zero,
            duration: CMTimeMake(value: 10, timescale: 30)
        )
        videoComposition.instructions = [videoCompositionInstruction]

        let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: assetTrack)
        let transform = assetTrack.preferredTransform
        layerInstruction.setTransform(transform, at: .zero)
        videoCompositionInstruction.layerInstructions = [layerInstruction]

        guard let exporter = AVAssetExportSession(
            asset: composition,
            presetName: AVAssetExportPreset1920x1080
        ) else {
            print("failed to create exporter")
            return
        }

        let videoName = UUID().uuidString
        let exportUrl = URL(fileURLWithPath: NSTemporaryDirectory())
            .appendingPathComponent(videoName)
            .appendingPathExtension("mov")
        exporter.videoComposition = videoComposition
        exporter.outputFileType = .mov
        exporter.outputURL = exportUrl
        exporter.timeRange = CMTimeRangeMake(start: .zero, duration: CMTimeMake(value: 10, timescale: 30))
        NSLog("Composition Duration: %ld seconds", lround(CMTimeGetSeconds(composition.duration)))

        exporter.exportAsynchronously {
            DispatchQueue.main.async {
                switch exporter.status {
                case .failed:
                    print("failed to export")
                    print(exporter.error ?? "no error")
                case .cancelled:
                    print("canceled")
                case .completed:
                    print("completed")
                    UISaveVideoAtPathToSavedPhotosAlbum(
                        exportUrl.relativePath,
                        self,
                        nil,
                        nil
                    )
                case .unknown:
                    print("unknown status")
                default:
                    break
                }
            }
        }
    }
}
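An editorial note on the time math above, which may explain the 0:00 runtime: CMTime(value:timescale:) expresses seconds as value/timescale, so CMTime(value: 5, timescale: 30) is 5/30 of a second (about 0.17 s), not 5 seconds, and the exporter's timeRange of CMTimeMake(value: 10, timescale: 30) is likewise only a third of a second. A minimal sketch of what was presumably intended, reusing the names above; the commented-out load(.duration) call also needs an async context (it is an async API, iOS 15+), e.g. a Task:

// 5 seconds is value/timescale = 5/1:
let fiveSeconds = CMTime(value: 5, timescale: 1) // or CMTime(seconds: 5, preferredTimescale: 600)
let timeRange = CMTimeRangeMake(start: .zero, duration: fiveSeconds)

// load(.duration) must run in an async context:
Task {
    let baseVideoDuration = try await baseVideoAsset.load(.duration)
    print("\(baseVideoDuration)")
}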

Related

How to add text to a video with CATextLayer? I want to show text on the recorded videos, but when I do, the audio is fine and the video goes blank. Thanks in advance.

I am recording 3 videos and merging them into one. I need text on all the videos. I'm doing it, but with no success: the video goes black while the audio comes through fine. When I do it without the CALayer everything is good. I need help.
Is there something wrong with the code, or am I doing it the wrong way? Please guide me.
private func doMerge(arrayVideos: [AVAsset], animation: Bool, completion: @escaping Completion) -> Void {
    var insertTime = CMTime.zero
    var arrayLayerInstructions: [AVMutableVideoCompositionLayerInstruction] = []
    var outputSize = CGSize.init(width: 0, height: 0)

    // Determine video output size
    for videoAsset in arrayVideos {
        let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
        let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
        var videoSize = videoTrack.naturalSize
        if assetInfo.isPortrait == true {
            videoSize.width = videoTrack.naturalSize.height
            videoSize.height = videoTrack.naturalSize.width
        }
        if videoSize.height > outputSize.height {
            outputSize = videoSize
        }
    }
    if outputSize.width == 0 || outputSize.height == 0 {
        outputSize = defaultSize
    }

    // Silence sound (in case the video has no sound track)
    // let silenceURL = Bundle.main.url(forResource: "silence", withExtension: "mp3")
    // let silenceAsset = AVAsset(url: silenceURL!)
    // let silenceSoundTrack = silenceAsset.tracks(withMediaType: AVMediaType.audio).first

    // Init composition
    let mixComposition = AVMutableComposition.init()

    for videoAsset in arrayVideos {
        // Get video track
        guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else { continue }

        // Get audio track
        var audioTrack: AVAssetTrack?
        if videoAsset.tracks(withMediaType: AVMediaType.audio).count > 0 {
            audioTrack = videoAsset.tracks(withMediaType: AVMediaType.audio).first
        } else {
            // audioTrack = silenceSoundTrack
        }

        // Init video & audio composition track
        let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
                                                                   preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
        let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio,
                                                                   preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
        do {
            let startTime = CMTime.zero
            let duration = videoAsset.duration

            // Add video track to video composition at specific time
            try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration),
                                                       of: videoTrack,
                                                       at: insertTime)

            // Add audio track to audio composition at specific time
            if let audioTrack = audioTrack {
                try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: startTime, duration: duration),
                                                           of: audioTrack,
                                                           at: insertTime)
            }

            // Add instruction for video track
            let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!,
                                                                       asset: videoAsset,
                                                                       standardSize: outputSize,
                                                                       atTime: insertTime)

            // Hide video track before changing to new track
            let endTime = CMTimeAdd(insertTime, duration)
            if animation {
                let timeScale = videoAsset.duration.timescale
                let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
                layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
            } else {
                layerInstruction.setOpacity(0, at: endTime)
            }
            arrayLayerInstructions.append(layerInstruction)

            // Increase the insert time
            insertTime = CMTimeAdd(insertTime, duration)
        } catch {
            print("Load track error")
        }

        // Watermark effect
        let size = videoTrack.naturalSize

        // Create text layer
        let titleLayer = CATextLayer()
        titleLayer.backgroundColor = UIColor.clear.cgColor
        titleLayer.contentsScale = UIScreen.main.scale
        titleLayer.string = "Dummy text"
        titleLayer.foregroundColor = UIColor.white.cgColor
        titleLayer.font = UIFont(name: "Helvetica", size: 28)
        titleLayer.shadowOpacity = 0.5
        titleLayer.alignmentMode = CATextLayerAlignmentMode.center
        titleLayer.frame = CGRect(x: 0, y: 50, width: size.width, height: size.height)

        let videolayer = CALayer()
        videolayer.backgroundColor = UIColor.clear.cgColor
        // videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        // let layercomposition = AVMutableVideoComposition()
        // layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
        // layercomposition.renderSize = size
        mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: titleLayer)
    }

    // Main video composition instruction
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: insertTime)
    mainInstruction.layerInstructions = arrayLayerInstructions

    // Main video composition
    // mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    mainComposition.renderSize = outputSize

    // Export to file
    let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
    let exportURL = URL.init(fileURLWithPath: path)

    // Remove file if it already exists
    FileManager.default.removeItemIfExisted(exportURL)

    // Init exporter
    let exporter = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter?.outputURL = exportURL
    exporter?.outputFileType = AVFileType.mp4
    exporter?.shouldOptimizeForNetworkUse = true
    exporter?.videoComposition = mainComposition

    // Do export
    exporter?.exportAsynchronously(completionHandler: {
        DispatchQueue.main.async {
            self.exportDidFinish(exporter: exporter, videoURL: exportURL, completion: completion)
        }
    })
}
Just change this part. The original passed in: titleLayer to the animation tool, so the rendered layer tree was rooted at the text layer and the video layer (whose frame was never set, leaving it zero-sized) was not inside it, which is likely why the frames came out black. Build a parent layer that holds both the video layer and the text layer, give each a real frame, and pass that parent as the container:
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
    videoSize.width = videoTrack.naturalSize.height
    videoSize.height = videoTrack.naturalSize.width
}
// let size = videoTrack.naturalSize

// Create text layer
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.clear.cgColor
titleLayer.contentsScale = UIScreen.main.scale
titleLayer.string = questions[counter]
counter = counter + 1
titleLayer.foregroundColor = UIColor.black.cgColor
titleLayer.font = UIFont(name: "Helvetica", size: 28)
titleLayer.shadowOpacity = 0.5
titleLayer.alignmentMode = CATextLayerAlignmentMode.center
titleLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)

let videolayer = CALayer()
videolayer.backgroundColor = UIColor.clear.cgColor
videolayer.backgroundColor = UIColor.red.cgColor
videolayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)

let parentlayer = CALayer()
parentlayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentlayer.addSublayer(videolayer)
parentlayer.addSublayer(titleLayer)

// let layercomposition = AVMutableVideoComposition()
// layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
// layercomposition.renderSize = size
mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)

Trying to overlay an image onto a CALayer and retain the position from the subview (UITextView)

First time poster, looooooong time peruser. I'm using SwiftUI for the layout and UIRepresentables for the camera work (Xcode 11.7), and I'm trying to overlay an image onto a CALayer (for eventual export to video). The image is converted from a UITextView, so the user is free to edit, pinch/zoom, and drag the text to their heart's content. After scouring SO for days and reading Ray Wenderlich tutorials, I've hit a wall. Screenshots below.
Before: freeform text 'coffee' added to the view
After: exported movie still, 'coffee' text position is incorrect
Below is the export function. I suspect I'm doing something wrong with relativePosition.
Thank you for any suggestions; this is my first foray into writing an iOS app.
static func exportLayersToVideo(_ fileUrl: String, _ textView: UITextView) {
    let fileURL = NSURL(fileURLWithPath: fileUrl)
    let composition = AVMutableComposition()
    let vidAsset = AVURLAsset(url: fileURL as URL, options: nil)

    // Get video track
    let vtrack = vidAsset.tracks(withMediaType: AVMediaType.video)
    let videoTrack: AVAssetTrack = vtrack[0]
    let vid_timerange = CMTimeRangeMake(start: CMTime.zero, duration: vidAsset.duration)
    let tr: CMTimeRange = CMTimeRange(start: CMTime.zero, duration: CMTime(seconds: 10.0, preferredTimescale: 600))
    composition.insertEmptyTimeRange(tr)

    let trackID: CMPersistentTrackID = CMPersistentTrackID(kCMPersistentTrackID_Invalid)
    if let compositionvideoTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: trackID) {
        do {
            try compositionvideoTrack.insertTimeRange(vid_timerange, of: videoTrack, at: CMTime.zero)
        } catch {
            print("error")
        }
        compositionvideoTrack.preferredTransform = videoTrack.preferredTransform
    } else {
        print("unable to add video track")
        return
    }

    let size = videoTrack.naturalSize
    let parentlayer = CALayer()
    parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    let videolayer = CALayer()
    videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)

    // Convert UITextView to image
    let renderer = UIGraphicsImageRenderer(size: textView.bounds.size)
    let image = renderer.image { ctx in
        textView.drawHierarchy(in: textView.bounds, afterScreenUpdates: true)
    }
    let imglayer = CALayer()
    let scaledAspect: CGFloat = image.size.width / image.size.height
    let scaledWidth = size.width
    let scaledHeight = scaledWidth / scaledAspect
    let relativePosition = parentlayer.convert(textView.frame.origin, from: textView.layer)
    imglayer.frame = CGRect(x: relativePosition.x, y: relativePosition.y, width: scaledWidth, height: scaledHeight)
    imglayer.contents = image.cgImage

    // Add videolayer and imglayer
    parentlayer.addSublayer(videolayer)
    parentlayer.addSublayer(imglayer)

    let layercomposition = AVMutableVideoComposition()
    layercomposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    layercomposition.renderSize = size
    layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)

    // Instruction for overlay
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: composition.duration)
    let videotrack = composition.tracks(withMediaType: AVMediaType.video)[0] as AVAssetTrack
    let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
    instruction.layerInstructions = [layerinstruction]
    layercomposition.instructions = [instruction]

    // Create new file to receive data
    let dirPaths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
    let docsDir = dirPaths[0] as NSString
    let movieFilePath = docsDir.appendingPathComponent("result.mov")
    let movieDestinationUrl = NSURL(fileURLWithPath: movieFilePath)

    // Use AVAssetExportSession to export video
    let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality)
    assetExport?.outputFileType = AVFileType.mov
    assetExport?.videoComposition = layercomposition

    // Check existence and remove old files
    do { // delete old video
        try FileManager.default.removeItem(at: movieDestinationUrl as URL)
    } catch { print("Error Removing Existing File: \(error.localizedDescription).") }
    do { // delete old video
        try FileManager.default.removeItem(at: fileURL as URL)
    } catch { print("Error Removing Existing File: \(error.localizedDescription).") }

    assetExport?.outputURL = movieDestinationUrl as URL
    assetExport?.exportAsynchronously(completionHandler: {
        switch assetExport!.status {
        case AVAssetExportSession.Status.failed:
            print("failed")
            print(assetExport?.error ?? "unknown error")
        case AVAssetExportSession.Status.cancelled:
            print("cancelled")
            print(assetExport?.error ?? "unknown error")
        default:
            print("Movie complete")
            PHPhotoLibrary.shared().performChanges({
                PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: movieDestinationUrl as URL)
            }) { saved, error in
                if saved {
                    print("Saved")
                }
            }
        }
    })
}
It looks like the x position is correct, but the y is off. I think this is because the origin is at the bottom-left instead of the top-left. Try this:
var relativePosition = parentlayer.convert(textView.frame.origin, from: textView.layer)
relativePosition.y = size.height - relativePosition.y
imglayer.frame = CGRect(x: relativePosition.x, y: relativePosition.y, width: scaledWidth,height: scaledHeight)
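One caveat, as an assumption on my part rather than part of the answer above: in a bottom-left-origin space a frame's origin is its bottom-left corner, so depending on where the image should sit you may also need to subtract the overlay's height when flipping:

relativePosition.y = size.height - relativePosition.y - scaledHeight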

Make AVMutableComposition with image (aspect fill) and video in aspect fit

I am attempting to make a new video using an image that will always be CGSize(375, 667), but with a video that will be different sizes and a contentMode of .aspectFit. The problem is that I cannot figure out how to make the whole video composition the correct size (i.e. the image size); instead it is the video's natural size, with a bunch of weird outcomes. (Edit note: the video should be centered in the view like a normal aspectFit would do for a UIImageView, for example.)
Here is an example of what I am trying to achieve. Note that I already have the image and the video; all I need to do is make the new video with them. This is how it should look (in the image):
[desired result image]
Here is the code I am currently attempting, with a placeholder image of "background" (a random 375x667 image in Assets). I think I may be doing the stuff around the "important stuff" comment improperly, but I cannot figure it out currently:
func makeVideo(fromVideoAt videoURL: URL, forName name: String, onComplete: @escaping (URL?) -> Void) {
    let asset = AVURLAsset(url: videoURL)
    let composition = AVMutableComposition()

    guard
        let compositionTrack = composition.addMutableTrack(
            withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
        let assetTrack = asset.tracks(withMediaType: .video).first
    else {
        print("Something is wrong with the asset.")
        onComplete(nil)
        return
    }

    do {
        let timeRange = CMTimeRange(start: .zero, duration: asset.duration)
        try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
        if let audioAssetTrack = asset.tracks(withMediaType: .audio).first,
           let compositionAudioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
            try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
        }
    } catch {
        print(error)
        onComplete(nil)
        return
    }

    compositionTrack.preferredTransform = assetTrack.preferredTransform
    let videoInfo = orientation(from: assetTrack.preferredTransform)

    // Important stuff potentially? general below:
    let videoSize: CGSize
    if videoInfo.isPortrait {
        videoSize = CGSize(width: 720, height: 1280)
    } else {
        videoSize = CGSize(width: 720, height: 1280) // 720.0, 1280 tiktok default..?
    }

    // The background image:
    let backgroundLayer = CALayer()
    backgroundLayer.frame = CGRect(origin: .zero, size: videoSize) // videosize
    backgroundLayer.contents = UIImage(named: "background")?.cgImage
    backgroundLayer.contentsGravity = .resizeAspectFill
    backgroundLayer.backgroundColor = UIColor.red.cgColor

    // Video layer:
    let videoLayer = CALayer()
    // videoLayer.frame = CGRect(origin: .zero, size: CGSize(width: composition.naturalSize.width, height: composition.naturalSize.height)) // videosize
    videoLayer.backgroundColor = UIColor.yellow.cgColor
    print(composition.naturalSize, "<-- composition.naturalSize")
    videoLayer.frame = CGRect(origin: .zero, size: CGSize(width: videoSize.width, height: composition.naturalSize.height))

    // Output layer putting them together
    let outputLayer = CALayer()
    outputLayer.frame = CGRect(origin: .zero, size: CGSize(width: 720, height: 1280)) // videosize
    outputLayer.backgroundColor = UIColor.white.cgColor
    outputLayer.addSublayer(backgroundLayer)
    outputLayer.addSublayer(videoLayer)
    // outputLayer.addSublayer(overlayLayer)

    let videoComposition = AVMutableVideoComposition()
    videoComposition.renderSize = videoSize
    videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: outputLayer)

    // Setting up instructions
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRange(start: .zero, duration: composition.duration)
    videoComposition.instructions = [instruction]
    let layerInstruction = compositionLayerInstruction(for: compositionTrack, assetTrack: assetTrack)
    instruction.layerInstructions = [layerInstruction]

    // Exporting
    guard let export = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else {
        print("Cannot create export session.")
        onComplete(nil)
        return
    }

    let videoName = UUID().uuidString
    let exportURL = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent(videoName).appendingPathExtension("mp4")
    export.videoComposition = videoComposition
    export.outputFileType = .mov
    export.outputURL = exportURL

    export.exportAsynchronously {
        DispatchQueue.main.async {
            switch export.status {
            case .completed:
                onComplete(exportURL)
            default:
                print("Something went wrong during export.")
                print(export.error ?? "unknown error")
                onComplete(nil)
            }
        }
    }
}
Try this code: https://github.com/vabe1337/VBVideoEditor. It renders video like TikTok or Instagram.
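For the aspect-fit placement itself, a minimal sketch (not from the answer above): AVFoundation's AVMakeRect(aspectRatio:insideRect:) returns the largest centered rect of a given aspect ratio that fits inside a bounding rect, which is exactly the aspectFit math, so the video layer can be framed with it before creating the animation tool:

// Assumes the videoSize canvas and assetTrack from the question's code.
let canvas = CGRect(origin: .zero, size: videoSize)
let fitted = AVMakeRect(aspectRatio: assetTrack.naturalSize, insideRect: canvas)
videoLayer.frame = fitted // centered in the canvas, aspect ratio preserved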

Overlaying text on video is very slow, any faster alternatives?

let asset = AVURLAsset(url: videoURL)
let composition = AVMutableComposition()

guard
    let compositionTrack = composition.addMutableTrack(
        withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
    let assetTrack = asset.tracks(withMediaType: .video).first
else {
    print("Something is wrong with the asset.")
    onComplete(nil)
    return
}

do {
    let timeRange = CMTimeRange(start: .zero, duration: asset.duration)
    try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
    if let audioAssetTrack = asset.tracks(withMediaType: .audio).first,
       let compositionAudioTrack = composition.addMutableTrack(
           withMediaType: .audio,
           preferredTrackID: kCMPersistentTrackID_Invalid) {
        try compositionAudioTrack.insertTimeRange(
            timeRange,
            of: audioAssetTrack,
            at: .zero)
    }
} catch {
    //print(error)
    onComplete(nil)
    return
}

compositionTrack.preferredTransform = assetTrack.preferredTransform
let videoInfo = orientation(from: assetTrack.preferredTransform)

let videoSize: CGSize
if videoInfo.isPortrait {
    videoSize = CGSize(
        width: assetTrack.naturalSize.height,
        height: assetTrack.naturalSize.width)
} else {
    videoSize = assetTrack.naturalSize
}

let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: videoSize)

let textLayer = CATextLayer()
textLayer.string = mainTextview.text
textLayer.shouldRasterize = true
textLayer.rasterizationScale = UIScreen.main.scale
//textLayer.backgroundColor = UIColor.black.cgColor

switch textAlignment {
case .center:
    textLayer.alignmentMode = .center
case .justified:
    textLayer.alignmentMode = .justified
case .left:
    textLayer.alignmentMode = .left
case .right:
    textLayer.alignmentMode = .right
default:
    textLayer.alignmentMode = .center
}

textLayer.isWrapped = true
textLayer.foregroundColor = mainTextview.textColor?.cgColor
textLayer.fontSize = 70

switch textWeight {
case .bold:
    textLayer.font = UIFont.systemFont(ofSize: 0, weight: .bold)
case .light:
    textLayer.font = UIFont.systemFont(ofSize: 0, weight: .light)
case .regular:
    textLayer.font = UIFont.systemFont(ofSize: 0, weight: .regular)
case .medium:
    textLayer.font = UIFont.systemFont(ofSize: 0, weight: .medium)
}

switch textviewPosition {
case .Center:
    textLayer.frame = CGRect(x: 0, y: videoSize.height * 0.30, width: videoSize.width, height: 500)
case .Top:
    textLayer.frame = CGRect(x: 0, y: videoSize.height * 0.60, width: videoSize.width, height: 500)
case .Bottom:
    textLayer.frame = CGRect(x: 10, y: 0, width: videoSize.width, height: 500)
}
textLayer.displayIfNeeded()

let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(textLayer)

let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = videoSize
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
    postProcessingAsVideoLayer: videoLayer,
    in: outputLayer)

let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(
    start: .zero,
    duration: composition.duration)
videoComposition.instructions = [instruction]
let layerInstruction = compositionLayerInstruction(
    for: compositionTrack,
    assetTrack: assetTrack)
instruction.layerInstructions = [layerInstruction]

guard let export = AVAssetExportSession(
    asset: composition,
    presetName: AVAssetExportPresetHighestQuality)
else {
    //print("Cannot create export session.")
    onComplete(nil)
    return
}

let videoName = UUID().uuidString
let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
    .appendingPathComponent(videoName)
    .appendingPathExtension("mp4")
export.videoComposition = videoComposition
export.outputFileType = .mp4
export.outputURL = exportURL

export.exportAsynchronously {
    DispatchQueue.main.async {
        switch export.status {
        case .completed:
            onComplete(exportURL)
        default:
            onComplete(nil)
        }
    }
}
This is the function I use to overlay text on a video (videoURL). I pass in the URL for the video, which is saved in the documents directory, and use an already-created text view to retrieve the text, weight, font, and alignment for the overlay. The majority of the time this process takes 15 seconds, which is a long time for users to wait.
Exporting video is labor-intensive and takes significant time; there's nothing you can do about that.
However, the process is asynchronous, so there is no need to make the user "wait". Also, the exporter vends a progress value, so you can easily show progress as a psychological trick (it is amazing how time flies when you're watching a progress indicator).
Thanks for your response. Under where I initiate the AVAssetExportSession I added a timer:
self.exportTimer = Timer.scheduledTimer(timeInterval: 0.1, target: self, selector: #selector(displayData), userInfo: nil, repeats: true)
Now I can correctly update the display using this method:
@objc private func displayData() {
    self.progressBar.progress = exportSession.progress
    if self.progressBar.progress > 0.99 {
        self.exportTimer.invalidate()
    }
}
I also added invalidate() to the export.status switch statement for when there is an error.
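For completeness, a minimal sketch of that wiring (the exportSession, exportTimer, and progressBar property names are carried over from the snippets above, not from a tested project):

self.exportTimer = Timer.scheduledTimer(timeInterval: 0.1, target: self,
                                        selector: #selector(displayData),
                                        userInfo: nil, repeats: true)
exportSession.exportAsynchronously {
    DispatchQueue.main.async {
        self.exportTimer.invalidate() // stop polling on completion, failure, or cancellation
        // ... handle exportSession.status as before
    }
}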

Adding overlay to video in Swift 3

I'm learning AVFoundation and I'm having a problem trying to save a video with an overlay image in Swift 3. Using AVMutableComposition I'm able to add the image to the video; however, the video is zoomed in and not constraining itself to the portrait size the video was taken in. I've tried:
Setting the natural size through the AVAssetTrack.
Constraining the video to portrait size in the AVMutableVideoComposition renderFrame.
Locking the new video bounds to the recorded video width and height.
The code below works apart from the issue I need help with. The image I'm trying to add covers the entire portrait view and has a border all around the edges. The app also only allows portrait.
func processVideoWithWatermark(video: AVURLAsset, watermark: UIImage, completion: @escaping (Bool) -> Void) {
    let composition = AVMutableComposition()
    let asset = AVURLAsset(url: video.url, options: nil)
    let track = asset.tracks(withMediaType: AVMediaTypeVideo)
    let videoTrack: AVAssetTrack = track[0] as AVAssetTrack
    let timerange = CMTimeRangeMake(kCMTimeZero, asset.duration)
    let compositionVideoTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())

    do {
        try compositionVideoTrack.insertTimeRange(timerange, of: videoTrack, at: kCMTimeZero)
        compositionVideoTrack.preferredTransform = videoTrack.preferredTransform
    } catch {
        print(error)
    }

    // let compositionAudioTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())
    //
    // for audioTrack in asset.tracks(withMediaType: AVMediaTypeAudio) {
    //     do {
    //         try compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: kCMTimeZero)
    //     } catch {
    //         print(error)
    //     }
    // }

    let size = videoTrack.naturalSize

    let watermark = watermark.cgImage
    let watermarklayer = CALayer()
    watermarklayer.contents = watermark
    watermarklayer.frame = CGRect(x: 0, y: 0, width: screenWidth, height: screenHeight)
    watermarklayer.opacity = 1

    let videolayer = CALayer()
    videolayer.frame = CGRect(x: 0, y: 0, width: screenWidth, height: screenHeight)

    let parentlayer = CALayer()
    parentlayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    parentlayer.addSublayer(videolayer)
    parentlayer.addSublayer(watermarklayer)

    let layercomposition = AVMutableVideoComposition()
    layercomposition.frameDuration = CMTimeMake(1, 30)
    layercomposition.renderSize = CGSize(width: screenWidth, height: screenHeight)
    layercomposition.renderScale = 1.0
    layercomposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videolayer, in: parentlayer)

    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRangeMake(kCMTimeZero, composition.duration)
    let videotrack = composition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
    let layerinstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
    layerinstruction.setTransform(videoTrack.preferredTransform, at: kCMTimeZero)
    instruction.layerInstructions = [layerinstruction]
    layercomposition.instructions = [instruction]

    let filePath = NSTemporaryDirectory() + self.fileName()
    let movieUrl = URL(fileURLWithPath: filePath)

    guard let assetExport = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else { return }
    assetExport.videoComposition = layercomposition
    assetExport.outputFileType = AVFileTypeMPEG4
    assetExport.outputURL = movieUrl

    assetExport.exportAsynchronously(completionHandler: {
        switch assetExport.status {
        case .completed:
            print("success")
            print(video.url)
            self.saveVideoToUserLibrary(fileURL: movieUrl, completion: { (success, error) in
                if success {
                    completion(true)
                } else {
                    completion(false)
                }
            })
        case .cancelled:
            print("cancelled")
        case .exporting:
            print("exporting")
        case .failed:
            print(video.url)
            print("failed: \(assetExport.error!)")
        case .unknown:
            print("unknown")
        case .waiting:
            print("waiting")
        }
    })
}
If the video layer should fill the parent layer, your videolayer's frame is incorrect. You need to set its size equal to size (the track's natural size) instead of the screen size.
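A minimal sketch of that change, using the same variable names as the question's code (the watermarklayer frame and the renderSize may want the same treatment, so everything shares one coordinate space):

videolayer.frame = CGRect(x: 0, y: 0, width: size.width, height: size.height)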