Here is my code that adds image and text overlays to a local video. The problem is that it's extremely SLOW. Any ideas on how to fix it?
I would also appreciate suggestions for third-party libraries that can do watermarking.
public func addWatermark(
fromVideoAt videoURL: URL,
watermark: Watermark,
fileName: String,
onSuccess: @escaping (URL) -> Void,
onFailure: @escaping ((Error?) -> Void)
) {
let asset = AVURLAsset(url: videoURL)
let composition = AVMutableComposition()
guard
let compositionTrack = composition.addMutableTrack(
withMediaType: .video,
preferredTrackID: kCMPersistentTrackID_Invalid
),
let assetTrack = asset.tracks(withMediaType: .video).first
else {
onFailure(nil)
return
}
do {
let timeRange = CMTimeRange(start: .zero, duration: assetTrack.timeRange.duration)
try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
if let audioAssetTrack = asset.tracks(withMediaType: .audio).first,
let compositionAudioTrack = composition.addMutableTrack(
withMediaType: .audio,
preferredTrackID: kCMPersistentTrackID_Invalid
) {
try compositionAudioTrack.insertTimeRange(
timeRange,
of: audioAssetTrack,
at: .zero
)
}
} catch {
onFailure(error)
return
}
compositionTrack.preferredTransform = assetTrack.preferredTransform
let videoInfo = orientation(from: assetTrack.preferredTransform)
let videoSize: CGSize
if videoInfo.isPortrait {
videoSize = CGSize(
width: assetTrack.naturalSize.height,
height: assetTrack.naturalSize.width
)
} else {
videoSize = assetTrack.naturalSize
}
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: videoSize)
let overlayLayer = CALayer()
overlayLayer.frame = CGRect(origin: .zero, size: videoSize)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
let imageFrame = watermark.calculateImageFrame(parentSize: videoSize)
addImage(watermark.image, to: overlayLayer, frame: imageFrame)
let textOrigin = CGPoint(x: imageFrame.minX + 4, y: imageFrame.minY)
if let text = watermark.text {
addText(
text,
to: overlayLayer,
origin: textOrigin,
textAttributes: Watermark.textAttributes(type: watermark.type)
)
}
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(overlayLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = videoSize
videoComposition.frameDuration = CMTime(value: 1, timescale: 60)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
postProcessingAsVideoLayer: videoLayer,
in: outputLayer
)
videoComposition.colorPrimaries = AVVideoColorPrimaries_ITU_R_709_2
videoComposition.colorTransferFunction = "sRGB"
videoComposition.colorYCbCrMatrix = nil
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(start: .zero, duration: composition.duration)
videoComposition.instructions = [instruction]
let layerInstruction = compositionLayerInstruction(
for: compositionTrack,
assetTrack: assetTrack
)
instruction.layerInstructions = [layerInstruction]
guard let export = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPresetHighestQuality
)
else {
onFailure(nil)
return
}
let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
.appendingPathComponent(fileName)
.appendingPathExtension("mov")
export.videoComposition = videoComposition
export.outputFileType = .mov
export.outputURL = exportURL
export.exportAsynchronously {
DispatchQueue.main.async {
switch export.status {
case .completed:
onSuccess(exportURL)
default:
onFailure(export.error)
}
}
}
}
Watermark is the wrapper struct. It contains image/text, text attributes, size and other similar helpful information.
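A sketch of roughly what that wrapper could look like, pieced together from how it is used above (illustrative only; the real struct differs):

// Hypothetical sketch of the wrapper described above; not the actual type.
struct Watermark {
    enum WatermarkType { case standard, prominent }   // illustrative cases

    let image: UIImage
    let text: String?
    let type: WatermarkType

    // Where the image overlay should sit inside the video frame.
    func calculateImageFrame(parentSize: CGSize) -> CGRect {
        let side = parentSize.width * 0.15
        return CGRect(x: 16, y: 16, width: side, height: side)
    }

    static func textAttributes(type: WatermarkType) -> [NSAttributedString.Key: Any] {
        [.font: UIFont.systemFont(ofSize: 24, weight: .semibold),
         .foregroundColor: UIColor.white]
    }
}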
I've tried the following without any luck:
export.shouldOptimizeForNetworkUse = true; it did not work.
AVAssetExportPresetPassthrough instead of AVAssetExportPresetHighestQuality; it removed the overlays.
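One thing I have not tried yet is matching the composition frame rate to the source instead of forcing 60 fps via frameDuration, which could reduce how many frames the Core Animation tool has to composite. A minimal sketch, assuming assetTrack is the video track loaded above:

// Drive the video composition at the source frame rate instead of a fixed 60 fps.
let sourceFPS = assetTrack.nominalFrameRate > 0 ? assetTrack.nominalFrameRate : 30
videoComposition.frameDuration = CMTime(value: 1, timescale: CMTimeScale(sourceFPS))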
I have the following code, which is relatively fast: it watermarks an 8-second video in about 2.56 seconds. When I ran it under the Metal System Trace instrument it appeared well balanced and GPU-accelerated the whole time. You just call exportIt().
As a side note, this code uses async/await wrappers around the AVKit/AVFoundation calls and migrates off the interfaces deprecated as of iOS 16.
A tidied-up, working sample app with resource files is at https://github.com/faisalmemon/watermark
The core code is as follows:
//
// WatermarkHelper.swift
// watermark
//
// Created by Faisal Memon on 09/02/2023.
//
import Foundation
import AVKit
struct WatermarkHelper {
enum WatermarkError: Error {
case cannotLoadResources
case cannotAddTrack
case cannotLoadVideoTrack(Error?)
case cannotCopyOriginalAudioVideo(Error?)
case noVideoTrackPresent
case exportSessionCannotBeCreated
}
func compositionAddMediaTrack(_ composition: AVMutableComposition, withMediaType mediaType: AVMediaType) throws -> AVMutableCompositionTrack {
guard let compositionTrack = composition.addMutableTrack(
withMediaType: mediaType,
preferredTrackID: kCMPersistentTrackID_Invalid) else {
throw WatermarkError.cannotAddTrack
}
return compositionTrack
}
func loadTrack(inputVideo: AVAsset, withMediaType mediaType: AVMediaType) async throws -> AVAssetTrack? {
return try await withCheckedThrowingContinuation({
(continuation: CheckedContinuation<AVAssetTrack?, Error>) in
inputVideo.loadTracks(withMediaType: mediaType) { tracks, error in
if let tracks = tracks {
continuation.resume(returning: tracks.first)
} else {
continuation.resume(throwing: WatermarkError.cannotLoadVideoTrack(error))
}
}
})
}
func bringOverVideoAndAudio(inputVideo: AVAsset, assetTrack: AVAssetTrack, compositionTrack: AVMutableCompositionTrack, composition: AVMutableComposition) async throws {
do {
let timeRange = await CMTimeRange(start: .zero, duration: try inputVideo.load(.duration))
try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
if let audioAssetTrack = try await loadTrack(inputVideo: inputVideo, withMediaType: .audio) {
let compositionAudioTrack = try compositionAddMediaTrack(composition, withMediaType: .audio)
try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
}
} catch {
print(error)
throw WatermarkError.cannotCopyOriginalAudioVideo(error)
}
}
private func orientation(from transform: CGAffineTransform) -> (orientation: UIImage.Orientation, isPortrait: Bool) {
var assetOrientation = UIImage.Orientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
func preferredTransformAndSize(compositionTrack: AVMutableCompositionTrack, assetTrack: AVAssetTrack) async throws -> (preferredTransform: CGAffineTransform, videoSize: CGSize) {
let transform = try await assetTrack.load(.preferredTransform)
let videoInfo = orientation(from: transform)
let videoSize: CGSize
let naturalSize = try await assetTrack.load(.naturalSize)
if videoInfo.isPortrait {
videoSize = CGSize(
width: naturalSize.height,
height: naturalSize.width)
} else {
videoSize = naturalSize
}
return (transform, videoSize)
}
private func compositionLayerInstruction(for track: AVCompositionTrack, assetTrack: AVAssetTrack, preferredTransform: CGAffineTransform) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
instruction.setTransform(preferredTransform, at: .zero)
return instruction
}
private func addImage(to layer: CALayer, watermark: UIImage, videoSize: CGSize) {
let imageLayer = CALayer()
let aspect: CGFloat = watermark.size.width / watermark.size.height
let width = videoSize.width
let height = width / aspect
imageLayer.frame = CGRect(
x: 0,
y: -height * 0.15,
width: width,
height: height)
imageLayer.contents = watermark.cgImage
layer.addSublayer(imageLayer)
}
func composeVideo(composition: AVMutableComposition, videoComposition: AVMutableVideoComposition, compositionTrack: AVMutableCompositionTrack, assetTrack: AVAssetTrack, preferredTransform: CGAffineTransform) {
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(
start: .zero,
duration: composition.duration)
videoComposition.instructions = [instruction]
let layerInstruction = compositionLayerInstruction(
for: compositionTrack,
assetTrack: assetTrack, preferredTransform: preferredTransform)
instruction.layerInstructions = [layerInstruction]
}
func exportSession(composition: AVMutableComposition, videoComposition: AVMutableVideoComposition, outputURL: URL) throws -> AVAssetExportSession {
guard let export = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPresetHighestQuality)
else {
print("Cannot create export session.")
throw WatermarkError.exportSessionCannotBeCreated
}
export.videoComposition = videoComposition
export.outputFileType = .mp4
export.outputURL = outputURL
return export
}
func executeSession(_ session: AVAssetExportSession) async throws -> AVAssetExportSession.Status {
return try await withCheckedThrowingContinuation({
(continuation: CheckedContinuation<AVAssetExportSession.Status, Error>) in
session.exportAsynchronously {
DispatchQueue.main.async {
if let error = session.error {
continuation.resume(throwing: error)
} else {
continuation.resume(returning: session.status)
}
}
}
})
}
func addWatermarkTopDriver(inputVideo: AVAsset, outputURL: URL, watermark: UIImage) async throws -> AVAssetExportSession.Status {
let composition = AVMutableComposition()
let compositionTrack = try compositionAddMediaTrack(composition, withMediaType: .video)
guard let videoAssetTrack = try await loadTrack(inputVideo: inputVideo, withMediaType: .video) else {
throw WatermarkError.noVideoTrackPresent
}
try await bringOverVideoAndAudio(inputVideo: inputVideo, assetTrack: videoAssetTrack, compositionTrack: compositionTrack, composition: composition)
let transformAndSize = try await preferredTransformAndSize(compositionTrack: compositionTrack, assetTrack: videoAssetTrack)
compositionTrack.preferredTransform = transformAndSize.preferredTransform
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: transformAndSize.videoSize)
let overlayLayer = CALayer()
overlayLayer.frame = CGRect(origin: .zero, size: transformAndSize.videoSize)
addImage(to: overlayLayer, watermark: watermark, videoSize: transformAndSize.videoSize)
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: transformAndSize.videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(overlayLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = transformAndSize.videoSize
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
postProcessingAsVideoLayer: videoLayer,
in: outputLayer)
composeVideo(composition: composition, videoComposition: videoComposition, compositionTrack: compositionTrack, assetTrack: videoAssetTrack, preferredTransform: transformAndSize.preferredTransform)
let session = try exportSession(composition: composition, videoComposition: videoComposition, outputURL: outputURL)
return try await executeSession(session)
}
/// Creates a watermarked movie and saves it to the documents directory.
///
/// For an 8 second video (251 frames), this code takes 2.56 seconds on iPhone 11 producing a high quality video at 30 FPS.
/// - Returns: Time interval taken for processing.
public func exportIt() async throws -> TimeInterval {
let timeStart = Date()
guard
let filePath = Bundle.main.path(forResource: "donut-spinning", ofType: "mp4"),
let docUrl = try? FileManager.default.url(for: .documentDirectory, in: .userDomainMask, appropriateFor: nil, create: true),
let watermarkImage = UIImage(systemName: "seal") else {
throw WatermarkError.cannotLoadResources
}
let videoAsset = AVAsset(url: URL(filePath: filePath))
let outputURL = docUrl.appending(component: "watermark-donut-spinning.mp4")
try? FileManager.default.removeItem(at: outputURL)
print(outputURL)
let result = try await addWatermarkTopDriver(inputVideo: videoAsset, outputURL: outputURL, watermark: watermarkImage)
let timeEnd = Date()
let duration = timeEnd.timeIntervalSince(timeStart)
print(result)
return duration
}
}
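For completeness, one way to drive it from any async context (an illustrative call site, not part of the helper itself):

// Run the export and log the elapsed time.
Task {
    do {
        let seconds = try await WatermarkHelper().exportIt()
        print("Watermarking finished in \(seconds) seconds")
    } catch {
        print("Watermarking failed: \(error)")
    }
}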
Use the method below to add a watermark to a video quickly.
func addWatermark(inputURL: URL, outputURL: URL, handler: @escaping (_ exportSession: AVAssetExportSession?) -> Void) {
let mixComposition = AVMutableComposition()
let asset = AVAsset(url: inputURL)
let videoTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
let timerange = CMTimeRangeMake(start: CMTime.zero, duration: asset.duration)
let compositionVideoTrack:AVMutableCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))!
do {
try compositionVideoTrack.insertTimeRange(timerange, of: videoTrack, at: CMTime.zero)
compositionVideoTrack.preferredTransform = videoTrack.preferredTransform
} catch {
print(error)
}
let watermarkFilter = CIFilter(name: "CISourceOverCompositing")!
let watermarkImage = CIImage(image: UIImage(named: "waterMark")!)
let videoComposition = AVVideoComposition(asset: asset) { (filteringRequest) in
let source = filteringRequest.sourceImage.clampedToExtent()
watermarkFilter.setValue(source, forKey: "inputBackgroundImage")
let transform = CGAffineTransform(translationX: filteringRequest.sourceImage.extent.width - (watermarkImage?.extent.width)! - 2, y: 0)
watermarkFilter.setValue(watermarkImage?.transformed(by: transform), forKey: "inputImage")
filteringRequest.finish(with: watermarkFilter.outputImage!, context: nil)
}
guard let exportSession = AVAssetExportSession(asset: asset, presetName: AVAssetExportPreset640x480) else {
handler(nil)
return
}
exportSession.outputURL = outputURL
exportSession.outputFileType = AVFileType.mp4
exportSession.shouldOptimizeForNetworkUse = true
exportSession.videoComposition = videoComposition
exportSession.exportAsynchronously { () -> Void in
handler(exportSession)
}
}
Call the method like this when you want to add a watermark:
let outputURL = NSURL.fileURL(withPath: "TempPath")
let inputURL = NSURL.fileURL(withPath: "VideoWithWatermarkPath")
addWatermark(inputURL: inputURL, outputURL: outputURL, handler: { (exportSession) in
guard let session = exportSession else {
// Error
return
}
switch session.status {
case .completed:
guard NSData(contentsOf: outputURL) != nil else {
// Error
return
}
// Now you can find the video with the watermark in the location outputURL
default:
// Error
break
}
})
Have you checked out Apple's documentation? It shows how to add a title layer (CALayer) on top of an existing AVMutableComposition or AVAsset. Since it's a legacy doc from the iOS 6 era, you'll need to refactor a bit, but it should be fast on today's hardware.
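For a rough idea of what that looks like in modern Swift, here is a minimal sketch of the title-layer technique (names and sizes are illustrative; it assumes a videoComposition already configured with renderSize, frameDuration, and instructions, as in the code above):

import AVFoundation
import UIKit

func addTitleOverlay(to videoComposition: AVMutableVideoComposition, videoSize: CGSize) {
    let titleLayer = CATextLayer()
    titleLayer.string = "My Title"
    titleLayer.fontSize = 36
    titleLayer.alignmentMode = .center
    titleLayer.contentsScale = UIScreen.main.scale          // keep the text crisp
    titleLayer.frame = CGRect(x: 0, y: videoSize.height * 0.75,
                              width: videoSize.width, height: videoSize.height * 0.25)

    let videoLayer = CALayer()                               // the video frames render here
    videoLayer.frame = CGRect(origin: .zero, size: videoSize)

    let parentLayer = CALayer()                              // video below, title on top
    parentLayer.frame = CGRect(origin: .zero, size: videoSize)
    parentLayer.addSublayer(videoLayer)
    parentLayer.addSublayer(titleLayer)

    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
        postProcessingAsVideoLayer: videoLayer, in: parentLayer)
}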
Related
I am trying to crop a video with AVMutableComposition. The exported video seems rotated by an unknown angle, and it does not even pick the correct region of the video. I have debugged the frame, and the crop frame I pass in is correct. Even when the video's preferredTransform is the identity, it still does not crop the correct region.
My code is like below:
public func exportVideo(asset: AVAsset, destinationDirectory: URL? = nil, editingOptions: MediaEditingOptions? = nil,
completion: @escaping VideoExportCompletion) -> ExportOperation? {
let composition = AVMutableComposition()
guard let videoTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
let audioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) else {
completion(.failure(Error.couldNotAddMutableTracks))
return nil
}
let timeRange = editingOptions?.videoEditingOptions.timeRange ?? CMTimeRange(start: .zero, duration: asset.duration)
if let videoAssetTrack = asset.tracks(withMediaType: .video).first {
do {
try videoTrack.insertTimeRange(timeRange, of: videoAssetTrack, at: .zero)
} catch {
completion(.failure(error))
return nil
}
} else {
completion(.failure(Error.couldNotFindVideoTrack))
return nil
}
if let audioAssetTrack = asset.tracks(withMediaType: .audio).first {
do {
try audioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
} catch {
completion(.failure(error))
return nil
}
}
let preset = editingOptions?.videoEditingOptions.preset ?? VideoEditingOptions.defaultPreset
guard let exportSession = AVAssetExportSession(asset: composition, presetName: preset) else {
completion(.failure(Error.couldNotInitiateExportSession))
return nil
}
guard var exportURL = destinationDirectory ?? cacheDirectoryURL() else {
completion(.failure(Error.couldNotDetermineOutputDirectory))
return nil
}
/// Prepare Instructions
let compositionInstructions = AVMutableVideoCompositionInstruction()
compositionInstructions.timeRange = CMTimeRange(start: .zero, duration: asset.duration)
let layerInstructions = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let cropFrame = editingOptions?.scrollBounds ?? CGRect(x: 0, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
let transform = getTransform(for: videoTrack, cropFrame: cropFrame)
layerInstructions.setTransform(transform, at: .zero)
layerInstructions.setOpacity(1.0, at: CMTime.zero)
compositionInstructions.layerInstructions = [layerInstructions]
/// Add video composition.
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = cropFrame.size
videoComposition.instructions = [compositionInstructions]
videoComposition.frameDuration = videoTrack.minFrameDuration
exportURL.appendPathComponent(UUID().uuidString + ".mp4")
exportSession.videoComposition = videoComposition
exportSession.outputURL = exportURL
exportSession.outputFileType = AVFileType.mov
exportSession.exportAsynchronously {
if exportSession.status == .failed {
completion(.failure(exportSession.error ?? Error.unknown))
} else if exportSession.status == .completed {
completion(.success(exportURL))
} else {
completion(.failure(Error.unknown))
}
}
return exportSession
}
func getTransform(for videoTrack: AVAssetTrack, cropFrame: CGRect) -> CGAffineTransform {
let renderSize = cropFrame.size
let renderScale = renderSize.width / cropFrame.width
let offset = CGPoint(x: -cropFrame.origin.x, y: -cropFrame.origin.y)
let rotation = atan2(videoTrack.preferredTransform.b, videoTrack.preferredTransform.a)
var rotationOffset = CGPoint(x: 0, y: 0)
if videoTrack.preferredTransform.b == -1.0 {
rotationOffset.y = videoTrack.naturalSize.width
} else if videoTrack.preferredTransform.c == -1.0 {
rotationOffset.x = videoTrack.naturalSize.height
} else if videoTrack.preferredTransform.a == -1.0 {
rotationOffset.x = videoTrack.naturalSize.width
rotationOffset.y = videoTrack.naturalSize.height
}
var transform = CGAffineTransform.identity
transform = transform.scaledBy(x: renderScale, y: renderScale)
transform = transform.translatedBy(x: offset.x + rotationOffset.x, y: offset.y + rotationOffset.y)
transform = transform.rotated(by: rotation)
print("track size \(videoTrack.naturalSize)")
print("preferred Transform = \(videoTrack.preferredTransform)")
print("rotation angle \(rotation)")
print("rotation offset \(rotationOffset)")
print("actual Transform = \(transform)")
return transform
}
I don't see anything obvious here. Am I missing something?
I am trying to export video using the following code. It works fine for the first 3 attempts and then fails on subsequent ones. I am trying to add recorded voices over the video. I am pretty new to all these concepts, so any help will be appreciated.
open func generate(video url: URL, with frame: CGRect? = nil, byApplying transformation: CGAffineTransform? = nil, in previewArea: CGRect? = nil, previewCornerRadius: Float = 0, overlayImage: UIImage? = nil, setOverlayAsBackground: Bool = false, gifLayer: CALayer? = nil, audioUrl: URL? = nil, muteAudio: Bool = false, success: @escaping ((URL) -> Void), failure: @escaping ((Error) -> Void)) {
let mixComposition: AVMutableComposition = AVMutableComposition()
var mutableCompositionVideoTrack: AVMutableCompositionTrack? = nil
var mutableCompositionOriginalAudioTrack: AVMutableCompositionTrack? = nil
var mutableCompositionAudioTrack: AVMutableCompositionTrack? = nil
let totalVideoCompositionInstruction : AVMutableVideoCompositionInstruction = AVMutableVideoCompositionInstruction()
let aVideoAsset: AVAsset = AVAsset(url: url)
var aAudioAsset: AVAsset? = nil
if let url = audioUrl {
aAudioAsset = AVAsset(url: url)
}
if let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid) {
mutableCompositionVideoTrack = videoTrack
if aAudioAsset != nil, let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
mutableCompositionAudioTrack = audioTrack
}
if !muteAudio, aVideoAsset.hasAudio, let originalAudioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
/// If original audio present
mutableCompositionOriginalAudioTrack = originalAudioTrack
}
}
do {
var originalVideoSize: CGSize = self.prefferedVideoSize
let ratio = self.prefferedVideoSize.width / Utility.get9By16ScreenSize().width
if let aVideoAssetTrack: AVAssetTrack = aVideoAsset.tracks(withMediaType: .video).first {
originalVideoSize = aVideoAssetTrack.naturalSize
var transforms = aVideoAssetTrack.preferredTransform
if aVideoAsset.videoOrientation().orientation == .landscapeLeft || aVideoAsset.videoOrientation().orientation == .landscapeRight {
let ratio = self.prefferedVideoSize.width / originalVideoSize.width
let centerY: CGFloat = (self.prefferedVideoSize.height - (originalVideoSize.height * ratio)) / 2
transforms = transforms.concatenating(CGAffineTransform(translationX: 0, y: centerY).scaledBy(x: ratio, y: ratio))
}
try mutableCompositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration), of: aVideoAssetTrack, at: CMTime.zero)
if !muteAudio, aVideoAsset.hasAudio, let audioAssetTrack: AVAssetTrack = aVideoAsset.tracks(withMediaType: .audio).first {
try mutableCompositionOriginalAudioTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: audioAssetTrack.timeRange.duration), of: audioAssetTrack, at: CMTime.zero)
}
if let audioAsset = aAudioAsset, let aAudioAssetTrack: AVAssetTrack = audioAsset.tracks(withMediaType: .audio).first {
try mutableCompositionAudioTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aAudioAssetTrack.timeRange.duration), of: aAudioAssetTrack, at: CMTime.zero)
}
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration)
let mixVideoTrack = mixComposition.tracks(withMediaType: AVMediaType.video)[0]
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: mixVideoTrack)
layerInstruction.setTransform(transforms, at: CMTime.zero)
totalVideoCompositionInstruction.layerInstructions = [layerInstruction]
}
let mutableVideoComposition: AVMutableVideoComposition = AVMutableVideoComposition()
mutableVideoComposition.frameDuration = CMTimeMake(value: 1, timescale: 12)
mutableVideoComposition.renderSize = self.prefferedVideoSize
mutableVideoComposition.instructions = [totalVideoCompositionInstruction]
let parentLayer = CALayer()
parentLayer.frame = self.prefferedVideoRect
parentLayer.isGeometryFlipped = true
let videoLayer = CALayer()
videoLayer.contentsGravity = .resizeAspect
videoLayer.contentsScale = 1
videoLayer.frame = self.prefferedVideoRect
if let frame = frame {
let scalledFrame = frame.scale(by: ratio)
videoLayer.frame = scalledFrame
let videoContainerLayer = CALayer()
parentLayer.frame = self.prefferedVideoRect
parentLayer.addSublayer(videoContainerLayer)
videoContainerLayer.addSublayer(videoLayer)
if let transformation = transformation {
if let previewFrame = previewArea {
let maskLayer = CALayer()
maskLayer.backgroundColor = UIColor.black.cgColor
let scalledMaskFrame = previewFrame.scale(by: ratio)
maskLayer.frame = scalledMaskFrame
maskLayer.cornerRadius = previewCornerRadius.cgFloat
maskLayer.masksToBounds = true
videoContainerLayer.mask = maskLayer
}
videoLayer.transform = CATransform3DMakeAffineTransform(transformation)
}
} else {
parentLayer.addSublayer(videoLayer)
}
/// Add overlay if overlay image present
if let image = overlayImage {
let imageLayer = CALayer()
imageLayer.contents = image.cgImage
imageLayer.frame = self.prefferedVideoRect
imageLayer.masksToBounds = true
if setOverlayAsBackground {
parentLayer.insertSublayer(imageLayer, at: 0)
} else {
parentLayer.addSublayer(imageLayer)
}
}
/// Add overlay if overlay image present
if let overlay = gifLayer {
overlay.frame = CGRect(origin: CGPoint(x: (self.prefferedVideoSize.width - overlay.frame.width) / 2, y: (self.prefferedVideoSize.height - overlay.frame.height) / 2), size: overlay.frame.size)
overlay.transform = CATransform3DMakeAffineTransform(CGAffineTransform(scaleX: ratio, y: ratio))
parentLayer.addSublayer(overlay)
}
mutableVideoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let outputURL = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent("tmp-rendered-video-R6S9K2B4.m4v")
self.exportVideo(from: mixComposition, toFile: outputURL, with: mutableVideoComposition, success: success, failure: failure)
} catch{
DCDebug.print(error)
failure(error)
}
}
func exportVideo(from composition: AVComposition, toFile output: URL, with videoComposition: AVVideoComposition? = nil, success: @escaping ((URL) -> Void), failure: @escaping ((Error) -> Void)) {
do {
if FileManager.default.fileExists(atPath: output.path) {
try FileManager.default.removeItem(at: output)
}
if let exportSession = AVAssetExportSession(asset: composition, presetName: self.presetName ?? AVAssetExportPresetHighestQuality) {
exportSession.outputURL = output
exportSession.outputFileType = AVFileType.mp4
exportSession.shouldOptimizeForNetworkUse = true
if let videoComposition = videoComposition {
exportSession.videoComposition = videoComposition
}
/// try to export the file and handle the status cases
exportSession.exportAsynchronously(completionHandler: {
DispatchQueue.main.async {
switch exportSession.status {
case .completed:
success(output)
case .failed:
if let _error = exportSession.error {
failure(_error)
}
case .cancelled:
if let _error = exportSession.error {
failure(_error)
}
default:
success(output)
}
}
})
} else {
failure(VideoMakerError(error: .kFailedToStartAssetExportSession))
}
} catch {
DCDebug.print(error)
failure(error)
}
}
I am getting the following error:
Domain=AVFoundationErrorDomain Code=-11841 "Operation Stopped" UserInfo={NSLocalizedDescription=Operation Stopped, NSLocalizedFailureReason=The video could not be composed.}
The following are the time range values when the export fails:
(lldb) po aVideoAsset.tracks(withMediaType: .audio).first?.timeRange.duration
▿ Optional<CMTime>
▿ some : CMTime
- value : 1852
- timescale : 600
▿ flags : CMTimeFlags
- rawValue : 1
- epoch : 0
(lldb) po aVideoAsset.tracks(withMediaType: .video).first?.timeRange.duration
▿ Optional<CMTime>
▿ some : CMTime
- value : 1800
- timescale : 600
▿ flags : CMTimeFlags
- rawValue : 1
- epoch : 0
I solved this issue by replacing the following line
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration)
with
if let originalAudioTrack = mutableCompositionOriginalAudioTrack, originalAudioTrack.timeRange.duration > aVideoAssetTrack.timeRange.duration, !muteAudio, aVideoAsset.hasAudio {
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: originalAudioTrack.timeRange.duration)
} else {
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration)
}
This solved my issue, but I am not sure whether it is the correct solution or just a hack, so if anyone can give me a proper explanation of this issue and a valid solution other than this, the bounty is yours.
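A plausible explanation, assuming the usual AVFoundation rule that video composition instructions must cover the entire composition without gaps: the recorded audio track (1852/600 s) is slightly longer than the video track (1800/600 s), so the composition ends up longer than the instruction's time range and the composer refuses to render, which surfaces as error -11841. Sizing the instruction to the composition itself avoids having to track which input is longest; a minimal sketch:

// Derive the instruction's time range from the composition, after all tracks
// have been inserted, so it always covers the full duration.
totalVideoCompositionInstruction.timeRange = CMTimeRange(
    start: .zero,
    duration: mixComposition.duration)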
I am creating a video out of an image and then need to access it right away. I am creating it by adding an imageBuffer of the image to an AVAssetWriter. The video is created here:
while !adaptor.assetWriterInput.isReadyForMoreMediaData { usleep(10) }
let first = adaptor.append(buffer, withPresentationTime: startFrameTime)
while !adaptor.assetWriterInput.isReadyForMoreMediaData { usleep(10) }
let second = adaptor.append(buffer, withPresentationTime: endFrameTime)
videoWriterInput.markAsFinished()
videoWriter.finishWriting {
completion(videoWriter.error)
}
Both first and second return true (successful appends) and there is no error on videoWriter, so the video has been created. I can then "retrieve" the asset upon completion with:
makeVideo(image, urlDestination) { error in
guard error == nil else { return }
let imageAsset = AVAsset(url: url)
guard
let imageTrack = self.composition.addMutableTrack(
withMediaType: .video,
preferredTrackID: kCMPersistentTrackID_Invalid),
let imageVideoTrack = imageAsset.tracks(withMediaType: .video).first else {
assertionFailure()
return
}
try! imageTrack.insertTimeRange(
CMTimeRangeMake(start: .zero, duration: self.duration),
of: imageVideoTrack,
at: .zero
)
let imageVideoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: imageTrack)
}
However, the first time I try to access it I don't get any video: there is a video track with the right duration, but nothing is displayed. If I add it to a PreviewController I get nothing. If I dismiss the PreviewController and access the asset a second time, it works.
My first thought was that this is a timing issue, but even if I add a delay it fails the first time.
Any thoughts? Keep in mind that this code works when the file at the URL already exists, just not right after it is created.
Edit:
The above are the parts of code I think are pertinent to this question. Fuller code is as below:
private func filePath() -> URL {
let fileManager = FileManager.default
let urls = fileManager.urls(for: .documentDirectory, in: .userDomainMask)
guard let documentDirectory = urls.first else {
fatalError("documentDir Error")
}
return documentDirectory
}
class VideoComposer {
let composition = AVMutableComposition()
let mainInstruction = AVMutableVideoCompositionInstruction()
let duration: CMTime
let videoSize: CGSize
var viewSizeMultiplier: CGFloat = 5.0
init(view: UIView) {
videoSize = CGSize(width: 1772.0, height: 3840.0)
viewSizeMultiplier = 1772.0 / view.frame.width
self.duration = CMTime(seconds: 15, preferredTimescale: 600)
mainInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: self.duration)
view.subviews.reversed().forEach { subview in
if let imageView = subview as? UIImageView {
addImage(of: imageView)
}
else {
print("unhandled view type")
}
}
}
func createVideo(completion: @escaping (AVAssetExportSession) -> Void) {
// make video composition
let videoComposition = AVMutableVideoComposition()
videoComposition.instructions = [mainInstruction]
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 60)
videoComposition.renderSize = videoSize
export(videoComposition: videoComposition) { (session) in
completion(session)
}
}
private func export(videoComposition: AVMutableVideoComposition, completion: @escaping (AVAssetExportSession) -> Void) {
// export
let url = filePath().appendingPathComponent("output.mov")
let fileManager = FileManager.default
if fileManager.fileExists(atPath: url.path) {
try! fileManager.removeItem(at: url)
}
guard let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else {
assertionFailure()
return
}
exporter.videoComposition = videoComposition
exporter.outputFileType = .mov
exporter.outputURL = url
exporter.exportAsynchronously {
DispatchQueue.main.async {
completion(exporter)
}
}
}
private func addImage(of imageView: UIImageView) {
guard let image = imageView.image else {
assertionFailure("no image")
return
}
let movieLength = TimeInterval(duration.seconds)
let url = filePath().appendingPathComponent("image.mov")
ImageVideoCreator.writeSingleImageToMovie(image: image, movieLength: movieLength, outputFileURL: url) { [weak self] success in
guard let `self` = self else {
return
}
let imageAsset = AVAsset(url: url)
let keys = ["playable", "readable", "composable", "tracks", "exportable"]
var error: NSError? = nil
imageAsset.loadValuesAsynchronously(forKeys: keys, completionHandler: {
DispatchQueue.main.async {
keys.forEach({ key in
let status = imageAsset.statusOfValue(forKey: key, error: &error)
switch status {
case .loaded:
print("loaded. \(error)")
case .loading:
print("loading. \(error)")
case .failed:
print("failed. \(error)")
case .cancelled:
print("cancelled. \(error)")
case .unknown:
print("unknown. \(error)")
}
})
guard
let imageTrack = self.composition.addMutableTrack(
withMediaType: .video,
preferredTrackID: kCMPersistentTrackID_Invalid),
let imageVideoTrack = imageAsset.tracks(withMediaType: .video).first
else {
assertionFailure()
return
}
try! imageTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: self.duration), of: imageVideoTrack, at: .zero)
let imageVideoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: imageTrack)
print("image")
self.setTransform(on: imageVideoLayerInstruction, of: imageView, andOf: imageVideoTrack)
self.mainInstruction.layerInstructions.append(imageVideoLayerInstruction)
}
})
}
}
}
class ViewController: UIViewController {
var composer: VideoComposer?
let player = AVPlayerViewController()
override func viewDidLoad() {
super.viewDidLoad()
guard let pathUrl = Bundle.main.url(forResource: "SampleVideo_1280x720_1mb", withExtension: "mp4") else {
assertionFailure()
return
}
let image = UIImage(named: "image")
let imageView = UIImageView(image: image)
view.addSubview(imageView)
imageView.translatesAutoresizingMaskIntoConstraints = false
imageView.topAnchor.constraint(equalTo: view.topAnchor, constant: 0).isActive = true
imageView.leadingAnchor.constraint(equalTo: view.leadingAnchor, constant: 0).isActive = true
imageView.widthAnchor.constraint(equalToConstant: image!.size.width / 4).isActive = true
imageView.heightAnchor.constraint(equalToConstant: image!.size.height / 4).isActive = true
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
composer = VideoComposer(view: view)
composer?.createVideo() { exporter in
self.didFinish(session: exporter)
}
}
func didFinish(session: AVAssetExportSession) {
guard let url = session.outputURL else {
assertionFailure()
return
}
self.showVideo(videoUrl: url)
}
func showVideo(videoUrl: URL) {
let videoPlayer = AVPlayer(url: videoUrl)
player.player = videoPlayer
self.present(player, animated: true) {
self.player.player?.play()
}
}
}
class ImageVideoCreator {
private static func pixelBuffer(fromImage image: CGImage, size: CGSize) -> CVPixelBuffer? {
let options: CFDictionary = [kCVPixelBufferCGImageCompatibilityKey as String: true, kCVPixelBufferCGBitmapContextCompatibilityKey as String: true] as CFDictionary
var pxbuffer: CVPixelBuffer? = nil
let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(size.width), Int(size.height), kCVPixelFormatType_32ARGB, options, &pxbuffer)
guard let buffer = pxbuffer, status == kCVReturnSuccess else {
return nil
}
CVPixelBufferLockBaseAddress(buffer, [])
guard let pxdata = CVPixelBufferGetBaseAddress(buffer) else {
return nil
}
let bytesPerRow = CVPixelBufferGetBytesPerRow(buffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
guard let context = CGContext(data: pxdata, width: Int(size.width), height: Int(size.height), bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue) else {
return nil
}
context.concatenate(CGAffineTransform(rotationAngle: 0))
context.draw(image, in: CGRect(x: 0, y: 0, width: size.width, height: size.height))
CVPixelBufferUnlockBaseAddress(buffer, [])
return buffer
}
static func writeSingleImageToMovie(image: UIImage, movieLength: TimeInterval, outputFileURL: URL, completion: @escaping (Bool) -> ()) {
let fileManager = FileManager.default
if fileManager.fileExists(atPath: outputFileURL.path) {
try! fileManager.removeItem(at: outputFileURL)
}
do {
let imageSize = image.size
let videoWriter = try AVAssetWriter(outputURL: outputFileURL, fileType: AVFileType.mov)
let videoSettings: [String: Any] = [AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoWidthKey: imageSize.width,
AVVideoHeightKey: imageSize.height]
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
let adaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: nil)
if !videoWriter.canAdd(videoWriterInput) {
completion(false)
return
}
videoWriterInput.expectsMediaDataInRealTime = true
videoWriter.add(videoWriterInput)
videoWriter.startWriting()
let timeScale: Int32 = 600 // recommended in CMTime for movies.
let startFrameTime = CMTimeMake(value: 0, timescale: 600)
let endFrameTime = CMTimeMakeWithSeconds(movieLength, preferredTimescale: timeScale)
videoWriter.startSession(atSourceTime: startFrameTime)
guard let cgImage = image.cgImage else {
completion(false)
return
}
let buffer: CVPixelBuffer = self.pixelBuffer(fromImage: cgImage, size: imageSize)!
while !adaptor.assetWriterInput.isReadyForMoreMediaData { usleep(10) }
let first = adaptor.append(buffer, withPresentationTime: startFrameTime)
while !adaptor.assetWriterInput.isReadyForMoreMediaData { usleep(10) }
let second = adaptor.append(buffer, withPresentationTime: endFrameTime)
videoWriterInput.markAsFinished()
videoWriter.finishWriting {
completion(true)
}
} catch {
completion(false)
}
}
}
I wanted to dig this question back up. I am having an issue overlaying two videos. I believe it has something to do with the transparency of the first AVMutableVideoCompositionLayerInstruction, but I have played around with it extensively with no luck. Any suggestions would be greatly appreciated:
func overlay(video firstAsset: AVURLAsset, withSecondVideo secondAsset: AVURLAsset) {
let mixComposition = AVMutableComposition()
let firstTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
let secondTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
guard let firstMediaTrack = firstAsset.tracks(withMediaType: AVMediaType.video).first else { return }
guard let secondMediaTrack = secondAsset.tracks(withMediaType: AVMediaType.video).first else { return }
do {
try firstTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, firstAsset.duration), of: firstMediaTrack, at: kCMTimeZero)
try secondTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, secondAsset.duration), of: secondMediaTrack, at: kCMTimeZero)
} catch (let error) {
print(error)
}
self.width = max(firstMediaTrack.naturalSize.width, secondMediaTrack.naturalSize.width)
self.height = max(firstMediaTrack.naturalSize.height, secondMediaTrack.naturalSize.height)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = CGSize(width: width!, height: height!)
videoComposition.frameDuration = firstMediaTrack.minFrameDuration
let firstLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstMediaTrack)
let scale = CGAffineTransform(scaleX: 0.3, y: 0.3)
let move = CGAffineTransform(translationX: self.width! - ((self.width! * 0.3) + 10), y: 10)
firstLayerInstruction.setTransform(scale.concatenating(move), at: kCMTimeZero)
firstLayerInstruction.setOpacity(1.0, at: kCMTimeZero)
let secondlayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: secondMediaTrack)
secondlayerInstruction.setTransform((secondTrack?.preferredTransform)!, at: kCMTimeZero)
secondlayerInstruction.setOpacity(1.0, at: kCMTimeZero)
let combined = AVMutableVideoCompositionInstruction()
combined.timeRange = CMTimeRangeMake(kCMTimeZero, max(firstAsset.duration, secondAsset.duration))
combined.backgroundColor = UIColor.clear.cgColor
combined.layerInstructions = [firstLayerInstruction, secondlayerInstruction]
videoComposition.instructions = [combined]
let outputUrl = self.getPathForTempFileNamed(filename: "output.mov")
self.exportCompositedVideo(compiledVideo: mixComposition, toURL: outputUrl, withVideoComposition: videoComposition)
self.removeTempFileAtPath(path: outputUrl.absoluteString)
}
The expected result is one video with two videos overlaid inside it: the first layer being a full-screen video and the second layer being a smaller video positioned in the upper right-hand corner. Oddly enough, when I use one AVMutableVideoCompositionInstruction and put both AVMutableVideoCompositionLayerInstructions inside its layerInstructions, it works, but the video from firstMediaTrack is used for both layers. I played around with that for a while and then tried to implement the approach detailed here, which has individual instructions for both layers, but that approach results in just the first layer showing a full-screen video with the second layer completely invisible.
Here is the code that is working for me; I based it on this tutorial. I found the key was setting the background as clear (found on this thread). It also has a scale transform in there, as I was experimenting with making one video smaller.
import AVFoundation
import AVKit
import Photos
var myurl: URL?
func newoverlay(video firstAsset: AVURLAsset, withSecondVideo secondAsset: AVURLAsset) {
// 1 - Create AVMutableComposition object. This object will hold your AVMutableCompositionTrack instances.
let mixComposition = AVMutableComposition()
// 2 - Create two video tracks
guard let firstTrack = mixComposition.addMutableTrack(withMediaType: .video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
do {
try firstTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: firstAsset.duration),
of: firstAsset.tracks(withMediaType: .video)[0],
at: CMTime.zero)
} catch {
print("Failed to load first track")
return
}
guard let secondTrack = mixComposition.addMutableTrack(withMediaType: .video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
do {
try secondTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: secondAsset.duration),
of: secondAsset.tracks(withMediaType: .video)[0],
at: CMTime.zero)
} catch {
print("Failed to load second track")
return
}
// 2.1
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: CMTimeAdd(firstAsset.duration, secondAsset.duration))
// 2.2
let firstInstruction = ViewController.videoCompositionInstruction(firstTrack, asset: firstAsset)
let scale = CGAffineTransform(scaleX: 0.3, y: 0.3)
let move = CGAffineTransform(translationX: 10, y: 10)
firstInstruction.setTransform(scale.concatenating(move), at: CMTime.zero)
let secondInstruction = ViewController.videoCompositionInstruction(secondTrack, asset: secondAsset)
// 2.3
mainInstruction.layerInstructions = [firstInstruction, secondInstruction]
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
let width = max(firstTrack.naturalSize.width, secondTrack.naturalSize.width)
let height = max(firstTrack.naturalSize.height, secondTrack.naturalSize.height)
mainComposition.renderSize = CGSize(width: width, height: height)
mainInstruction.backgroundColor = UIColor.clear.cgColor
// 4 - Get path
guard let documentDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else { return }
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: Date())
let url = documentDirectory.appendingPathComponent("mergeVideo-\(date).mov")
// Check exists and remove old file
FileManager.default.removeItemIfExisted(url as URL)
// 5 - Create Exporter
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
exporter.outputURL = url
exporter.outputFileType = AVFileType.mov
exporter.shouldOptimizeForNetworkUse = true
exporter.videoComposition = mainComposition
// 6 - Perform the Export
exporter.exportAsynchronously() {
DispatchQueue.main.async {
print("Movie complete")
self.myurl = url as URL
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: url as URL)
}) { saved, error in
if saved {
print("Saved")
}
}
self.playVideo()
}
}
}
func playVideo() {
let player = AVPlayer(url: myurl!)
let playerLayer = AVPlayerLayer(player: player)
playerLayer.frame = self.view.bounds
self.view.layer.addSublayer(playerLayer)
player.play()
print("playing...")
}
static func videoCompositionInstruction(_ track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform)
var scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.width
if assetInfo.isPortrait {
scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor), at: CMTime.zero)
} else {
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor)
.concatenating(CGAffineTransform(translationX: 0, y: UIScreen.main.bounds.width / 2))
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
let windowBounds = UIScreen.main.bounds
let yFix = assetTrack.naturalSize.height + windowBounds.height
let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: yFix)
concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
}
instruction.setTransform(concat, at: CMTime.zero)
}
return instruction
}
static func orientationFromTransform(_ transform: CGAffineTransform) -> (orientation: UIImage.Orientation, isPortrait: Bool) {
var assetOrientation = UIImage.Orientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
}
extension FileManager {
func removeItemIfExisted(_ url:URL) -> Void {
if FileManager.default.fileExists(atPath: url.path) {
do {
try FileManager.default.removeItem(atPath: url.path)
}
catch {
print("Failed to delete file")
}
}
}
}
My goal is to let the user select a video from Photos and then let them add labels over it.
Here is what I've got:
let audioAsset = AVURLAsset(url: selectedVideoURL)
let videoAsset = AVURLAsset(url: selectedVideoURL)
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let compositionAudioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
let clipAudioTrack = audioAsset.tracks(withMediaType: AVMediaTypeAudio)[0]
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
try compositionAudioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, audioAsset.duration), of: clipAudioTrack, at: kCMTimeZero)
compositionVideoTrack.preferredTransform = clipVideoTrack.preferredTransform
} catch {
print(error)
}
var videoSize = clipVideoTrack.naturalSize
if isVideoPortrait(asset: videoAsset) {
videoSize = CGSize(width: videoSize.height, height: videoSize.width)
}
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(videoLayer)
// adding label
let helloLabelLayer = CATextLayer()
helloLabelLayer.string = "Hello"
helloLabelLayer.font = "Signika-Semibold" as CFTypeRef?
helloLabelLayer.fontSize = 30.0
helloLabelLayer.contentsScale = mainScreen.scale
helloLabelLayer.alignmentMode = kCAAlignmentNatural
helloLabelLayer.frame = CGRect(x: 0.0, y: 0.0, width: 100.0, height: 50.0)
parentLayer.addSublayer(helloLabelLayer)
// creating composition
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, 30)
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
let layerInstruction = videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
if let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset640x480) {
let filename = NSTemporaryDirectory().appending("video.mov")
if FileManager.default.fileExists(atPath: filename) {
do {
try FileManager.default.removeItem(atPath: filename)
} catch {
print(error)
}
}
let url = URL(fileURLWithPath: filename)
assetExport.outputURL = url
assetExport.outputFileType = AVFileTypeMPEG4
assetExport.videoComposition = videoComp
print(NSDate().timeIntervalSince1970)
assetExport.exportAsynchronously {
print(NSDate().timeIntervalSince1970)
let library = ALAssetsLibrary()
library.writeVideoAtPath(toSavedPhotosAlbum: url, completionBlock: {
(url, error) in
switch assetExport.status {
case AVAssetExportSessionStatus.failed:
p("failed \(assetExport.error)")
case AVAssetExportSessionStatus.cancelled:
p("cancelled \(assetExport.error)")
default:
p("complete")
p(NSDate().timeIntervalSince1970)
if FileManager.default.fileExists(atPath: filename) {
do {
try FileManager.default.removeItem(atPath: filename)
} catch {
p(error)
}
}
print("Exported")
}
})
}
}
Implementation of the isVideoPortrait function:
func isVideoPortrait(asset: AVAsset) -> Bool {
var isPortrait = false
let tracks = asset.tracks(withMediaType: AVMediaTypeVideo)
if tracks.count > 0 {
let videoTrack = tracks[0]
let t = videoTrack.preferredTransform
if t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0 {
isPortrait = true
}
if t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0 {
isPortrait = true
}
if t.a == 1.0 && t.b == 0 && t.c == 0 && t.d == 1.0 {
isPortrait = false
}
if t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0 {
isPortrait = false
}
}
return isPortrait
}
And the last function for video composition layer instruction:
func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaTypeVideo)[0]
let transform = assetTrack.preferredTransform
instruction.setTransform(transform, at: kCMTimeZero)
return instruction
}
The code works well and the output video has the label, but if I select a 1-minute video, the export takes 28 seconds.
I've searched around and tried removing the layerInstruction transform, but it had no effect.
I tried adding:
assetExport.shouldOptimizeForNetworkUse = false
with no effect either.
I also tried setting AVAssetExportPresetPassthrough on the AVAssetExportSession; in that case the video exports in about 1 second, but the labels are gone.
Any help would be appreciated, because I'm stuck. Thanks for your time.
The only way I can think of is to reduce the quality via the bit rate and resolution.
This is done through a dictionary applied to the videoSettings of the export session; for this to work I had to use a framework called SDAVAssetExportSession.
Then, by changing the videoSettings, I could trade quality against speed until I found a good balance.
let compression: [String: Any] = [AVVideoAverageBitRateKey: 2_097_152, // desired bit rate
                                  AVVideoProfileLevelKey: AVVideoProfileLevelH264BaselineAutoLevel]
let videoSettings: [String: Any] = [AVVideoCodecKey: AVVideoCodecH264,
                                    AVVideoWidthKey: maxWidth,
                                    AVVideoHeightKey: maxHeight,
                                    AVVideoCompressionPropertiesKey: compression]
This was the only way I could speed things up.
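Under the assumption that SDAVAssetExportSession's API mirrors AVAssetExportSession the way its README shows (worth double-checking against the version you pull in), the settings above would plug in roughly like this, reusing mixComposition, videoComp, videoSettings, and url from the code in the question:

// Sketch only; property and method names taken from the SDAVAssetExportSession README.
if let encoder = SDAVAssetExportSession(asset: mixComposition) {
    encoder.outputFileType = AVFileTypeMPEG4
    encoder.outputURL = url
    encoder.videoComposition = videoComp            // keeps the label overlay
    encoder.videoSettings = videoSettings           // the dictionary shown above
    encoder.audioSettings = [AVFormatIDKey: kAudioFormatMPEG4AAC,
                             AVNumberOfChannelsKey: 2,
                             AVSampleRateKey: 44100,
                             AVEncoderBitRateKey: 128000]
    encoder.exportAsynchronously {
        if encoder.status == .completed {
            print("Export finished at \(url)")
        } else {
            print("Export failed: \(String(describing: encoder.error))")
        }
    }
}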
This is not directly relevant to your question, but your code here is backwards:
assetExport.exportAsynchronously {
let library = ALAssetsLibrary()
library.writeVideoAtPath(toSavedPhotosAlbum: url, completionBlock: {
switch assetExport.status {
No no no. First you complete the asset export. Then you can copy again to somewhere else if that's what you want to do. So this needs to go like this:
assetExport.exportAsynchronously {
switch assetExport.status {
case .completed:
let library = ALAssetsLibrary()
library.writeVideoAtPath...
Other comments:
ALAssetsLibrary is dead. This is not the way to copy into the user's photo library; use the Photos framework instead.
Your original code is very odd, because there are a lot of other cases you are not testing for. You are just assuming that default means .completed. That's dangerous.
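As a sketch of the shape that completion handler could take, using the Photos framework instead of ALAssetsLibrary (url and assetExport are the names from the question; the remaining cases are handled explicitly):

import Photos

assetExport.exportAsynchronously {
    switch assetExport.status {
    case .completed:
        // The file at `url` is only guaranteed to be complete here.
        PHPhotoLibrary.shared().performChanges({
            PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: url)
        }) { saved, error in
            print(saved ? "Saved to the photo library" : "Save failed: \(String(describing: error))")
        }
    case .failed, .cancelled:
        print("Export failed: \(String(describing: assetExport.error))")
    default:
        break   // .unknown, .waiting, .exporting are not terminal states
    }
}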