AVAssetExportSession export video AVFoundationErrorDomain Code=-11841 error - ios

I am trying to export a video using the following code. It works fine for the first 3 attempts and then fails on any further attempts. I am trying to add recorded voices over a video. I am pretty new to all these concepts, so any help will be appreciated.
open func generate(video url: URL, with frame: CGRect? = nil, byApplying transformation: CGAffineTransform? = nil, in previewArea: CGRect? = nil, previewCornerRadius: Float = 0, overlayImage: UIImage? = nil, setOverlayAsBackground: Bool = false, gifLayer: CALayer? = nil, audioUrl: URL? = nil, muteAudio: Bool = false, success: @escaping ((URL) -> Void), failure: @escaping ((Error) -> Void)) {
let mixComposition: AVMutableComposition = AVMutableComposition()
var mutableCompositionVideoTrack: AVMutableCompositionTrack? = nil
var mutableCompositionOriginalAudioTrack: AVMutableCompositionTrack? = nil
var mutableCompositionAudioTrack: AVMutableCompositionTrack? = nil
let totalVideoCompositionInstruction : AVMutableVideoCompositionInstruction = AVMutableVideoCompositionInstruction()
let aVideoAsset: AVAsset = AVAsset(url: url)
var aAudioAsset: AVAsset? = nil
if let url = audioUrl {
aAudioAsset = AVAsset(url: url)
}
if let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid) {
mutableCompositionVideoTrack = videoTrack
if aAudioAsset != nil, let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
mutableCompositionAudioTrack = audioTrack
}
if !muteAudio, aVideoAsset.hasAudio, let originalAudioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
/// If original audio present
mutableCompositionOriginalAudioTrack = originalAudioTrack
}
}
do {
var originalVideoSize: CGSize = self.prefferedVideoSize
let ratio = self.prefferedVideoSize.width / Utility.get9By16ScreenSize().width
if let aVideoAssetTrack: AVAssetTrack = aVideoAsset.tracks(withMediaType: .video).first {
originalVideoSize = aVideoAssetTrack.naturalSize
var transforms = aVideoAssetTrack.preferredTransform
if aVideoAsset.videoOrientation().orientation == .landscapeLeft || aVideoAsset.videoOrientation().orientation == .landscapeRight {
let ratio = self.prefferedVideoSize.width / originalVideoSize.width
let centerY: CGFloat = (self.prefferedVideoSize.height - (originalVideoSize.height * ratio)) / 2
transforms = transforms.concatenating(CGAffineTransform(translationX: 0, y: centerY).scaledBy(x: ratio, y: ratio))
}
try mutableCompositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration), of: aVideoAssetTrack, at: CMTime.zero)
if !muteAudio, aVideoAsset.hasAudio, let audioAssetTrack: AVAssetTrack = aVideoAsset.tracks(withMediaType: .audio).first {
try mutableCompositionOriginalAudioTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: audioAssetTrack.timeRange.duration), of: audioAssetTrack, at: CMTime.zero)
}
if let audioAsset = aAudioAsset, let aAudioAssetTrack: AVAssetTrack = audioAsset.tracks(withMediaType: .audio).first {
try mutableCompositionAudioTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aAudioAssetTrack.timeRange.duration), of: aAudioAssetTrack, at: CMTime.zero)
}
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration)
let mixVideoTrack = mixComposition.tracks(withMediaType: AVMediaType.video)[0]
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: mixVideoTrack)
layerInstruction.setTransform(transforms, at: CMTime.zero)
totalVideoCompositionInstruction.layerInstructions = [layerInstruction]
}
let mutableVideoComposition: AVMutableVideoComposition = AVMutableVideoComposition()
mutableVideoComposition.frameDuration = CMTimeMake(value: 1, timescale: 12)
mutableVideoComposition.renderSize = self.prefferedVideoSize
mutableVideoComposition.instructions = [totalVideoCompositionInstruction]
let parentLayer = CALayer()
parentLayer.frame = self.prefferedVideoRect
parentLayer.isGeometryFlipped = true
let videoLayer = CALayer()
videoLayer.contentsGravity = .resizeAspect
videoLayer.contentsScale = 1
videoLayer.frame = self.prefferedVideoRect
if let frame = frame {
let scalledFrame = frame.scale(by: ratio)
videoLayer.frame = scalledFrame
let videoContainerLayer = CALayer()
parentLayer.frame = self.prefferedVideoRect
parentLayer.addSublayer(videoContainerLayer)
videoContainerLayer.addSublayer(videoLayer)
if let transformation = transformation {
if let previewFrame = previewArea {
let maskLayer = CALayer()
maskLayer.backgroundColor = UIColor.black.cgColor
let scalledMaskFrame = previewFrame.scale(by: ratio)
maskLayer.frame = scalledMaskFrame
maskLayer.cornerRadius = previewCornerRadius.cgFloat
maskLayer.masksToBounds = true
videoContainerLayer.mask = maskLayer
}
videoLayer.transform = CATransform3DMakeAffineTransform(transformation)
}
} else {
parentLayer.addSublayer(videoLayer)
}
/// Add overlay if overlay image present
if let image = overlayImage {
let imageLayer = CALayer()
imageLayer.contents = image.cgImage
imageLayer.frame = self.prefferedVideoRect
imageLayer.masksToBounds = true
if setOverlayAsBackground {
parentLayer.insertSublayer(imageLayer, at: 0)
} else {
parentLayer.addSublayer(imageLayer)
}
}
/// Add GIF layer if present
if let overlay = gifLayer {
overlay.frame = CGRect(origin: CGPoint(x: (self.prefferedVideoSize.width - overlay.frame.width) / 2, y: (self.prefferedVideoSize.height - overlay.frame.height) / 2), size: overlay.frame.size)
overlay.transform = CATransform3DMakeAffineTransform(CGAffineTransform(scaleX: ratio, y: ratio))
parentLayer.addSublayer(overlay)
}
mutableVideoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let outputURL = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent("tmp-rendered-video-R6S9K2B4.m4v")
self.exportVideo(from: mixComposition, toFile: outputURL, with: mutableVideoComposition, success: success, failure: failure)
} catch{
DCDebug.print(error)
failure(error)
}
}
func exportVideo(from composition: AVComposition, toFile output: URL, with videoComposition: AVVideoComposition? = nil, success: @escaping ((URL) -> Void), failure: @escaping ((Error) -> Void)) {
do {
if FileManager.default.fileExists(atPath: output.path) {
try FileManager.default.removeItem(at: output)
}
if let exportSession = AVAssetExportSession(asset: composition, presetName: self.presetName ?? AVAssetExportPresetHighestQuality) {
exportSession.outputURL = output
exportSession.outputFileType = AVFileType.mp4
exportSession.shouldOptimizeForNetworkUse = true
if let videoComposition = videoComposition {
exportSession.videoComposition = videoComposition
}
/// try to export the file and handle the status cases
exportSession.exportAsynchronously(completionHandler: {
DispatchQueue.main.async {
switch exportSession.status {
case .completed:
success(output)
case .failed:
if let _error = exportSession.error {
failure(_error)
}
case .cancelled:
if let _error = exportSession.error {
failure(_error)
}
default:
success(output)
}
}
})
} else {
failure(VideoMakerError(error: .kFailedToStartAssetExportSession))
}
} catch {
DCDebug.print(error)
failure(error)
}
}
I am getting the following error:
Domain=AVFoundationErrorDomain Code=-11841 "Operation Stopped" UserInfo={NSLocalizedDescription=Operation Stopped, NSLocalizedFailureReason=The video could not be composed.}
The following are the time range values when the export fails:
(lldb) po aVideoAsset.tracks(withMediaType: .audio).first?.timeRange.duration
▿ Optional<CMTime>
▿ some : CMTime
- value : 1852
- timescale : 600
▿ flags : CMTimeFlags
- rawValue : 1
- epoch : 0
(lldb) po aVideoAsset.tracks(withMediaType: .video).first?.timeRange.duration
▿ Optional<CMTime>
▿ some : CMTime
- value : 1800
- timescale : 600
▿ flags : CMTimeFlags
- rawValue : 1
- epoch : 0

I solved this issue by replacing the following line:
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration)
with:
if let originalAudioTrack = mutableCompositionOriginalAudioTrack, originalAudioTrack.timeRange.duration > aVideoAssetTrack.timeRange.duration, !muteAudio, aVideoAsset.hasAudio {
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: originalAudioTrack.timeRange.duration)
} else {
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration)
}
This solved my issue, but I am not sure whether this is the correct solution to the problem or just a hack, so if anyone can provide a proper explanation of this issue and a valid solution other than this, the bounty is yours.
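For context on why the fix works: error -11841 means the AVVideoComposition is invalid, most commonly because its instructions do not cover the entire duration of the composition without gaps. In the values above, the original audio track (1852/600 s) is slightly longer than the video track (1800/600 s), so the composition's duration exceeds the single instruction's time range and the export fails. A slightly more general approach is to size the instruction to the composition's full duration after all tracks have been inserted (or, alternatively, to clamp the audio insert durations to the video track's duration, e.g. with CMTimeMinimum), and to validate the composition before exporting. Below is a minimal sketch; the function name and parameters are illustrative and not part of the original code.

import AVFoundation

func makeVideoComposition(for mixComposition: AVMutableComposition,
                          instruction: AVMutableVideoCompositionInstruction,
                          renderSize: CGSize) -> AVMutableVideoComposition {
    // Cover every inserted track (video, recorded audio, original audio) with the single
    // instruction, so no track outlives the instruction's time range.
    instruction.timeRange = CMTimeRange(start: .zero, duration: mixComposition.duration)

    let videoComposition = AVMutableVideoComposition()
    videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    videoComposition.renderSize = renderSize
    videoComposition.instructions = [instruction]

    // Optional sanity check: AVFoundation reports gaps or overlapping instructions here,
    // before the export later fails with -11841 ("The video could not be composed").
    let valid = videoComposition.isValid(for: mixComposition,
                                         timeRange: CMTimeRange(start: .zero,
                                                                duration: mixComposition.duration),
                                         validationDelegate: nil)
    assert(valid, "Video composition instructions do not cover the composition's duration")
    return videoComposition
}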

Related

Swift - Add watermark to a video is very slow

Here is my code that adds image & text overlays to a local video. The problem is that it's extremely SLOW. Any ideas how to fix it?
I would also appreciate suggestions for third-party libraries that can do watermarking.
public func addWatermark(
fromVideoAt videoURL: URL,
watermark: Watermark,
fileName: String,
onSuccess: @escaping (URL) -> Void,
onFailure: @escaping ((Error?) -> Void)
) {
let asset = AVURLAsset(url: videoURL)
let composition = AVMutableComposition()
guard
let compositionTrack = composition.addMutableTrack(
withMediaType: .video,
preferredTrackID: kCMPersistentTrackID_Invalid
),
let assetTrack = asset.tracks(withMediaType: .video).first
else {
onFailure(nil)
return
}
do {
let timeRange = CMTimeRange(start: .zero, duration: assetTrack.timeRange.duration)
try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
if let audioAssetTrack = asset.tracks(withMediaType: .audio).first,
let compositionAudioTrack = composition.addMutableTrack(
withMediaType: .audio,
preferredTrackID: kCMPersistentTrackID_Invalid
) {
try compositionAudioTrack.insertTimeRange(
timeRange,
of: audioAssetTrack,
at: .zero
)
}
} catch {
onFailure(error)
return
}
compositionTrack.preferredTransform = assetTrack.preferredTransform
let videoInfo = orientation(from: assetTrack.preferredTransform)
let videoSize: CGSize
if videoInfo.isPortrait {
videoSize = CGSize(
width: assetTrack.naturalSize.height,
height: assetTrack.naturalSize.width
)
} else {
videoSize = assetTrack.naturalSize
}
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: videoSize)
let overlayLayer = CALayer()
overlayLayer.frame = CGRect(origin: .zero, size: videoSize)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
let imageFrame = watermark.calculateImageFrame(parentSize: videoSize)
addImage(watermark.image, to: overlayLayer, frame: imageFrame)
let textOrigin = CGPoint(x: imageFrame.minX + 4, y: imageFrame.minY)
if let text = watermark.text {
addText(
text,
to: overlayLayer,
origin: textOrigin,
textAttributes: Watermark.textAttributes(type: watermark.type)
)
}
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(overlayLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = videoSize
videoComposition.frameDuration = CMTime(value: 1, timescale: 60)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
postProcessingAsVideoLayer: videoLayer,
in: outputLayer
)
videoComposition.colorPrimaries = AVVideoColorPrimaries_ITU_R_709_2
videoComposition.colorTransferFunction = "sRGB"
videoComposition.colorYCbCrMatrix = nil
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(start: .zero, duration: composition.duration)
videoComposition.instructions = [instruction]
let layerInstruction = compositionLayerInstruction(
for: compositionTrack,
assetTrack: assetTrack
)
instruction.layerInstructions = [layerInstruction]
guard let export = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPresetHighestQuality
)
else {
onFailure(nil)
return
}
let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
.appendingPathComponent(fileName)
.appendingPathExtension("mov")
export.videoComposition = videoComposition
export.outputFileType = .mov
export.outputURL = exportURL
export.exportAsynchronously {
DispatchQueue.main.async {
switch export.status {
case .completed:
onSuccess(exportURL)
default:
onFailure(export.error)
}
}
}
}
Watermark is the wrapper struct. It contains image/text, text attributes, size and other similar helpful information.
I've tried without any luck:
export.shouldOptimizeForNetworkUse = true. It did not work.
AVAssetExportPresetPassthrough instead of AVAssetExportPresetHighestQuality. It removed overlays.
I have the following code, which is relatively fast. It watermarks an 8-second video in about 2.56 seconds. When I ran it under the Metal System Trace instrument it appeared to be well balanced and GPU-accelerated the whole time. You just call exportIt().
As a side note, this code uses async/await wrappers around AVKit functions and migrates off interfaces that are deprecated as of iOS 16.
A tidied-up, working sample app with resource files is at https://github.com/faisalmemon/watermark
The core code is as follows:
//
// WatermarkHelper.swift
// watermark
//
// Created by Faisal Memon on 09/02/2023.
//
import Foundation
import AVKit
struct WatermarkHelper {
enum WatermarkError: Error {
case cannotLoadResources
case cannotAddTrack
case cannotLoadVideoTrack(Error?)
case cannotCopyOriginalAudioVideo(Error?)
case noVideoTrackPresent
case exportSessionCannotBeCreated
}
func compositionAddMediaTrack(_ composition: AVMutableComposition, withMediaType mediaType: AVMediaType) throws -> AVMutableCompositionTrack {
guard let compositionTrack = composition.addMutableTrack(
withMediaType: mediaType,
preferredTrackID: kCMPersistentTrackID_Invalid) else {
throw WatermarkError.cannotAddTrack
}
return compositionTrack
}
func loadTrack(inputVideo: AVAsset, withMediaType mediaType: AVMediaType) async throws -> AVAssetTrack? {
return try await withCheckedThrowingContinuation({
(continuation: CheckedContinuation<AVAssetTrack?, Error>) in
inputVideo.loadTracks(withMediaType: mediaType) { tracks, error in
if let tracks = tracks {
continuation.resume(returning: tracks.first)
} else {
continuation.resume(throwing: WatermarkError.cannotLoadVideoTrack(error))
}
}
})
}
func bringOverVideoAndAudio(inputVideo: AVAsset, assetTrack: AVAssetTrack, compositionTrack: AVMutableCompositionTrack, composition: AVMutableComposition) async throws {
do {
let timeRange = await CMTimeRange(start: .zero, duration: try inputVideo.load(.duration))
try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
if let audioAssetTrack = try await loadTrack(inputVideo: inputVideo, withMediaType: .audio) {
let compositionAudioTrack = try compositionAddMediaTrack(composition, withMediaType: .audio)
try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
}
} catch {
print(error)
throw WatermarkError.cannotCopyOriginalAudioVideo(error)
}
}
private func orientation(from transform: CGAffineTransform) -> (orientation: UIImage.Orientation, isPortrait: Bool) {
var assetOrientation = UIImage.Orientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
func preferredTransformAndSize(compositionTrack: AVMutableCompositionTrack, assetTrack: AVAssetTrack) async throws -> (preferredTransform: CGAffineTransform, videoSize: CGSize) {
let transform = try await assetTrack.load(.preferredTransform)
let videoInfo = orientation(from: transform)
let videoSize: CGSize
let naturalSize = try await assetTrack.load(.naturalSize)
if videoInfo.isPortrait {
videoSize = CGSize(
width: naturalSize.height,
height: naturalSize.width)
} else {
videoSize = naturalSize
}
return (transform, videoSize)
}
private func compositionLayerInstruction(for track: AVCompositionTrack, assetTrack: AVAssetTrack, preferredTransform: CGAffineTransform) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
instruction.setTransform(preferredTransform, at: .zero)
return instruction
}
private func addImage(to layer: CALayer, watermark: UIImage, videoSize: CGSize) {
let imageLayer = CALayer()
let aspect: CGFloat = watermark.size.width / watermark.size.height
let width = videoSize.width
let height = width / aspect
imageLayer.frame = CGRect(
x: 0,
y: -height * 0.15,
width: width,
height: height)
imageLayer.contents = watermark.cgImage
layer.addSublayer(imageLayer)
}
func composeVideo(composition: AVMutableComposition, videoComposition: AVMutableVideoComposition, compositionTrack: AVMutableCompositionTrack, assetTrack: AVAssetTrack, preferredTransform: CGAffineTransform) {
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(
start: .zero,
duration: composition.duration)
videoComposition.instructions = [instruction]
let layerInstruction = compositionLayerInstruction(
for: compositionTrack,
assetTrack: assetTrack, preferredTransform: preferredTransform)
instruction.layerInstructions = [layerInstruction]
}
func exportSession(composition: AVMutableComposition, videoComposition: AVMutableVideoComposition, outputURL: URL) throws -> AVAssetExportSession {
guard let export = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPresetHighestQuality)
else {
print("Cannot create export session.")
throw WatermarkError.exportSessionCannotBeCreated
}
export.videoComposition = videoComposition
export.outputFileType = .mp4
export.outputURL = outputURL
return export
}
func executeSession(_ session: AVAssetExportSession) async throws -> AVAssetExportSession.Status {
return try await withCheckedThrowingContinuation({
(continuation: CheckedContinuation<AVAssetExportSession.Status, Error>) in
session.exportAsynchronously {
DispatchQueue.main.async {
if let error = session.error {
continuation.resume(throwing: error)
} else {
continuation.resume(returning: session.status)
}
}
}
})
}
func addWatermarkTopDriver(inputVideo: AVAsset, outputURL: URL, watermark: UIImage) async throws -> AVAssetExportSession.Status {
let composition = AVMutableComposition()
let compositionTrack = try compositionAddMediaTrack(composition, withMediaType: .video)
guard let videoAssetTrack = try await loadTrack(inputVideo: inputVideo, withMediaType: .video) else {
throw WatermarkError.noVideoTrackPresent
}
try await bringOverVideoAndAudio(inputVideo: inputVideo, assetTrack: videoAssetTrack, compositionTrack: compositionTrack, composition: composition)
let transformAndSize = try await preferredTransformAndSize(compositionTrack: compositionTrack, assetTrack: videoAssetTrack)
compositionTrack.preferredTransform = transformAndSize.preferredTransform
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: transformAndSize.videoSize)
let overlayLayer = CALayer()
overlayLayer.frame = CGRect(origin: .zero, size: transformAndSize.videoSize)
addImage(to: overlayLayer, watermark: watermark, videoSize: transformAndSize.videoSize)
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: transformAndSize.videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(overlayLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = transformAndSize.videoSize
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
postProcessingAsVideoLayer: videoLayer,
in: outputLayer)
composeVideo(composition: composition, videoComposition: videoComposition, compositionTrack: compositionTrack, assetTrack: videoAssetTrack, preferredTransform: transformAndSize.preferredTransform)
let session = try exportSession(composition: composition, videoComposition: videoComposition, outputURL: outputURL)
return try await executeSession(session)
}
/// Creates a watermarked movie and saves it to the documents directory.
///
/// For an 8 second video (251 frames), this code takes 2.56 seconds on iPhone 11 producing a high quality video at 30 FPS.
/// - Returns: Time interval taken for processing.
public func exportIt() async throws -> TimeInterval {
let timeStart = Date()
guard
let filePath = Bundle.main.path(forResource: "donut-spinning", ofType: "mp4"),
let docUrl = try? FileManager.default.url(for: .documentDirectory, in: .userDomainMask, appropriateFor: nil, create: true),
let watermarkImage = UIImage(systemName: "seal") else {
throw WatermarkError.cannotLoadResources
}
let videoAsset = AVAsset(url: URL(filePath: filePath))
let outputURL = docUrl.appending(component: "watermark-donut-spinning.mp4")
try? FileManager.default.removeItem(at: outputURL)
print(outputURL)
let result = try await addWatermarkTopDriver(inputVideo: videoAsset, outputURL: outputURL, watermark: watermarkImage)
let timeEnd = Date()
let duration = timeEnd.timeIntervalSince(timeStart)
print(result)
return duration
}
}
Use the method below for very fast watermark addition to a video:
func addWatermark(inputURL: URL, outputURL: URL, handler: @escaping (_ exportSession: AVAssetExportSession?) -> Void) {
let mixComposition = AVMutableComposition()
let asset = AVAsset(url: inputURL)
let videoTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
let timerange = CMTimeRangeMake(start: CMTime.zero, duration: asset.duration)
let compositionVideoTrack:AVMutableCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))!
do {
try compositionVideoTrack.insertTimeRange(timerange, of: videoTrack, at: CMTime.zero)
compositionVideoTrack.preferredTransform = videoTrack.preferredTransform
} catch {
print(error)
}
let watermarkFilter = CIFilter(name: "CISourceOverCompositing")!
let watermarkImage = CIImage(image: UIImage(named: "waterMark")!)
let videoComposition = AVVideoComposition(asset: asset) { (filteringRequest) in
let source = filteringRequest.sourceImage.clampedToExtent()
watermarkFilter.setValue(source, forKey: "inputBackgroundImage")
let transform = CGAffineTransform(translationX: filteringRequest.sourceImage.extent.width - (watermarkImage?.extent.width)! - 2, y: 0)
watermarkFilter.setValue(watermarkImage?.transformed(by: transform), forKey: "inputImage")
filteringRequest.finish(with: watermarkFilter.outputImage!, context: nil)
}
guard let exportSession = AVAssetExportSession(asset: asset, presetName: AVAssetExportPreset640x480) else {
handler(nil)
return
}
exportSession.outputURL = outputURL
exportSession.outputFileType = AVFileType.mp4
exportSession.shouldOptimizeForNetworkUse = true
exportSession.videoComposition = videoComposition
exportSession.exportAsynchronously { () -> Void in
handler(exportSession)
}
}
Call the method like this when you want to add a watermark:
let outputURL = NSURL.fileURL(withPath: "TempPath")
let inputURL = NSURL.fileURL(withPath: "VideoWithWatermarkPath")
addWatermark(inputURL: inputURL, outputURL: outputURL, handler: { (exportSession) in
guard let session = exportSession else {
// Error
return
}
switch session.status {
case .completed:
guard NSData(contentsOf: outputURL) != nil else {
// Error
return
}
// Now you can find the video with the watermark in the location outputURL
default:
// Error
break
}
})
Have you checked out Apple's documentation? It adds a title layer (CALayer) on top of an existing AVMutableComposition or an AVAsset. Since it's a legacy doc from iOS 6, you'll need to refactor a bit, but it should be fast on today's hardware.
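For reference, here is a rough, modernized sketch of the technique that legacy document describes (a CATextLayer composited through AVVideoCompositionCoreAnimationTool). The function name and sizes are illustrative, and you still need an AVMutableVideoCompositionInstruction with a layer instruction covering the composition, as in the code above.

import AVFoundation
import UIKit

func titleOverlayComposition(renderSize: CGSize, title: String) -> AVMutableVideoComposition {
    // The video frames are rendered into this layer by the animation tool.
    let videoLayer = CALayer()
    videoLayer.frame = CGRect(origin: .zero, size: renderSize)

    // Title text drawn on top of the video.
    let titleLayer = CATextLayer()
    titleLayer.string = title
    titleLayer.fontSize = 36
    titleLayer.alignmentMode = .center
    titleLayer.frame = CGRect(x: 0, y: renderSize.height - 80, width: renderSize.width, height: 60)

    let parentLayer = CALayer()
    parentLayer.frame = CGRect(origin: .zero, size: renderSize)
    parentLayer.addSublayer(videoLayer)
    parentLayer.addSublayer(titleLayer)

    let videoComposition = AVMutableVideoComposition()
    videoComposition.renderSize = renderSize
    videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer,
                                                                         in: parentLayer)
    return videoComposition
}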

AVAssetExportSession Crop Video Transformation problem

I am trying to crop a video with AVMutableComposition. The exported video seems rotated by an unknown angle, and it does not even pick the correct region of the video. I have debugged the frame, and the provided crop frame is correct. Even when the video's preferredTransform is the identity, it still does not crop the video correctly.
My code is below:
public func exportVideo(asset: AVAsset, destinationDirectory: URL? = nil, editingOptions: MediaEditingOptions? = nil,
completion: @escaping VideoExportCompletion) -> ExportOperation? {
let composition = AVMutableComposition()
guard let videoTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
let audioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) else {
completion(.failure(Error.couldNotAddMutableTracks))
return nil
}
let timeRange = editingOptions?.videoEditingOptions.timeRange ?? CMTimeRange(start: .zero, duration: asset.duration)
if let videoAssetTrack = asset.tracks(withMediaType: .video).first {
do {
try videoTrack.insertTimeRange(timeRange, of: videoAssetTrack, at: .zero)
} catch {
completion(.failure(error))
return nil
}
} else {
completion(.failure(Error.couldNotFindVideoTrack))
return nil
}
if let audioAssetTrack = asset.tracks(withMediaType: .audio).first {
do {
try audioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
} catch {
completion(.failure(error))
return nil
}
}
let preset = editingOptions?.videoEditingOptions.preset ?? VideoEditingOptions.defaultPreset
guard let exportSession = AVAssetExportSession(asset: composition, presetName: preset) else {
completion(.failure(Error.couldNotInitiateExportSession))
return nil
}
guard var exportURL = destinationDirectory ?? cacheDirectoryURL() else {
completion(.failure(Error.couldNotDetermineOutputDirectory))
return nil
}
/// Prepare Instructions
let compositionInstructions = AVMutableVideoCompositionInstruction()
compositionInstructions.timeRange = CMTimeRange(start: .zero, duration: asset.duration)
let layerInstructions = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let cropFrame = editingOptions?.scrollBounds ?? CGRect(x: 0, y: 0, width: videoTrack.naturalSize.width, height: videoTrack.naturalSize.height)
let transform = getTransform(for: videoTrack, cropFrame: cropFrame)
layerInstructions.setTransform(transform, at: .zero)
layerInstructions.setOpacity(1.0, at: CMTime.zero)
compositionInstructions.layerInstructions = [layerInstructions]
/// Add video composition.
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = cropFrame.size
videoComposition.instructions = [compositionInstructions]
videoComposition.frameDuration = videoTrack.minFrameDuration
exportURL.appendPathComponent(UUID().uuidString + ".mp4")
exportSession.videoComposition = videoComposition
exportSession.outputURL = exportURL
exportSession.outputFileType = AVFileType.mov
exportSession.exportAsynchronously {
if exportSession.status == .failed {
completion(.failure(exportSession.error ?? Error.unknown))
} else if exportSession.status == .completed {
completion(.success(exportURL))
} else {
completion(.failure(Error.unknown))
}
}
return exportSession
}
func getTransform(for videoTrack: AVAssetTrack, cropFrame: CGRect) -> CGAffineTransform {
let renderSize = cropFrame.size
let renderScale = renderSize.width / cropFrame.width
let offset = CGPoint(x: -cropFrame.origin.x, y: -cropFrame.origin.y)
let rotation = atan2(videoTrack.preferredTransform.b, videoTrack.preferredTransform.a)
var rotationOffset = CGPoint(x: 0, y: 0)
if videoTrack.preferredTransform.b == -1.0 {
rotationOffset.y = videoTrack.naturalSize.width
} else if videoTrack.preferredTransform.c == -1.0 {
rotationOffset.x = videoTrack.naturalSize.height
} else if videoTrack.preferredTransform.a == -1.0 {
rotationOffset.x = videoTrack.naturalSize.width
rotationOffset.y = videoTrack.naturalSize.height
}
var transform = CGAffineTransform.identity
transform = transform.scaledBy(x: renderScale, y: renderScale)
transform = transform.translatedBy(x: offset.x + rotationOffset.x, y: offset.y + rotationOffset.y)
transform = transform.rotated(by: rotation)
print("track size \(videoTrack.naturalSize)")
print("preferred Transform = \(videoTrack.preferredTransform)")
print("rotation angle \(rotation)")
print("rotation offset \(rotationOffset)")
print("actual Transform = \(transform)")
return transform
}
I don't see anything obviously wrong here. Am I missing anything?
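One thing worth checking is the order in which the transforms are combined. A common convention (a minimal sketch below, assuming the crop rect is expressed in the upright, display-oriented coordinate space) is to apply the track's preferredTransform first, so the frame is upright, and only then translate the crop origin to (0, 0), keeping renderSize equal to cropFrame.size.

func cropTransform(for videoTrack: AVAssetTrack, cropFrame: CGRect) -> CGAffineTransform {
    // preferredTransform maps the stored (natural) pixels into upright display space.
    let orient = videoTrack.preferredTransform
    // Translating after the orientation keeps the crop rect meaningful in display coordinates.
    let shift = CGAffineTransform(translationX: -cropFrame.origin.x, y: -cropFrame.origin.y)
    return orient.concatenating(shift)
}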

AVFoundation - Merge multiple video - add animation in between videos

I am trying to merge images and video clips together. I provide an option to add an animation between videos and images. There are a few options such as fade-in, fade-out, rotate, slide-up, slide-down, left, right, etc. For images I am able to add an animation, but how do I add an animation for video? Specifically, when one video clip finishes and another video clip is about to start, that is where I want to add the animation. My merging functionality itself is working well; only the animation between the videos is missing.
I have tried with:
instruction.setOpacityRamp(fromStartOpacity: <#T##Float#>, toEndOpacity: <#T##Float#>, timeRange: <#T##CMTimeRange#>)
but this option only produces a fade-in/fade-out effect. Where and how do I add the other custom animation options?
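(For reference, besides setOpacityRamp, AVMutableVideoCompositionLayerInstruction also offers setTransformRamp(fromStart:toEnd:timeRange:) and setCropRectangleRamp, which can produce slide-style transitions; the catch is that the outgoing and incoming clips have to overlap in time on two separate video tracks for the ramp to be visible. A minimal sketch with hypothetical names follows.)

// Hypothetical helper: `fromInstruction`/`toInstruction` are the layer instructions of the
// outgoing and incoming clips, `transitionRange` is the time range where the clips overlap,
// and `outputSize` is the render size used in the code below.
func addSlideLeftTransition(from fromInstruction: AVMutableVideoCompositionLayerInstruction,
                            to toInstruction: AVMutableVideoCompositionLayerInstruction,
                            during transitionRange: CMTimeRange,
                            outputSize: CGSize) {
    let width = outputSize.width
    // Outgoing clip slides off to the left...
    fromInstruction.setTransformRamp(fromStart: .identity,
                                     toEnd: CGAffineTransform(translationX: -width, y: 0),
                                     timeRange: transitionRange)
    // ...while the incoming clip slides in from the right.
    toInstruction.setTransformRamp(fromStart: CGAffineTransform(translationX: width, y: 0),
                                   toEnd: .identity,
                                   timeRange: transitionRange)
}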
Here is my source code for merging. There are many dependent functions in the code, but I have posted only the merging functionality. I have added the comment //HERE TO ADD THE ANIMATION so that you can jump directly to the point where I am trying to add the animations.
func merge(allAssets: [MovieAssetPresentable], isHDR: Bool, success: @escaping (URL?) -> (Void), progress: @escaping (CGFloat) -> (Void), failed: @escaping (String?) -> (Void)) {
cancelExport()
let defaultSize = isHDR ? self.videoOutputResolution.HD : self.videoOutputResolution.lowQuality
let videoPresetName = self.getPresetName(resolution: defaultSize)
self.mergeSuccess = success
self.mergeError = failed
self.mergeProgress = progress
let mixComposition = AVMutableComposition()
let mainInstruction = AVMutableVideoCompositionInstruction()
var layerInstructions = [AVMutableVideoCompositionLayerInstruction]()
guard let urlVideoForBackground = Bundle.main.url(forResource: "black", withExtension: "mov") else {
self.mergeError("Need black background video !")
return
}
let assetForBackground = AVAsset(url: urlVideoForBackground)
let trackForBackground = assetForBackground.tracks(withMediaType: AVMediaType.video).first
//Set output size
var outputSize = CGSize.zero
for asset in allAssets.filter({$0.assetType! == .video}) {
guard let videoAsset = asset.asset else { continue }
// Get video track
guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else { continue }
let assetInfo = self.orientationFromTransform(videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
if videoSize.height > outputSize.height {
outputSize = CGSize(width: defaultSize.width, height: ((videoSize.height / videoSize.width) * defaultSize.width))
}
}
if outputSize == CGSize.zero {
outputSize = defaultSize
}
debugPrint("OUTPUT SIZE: \(outputSize)")
let layerContentsGravity = VideoSettings.shared.fetchVideoFitClips()
var layerImages = [CALayer]()
var insertTime = CMTime.zero
var audioMixInputParameters = [AVMutableAudioMixInputParameters]()
// Init Video layer
let videoLayer = CALayer()
videoLayer.frame = CGRect.init(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
videoLayer.contentsGravity = layerContentsGravity
let parentlayer = CALayer()
parentlayer.frame = CGRect.init(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
parentlayer.addSublayer(videoLayer)
for asset in allAssets.filter({$0.assetType! == .image || $0.assetType! == .video}) {
//Video speed level
let videoSpeed = Double(asset.videoSpeedLevel!)
if asset.assetType! == .video {
//Video asset
let ast = asset.asset!
let duration = asset.endTime! - asset.beginTime! //ast.duration
//Create AVMutableCompositionTrack object
guard let track = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
self.mergeError("Unable to create track.")
continue
}
//Add original video sound track
let originalSoundTrack: AVMutableCompositionTrack?
if asset.asset!.tracks(withMediaType: .audio).count > 0 {
originalSoundTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try originalSoundTrack?.insertTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), of: ast.tracks(withMediaType: AVMediaType.audio)[0], at: insertTime)
} catch {
self.mergeError("Unable to create original audio track.")
continue
}
//Set video original sound track speed
originalSoundTrack?.scaleTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), toDuration: CMTime(value: CMTimeValue(Double(duration.value) * videoSpeed), timescale: duration.timescale))
let audioInputParams = AVMutableAudioMixInputParameters(track: originalSoundTrack)
audioInputParams.setVolume(asset.videoOriginalVolume!, at: CMTime.zero)
audioInputParams.trackID = originalSoundTrack?.trackID ?? kCMPersistentTrackID_Invalid
audioMixInputParameters.append(audioInputParams)
}
//Set time range
do {
try track.insertTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration),
of: ast.tracks(withMediaType: AVMediaType.video)[0],
at: insertTime)
} catch let err {
self.mergeError("Failed to load track: \(err.localizedDescription)")
continue
}
//Set video speed
track.scaleTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), toDuration: CMTime(value: CMTimeValue(Double(duration.value) * videoSpeed), timescale: duration.timescale))
insertTime = CMTimeAdd(insertTime, duration)
let instruction = self.videoCompositionInstruction(track, asset: ast, outputSize: outputSize)
// let instruction = videoCompositionInstructionForTrack(track: t, asset: ast, standardSize: outputSize, atTime: insertTime)
instruction.setOpacity(0.0, at: insertTime)
//HERE TO ADD THE ANIMATION
layerInstructions.append(instruction)
} else {
//Image data
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let defaultImageTime = CMTimeGetSeconds(asset.endTime!) - CMTimeGetSeconds(asset.beginTime!)
let duration = CMTime.init(seconds:defaultImageTime, preferredTimescale: assetForBackground.duration.timescale)
do {
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: duration),
of: trackForBackground!,
at: insertTime)
}
catch {
self.mergeError("Background time range error")
}
guard let image = UIImage(data: asset.imageData!) else { continue }
// Create Image layer
let imageLayer = CALayer()
imageLayer.frame = CGRect.init(origin: CGPoint.zero, size: outputSize)
imageLayer.contents = image.cgImage
imageLayer.opacity = 0
imageLayer.contentsGravity = layerContentsGravity
self.setOrientation(image: image, onLayer: imageLayer)
// Add Fade in & Fade out animation
let fadeInAnimation = CABasicAnimation.init(keyPath: "opacity")
fadeInAnimation.duration = 1
fadeInAnimation.fromValue = NSNumber(value: 0)
fadeInAnimation.toValue = NSNumber(value: 1)
fadeInAnimation.isRemovedOnCompletion = false
fadeInAnimation.beginTime = CMTimeGetSeconds(insertTime) == 0 ? 0.05: CMTimeGetSeconds(insertTime)
fadeInAnimation.fillMode = CAMediaTimingFillMode.forwards
imageLayer.add(fadeInAnimation, forKey: "opacityIN")
let fadeOutAnimation = CABasicAnimation.init(keyPath: "opacity")
fadeOutAnimation.duration = 1
fadeOutAnimation.fromValue = NSNumber(value: 1)
fadeOutAnimation.toValue = NSNumber(value: 0)
fadeOutAnimation.isRemovedOnCompletion = false
fadeOutAnimation.beginTime = CMTimeGetSeconds(CMTimeAdd(insertTime, duration))
fadeOutAnimation.fillMode = CAMediaTimingFillMode.forwards
imageLayer.add(fadeOutAnimation, forKey: "opacityOUT")
layerImages.append(imageLayer)
// Increase the insert time
insertTime = CMTimeAdd(insertTime, duration)
}
}
// Add Image layers
for layer in layerImages {
parentlayer.addSublayer(layer)
}
//Add Water mark if Subscription not activated
if !AddManager.shared.hasActiveSubscription {
let imglogo = UIImage(named: "watermark")
let waterMarklayer = CALayer()
waterMarklayer.contents = imglogo?.cgImage
let sizeOfWaterMark = Utility.getWaterMarkSizeWithVideoSize(videoSize: outputSize, defaultSize: waterMarkSize)
debugPrint("sizeOfWaterMark=\(sizeOfWaterMark)")
waterMarklayer.frame = CGRect(x: outputSize.width - (sizeOfWaterMark.width+10), y: 5, width: sizeOfWaterMark.width, height: sizeOfWaterMark.height)
waterMarklayer.contentsGravity = .resizeAspect
waterMarklayer.opacity = 1.0
parentlayer.addSublayer(waterMarklayer)
}
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: insertTime)
mainInstruction.layerInstructions = layerInstructions
mainInstruction.backgroundColor = VideoSettings.shared.fetchVideoBackgroundColor().color.cgColor
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mainComposition.renderSize = outputSize
mainComposition.renderScale = 1.0
mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentlayer)
for audioAsset in allAssets.filter({$0.assetType! == .audio}) {
//NOTE: If you need to increase/decrease the audio fade-in/fade-out effect time, modify the fadeInFadeOutEffectTiming variable (in seconds)
let fadeInFadeOutEffectTiming = Double(3) //seconds
let volumeLevel = audioAsset.audioVolumeLevel!
let isFadeIn = audioAsset.audioFadeInEffect!
let isFadeOut = audioAsset.audioFadeOutEffect!
var audioBeginTime = audioAsset.beginTime!
var audioEndTime = audioAsset.endTime!
var audioTrackTime = audioAsset.audioTrackStartTime!
var trimmedAudioDuration = CMTimeSubtract(audioEndTime, audioBeginTime)
//If the audio starting position (in seconds) is greater than or equal to zero (relative to the video timeline)
if CMTimeGetSeconds(CMTimeAdd(audioTrackTime, audioBeginTime)) >= 0 {
//If the audio starting position (in seconds) is beyond the video length, i.e. the total video length is 20 seconds but the audio starts at 24 seconds, we should not add the audio
if CMTimeCompare(CMTimeAdd(audioTrackTime, audioBeginTime), insertTime) == 1 {
trimmedAudioDuration = CMTime.zero
} else {
//If audio start position (seconds) + crop length is exceed total video length, we should add only the part within the video
if CMTimeCompare(CMTimeAdd(CMTimeAdd(audioTrackTime, audioBeginTime), trimmedAudioDuration), insertTime) == 1 {
audioTrackTime = CMTimeAdd(audioTrackTime, audioBeginTime)
trimmedAudioDuration = CMTimeSubtract(insertTime, audioTrackTime)
} else {
audioTrackTime = CMTimeAdd(audioTrackTime, audioBeginTime)
}
}
}
//If audio start time is in negative (second)
else {
//If audio crop length is in negative (second)
if CMTimeCompare(CMTimeAdd(CMTimeAdd(audioTrackTime, audioBeginTime), trimmedAudioDuration), CMTime.zero) == -1 {
trimmedAudioDuration = CMTime.zero
} else {
audioBeginTime = CMTime(seconds: abs(CMTimeGetSeconds(audioTrackTime)), preferredTimescale: audioTrackTime.timescale)
audioTrackTime = CMTime.zero
trimmedAudioDuration = CMTimeSubtract(audioEndTime, audioBeginTime)
if CMTimeCompare(trimmedAudioDuration, insertTime) == 1 {
trimmedAudioDuration = insertTime
}
}
}
if trimmedAudioDuration != CMTime.zero {
audioEndTime = CMTimeAdd(audioTrackTime, trimmedAudioDuration)
let audioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try audioTrack?.insertTimeRange(CMTimeRangeMake(start: audioBeginTime , duration: trimmedAudioDuration),
of: audioAsset.asset!.tracks(withMediaType: AVMediaType.audio)[0] ,
at: audioTrackTime)
let audioInputParams = AVMutableAudioMixInputParameters(track: audioTrack)
var effectTime = CMTime(seconds: fadeInFadeOutEffectTiming, preferredTimescale: 600)
if CMTimeCompare(trimmedAudioDuration, CMTimeMultiply(effectTime, multiplier: 2)) == -1 {
effectTime = CMTime(seconds: CMTimeGetSeconds(trimmedAudioDuration) / 2, preferredTimescale: 600)
}
//Fade in effect
audioInputParams.setVolumeRamp(fromStartVolume: isFadeIn ? 0 : volumeLevel, toEndVolume: volumeLevel, timeRange: CMTimeRange(start: audioTrackTime, duration: effectTime))
//Fade out effect
audioInputParams.setVolumeRamp(fromStartVolume: volumeLevel, toEndVolume: isFadeOut ? 0 : volumeLevel, timeRange: CMTimeRange(start: CMTimeSubtract(audioEndTime, effectTime), duration: effectTime))
audioInputParams.trackID = audioTrack?.trackID ?? kCMPersistentTrackID_Invalid
audioMixInputParameters.append(audioInputParams)
} catch {
print("Failed to load Audio track")
}
}
}
// 4 - Get path
guard let url = Utility.createFileAtDocumentDirectory(name: "mergeVideo-\(Date().timeIntervalSince1970).mp4") else {
debugPrint("Unable to file at document directory")
return
}
// 5 - Create Exporter
self.exporter = AVAssetExportSession(asset: mixComposition, presetName: videoPresetName)
guard let exp = self.exporter else {
debugPrint("Unable to export.")
return
}
let audioMix = AVMutableAudioMix()
audioMix.inputParameters = audioMixInputParameters
exp.outputURL = url
exp.outputFileType = AVFileType.mp4
exp.shouldOptimizeForNetworkUse = true
exp.videoComposition = mainComposition
exp.audioMix = audioMix
//self.viewPieProgress.setProgress(0.0, animated: false)
//viewPieProgress.isHidden = isHDR
//timer for progress
self.timer = Timer.scheduledTimer(timeInterval: 1.0, target: self, selector: #selector(self.updateExportingProgress(timer:)), userInfo: exp, repeats: true)
// 6 - Perform the Export
exp.exportAsynchronously() {
DispatchQueue.main.async {
self.exportDidFinish(exp)
}
}
}
I have tried many options, but nothing meets my requirement. Please help me out.
If you require any other information from me, please feel free to comment on this post.
Thanks in advance.

How to position the CALayer in a video?

I have a UIView (size W: 375, H: 667) with an image that can be placed anywhere inside it. Later this image is overlaid onto a video and the result is saved. My problem is that when I watch the video, the image is not at the position chosen in my UIView, because my video has a size of 720 x 1280. How can I map the position of the image chosen in my UIView into a video of size 720 x 1280?
This is the code I'm using:
private func watermark(video videoAsset:AVAsset,modelView:MyViewModel, watermarkText text : String!, imageName name : String!, saveToLibrary flag : Bool, watermarkPosition position : QUWatermarkPosition, completion : ((_ status : AVAssetExportSession.Status?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
DispatchQueue.global(qos: DispatchQoS.QoSClass.default).async {
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let clipVideoTrack:AVAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
do {
try compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: videoAsset.duration), of: clipVideoTrack, at: CMTime.zero)
}
catch {
print(error.localizedDescription)
}
let videoSize = self.resolutionSizeForLocalVideo(asset: clipVideoTrack)
print("DIMENSIONE DEL VIDEO W: \(videoSize.width) H: \(videoSize.height)")
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(videoLayer)
//My layer image
let layerTest = CALayer()
layerTest.frame = modelView.frame
layerTest.contents = modelView.image.cgImage
print("A: \(modelView.frame.origin.y) - \(modelView.frame.origin.x)")
print("B: \(layerTest.frame.origin.y) - \(layerTest.frame.origin.x)")
parentLayer.addSublayer(layerTest)
print("PARENT: \(parentLayer.frame.origin.y) - \(parentLayer.frame.origin.x)")
//------------------------
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(value: 1, timescale: 30)
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: mixComposition.duration)
let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack!, asset: videoAsset)
layerInstruction.setTransform((clipVideoTrack.preferredTransform), at: CMTime.zero)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: Date())
let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("watermarkVideo-\(date).mp4")
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = url
exporter?.outputFileType = AVFileType.mp4
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = videoComp
exporter?.exportAsynchronously() {
DispatchQueue.main.async {
if exporter?.status == AVAssetExportSession.Status.completed {
let outputURL = exporter?.outputURL
if flag {
// Save to library
// let library = ALAssetsLibrary()
if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
}) { saved, error in
if saved {
completion!(AVAssetExportSession.Status.completed, exporter, outputURL)
}
}
}
// if library.videoAtPathIs(compatibleWithSavedPhotosAlbum: outputURL) {
// library.writeVideoAtPathToSavedPhotosAlbum(outputURL,
// completionBlock: { (assetURL:NSURL!, error:NSError!) -> Void in
//
// completion!(AVAssetExportSessionStatus.Completed, exporter, outputURL)
// })
// }
} else {
completion!(AVAssetExportSession.Status.completed, exporter, outputURL)
}
} else {
// Error
completion!(exporter?.status, exporter, nil)
}
}
}
}
}
private func videoCompositionInstructionForTrack(track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
let scale : CGAffineTransform = CGAffineTransform(scaleX: 1, y:1)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scale), at: CMTime.zero)
return instruction
}
This is what I would like to get:
The answers to this question may be helpful. I faced a similar problem when attempting to position user-generated text over a video. This is what worked for me:
First, I added a helper method to convert a CGPoint from one rect to another:
func convertPoint(point: CGPoint, fromRect: CGRect, toRect: CGRect) -> CGPoint {
return CGPoint(x: (toRect.size.width / fromRect.size.width) * point.x, y: (toRect.size.height / fromRect.size.height) * point.y)
}
I positioned my text view (in your case, an image view) using its center point. Here's how you could calculate an adjusted center point using the helper method:
let adjustedCenter = convertPoint(point: imageView.center, fromRect: view.frame, toRect: CGRect(x: 0, y: 0, width: 720.0, height: 1280.0))
I had to do some extra positioning after that because the coordinate system for CALayers is flipped, so this is what the final point might look like:
let finalCenter = CGPoint(x: adjustedCenter.x, y: (1280.0 - adjustedCenter.y) - (imageView.bounds.height / 2.0))
Then you would set your CALayer's position property to that point.
layerTest.position = finalCenter
Hope that helps!
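Putting the steps together, a small helper might look like this (a sketch that reuses the convertPoint helper above and assumes the 375 x 667 design view and 720 x 1280 render size from the question):

func layerPosition(for imageView: UIImageView, in designView: UIView,
                   renderSize: CGSize = CGSize(width: 720, height: 1280)) -> CGPoint {
    let adjustedCenter = convertPoint(point: imageView.center,
                                      fromRect: designView.frame,
                                      toRect: CGRect(origin: .zero, size: renderSize))
    // CALayer coordinates are flipped vertically relative to UIKit, so mirror the y value.
    return CGPoint(x: adjustedCenter.x,
                   y: (renderSize.height - adjustedCenter.y) - (imageView.bounds.height / 2.0))
}
// Usage: layerTest.position = layerPosition(for: imageView, in: view)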

AVMutableVideoComposition output video shrinked

I'm a newbie to Swift. I'm trying to add a watermark based on code from SO. My original video resolution is 1280 x 720, but the output video is a shrunk version.
Here are the before and after pictures.
Here is my function to create a watermark.
private func watermark(video videoAsset:AVAsset, watermarkText text : String!, image : CGImage!, saveToLibrary flag : Bool, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
DispatchQueue.global(qos: DispatchQoS.QoSClass.default).async {
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
}
catch {
print(error.localizedDescription)
}
let videoSize = clipVideoTrack.naturalSize
print("Video size", videoSize.height) //720
print("Video size", videoSize.width) //1280
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0.0,
y: 0.0,
width: videoSize.width,
height: videoSize.height)
videoLayer.frame = CGRect(x: 0.0,
y: 0.0,
width: videoSize.width,
height: videoSize.height)
parentLayer.addSublayer(videoLayer)
if text != nil {
let titleLayer = CATextLayer()
titleLayer.backgroundColor = UIColor.red.cgColor
titleLayer.string = text
titleLayer.font = "Helvetica" as CFTypeRef
titleLayer.fontSize = 15
titleLayer.alignmentMode = kCAAlignmentCenter
titleLayer.bounds = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
parentLayer.addSublayer(titleLayer)
} else if image != nil {
let imageLayer = CALayer()
imageLayer.contents = image
let width: CGFloat = (self.imageView.image?.size.width)!
let height: CGFloat = (self.imageView.image?.size.height)!
print("Video size", height) //720
print("Video size", width) //1280
imageLayer.frame = CGRect(x: 0.0, y: 0.0, width: width, height: height)
imageLayer.opacity = 0.65
parentLayer.addSublayer(imageLayer)
}
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, Int32(clipVideoTrack.nominalFrameRate))
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
_ = mixComposition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: Date())
let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("watermarkVideo-\(date).mov")
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = url
exporter?.outputFileType = AVFileTypeQuickTimeMovie
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = videoComp
exporter?.exportAsynchronously() {
DispatchQueue.main.async {
if exporter?.status == AVAssetExportSessionStatus.completed {
let outputURL = exporter?.outputURL
if flag {
if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
}) { saved, error in
if saved {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
}
}
} else {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
} else {
// Error
completion!(exporter?.status, exporter, nil)
}
}
}
}
}
While the size of the watermark image is correct, the video is shrunk.
Can you try this function?
private func watermark(video videoAsset: AVAsset, watermarkText text : String!, image : CGImage!, saveToLibrary flag : Bool, completion : ((_ status : AVAssetExportSessionStatus?, _ session: AVAssetExportSession?, _ outputURL : URL?) -> ())?) {
DispatchQueue.global(qos: DispatchQoS.QoSClass.default).async {
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let clipVideoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
do {
try compositionVideoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: clipVideoTrack, at: kCMTimeZero)
}
catch {
print(error.localizedDescription)
}
let videoSize = clipVideoTrack.naturalSize
let parentLayer = CALayer()
let videoLayer = CALayer()
parentLayer.frame = CGRect(x: 0.0,
y: 0.0,
width: videoSize.width,
height: videoSize.height)
videoLayer.frame = CGRect(x: 0.0,
y: 0.0,
width: videoSize.width,
height: videoSize.height)
parentLayer.addSublayer(videoLayer)
// if text != nil {
// let titleLayer = CATextLayer()
// titleLayer.backgroundColor = UIColor.red.cgColor
// titleLayer.string = text
// titleLayer.font = "Helvetica" as CFTypeRef
// titleLayer.fontSize = 15
// titleLayer.alignmentMode = kCAAlignmentCenter
// titleLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
// parentLayer.addSublayer(titleLayer)
// } else
if image != nil {
let imageLayer = CALayer()
imageLayer.contents = image
let width: CGFloat = (self.imageView.image?.size.width)!
let height: CGFloat = (self.imageView.image?.size.height)!
//
print("Video size", height)
print("Video size", width)
imageLayer.frame = CGRect(x: 0, y: 0, width: width, height: height)
// imageLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
imageLayer.opacity = 1
parentLayer.addSublayer(imageLayer)
}
let videoComp = AVMutableVideoComposition()
videoComp.renderSize = videoSize
videoComp.frameDuration = CMTimeMake(1, Int32(clipVideoTrack.nominalFrameRate))
videoComp.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRangeMake(kCMTimeZero, mixComposition.duration)
let videotrack = mixComposition.tracks(withMediaType: AVMediaTypeVideo)[0] as AVAssetTrack
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videotrack)
// let layerInstruction = self.videoCompositionInstructionForTrack(track: compositionVideoTrack, asset: videoAsset)
instruction.layerInstructions = [layerInstruction]
videoComp.instructions = [instruction]
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: Date())
let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("watermarkVideo-\(date).mp4")
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return}
exporter.videoComposition = videoComp
exporter.outputFileType = AVFileTypeMPEG4
exporter.outputURL = url
exporter.exportAsynchronously() {
DispatchQueue.main.async {
if exporter.status == AVAssetExportSessionStatus.completed {
let outputURL = exporter.outputURL
if flag {
// Save to library
// let library = ALAssetsLibrary()
if UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(outputURL!.path) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: outputURL!)
}) {
saved, error in
if saved {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
}
}
// if library.videoAtPathIs(compatibleWithSavedPhotosAlbum: outputURL) {
// library.writeVideoAtPathToSavedPhotosAlbum(outputURL,
// completionBlock: { (assetURL:NSURL!, error:NSError!) -> Void in
//
// completion!(AVAssetExportSessionStatus.Completed, exporter, outputURL)
// })
// }
} else {
completion!(AVAssetExportSessionStatus.completed, exporter, outputURL)
}
} else {
// Error
completion!(exporter.status, exporter, nil)
}
}
}
}
}
The code above for creating the watermarked video seems not to be the reason for the smaller output resolution.
Problem
The resolution depends on what kind of AVAsset is put into the watermark method.
Example:
Frequently an UIImagePickerController is used. There is the delegate method
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any])
There one can often see something like this:
let url = info[UIImagePickerControllerMediaURL] as? URL
let videoAsset = AVAsset(url: url!)
self.watermark(video: videoAsset, watermarkText: nil, image: self.imageView.image?.cgImage ...
But with the lines above a downsized input video is used, e.g. instead of having a video at 1920x1080 one gets a reduced video size of 1280x720.
Solution
A method for determining the AVAsset from the PHAsset could look like this:
private func videoAsset(for asset: PHAsset, completion: #escaping (AVAsset?) -> Void) {
let requestOptions = PHVideoRequestOptions()
requestOptions.version = .original
PHImageManager.default().requestAVAsset(forVideo: asset, options: requestOptions, resultHandler: {
(avAsset, avAudioMix, info) in
completion(avAsset)
})
}
And where to get the PHAsset from? It can also be determined in the didFinishPickingMediaWithInfo method by using UIImagePickerControllerPHAsset:
let asset = info[UIImagePickerControllerPHAsset] as? PHAsset
Quick Test
For a quick test one could use:
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
if let asset = info[UIImagePickerControllerPHAsset] as? PHAsset {
picker.dismiss(animated: true, completion: { [weak self] in
self?.videoAsset(for: asset, completion: { (avAsset) in
if let videoAsset = avAsset {
DispatchQueue.main.async {
self?.watermark(video: videoAsset, watermarkText: nil, image: self?.imageView.image?.cgImage, saveToLibrary: true) { (exportStat: AVAssetExportSessionStatus? , session: AVAssetExportSession?, url: URL?) in
print("url: \(String(describing: url?.debugDescription))")
}
}
}
})
})
}
}
The result is a video in the original resolution with a watermark on the lower left, see screenshot of resulting video:
