Swift: Output file of AssetWriter not available to AVAsset on first creation - ios

I am creating a video out of an image and then need to access it right away. I create it by appending a pixel buffer of the image to an AVAssetWriter. The video is created here:
while !adaptor.assetWriterInput.isReadyForMoreMediaData { usleep(10) }
let first = adaptor.append(buffer, withPresentationTime: startFrameTime)
while !adaptor.assetWriterInput.isReadyForMoreMediaData { usleep(10) }
let second = adaptor.append(buffer, withPresentationTime: endFrameTime)
videoWriterInput.markAsFinished()
videoWriter.finishWriting {
completion(videoWriter.error)
}
Both first and second return true (successful appends) and there is no error on videoWriter, so the video has been created. I can "retrieve" the asset upon completion with
makeVideo(image, urlDestination) { error in
guard error == nil else { return }
let imageAsset = AVAsset(url: url)
guard
let imageTrack = self.composition.addMutableTrack(
withMediaType: .video,
preferredTrackID: kCMPersistentTrackID_Invalid),
let imageVideoTrack = imageAsset.tracks(withMediaType: .video).first else {
assertionFailure()
return
}
try! imageTrack.insertTimeRange(
CMTimeRangeMake(start: .zero, duration: self.duration),
of: imageVideoTrack,
at: .zero
)
let imageVideoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: imageTrack)
}
However, the first time I try to access it I don't get any video: there is a video track with the right duration, but nothing is displayed. If I add it to a PreviewController I get nothing. Yet if I dismiss the PreviewController and access the asset a second time, it works.
My first thought was that this is a timing issue, but even if I add a delay it fails the first time.
Any thoughts? Keep in mind that this code works when the file at the url already exists; just not right after it is made. (A small diagnostic sketch follows the full code below.)
Edit:
The above are the parts of the code I think are pertinent to this question. The fuller code is below:
private func filePath() -> URL {
let fileManager = FileManager.default
let urls = fileManager.urls(for: .documentDirectory, in: .userDomainMask)
guard let documentDirectory = urls.first else {
fatalError("documentDir Error")
}
return documentDirectory
}
class VideoComposer {
let composition = AVMutableComposition()
let mainInstruction = AVMutableVideoCompositionInstruction()
let duration: CMTime
let videoSize: CGSize
var viewSizeMultiplier: CGFloat = 5.0
init(view: UIView) {
videoSize = CGSize(width: 1772.0, height: 3840.0)
viewSizeMultiplier = 1772.0 / view.frame.width
self.duration = CMTime(seconds: 15, preferredTimescale: 600)
mainInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: self.duration)
view.subviews.reversed().forEach { subview in
if let imageView = subview as? UIImageView {
addImage(of: imageView)
}
else {
print("unhandled view type")
}
}
}
func createVideo(completion: @escaping (AVAssetExportSession) -> Void) {
// make video composition
let videoComposition = AVMutableVideoComposition()
videoComposition.instructions = [mainInstruction]
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 60)
videoComposition.renderSize = videoSize
export(videoComposition: videoComposition) { (session) in
completion(session)
}
}
private func export(videoComposition: AVMutableVideoComposition, completion: @escaping (AVAssetExportSession) -> Void) {
// export
let url = filePath().appendingPathComponent("output.mov")
let fileManager = FileManager.default
if fileManager.fileExists(atPath: url.path) {
try! fileManager.removeItem(at: url)
}
guard let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else {
assertionFailure()
return
}
exporter.videoComposition = videoComposition
exporter.outputFileType = .mov
exporter.outputURL = url
exporter.exportAsynchronously {
DispatchQueue.main.async {
completion(exporter)
}
}
}
private func addImage(of imageView: UIImageView) {
guard let image = imageView.image else {
assertionFailure("no image")
return
}
let movieLength = TimeInterval(duration.seconds)
let url = filePath().appendingPathComponent("image.mov")
ImageVideoCreator.writeSingleImageToMovie(image: image, movieLength: movieLength, outputFileURL: url) { [weak self] success in
guard let `self` = self else {
return
}
let imageAsset = AVAsset(url: url)
let keys = ["playable", "readable", "composable", "tracks", "exportable"]
var error: NSError? = nil
imageAsset.loadValuesAsynchronously(forKeys: keys, completionHandler: {
DispatchQueue.main.async {
keys.forEach({ key in
let status = imageAsset.statusOfValue(forKey: key, error: &error)
switch status {
case .loaded:
print("loaded. \(error)")
case .loading:
print("loading. \(error)")
case .failed:
print("failed. \(error)")
case .cancelled:
print("cancelled. \(error)")
case .unknown:
print("unknown. \(error)")
}
})
guard
let imageTrack = self.composition.addMutableTrack(
withMediaType: .video,
preferredTrackID: kCMPersistentTrackID_Invalid),
let imageVideoTrack = imageAsset.tracks(withMediaType: .video).first
else {
assertionFailure()
return
}
try! imageTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: self.duration), of: imageVideoTrack, at: .zero)
let imageVideoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: imageTrack)
print("image")
self.setTransform(on: imageVideoLayerInstruction, of: imageView, andOf: imageVideoTrack)
self.mainInstruction.layerInstructions.append(imageVideoLayerInstruction)
}
})
}
}
}
class ViewController: UIViewController {
var composer: VideoComposer?
let player = AVPlayerViewController()
override func viewDidLoad() {
super.viewDidLoad()
guard let pathUrl = Bundle.main.url(forResource: "SampleVideo_1280x720_1mb", withExtension: "mp4") else {
assertionFailure()
return
}
let image = UIImage(named: "image")
let imageView = UIImageView(image: image)
view.addSubview(imageView)
imageView.translatesAutoresizingMaskIntoConstraints = false
imageView.topAnchor.constraint(equalTo: view.topAnchor, constant: 0).isActive = true
imageView.leadingAnchor.constraint(equalTo: view.leadingAnchor, constant: 0).isActive = true
imageView.widthAnchor.constraint(equalToConstant: image!.size.width / 4).isActive = true
imageView.heightAnchor.constraint(equalToConstant: image!.size.height / 4).isActive = true
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
composer = VideoComposer(view: view)
composer?.createVideo() { exporter in
self.didFinish(session: exporter)
}
}
func didFinish(session: AVAssetExportSession) {
guard let url = session.outputURL else {
assertionFailure()
return
}
self.showVideo(videoUrl: url)
}
func showVideo(videoUrl: URL) {
let videoPlayer = AVPlayer(url: videoUrl)
player.player = videoPlayer
self.present(player, animated: true) {
self.player.player?.play()
}
}
}
class ImageVideoCreator {
private static func pixelBuffer(fromImage image: CGImage, size: CGSize) -> CVPixelBuffer? {
let options: CFDictionary = [kCVPixelBufferCGImageCompatibilityKey as String: true, kCVPixelBufferCGBitmapContextCompatibilityKey as String: true] as CFDictionary
var pxbuffer: CVPixelBuffer? = nil
let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(size.width), Int(size.height), kCVPixelFormatType_32ARGB, options, &pxbuffer)
guard let buffer = pxbuffer, status == kCVReturnSuccess else {
return nil
}
CVPixelBufferLockBaseAddress(buffer, [])
guard let pxdata = CVPixelBufferGetBaseAddress(buffer) else {
return nil
}
let bytesPerRow = CVPixelBufferGetBytesPerRow(buffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
guard let context = CGContext(data: pxdata, width: Int(size.width), height: Int(size.height), bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue) else {
return nil
}
context.concatenate(CGAffineTransform(rotationAngle: 0))
context.draw(image, in: CGRect(x: 0, y: 0, width: size.width, height: size.height))
CVPixelBufferUnlockBaseAddress(buffer, [])
return buffer
}
static func writeSingleImageToMovie(image: UIImage, movieLength: TimeInterval, outputFileURL: URL, completion: @escaping (Bool) -> ()) {
let fileManager = FileManager.default
if fileManager.fileExists(atPath: outputFileURL.path) {
try! fileManager.removeItem(at: outputFileURL)
}
do {
let imageSize = image.size
let videoWriter = try AVAssetWriter(outputURL: outputFileURL, fileType: AVFileType.mov)
let videoSettings: [String: Any] = [AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoWidthKey: imageSize.width,
AVVideoHeightKey: imageSize.height]
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
let adaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: nil)
if !videoWriter.canAdd(videoWriterInput) {
completion(false)
return
}
videoWriterInput.expectsMediaDataInRealTime = true
videoWriter.add(videoWriterInput)
videoWriter.startWriting()
let timeScale: Int32 = 600 // recommended in CMTime for movies.
let startFrameTime = CMTimeMake(value: 0, timescale: 600)
let endFrameTime = CMTimeMakeWithSeconds(movieLength, preferredTimescale: timeScale)
videoWriter.startSession(atSourceTime: startFrameTime)
guard let cgImage = image.cgImage else {
completion(false)
return
}
let buffer: CVPixelBuffer = self.pixelBuffer(fromImage: cgImage, size: imageSize)!
while !adaptor.assetWriterInput.isReadyForMoreMediaData { usleep(10) }
let first = adaptor.append(buffer, withPresentationTime: startFrameTime)
while !adaptor.assetWriterInput.isReadyForMoreMediaData { usleep(10) }
let second = adaptor.append(buffer, withPresentationTime: endFrameTime)
videoWriterInput.markAsFinished()
videoWriter.finishWriting {
completion(true)
}
} catch {
completion(false)
}
}
}
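One thing worth checking, as a diagnostic rather than a confirmed fix: after finishWriting fires its completion, verify the file really exists on disk and that a freshly created asset exposes a video track once its "tracks" key has loaded. A sketch of what that finishWriting completion could log (variable names follow writeSingleImageToMovie above):
videoWriter.finishWriting {
    // Diagnostic sketch: confirm the file is on disk and a fresh asset reports a video track.
    let exists = FileManager.default.fileExists(atPath: outputFileURL.path)
    let written = AVURLAsset(url: outputFileURL)
    written.loadValuesAsynchronously(forKeys: ["tracks"]) {
        var trackError: NSError?
        let status = written.statusOfValue(forKey: "tracks", error: &trackError)
        print("exists: \(exists), tracks status: \(status.rawValue), video tracks: \(written.tracks(withMediaType: .video).count), error: \(String(describing: trackError))")
        completion(videoWriter.status == .completed && videoWriter.error == nil)
    }
}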

Related

Swift - Add watermark to a video is very slow

Here is my code that adds image and text overlays to a local video. The problem is that it's extremely slow. Any ideas how to fix it?
I would also appreciate suggestions for third-party libraries that can do watermarking.
public func addWatermark(
fromVideoAt videoURL: URL,
watermark: Watermark,
fileName: String,
onSuccess: @escaping (URL) -> Void,
onFailure: @escaping ((Error?) -> Void)
) {
let asset = AVURLAsset(url: videoURL)
let composition = AVMutableComposition()
guard
let compositionTrack = composition.addMutableTrack(
withMediaType: .video,
preferredTrackID: kCMPersistentTrackID_Invalid
),
let assetTrack = asset.tracks(withMediaType: .video).first
else {
onFailure(nil)
return
}
do {
let timeRange = CMTimeRange(start: .zero, duration: assetTrack.timeRange.duration)
try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
if let audioAssetTrack = asset.tracks(withMediaType: .audio).first,
let compositionAudioTrack = composition.addMutableTrack(
withMediaType: .audio,
preferredTrackID: kCMPersistentTrackID_Invalid
) {
try compositionAudioTrack.insertTimeRange(
timeRange,
of: audioAssetTrack,
at: .zero
)
}
} catch {
onFailure(error)
return
}
compositionTrack.preferredTransform = assetTrack.preferredTransform
let videoInfo = orientation(from: assetTrack.preferredTransform)
let videoSize: CGSize
if videoInfo.isPortrait {
videoSize = CGSize(
width: assetTrack.naturalSize.height,
height: assetTrack.naturalSize.width
)
} else {
videoSize = assetTrack.naturalSize
}
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: videoSize)
let overlayLayer = CALayer()
overlayLayer.frame = CGRect(origin: .zero, size: videoSize)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
let imageFrame = watermark.calculateImageFrame(parentSize: videoSize)
addImage(watermark.image, to: overlayLayer, frame: imageFrame)
let textOrigin = CGPoint(x: imageFrame.minX + 4, y: imageFrame.minY)
if let text = watermark.text {
addText(
text,
to: overlayLayer,
origin: textOrigin,
textAttributes: Watermark.textAttributes(type: watermark.type)
)
}
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(overlayLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = videoSize
videoComposition.frameDuration = CMTime(value: 1, timescale: 60)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
postProcessingAsVideoLayer: videoLayer,
in: outputLayer
)
videoComposition.colorPrimaries = AVVideoColorPrimaries_ITU_R_709_2
videoComposition.colorTransferFunction = "sRGB"
videoComposition.colorYCbCrMatrix = nil
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(start: .zero, duration: composition.duration)
videoComposition.instructions = [instruction]
let layerInstruction = compositionLayerInstruction(
for: compositionTrack,
assetTrack: assetTrack
)
instruction.layerInstructions = [layerInstruction]
guard let export = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPresetHighestQuality
)
else {
onFailure(nil)
return
}
let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
.appendingPathComponent(fileName)
.appendingPathExtension("mov")
export.videoComposition = videoComposition
export.outputFileType = .mov
export.outputURL = exportURL
export.exportAsynchronously {
DispatchQueue.main.async {
switch export.status {
case .completed:
onSuccess(exportURL)
default:
onFailure(export.error)
}
}
}
}
Watermark is the wrapper struct. It contains image/text, text attributes, size and other similar helpful information.
I've tried the following without any luck:
export.shouldOptimizeForNetworkUse = true (it did not help).
AVAssetExportPresetPassthrough instead of AVAssetExportPresetHighestQuality (it removed the overlays).
I have the following code, which is relatively fast: it watermarks an 8-second video in about 2.56 seconds. When I ran it under the Metal System Trace instrument, it appeared well balanced and GPU-accelerated the whole time. You just call exportIt().
As a side note, this code uses async/await wrappers around AVKit functions and migrates off interfaces deprecated as of iOS 16.
A tidied-up, working sample app with resource files is at https://github.com/faisalmemon/watermark
The core code is as follows:
//
// WatermarkHelper.swift
// watermark
//
// Created by Faisal Memon on 09/02/2023.
//
import Foundation
import AVKit
struct WatermarkHelper {
enum WatermarkError: Error {
case cannotLoadResources
case cannotAddTrack
case cannotLoadVideoTrack(Error?)
case cannotCopyOriginalAudioVideo(Error?)
case noVideoTrackPresent
case exportSessionCannotBeCreated
}
func compositionAddMediaTrack(_ composition: AVMutableComposition, withMediaType mediaType: AVMediaType) throws -> AVMutableCompositionTrack {
guard let compositionTrack = composition.addMutableTrack(
withMediaType: mediaType,
preferredTrackID: kCMPersistentTrackID_Invalid) else {
throw WatermarkError.cannotAddTrack
}
return compositionTrack
}
func loadTrack(inputVideo: AVAsset, withMediaType mediaType: AVMediaType) async throws -> AVAssetTrack? {
return try await withCheckedThrowingContinuation({
(continuation: CheckedContinuation<AVAssetTrack?, Error>) in
inputVideo.loadTracks(withMediaType: mediaType) { tracks, error in
if let tracks = tracks {
continuation.resume(returning: tracks.first)
} else {
continuation.resume(throwing: WatermarkError.cannotLoadVideoTrack(error))
}
}
})
}
func bringOverVideoAndAudio(inputVideo: AVAsset, assetTrack: AVAssetTrack, compositionTrack: AVMutableCompositionTrack, composition: AVMutableComposition) async throws {
do {
let timeRange = await CMTimeRange(start: .zero, duration: try inputVideo.load(.duration))
try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
if let audioAssetTrack = try await loadTrack(inputVideo: inputVideo, withMediaType: .audio) {
let compositionAudioTrack = try compositionAddMediaTrack(composition, withMediaType: .audio)
try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
}
} catch {
print(error)
throw WatermarkError.cannotCopyOriginalAudioVideo(error)
}
}
private func orientation(from transform: CGAffineTransform) -> (orientation: UIImage.Orientation, isPortrait: Bool) {
var assetOrientation = UIImage.Orientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
func preferredTransformAndSize(compositionTrack: AVMutableCompositionTrack, assetTrack: AVAssetTrack) async throws -> (preferredTransform: CGAffineTransform, videoSize: CGSize) {
let transform = try await assetTrack.load(.preferredTransform)
let videoInfo = orientation(from: transform)
let videoSize: CGSize
let naturalSize = try await assetTrack.load(.naturalSize)
if videoInfo.isPortrait {
videoSize = CGSize(
width: naturalSize.height,
height: naturalSize.width)
} else {
videoSize = naturalSize
}
return (transform, videoSize)
}
private func compositionLayerInstruction(for track: AVCompositionTrack, assetTrack: AVAssetTrack, preferredTransform: CGAffineTransform) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
instruction.setTransform(preferredTransform, at: .zero)
return instruction
}
private func addImage(to layer: CALayer, watermark: UIImage, videoSize: CGSize) {
let imageLayer = CALayer()
let aspect: CGFloat = watermark.size.width / watermark.size.height
let width = videoSize.width
let height = width / aspect
imageLayer.frame = CGRect(
x: 0,
y: -height * 0.15,
width: width,
height: height)
imageLayer.contents = watermark.cgImage
layer.addSublayer(imageLayer)
}
func composeVideo(composition: AVMutableComposition, videoComposition: AVMutableVideoComposition, compositionTrack: AVMutableCompositionTrack, assetTrack: AVAssetTrack, preferredTransform: CGAffineTransform) {
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(
start: .zero,
duration: composition.duration)
videoComposition.instructions = [instruction]
let layerInstruction = compositionLayerInstruction(
for: compositionTrack,
assetTrack: assetTrack, preferredTransform: preferredTransform)
instruction.layerInstructions = [layerInstruction]
}
func exportSession(composition: AVMutableComposition, videoComposition: AVMutableVideoComposition, outputURL: URL) throws -> AVAssetExportSession {
guard let export = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPresetHighestQuality)
else {
print("Cannot create export session.")
throw WatermarkError.exportSessionCannotBeCreated
}
export.videoComposition = videoComposition
export.outputFileType = .mp4
export.outputURL = outputURL
return export
}
func executeSession(_ session: AVAssetExportSession) async throws -> AVAssetExportSession.Status {
return try await withCheckedThrowingContinuation({
(continuation: CheckedContinuation<AVAssetExportSession.Status, Error>) in
session.exportAsynchronously {
DispatchQueue.main.async {
if let error = session.error {
continuation.resume(throwing: error)
} else {
continuation.resume(returning: session.status)
}
}
}
})
}
func addWatermarkTopDriver(inputVideo: AVAsset, outputURL: URL, watermark: UIImage) async throws -> AVAssetExportSession.Status {
let composition = AVMutableComposition()
let compositionTrack = try compositionAddMediaTrack(composition, withMediaType: .video)
guard let videoAssetTrack = try await loadTrack(inputVideo: inputVideo, withMediaType: .video) else {
throw WatermarkError.noVideoTrackPresent
}
try await bringOverVideoAndAudio(inputVideo: inputVideo, assetTrack: videoAssetTrack, compositionTrack: compositionTrack, composition: composition)
let transformAndSize = try await preferredTransformAndSize(compositionTrack: compositionTrack, assetTrack: videoAssetTrack)
compositionTrack.preferredTransform = transformAndSize.preferredTransform
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: transformAndSize.videoSize)
let overlayLayer = CALayer()
overlayLayer.frame = CGRect(origin: .zero, size: transformAndSize.videoSize)
addImage(to: overlayLayer, watermark: watermark, videoSize: transformAndSize.videoSize)
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: transformAndSize.videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(overlayLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = transformAndSize.videoSize
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
postProcessingAsVideoLayer: videoLayer,
in: outputLayer)
composeVideo(composition: composition, videoComposition: videoComposition, compositionTrack: compositionTrack, assetTrack: videoAssetTrack, preferredTransform: transformAndSize.preferredTransform)
let session = try exportSession(composition: composition, videoComposition: videoComposition, outputURL: outputURL)
return try await executeSession(session)
}
/// Creates a watermarked movie and saves it to the documents directory.
///
/// For an 8 second video (251 frames), this code takes 2.56 seconds on iPhone 11 producing a high quality video at 30 FPS.
/// - Returns: Time interval taken for processing.
public func exportIt() async throws -> TimeInterval {
let timeStart = Date()
guard
let filePath = Bundle.main.path(forResource: "donut-spinning", ofType: "mp4"),
let docUrl = try? FileManager.default.url(for: .documentDirectory, in: .userDomainMask, appropriateFor: nil, create: true),
let watermarkImage = UIImage(systemName: "seal") else {
throw WatermarkError.cannotLoadResources
}
let videoAsset = AVAsset(url: URL(filePath: filePath))
let outputURL = docUrl.appending(component: "watermark-donut-spinning.mp4")
try? FileManager.default.removeItem(at: outputURL)
print(outputURL)
let result = try await addWatermarkTopDriver(inputVideo: videoAsset, outputURL: outputURL, watermark: watermarkImage)
let timeEnd = Date()
let duration = timeEnd.timeIntervalSince(timeStart)
print(result)
return duration
}
}
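For example, you could kick the export off from an async context like this (a usage sketch of the exportIt() call above):
Task {
    do {
        let elapsed = try await WatermarkHelper().exportIt()
        print("Watermarking finished in \(elapsed) seconds")
    } catch {
        print("Watermarking failed: \(error)")
    }
}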
Use the method below to add a watermark to a video very quickly:
func addWatermark(inputURL: URL, outputURL: URL, handler: @escaping (_ exportSession: AVAssetExportSession?) -> Void) {
let mixComposition = AVMutableComposition()
let asset = AVAsset(url: inputURL)
let videoTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
let timerange = CMTimeRangeMake(start: CMTime.zero, duration: asset.duration)
let compositionVideoTrack:AVMutableCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))!
do {
try compositionVideoTrack.insertTimeRange(timerange, of: videoTrack, at: CMTime.zero)
compositionVideoTrack.preferredTransform = videoTrack.preferredTransform
} catch {
print(error)
}
let watermarkFilter = CIFilter(name: "CISourceOverCompositing")!
let watermarkImage = CIImage(image: UIImage(named: "waterMark")!)
let videoComposition = AVVideoComposition(asset: asset) { (filteringRequest) in
let source = filteringRequest.sourceImage.clampedToExtent()
watermarkFilter.setValue(source, forKey: "inputBackgroundImage")
let transform = CGAffineTransform(translationX: filteringRequest.sourceImage.extent.width - (watermarkImage?.extent.width)! - 2, y: 0)
watermarkFilter.setValue(watermarkImage?.transformed(by: transform), forKey: "inputImage")
filteringRequest.finish(with: watermarkFilter.outputImage!, context: nil)
}
guard let exportSession = AVAssetExportSession(asset: asset, presetName: AVAssetExportPreset640x480) else {
handler(nil)
return
}
exportSession.outputURL = outputURL
exportSession.outputFileType = AVFileType.mp4
exportSession.shouldOptimizeForNetworkUse = true
exportSession.videoComposition = videoComposition
exportSession.exportAsynchronously { () -> Void in
handler(exportSession)
}
}
Call the method like this when you want to add a watermark:
let outputURL = NSURL.fileURL(withPath: "TempPath")
let inputURL = NSURL.fileURL(withPath: "VideoWithWatermarkPath")
addWatermark(inputURL: inputURL, outputURL: outputURL, handler: { (exportSession) in
guard let session = exportSession else {
// Error
return
}
switch session.status {
case .completed:
guard NSData(contentsOf: outputURL) != nil else {
// Error
return
}
// Now you can find the video with the watermark in the location outputURL
default:
// Error
break
}
})
Have you checked out Apple's documentation? It shows how to add a title layer (CALayer) on top of an existing AVMutableComposition or AVAsset. Since it's a legacy doc from iOS 6, you'll need to refactor a bit, but it should be fast on today's hardware.
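For illustration, the pattern from that doc looks roughly like this in Swift (a sketch; videoSize and videoComposition are assumed to come from your existing watermarking pipeline above):
let titleLayer = CATextLayer()
titleLayer.string = "My Title"
titleLayer.fontSize = 48
titleLayer.alignmentMode = .center
titleLayer.frame = CGRect(x: 0, y: videoSize.height * 0.8, width: videoSize.width, height: 60)

let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: videoSize)

let parentLayer = CALayer()
parentLayer.frame = CGRect(origin: .zero, size: videoSize)
parentLayer.addSublayer(videoLayer)
parentLayer.addSublayer(titleLayer)

videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
    postProcessingAsVideoLayer: videoLayer,
    in: parentLayer)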

How to sync AVPlayer and MTKView

I have a project where users can take a video and later add filters to it or change basic settings like brightness and contrast. To accomplish this, I use BBMetalImage, which basically returns the video in an MTKView (named BBMetalView in the project).
Everything works great: I can play the video and add filters and the desired effects, but there is no audio. I asked the author about this, who recommended using an AVPlayer (or AVAudioPlayer) for the audio. So I did. However, the video and audio are out of sync, possibly because of different bitrates in the first place; the author of the library also mentioned that the frame rate can differ because of the filter processing (the time this consumes is variable):
The render view FPS is not exactly the same to the actual rate.
Because the video source output frame is processed by filters and the
filter process time is variable.
First, I crop my video to the desired aspect ratio (4:5). I save this file (480x600) locally, using AVVideoProfileLevelH264HighAutoLevel as AVVideoProfileLevelKey. My audio configuration, using NextLevelSessionExporter, has the following setup: AVEncoderBitRateKey: 128000, AVNumberOfChannelsKey: 2, AVSampleRateKey: 44100.
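For reference, those audio settings as a dictionary (a sketch of what I hand to NextLevelSessionExporter; AVFormatIDKey is my assumption, since a format key accompanies the listed ones):
let audioOutputSettings: [String: Any] = [
    AVFormatIDKey: kAudioFormatMPEG4AAC, // assumption: AAC; only the keys below were stated
    AVEncoderBitRateKey: 128_000,
    AVNumberOfChannelsKey: 2,
    AVSampleRateKey: 44_100
]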
Then, the BBMetalImage library takes this saved video file and provides an MTKView (BBMetalView) to display the video, allowing me to add filters and effects in real time. The setup looks roughly like this:
self.metalView = BBMetalView(frame: CGRect(x: 0, y: self.view.center.y - ((UIScreen.main.bounds.width * 1.25) / 2), width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.width * 1.25))
self.view.addSubview(self.metalView)
self.videoSource = BBMetalVideoSource(url: outputURL)
self.videoSource.playWithVideoRate = true
self.videoSource.audioConsumer = self.metalAudio
self.videoSource.add(consumer: self.metalView)
self.videoSource.add(consumer: self.videoWriter)
self.audioItem = AVPlayerItem(url: outputURL)
self.audioPlayer = AVPlayer(playerItem: self.audioItem)
self.playerLayer = AVPlayerLayer(player: self.audioPlayer)
self.videoPreview.layer.addSublayer(self.playerLayer!)
self.playerLayer?.frame = CGRect(x: 0, y: 0, width: 0, height: 0)
self.playerLayer?.backgroundColor = UIColor.black.cgColor
self.startVideo()
And startVideo() goes like this:
audioPlayer.seek(to: .zero)
audioPlayer.play()
videoSource.start(progress: { (frameTime) in
print(frameTime)
}) { [weak self] (finish) in
guard let self = self else { return }
self.startVideo()
}
This is all probably pretty vague because of the external library/libraries. However, my question is pretty simple: is there any way I can sync the MTKView with my AVPlayer? It would help me a lot and I'm sure Silence-GitHub would also implement this feature into the library to help a lot of other users. Any ideas on how to approach this are welcome!
I customized BBMetalVideoSource as follows, and then it worked:
Create a delegate in BBMetalVideoSource to get the current time of the audio player we want to sync with.
In private func processAsset(progress:completion:), I replaced the block of code if useVideoRate { //... } with:
if useVideoRate {
if let playerTime = delegate.getAudioPlayerCurrentTime() {
let diff = CMTimeGetSeconds(sampleFrameTime) - playerTime
if diff > 0.0 {
sleepTime = diff
if sleepTime > 1.0 {
sleepTime = 0.0
}
usleep(UInt32(1000000 * sleepTime))
} else {
sleepTime = 0
}
}
}
This code helps us resolve both problems: 1. no audio when previewing the video effect, and 2. syncing audio with video.
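For reference, the delegate mentioned in step 1 could look roughly like this (a sketch; the protocol and type names are placeholders, not part of the BBMetalImage API):
protocol AudioSyncDelegate: AnyObject {
    /// Current audio playback time in seconds, or nil if playback has not started yet.
    func getAudioPlayerCurrentTime() -> TimeInterval?
}

// The view controller that owns the AVPlayer can then supply the time
// (MyPlayerViewController is a placeholder name):
extension MyPlayerViewController: AudioSyncDelegate {
    func getAudioPlayerCurrentTime() -> TimeInterval? {
        guard audioPlayer.rate > 0 else { return nil }
        return CMTimeGetSeconds(audioPlayer.currentTime())
    }
}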
Given your circumstances, you seem to need to try one of two things:
1) Apply some sort of overlay that has the desired effect for your video. I could attempt something like this, but I have personally not done it.
2) Recreate the video with the desired effects applied. This takes a little more time up front, in the sense that the program needs a few moments (depending on your filtering) to write a new video. You can try this out and see if it works for you.
I have made my own VideoCreator using some source code from SO.
//Recreates a new video with applied filter
public static func createFilteredVideo(asset: AVAsset, completionHandler: @escaping (_ asset: AVAsset) -> Void) {
let url = (asset as? AVURLAsset)!.url
let snapshot = url.videoSnapshot()
guard let image = snapshot else { return }
let fps = Int32(asset.tracks(withMediaType: .video)[0].nominalFrameRate)
let writer = VideoCreator(fps: Int32(fps), width: image.size.width, height: image.size.height, audioSettings: nil)
let timeScale = asset.duration.timescale
let timeValue = asset.duration.value
let frameTime = 1/Double(fps) * Double(timeScale)
let numberOfImages = Int(Double(timeValue)/Double(frameTime))
let queue = DispatchQueue(label: "com.queue.queue", qos: .utility)
let composition = AVVideoComposition(asset: asset) { (request) in
let source = request.sourceImage.clampedToExtent()
//This is where you create your filter and get your filtered result.
//Here is an example
let filter = CIFilter(name: "CIBlendWithMask")
filter!.setValue(maskImage, forKey: "inputMaskImage") // maskImage: your own CIImage mask
filter!.setValue(regCIImage, forKey: "inputImage") // regCIImage: the CIImage you want blended in
let filteredImage = filter!.outputImage!.clamped(to: source.extent)
request.finish(with: filteredImage, context: nil)
}
var i = 0
getAudioFromURL(url: url) { (buffer) in
writer.addAudio(audio: buffer, time: .zero)
i == 0 ? writer.startCreatingVideo(initialBuffer: buffer, completion: {}) : nil
i += 1
}
let group = DispatchGroup()
for i in 0..<numberOfImages {
group.enter()
autoreleasepool {
let time = CMTime(seconds: Double(Double(i) * frameTime / Double(timeScale)), preferredTimescale: timeScale)
let image = url.videoSnapshot(time: time, composition: composition)
queue.async {
writer.addImageAndAudio(image: image!, audio: nil, time: time.seconds)
group.leave()
}
}
}
group.notify(queue: queue) {
writer.finishWriting()
let url = writer.getURL()
//Now create exporter to add audio then do completion handler
completionHandler(AVAsset(url: url))
}
}
static func getAudioFromURL(url: URL, completionHandlerPerBuffer: @escaping ((_ buffer: CMSampleBuffer) -> Void)) {
let asset = AVURLAsset(url: url, options: [AVURLAssetPreferPreciseDurationAndTimingKey: NSNumber(value: true as Bool)])
guard let assetTrack = asset.tracks(withMediaType: AVMediaType.audio).first else {
fatalError("Couldn't load AVAssetTrack")
}
guard let reader = try? AVAssetReader(asset: asset)
else {
fatalError("Couldn't initialize the AVAssetReader")
}
reader.timeRange = CMTimeRange(start: .zero, duration: asset.duration)
let outputSettingsDict: [String : Any] = [
AVFormatIDKey: Int(kAudioFormatLinearPCM),
AVLinearPCMBitDepthKey: 16,
AVLinearPCMIsBigEndianKey: false,
AVLinearPCMIsFloatKey: false,
AVLinearPCMIsNonInterleaved: false
]
let readerOutput = AVAssetReaderTrackOutput(track: assetTrack,
outputSettings: outputSettingsDict)
readerOutput.alwaysCopiesSampleData = false
reader.add(readerOutput)
reader.startReading() // required: without this, reader.status stays .unknown and the loop below never runs
while reader.status == .reading {
guard let readSampleBuffer = readerOutput.copyNextSampleBuffer() else { break }
completionHandlerPerBuffer(readSampleBuffer)
}
}
extension URL {
func videoSnapshot(time:CMTime? = nil, composition:AVVideoComposition? = nil) -> UIImage? {
let asset = AVURLAsset(url: self)
let generator = AVAssetImageGenerator(asset: asset)
generator.appliesPreferredTrackTransform = true
generator.requestedTimeToleranceBefore = .zero
generator.requestedTimeToleranceAfter = .zero
generator.videoComposition = composition
let timestamp = time == nil ? CMTime(seconds: 1, preferredTimescale: 60) : time
do {
let imageRef = try generator.copyCGImage(at: timestamp!, actualTime: nil)
return UIImage(cgImage: imageRef)
}
catch let error as NSError
{
print("Image generation failed with error \(error)")
return nil
}
}
}
Below is the VideoCreator
//
// VideoCreator.swift
// AKPickerView-Swift
//
// Created by Impression7vx on 7/16/19.
//
import UIKit
import AVFoundation
import UIKit
import Photos
@available(iOS 11.0, *)
public class VideoCreator: NSObject {
private var settings:RenderSettings!
private var imageAnimator:ImageAnimator!
public override init() {
self.settings = RenderSettings()
self.imageAnimator = ImageAnimator(renderSettings: self.settings)
}
public convenience init(fps: Int32, width: CGFloat, height: CGFloat, audioSettings: [String:Any]?) {
self.init()
self.settings = RenderSettings(fps: fps, width: width, height: height)
self.imageAnimator = ImageAnimator(renderSettings: self.settings, audioSettings: audioSettings)
}
public convenience init(width: CGFloat, height: CGFloat) {
self.init()
self.settings = RenderSettings(width: width, height: height)
self.imageAnimator = ImageAnimator(renderSettings: self.settings)
}
func startCreatingVideo(initialBuffer: CMSampleBuffer?, completion: @escaping (() -> Void)) {
self.imageAnimator.render(initialBuffer: initialBuffer) {
completion()
}
}
func finishWriting() {
self.imageAnimator.isDone = true
}
func addImageAndAudio(image:UIImage, audio:CMSampleBuffer?, time:CFAbsoluteTime) {
self.imageAnimator.addImageAndAudio(image: image, audio: audio, time: time)
}
func getURL() -> URL {
return settings!.outputURL
}
func addAudio(audio: CMSampleBuffer, time: CMTime) {
self.imageAnimator.videoWriter.addAudio(buffer: audio, time: time)
}
}
@available(iOS 11.0, *)
public struct RenderSettings {
var width: CGFloat = 1280
var height: CGFloat = 720
var fps: Int32 = 2 // 2 frames per second
var avCodecKey = AVVideoCodecType.h264
var videoFilename = "video"
var videoFilenameExt = "mov"
init() { }
init(width: CGFloat, height: CGFloat) {
self.width = width
self.height = height
}
init(fps: Int32) {
self.fps = fps
}
init(fps: Int32, width: CGFloat, height: CGFloat) {
self.fps = fps
self.width = width
self.height = height
}
var size: CGSize {
return CGSize(width: width, height: height)
}
var outputURL: URL {
// Use the CachesDirectory so the rendered video file sticks around as long as we need it to.
// Using the CachesDirectory ensures the file won't be included in a backup of the app.
let fileManager = FileManager.default
if let tmpDirURL = try? fileManager.url(for: .cachesDirectory, in: .userDomainMask, appropriateFor: nil, create: true) {
return tmpDirURL.appendingPathComponent(videoFilename).appendingPathExtension(videoFilenameExt)
}
fatalError("URLForDirectory() failed")
}
}
@available(iOS 11.0, *)
public class ImageAnimator {
// Apple suggests a timescale of 600 because it's a multiple of standard video rates 24, 25, 30, 60 fps etc.
static let kTimescale: Int32 = 600
let settings: RenderSettings
let videoWriter: VideoWriter
var imagesAndAudio:SynchronizedArray<(UIImage, CMSampleBuffer?, CFAbsoluteTime)> = SynchronizedArray<(UIImage, CMSampleBuffer?, CFAbsoluteTime)>()
var isDone:Bool = false
let semaphore = DispatchSemaphore(value: 1)
var frameNum = 0
class func removeFileAtURL(fileURL: URL) {
do {
try FileManager.default.removeItem(atPath: fileURL.path)
}
catch _ as NSError {
// Assume file doesn't exist.
}
}
init(renderSettings: RenderSettings, audioSettings:[String:Any]? = nil) {
settings = renderSettings
videoWriter = VideoWriter(renderSettings: settings, audioSettings: audioSettings)
}
func addImageAndAudio(image: UIImage, audio: CMSampleBuffer?, time:CFAbsoluteTime) {
self.imagesAndAudio.append((image, audio, time))
// print("Adding to array -- \(self.imagesAndAudio.count)")
}
func render(initialBuffer: CMSampleBuffer?, completion: @escaping () -> Void) {
// The VideoWriter will fail if a file exists at the URL, so clear it out first.
ImageAnimator.removeFileAtURL(fileURL: settings.outputURL)
videoWriter.start(initialBuffer: initialBuffer)
videoWriter.render(appendPixelBuffers: appendPixelBuffers) {
//ImageAnimator.saveToLibrary(self.settings.outputURL)
completion()
}
}
// This is the callback function for VideoWriter.render()
func appendPixelBuffers(writer: VideoWriter) -> Bool {
//Don't stop while images are NOT empty
while !imagesAndAudio.isEmpty || !isDone {
if(!imagesAndAudio.isEmpty) {
let date = Date()
if writer.isReadyForVideoData == false {
// Inform writer we have more buffers to write.
// print("Writer is not ready for more data")
return false
}
autoreleasepool {
//This should help but truly doesn't suffice - still need a mutex/lock
if(!imagesAndAudio.isEmpty) {
semaphore.wait() // requesting resource
let imageAndAudio = imagesAndAudio.first()!
let image = imageAndAudio.0
// let audio = imageAndAudio.1
let time = imageAndAudio.2
self.imagesAndAudio.removeAtIndex(index: 0)
semaphore.signal() // releasing resource
let presentationTime = CMTime(seconds: time, preferredTimescale: 600)
// if(audio != nil) { videoWriter.addAudio(buffer: audio!) }
let success = videoWriter.addImage(image: image, withPresentationTime: presentationTime)
if success == false {
fatalError("addImage() failed")
}
else {
// print("Added image # frame \(frameNum) with presTime: \(presentationTime)")
}
frameNum += 1
let final = Date()
let timeDiff = final.timeIntervalSince(date)
// print("Time: \(timeDiff)")
}
else {
// print("Images was empty")
}
}
}
}
print("Done writing")
// Inform writer all buffers have been written.
return true
}
}
@available(iOS 11.0, *)
public class VideoWriter {
let renderSettings: RenderSettings
var audioSettings: [String:Any]?
var videoWriter: AVAssetWriter!
var videoWriterInput: AVAssetWriterInput!
var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor!
var audioWriterInput: AVAssetWriterInput!
static var ci:Int = 0
var initialTime:CMTime!
var isReadyForVideoData: Bool {
return (videoWriterInput == nil ? false : videoWriterInput!.isReadyForMoreMediaData )
}
var isReadyForAudioData: Bool {
return (audioWriterInput == nil ? false : audioWriterInput!.isReadyForMoreMediaData)
}
class func pixelBufferFromImage(image: UIImage, pixelBufferPool: CVPixelBufferPool, size: CGSize, alpha:CGImageAlphaInfo) -> CVPixelBuffer? {
var pixelBufferOut: CVPixelBuffer?
let status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool, &pixelBufferOut)
if status != kCVReturnSuccess {
fatalError("CVPixelBufferPoolCreatePixelBuffer() failed")
}
let pixelBuffer = pixelBufferOut!
CVPixelBufferLockBaseAddress(pixelBuffer, [])
let data = CVPixelBufferGetBaseAddress(pixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: data, width: Int(size.width), height: Int(size.height),
bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer), space: rgbColorSpace, bitmapInfo: alpha.rawValue)
context!.clear(CGRect(x: 0, y: 0, width: size.width, height: size.height))
let horizontalRatio = size.width / image.size.width
let verticalRatio = size.height / image.size.height
//aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize = CGSize(width: image.size.width * aspectRatio, height: image.size.height * aspectRatio)
let x = newSize.width < size.width ? (size.width - newSize.width) / 2 : 0
let y = newSize.height < size.height ? (size.height - newSize.height) / 2 : 0
let cgImage = image.cgImage != nil ? image.cgImage! : image.ciImage!.convertCIImageToCGImage()
context!.draw(cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
CVPixelBufferUnlockBaseAddress(pixelBuffer, [])
return pixelBuffer
}
@available(iOS 11.0, *)
init(renderSettings: RenderSettings, audioSettings:[String:Any]? = nil) {
self.renderSettings = renderSettings
self.audioSettings = audioSettings
}
func start(initialBuffer: CMSampleBuffer?) {
let avOutputSettings: [String: AnyObject] = [
AVVideoCodecKey: renderSettings.avCodecKey as AnyObject,
AVVideoWidthKey: NSNumber(value: Float(renderSettings.width)),
AVVideoHeightKey: NSNumber(value: Float(renderSettings.height))
]
let avAudioSettings = audioSettings
func createPixelBufferAdaptor() {
let sourcePixelBufferAttributesDictionary = [
kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32ARGB),
kCVPixelBufferWidthKey as String: NSNumber(value: Float(renderSettings.width)),
kCVPixelBufferHeightKey as String: NSNumber(value: Float(renderSettings.height))
]
pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
}
func createAssetWriter(outputURL: URL) -> AVAssetWriter {
guard let assetWriter = try? AVAssetWriter(outputURL: outputURL, fileType: AVFileType.mov) else {
fatalError("AVAssetWriter() failed")
}
guard assetWriter.canApply(outputSettings: avOutputSettings, forMediaType: AVMediaType.video) else {
fatalError("canApplyOutputSettings() failed")
}
return assetWriter
}
videoWriter = createAssetWriter(outputURL: renderSettings.outputURL)
videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: avOutputSettings)
// if(audioSettings != nil) {
audioWriterInput = AVAssetWriterInput(mediaType: .audio, outputSettings: nil)
audioWriterInput.expectsMediaDataInRealTime = true
// }
if videoWriter.canAdd(videoWriterInput) {
videoWriter.add(videoWriterInput)
}
else {
fatalError("canAddInput() returned false")
}
// if(audioSettings != nil) {
if videoWriter.canAdd(audioWriterInput) {
videoWriter.add(audioWriterInput)
}
else {
fatalError("canAddInput() returned false")
}
// }
// The pixel buffer adaptor must be created before we start writing.
createPixelBufferAdaptor()
if videoWriter.startWriting() == false {
fatalError("startWriting() failed")
}
self.initialTime = initialBuffer != nil ? CMSampleBufferGetPresentationTimeStamp(initialBuffer!) : CMTime.zero
videoWriter.startSession(atSourceTime: self.initialTime)
precondition(pixelBufferAdaptor.pixelBufferPool != nil, "nil pixelBufferPool")
}
func render(appendPixelBuffers: @escaping (VideoWriter) -> Bool, completion: @escaping () -> Void) {
precondition(videoWriter != nil, "Call start() to initialze the writer")
let queue = DispatchQueue(label: "mediaInputQueue")
videoWriterInput.requestMediaDataWhenReady(on: queue) {
let isFinished = appendPixelBuffers(self)
if isFinished {
self.videoWriterInput.markAsFinished()
self.videoWriter.finishWriting() {
DispatchQueue.main.async {
print("Done Creating Video")
completion()
}
}
}
else {
// Fall through. The closure will be called again when the writer is ready.
}
}
}
func addAudio(buffer: CMSampleBuffer, time: CMTime) {
if(isReadyForAudioData) {
print("Writing audio \(VideoWriter.ci) of a time of \(CMSampleBufferGetPresentationTimeStamp(buffer))")
let duration = CMSampleBufferGetDuration(buffer)
let offsetBuffer = CMSampleBuffer.createSampleBuffer(fromSampleBuffer: buffer, withTimeOffset: time, duration: duration)
if(offsetBuffer != nil) {
print("Added audio")
self.audioWriterInput.append(offsetBuffer!)
}
else {
print("Not adding audio")
}
}
VideoWriter.ci += 1
}
func addImage(image: UIImage, withPresentationTime presentationTime: CMTime) -> Bool {
precondition(pixelBufferAdaptor != nil, "Call start() to initialze the writer")
//1
let pixelBuffer = VideoWriter.pixelBufferFromImage(image: image, pixelBufferPool: pixelBufferAdaptor.pixelBufferPool!, size: renderSettings.size, alpha: CGImageAlphaInfo.premultipliedFirst)!
return pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime + self.initialTime)
}
}
I was looking a little further into this, and while I could have updated my answer, I'd rather open this tangent separately to keep the ideas apart. Apple states that for an AVVideoComposition: "To use the created video composition for playback, create an AVPlayerItem object from the same asset used as the composition's source, then assign the composition to the player item's videoComposition property. To export the composition to a new movie file, create an AVAssetExportSession object from the same source asset, then assign the composition to the export session's videoComposition property."
https://developer.apple.com/documentation/avfoundation/avasynchronousciimagefilteringrequest
So, what you could try is using the AVPlayer with the ORIGINAL URL, and then applying your filter:
let asset = AVAsset(url: originalURL)
let filter = CIFilter(name: "CIGaussianBlur")!
let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
// Clamp to avoid blurring transparent pixels at the image edges
let source = request.sourceImage.clampedToExtent()
filter.setValue(source, forKey: kCIInputImageKey)
// Vary filter parameters based on video timing
let seconds = CMTimeGetSeconds(request.compositionTime)
filter.setValue(seconds * 10.0, forKey: kCIInputRadiusKey)
// Crop the blurred output to the bounds of the original image
let output = filter.outputImage!.cropped(to: request.sourceImage.extent)
// Provide the filter output to the composition
request.finish(with: output, context: nil)
})
let item = AVPlayerItem(asset: asset)
item.videoComposition = composition
let player = AVPlayer(playerItem: item)
I'm sure you know what to do from here. This may let you filter in "real time". A potential issue is that it runs into the same problem as your original approach: it still takes some time to render each frame, which could lead to a delay between audio and video. However, this may not happen. If you do get this working, then once the user selects their filter, you can use AVAssetExportSession to export that specific videoComposition.
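For the export step, a minimal sketch (reusing asset and composition from the snippet above, with an output path of my own choosing) might look like:
guard let export = AVAssetExportSession(asset: asset, presetName: AVAssetExportPresetHighestQuality) else { return }
export.videoComposition = composition
export.outputFileType = .mov
export.outputURL = FileManager.default.temporaryDirectory.appendingPathComponent("filtered.mov")
export.exportAsynchronously {
    switch export.status {
    case .completed:
        print("Exported to \(export.outputURL!)")
    default:
        print("Export failed: \(String(describing: export.error))")
    }
}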
More here if you need help!

How to add image watermarker on video in iOS Swift 4?

I have been trying to add an image overlay on a video and save the video with the overlay to the device library.
let myURL = "https://clips.vorwaerts-gmbh.de/big_buck_bunny.mp4"
let url = URL(string:myURL)
player = AVPlayer(url: url!)
avpController.player = player
avpController.player?.play()
avpController.showsPlaybackControls = false
avpController.view.frame.size.height = videoView.frame.size.height
avpController.view.frame.size.width = videoView.frame.size.width
self.videoView.addSubview(avpController.view)
How can I add an image overlay on the video? Any help is much appreciated.
I have created this:
import UIKit
import AVFoundation
import AVKit
class ViewController: UIViewController {
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view.
shareClicked()
}
func shareClicked() {
let url = URL(string:"https://clips.vorwaerts-gmbh.de/big_buck_bunny.mp4")!
DispatchQueue.global(qos: .background).async {
if let urlData = try? Data(contentsOf: url) {
let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true).first!
let fileUrl = URL(fileURLWithPath: documentsPath).appendingPathComponent("fffffff.mp4")
if FileManager.default.fileExists(atPath:fileUrl.path) {
try? FileManager.default.removeItem(at:fileUrl)
print("removed")
}
try? urlData.write(to: fileUrl)
self.merge(video: fileUrl.path, withForegroundImage:UIImage(named: "images.png")!, completion: { (uuu) in
DispatchQueue.main.async {
self.play(uuu!)
}
})
}
}
}
func play(_ url : URL) {
DispatchQueue.main.async {
let vc = AVPlayerViewController()
vc.player = AVPlayer(url: url)
vc.player?.externalPlaybackVideoGravity = AVLayerVideoGravity.resizeAspect
self.present(vc, animated: true, completion: nil)
}
}
private func addAudioTrack(composition: AVMutableComposition, videoUrl: URL) {
let videoUrlAsset = AVURLAsset(url: videoUrl, options: nil)
let audioTracks = videoUrlAsset.tracks(withMediaType: AVMediaType.audio)
let compositionAudioTrack:AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: CMPersistentTrackID())!
for audioTrack in audioTracks {
try! compositionAudioTrack.insertTimeRange(audioTrack.timeRange, of: audioTrack, at: CMTime.zero)
}
}
func merge(
video videoPath: String,
withForegroundImage foregroundImage: UIImage,
completion: @escaping (URL?) -> Void) -> () {
let videoUrl = URL(fileURLWithPath: videoPath)
let videoUrlAsset = AVURLAsset(url: videoUrl, options: nil)
// Setup `mutableComposition` from the existing video
let mutableComposition = AVMutableComposition()
let videoAssetTrack = videoUrlAsset.tracks(withMediaType: AVMediaType.video).first!
let videoCompositionTrack = mutableComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
videoCompositionTrack?.preferredTransform = videoAssetTrack.preferredTransform
try! videoCompositionTrack?.insertTimeRange(CMTimeRange(start:CMTime.zero, duration:videoAssetTrack.timeRange.duration), of: videoAssetTrack, at: CMTime.zero)
addAudioTrack(composition: mutableComposition, videoUrl: videoUrl)
let videoSize: CGSize = (videoCompositionTrack?.naturalSize)!
let frame = CGRect(x: 0.0, y: 0.0, width: videoSize.width, height: videoSize.height)
let imageLayer = CALayer()
imageLayer.contents = foregroundImage.cgImage
imageLayer.frame = CGRect(x: 0.0, y: 0.0, width:50, height:50)
let videoLayer = CALayer()
videoLayer.frame = frame
let animationLayer = CALayer()
animationLayer.frame = frame
animationLayer.addSublayer(videoLayer)
animationLayer.addSublayer(imageLayer)
let videoComposition = AVMutableVideoComposition(propertiesOf: (videoCompositionTrack?.asset!)!)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: animationLayer)
let documentDirectory = NSSearchPathForDirectoriesInDomains(FileManager.SearchPathDirectory.cachesDirectory, FileManager.SearchPathDomainMask.userDomainMask, true).first!
let documentDirectoryUrl = URL(fileURLWithPath: documentDirectory)
let destinationFilePath = documentDirectoryUrl.appendingPathComponent("result.mp4")
do {
if FileManager.default.fileExists(atPath: destinationFilePath.path) {
try FileManager.default.removeItem(at: destinationFilePath)
print("removed")
}
} catch {
print(error)
}
let exportSession = AVAssetExportSession( asset: mutableComposition, presetName: AVAssetExportPresetHighestQuality)!
exportSession.videoComposition = videoComposition
exportSession.outputURL = destinationFilePath
exportSession.outputFileType = AVFileType.mp4
exportSession.exportAsynchronously { [weak exportSession] in
if let strongExportSession = exportSession {
completion(strongExportSession.outputURL!)
//self.play(strongExportSession.outputURL!)
}
}
}
}
And you will get this result
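Since the question also asks about saving to the device library, here is a minimal sketch for that step (exportedURL stands for the URL delivered in the merge completion, and NSPhotoLibraryAddUsageDescription must be set in Info.plist):
import Photos

PHPhotoLibrary.shared().performChanges({
    // exportedURL: the URL passed to the merge(...) completion handler above
    _ = PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: exportedURL)
}, completionHandler: { saved, error in
    print("Saved to library: \(saved), error: \(String(describing: error))")
})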

compressing Video Error: Terminated due to memory issue

I want to first trim a video chosen from the photo library, and then compress the video file to a custom size and bitrate. I'm using PryntTrimmerView to trim the video, and then compress the trimmed result.
I successfully export the trimmed asset and then get the compressed file. When I choose a short video from the gallery there is no problem, but when I choose a large video I get this error in the console after compressing:
Message from debugger: Terminated due to memory issue
Here is my code for trimming and compressing the video file.
func prepareAssetComposition() throws {
topActivity.isHidden = false
topActivity.startAnimating()
confirmButton.isUserInteractionEnabled = false
//get asset and track
guard let asset = trimmerView.asset, let videoTrack = asset.tracks(withMediaType: AVMediaTypeVideo).first else {
return
}
let assetComposition = AVMutableComposition()
let start = trimmerView.startTime?.seconds
let end = trimmerView.endTime?.seconds
let startTime = CMTime(seconds: Double(start ?? 0), preferredTimescale: 1000)
let endTime = CMTime(seconds: Double(end ?? 0), preferredTimescale: 1000)
let trackTimeRange = CMTimeRange(start: startTime, end: endTime)
let videoCompositionTrack = assetComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
try videoCompositionTrack.insertTimeRange(trackTimeRange, of: videoTrack, at: kCMTimeZero)
if let audioTrack = asset.tracks(withMediaType: AVMediaTypeAudio).first {
let audioCompositionTrack = assetComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
try audioCompositionTrack.insertTimeRange(trackTimeRange, of: audioTrack, at: kCMTimeZero)
}
//set video oriention to portrati
let size = videoTrack.naturalSize
let txf = videoTrack.preferredTransform
var recordType = ""
if (size.width == txf.tx && size.height == txf.ty){
recordType = "UIInterfaceOrientationLandscapeRight"
}else if (txf.tx == 0 && txf.ty == 0){
recordType = "UIInterfaceOrientationLandscapeLeft"
}else if (txf.tx == 0 && txf.ty == size.width){
recordType = "UIInterfaceOrientationPortraitUpsideDown"
}else{
recordType = "UIInterfaceOrientationPortrait"
}
if recordType == "UIInterfaceOrientationPortrait" {
let t1: CGAffineTransform = CGAffineTransform(translationX: videoTrack.naturalSize.height, y: -(videoTrack.naturalSize.width - videoTrack.naturalSize.height)/2)
let t2: CGAffineTransform = t1.rotated(by: CGFloat(Double.pi / 2))
let finalTransform: CGAffineTransform = t2
videoCompositionTrack.preferredTransform = finalTransform
}else if recordType == "UIInterfaceOrientationLandscapeRight" {
let t1: CGAffineTransform = CGAffineTransform(translationX: videoTrack.naturalSize.height, y: -(videoTrack.naturalSize.width - videoTrack.naturalSize.height)/2)
let t2: CGAffineTransform = t1.rotated(by: -CGFloat(Double.pi))
let finalTransform: CGAffineTransform = t2
videoCompositionTrack.preferredTransform = finalTransform
}else if recordType == "UIInterfaceOrientationPortraitUpsideDown" {
let t1: CGAffineTransform = CGAffineTransform(translationX: videoTrack.naturalSize.height, y: -(videoTrack.naturalSize.width - videoTrack.naturalSize.height)/2)
let t2: CGAffineTransform = t1.rotated(by: -CGFloat(Double.pi/2))
let finalTransform: CGAffineTransform = t2
videoCompositionTrack.preferredTransform = finalTransform
}
//start exporting video
var name = ""
var url: URL!
if self.state == .Left {
url = URL(fileURLWithPath: "\(NSTemporaryDirectory())TrimmedMovie1.mp4")
name = "TrimmedMovie1.mp4"
}else if state == .Right {
url = URL(fileURLWithPath: "\(NSTemporaryDirectory())TrimmedMovie3.mp4")
name = "TrimmedMovie3.mp4"
}else if state == .Center {
url = URL(fileURLWithPath: "\(NSTemporaryDirectory())TrimmedMovie2.mp4")
name = "TrimmedMovie2.mp4"
}
try? FileManager.default.removeItem(at: url)
let exportSession = AVAssetExportSession(asset: assetComposition, presetName: AVAssetExportPresetHighestQuality)
if UIDevice.current.userInterfaceIdiom == .phone {
exportSession?.outputFileType = AVFileTypeQuickTimeMovie
}else {
exportSession?.outputFileType = AVFileTypeQuickTimeMovie
}
exportSession?.shouldOptimizeForNetworkUse = true
exportSession?.outputURL = url
exportSession?.exportAsynchronously(completionHandler: {
DispatchQueue.main.async {
if let url = exportSession?.outputURL, exportSession?.status == .completed {
let asset = AVAsset(url: url)
print(asset.duration)
var thump: UIImage?
var vData: Data?
if let img = asset.videoThumbnail {
thump = img
if recordType == "UIInterfaceOrientationPortrait" {
if thump != nil {
let img = UIImage(cgImage: thump!.cgImage!, scale: CGFloat(1.0), orientation: .right)
thump = img
thump = thump?.fixedOrientation()
}
}else if recordType == "UIInterfaceOrientationLandscapeRight" {
if thump != nil {
let img = UIImage(cgImage: thump!.cgImage!, scale: CGFloat(1.0), orientation: .down)
thump = img
thump = thump?.fixedOrientation()
}
}else if recordType == "UIInterfaceOrientationPortraitUpsideDown" {
if thump != nil {
let img = UIImage(cgImage: thump!.cgImage!, scale: CGFloat(1.0), orientation: .left)
thump = img
thump = thump?.fixedOrientation()
}
}
}
if let videoData = NSData(contentsOf: url) {
vData = videoData as Data
}
if let delegate = self.delegate {
self.playbackTimeCheckerTimer?.invalidate()
self.playButton.setImage(#imageLiteral(resourceName: "play"), for: .normal)
self.playbackTimeCheckerTimer = nil
let size = CGSize(width: 1280, height: 720)
let directoryURL: URL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let folderPath: URL = directoryURL.appendingPathComponent(name, isDirectory: false)
do {
try vData?.write(to: folderPath, options: [])
}
catch {
print(error.localizedDescription)
}
self.compress(fileName:name,videoPath: folderPath.path, exportVideoPath: folderPath.path, renderSize: size, completion: {res in
if res {
OperationQueue.main.addOperation {
self.topActivity.isHidden = true
self.topActivity.stopAnimating()
self.confirmButton.isUserInteractionEnabled = true
delegate.setVideoFromPath(path: folderPath.path, thump: thump, videoData: vData)
self.dismiss(animated: true, completion: nil)
return
}
}else {
print("can not compress")
}
})
}
} else {
self.topActivity.isHidden = true
self.topActivity.stopAnimating()
self.confirmButton.isUserInteractionEnabled = true
let error = exportSession?.error
print("error exporting video \(String(describing: error))")
}
}
})
}
private func existsFileAtUrl(url:String,name:String) -> Bool {
let path = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0] as String
let url = URL(fileURLWithPath: path)
let filePath = url.appendingPathComponent(name).path
let fileManager = FileManager.default
return fileManager.fileExists(atPath: filePath)
}
//MARK: Compress
func compress(fileName: String, videoPath: String, exportVideoPath: String, renderSize: CGSize, completion: @escaping (Bool) -> ()) {
let videoUrl = URL(fileURLWithPath: videoPath)
if (!existsFileAtUrl(url: videoUrl.absoluteString,name:fileName)) {
completion(false)
return
}
let videoAssetUrl = AVURLAsset(url: videoUrl)
let videoTrackArray = videoAssetUrl.tracks(withMediaType: AVMediaTypeVideo)
if videoTrackArray.count < 1 {
completion(false)
return
}
let videoAssetTrack = videoTrackArray[0]
let audioTrackArray = videoAssetUrl.tracks(withMediaType: AVMediaTypeAudio)
if audioTrackArray.count < 1 {
completion(false)
return
}
let audioAssetTrack = audioTrackArray[0]
let outputUrl = URL(fileURLWithPath: exportVideoPath)
var videoWriter = try? AVAssetWriter(url: outputUrl, fileType: AVFileTypeQuickTimeMovie)
videoWriter?.shouldOptimizeForNetworkUse = true
let vSetting = videoSettings(size: renderSize)
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: vSetting)
videoWriterInput.expectsMediaDataInRealTime = false
videoWriterInput.transform = videoAssetTrack.preferredTransform
videoWriter?.add(videoWriterInput)
// output readers
let videoReaderSettings : [String : Int] = [kCVPixelBufferPixelFormatTypeKey as String : Int(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange)]
let videoReaderOutput = AVAssetReaderTrackOutput(track: videoAssetTrack, outputSettings: videoReaderSettings)
let videoReader = try! AVAssetReader(asset: videoAssetUrl)
videoReader.add(videoReaderOutput)
videoWriter?.startWriting()
videoReader.startReading()
videoWriter?.startSession(atSourceTime: kCMTimeZero)
let processingVideoQueue = DispatchQueue(label: "processingVideoCompressionQueue")
videoWriterInput.requestMediaDataWhenReady(on: processingVideoQueue, using: {
while(videoWriterInput.isReadyForMoreMediaData){
let sampleVideoBuffer = videoReaderOutput.copyNextSampleBuffer()
if (videoReader.status == .reading && sampleVideoBuffer != nil) {
videoWriterInput.append(sampleVideoBuffer!)
}else {
videoWriterInput.markAsFinished()
if (videoReader.status == .completed) {
videoWriter?.finishWriting(completionHandler: {
videoWriter = nil
completion(true)
})
}
}
}
})
}
//MARK: Setting
func videoSettings(size : CGSize) -> [String : AnyObject] {
var compressionSettings = [String : AnyObject]()
compressionSettings[AVVideoAverageBitRateKey] = 5 as AnyObject
var settings = [String : AnyObject]()
settings[AVVideoCompressionPropertiesKey] = compressionSettings as AnyObject
settings[AVVideoCodecKey] = AVVideoCodecH264 as AnyObject?
settings[AVVideoHeightKey] = size.height as AnyObject?
settings[AVVideoWidthKey] = size.width as AnyObject?
return settings
}
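One thing worth flagging in the settings above: AVVideoAverageBitRateKey is expressed in bits per second, so a value of 5 is almost certainly far lower than intended and will produce an unusable encode. A sketch with a more realistic figure (the 5 Mbps value is an illustrative assumption, not from the original answer):
func videoSettings(size: CGSize) -> [String: AnyObject] {
    var compressionSettings = [String: AnyObject]()
    // bits per second; ~5 Mbps is a reasonable starting point for 720p H.264
    compressionSettings[AVVideoAverageBitRateKey] = 5_000_000 as AnyObject
    var settings = [String: AnyObject]()
    settings[AVVideoCompressionPropertiesKey] = compressionSettings as AnyObject
    settings[AVVideoCodecKey] = AVVideoCodecH264 as AnyObject
    settings[AVVideoHeightKey] = size.height as AnyObject
    settings[AVVideoWidthKey] = size.width as AnyObject
    return settings
}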
I found the issue: the problem is the while statement. When I dismissed the view controller, that statement kept getting called repeatedly and I got this error. Now I break out of the while loop when the view controller is dismissed, and everything works fine.
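Concretely, that means leaving the requestMediaDataWhenReady loop in compress(...) above once the input has been marked as finished, instead of letting the while statement keep spinning. A sketch of that loop with the break added:
videoWriterInput.requestMediaDataWhenReady(on: processingVideoQueue, using: {
    while videoWriterInput.isReadyForMoreMediaData {
        if videoReader.status == .reading,
            let sampleVideoBuffer = videoReaderOutput.copyNextSampleBuffer() {
            videoWriterInput.append(sampleVideoBuffer)
        } else {
            videoWriterInput.markAsFinished()
            if videoReader.status == .completed {
                videoWriter?.finishWriting(completionHandler: {
                    videoWriter = nil
                    completion(true)
                })
            }
            break // stop looping once there is nothing left to append
        }
    }
})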

iOS swift memory issue during a while-loop inside a closure: AVAssetWriter

My app makes a video file from many images that are produced in code.
When my code has finished making an image, it puts it in myImage and toggles isImageReady to true. When self.i is set (or changed), a property observer starts making another image, and finally self.iReset is set to true when there are no more images to produce.
But the app is terminated due to a memory issue during the while-loop. I have commented out the if-statement that actually assembles the video frames, and it still has the memory issue, so I think the problem lives in the while-loop inside the requestMediaDataWhenReadyOnQueue:usingBlock closure.
I have no idea how to solve the problem. Please help me.
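For context, the image-producing side described above is not shown in the question; a minimal sketch of what it presumably looks like is below (the names i, isImageReady, iReset and myImage come from the question, the bodies are assumed):
import UIKit

// Hypothetical reconstruction of the flag-based producer described in the question.
final class FrameProducer {
    var myImage: UIImage?          // the most recently rendered frame
    var isImageReady = false       // toggled to true once myImage is ready to be appended
    var iReset = false             // set to true when there are no more images to produce

    var i: Int = 1 {
        didSet {                   // property observer: advancing the index renders the next image
            renderNextImage()
        }
    }

    private func renderNextImage() {
        // ... draw the next frame into myImage (omitted) ...
        isImageReady = true
    }
}
The writer-side code from the question is as follows: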
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
let fps: Int32 = 30
let frameDuration = CMTimeMake(1, fps)
var lastFrameTime:CMTime = CMTime()
var presentationTime:CMTime = CMTime()
while (self.iReset != true) {
if videoWriterInput.readyForMoreMediaData && self.isImageReady {
lastFrameTime = CMTimeMake(Int64(self.i), fps)
presentationTime = self.i == 1 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
//commented out for tracking
/* if !self.appendPixelBufferForImage(self.myImage, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
error = NSError(
domain: kErrorDomain,
code: kFailedToAppendPixelBufferError,
userInfo: [
"description": "AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer",
"rawError": videoWriter.error ?? "(none)"])
break
} */
self.isImageReady = false
self.i++
}// if ..&&..
} //while loop ends
// Finish writing
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
if error == nil {
print("Finished Making a Movie !!")
success(videoOutputURL)
}
self.videoWriter = nil
}
}) // requestMediaDataWhenReadyOnQueue ends
}
Probably too late, but I had a similar issue (images in a loop) that caused me a major headache. My solution was to put an autoreleasepool inside the loop, which solved it.
As @C. Carter suggested, you can use autoreleasepool to free the used memory, like below:
autoreleasepool {
/* your code */
}
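Applied to the loop from the question, the pool wraps the body of each iteration so the temporary objects created while preparing and appending a frame are drained every pass instead of piling up until the closure returns. A sketch in the question's syntax (the pixel-buffer append stays elided, as in the question):
while (self.iReset != true) {
    autoreleasepool {
        if videoWriterInput.readyForMoreMediaData && self.isImageReady {
            lastFrameTime = CMTimeMake(Int64(self.i), fps)
            presentationTime = self.i == 1 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
            // append the pixel buffer for self.myImage here
            self.isImageReady = false
            self.i += 1
        }
    }
}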
Other than that, here is code with which I make a movie from UIImages and audio; it works perfectly fine.
func build(chosenPhotos: [UIImage], audioURL: NSURL, completion: @escaping (NSURL) -> ()) {
showLoadingIndicator(appDel.window!.rootViewController!.view)
let outputSize = CGSizeMake(640, 480)
var choosenPhotos = chosenPhotos
let fileManager = NSFileManager.defaultManager()
let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
guard let documentDirectory: NSURL = urls.first else {
fatalError("documentDir Error")
}
let videoOutputURL = documentDirectory.URLByAppendingPathComponent("OutputVideo.mp4")
print("Video Output URL \(videoOutputURL)")
if NSFileManager.defaultManager().fileExistsAtPath(videoOutputURL.path!) {
do {
try NSFileManager.defaultManager().removeItemAtPath(videoOutputURL.path!)
} catch {
fatalError("Unable to delete file: \(error) : \(#function).")
}
}
guard let videoWriter = try? AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeMPEG4) else {
fatalError("AVAssetWriter error")
}
let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(float: Float(outputSize.width)), AVVideoHeightKey : NSNumber(float: Float(outputSize.height))]
guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(float: Float(outputSize.width)), kCVPixelBufferHeightKey as String: NSNumber(float: Float(outputSize.height))]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if videoWriter.canAddInput(videoWriterInput) {
videoWriter.addInput(videoWriterInput)
}
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
let fps: Int32 = 1
let frameDuration = CMTimeMake(1, fps)
var frameCount: Int64 = 0
var appendSucceeded = true
while (!choosenPhotos.isEmpty) {
if (videoWriterInput.readyForMoreMediaData) {
let nextPhoto = choosenPhotos.removeAtIndex(0)
let lastFrameTime = CMTimeMake(frameCount, fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer where status == 0 {
let managedPixelBuffer = pixelBuffer
CVPixelBufferLockBaseAddress(managedPixelBuffer, 0)
let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGBitmapContextCreate(data, Int(outputSize.width), Int(outputSize.height), 8, CVPixelBufferGetBytesPerRow(managedPixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)
CGContextClearRect(context, CGRectMake(0, 0, CGFloat(outputSize.width), CGFloat(outputSize.height)))
let horizontalRatio = CGFloat(outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(outputSize.height) / nextPhoto.size.height
//aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize:CGSize = CGSizeMake(nextPhoto.size.width * aspectRatio, nextPhoto.size.height * aspectRatio)
let x = newSize.width < outputSize.width ? (outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < outputSize.height ? (outputSize.height - newSize.height) / 2 : 0
CGContextDrawImage(context, CGRectMake(x, y, newSize.width, newSize.height), nextPhoto.CGImage)
CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0)
appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
} else {
print("Failed to allocate pixel buffer")
appendSucceeded = false
}
}
if !appendSucceeded {
break
}
frameCount += 1
}
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
print("FINISHED!!!!!")
self.compileToMakeMovie(videoOutputURL, audioOutPutURL: audioURL, completion: { url in
completion(url)
})
}
})
}
}
func compileToMakeMovie(videoOutputURL: NSURL, audioOutPutURL: NSURL, completion: (NSURL) -> ()){
let mixComposition = AVMutableComposition()
let fileManager = NSFileManager.defaultManager()
let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
guard let documentDirectory: NSURL = urls.first else {
fatalError("documentDir Error")
}
let actualVideoURl = documentDirectory.URLByAppendingPathComponent("OutputVideoMusic.mp4")
print("Video Output URL \(actualVideoURl)")
if NSFileManager.defaultManager().fileExistsAtPath(actualVideoURl.path!) {
do {
try NSFileManager.defaultManager().removeItemAtPath(actualVideoURl.path!)
} catch {
fatalError("Unable to delete file: \(error) : \(#function).")
}
}
let nextClipStartTime = kCMTimeZero
let videoAsset = AVURLAsset(URL: videoOutputURL)
let video_timeRange = CMTimeRangeMake(kCMTimeZero,videoAsset.duration)
let a_compositionVideoTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
try! a_compositionVideoTrack.insertTimeRange(video_timeRange, ofTrack: videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0], atTime: nextClipStartTime)
let audioAsset = AVURLAsset(URL: audioOutPutURL)
let audio_timeRange = CMTimeRangeMake(kCMTimeZero,audioAsset.duration)
let b_compositionAudioTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try b_compositionAudioTrack.insertTimeRange(audio_timeRange, ofTrack: audioAsset.tracksWithMediaType(AVMediaTypeAudio)[0], atTime: nextClipStartTime)
}catch _ {}
let assetExport = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
assetExport?.outputFileType = AVFileTypeQuickTimeMovie
assetExport?.outputURL = actualVideoURl
assetExport?.exportAsynchronouslyWithCompletionHandler({
completion(actualVideoURl)
})
}
Let me know if you're still facing the issue.
