Compressing video error: Terminated due to memory issue - iOS

I want to first trim a video chosen from the photo library, and then compress the video file to a custom size and bitrate. I'm using PryntTrimmerView to trim the video, and then use the trimmed video for compression.
I successfully export the trimmed asset and then get the compressed file. When I choose a short video from the gallery there is no problem, but when I choose a large video I get this error in the console after compressing:
Message from debugger: Terminated due to memory issue
Here is my code for trimming and compressing the video file:
func prepareAssetComposition() throws {
topActivity.isHidden = false
topActivity.startAnimating()
confirmButton.isUserInteractionEnabled = false
//get asset and track
guard let asset = trimmerView.asset, let videoTrack = asset.tracks(withMediaType: AVMediaTypeVideo).first else {
return
}
let assetComposition = AVMutableComposition()
let start = trimmerView.startTime?.seconds
let end = trimmerView.endTime?.seconds
let startTime = CMTime(seconds: Double(start ?? 0), preferredTimescale: 1000)
let endTime = CMTime(seconds: Double(end ?? 0), preferredTimescale: 1000)
let trackTimeRange = CMTimeRange(start: startTime, end: endTime)
let videoCompositionTrack = assetComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
try videoCompositionTrack.insertTimeRange(trackTimeRange, of: videoTrack, at: kCMTimeZero)
if let audioTrack = asset.tracks(withMediaType: AVMediaTypeAudio).first {
let audioCompositionTrack = assetComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
try audioCompositionTrack.insertTimeRange(trackTimeRange, of: audioTrack, at: kCMTimeZero)
}
//set video orientation to portrait
let size = videoTrack.naturalSize
let txf = videoTrack.preferredTransform
var recordType = ""
if (size.width == txf.tx && size.height == txf.ty){
recordType = "UIInterfaceOrientationLandscapeRight"
}else if (txf.tx == 0 && txf.ty == 0){
recordType = "UIInterfaceOrientationLandscapeLeft"
}else if (txf.tx == 0 && txf.ty == size.width){
recordType = "UIInterfaceOrientationPortraitUpsideDown"
}else{
recordType = "UIInterfaceOrientationPortrait"
}
if recordType == "UIInterfaceOrientationPortrait" {
let t1: CGAffineTransform = CGAffineTransform(translationX: videoTrack.naturalSize.height, y: -(videoTrack.naturalSize.width - videoTrack.naturalSize.height)/2)
let t2: CGAffineTransform = t1.rotated(by: CGFloat(Double.pi / 2))
let finalTransform: CGAffineTransform = t2
videoCompositionTrack.preferredTransform = finalTransform
}else if recordType == "UIInterfaceOrientationLandscapeRight" {
let t1: CGAffineTransform = CGAffineTransform(translationX: videoTrack.naturalSize.height, y: -(videoTrack.naturalSize.width - videoTrack.naturalSize.height)/2)
let t2: CGAffineTransform = t1.rotated(by: -CGFloat(Double.pi))
let finalTransform: CGAffineTransform = t2
videoCompositionTrack.preferredTransform = finalTransform
}else if recordType == "UIInterfaceOrientationPortraitUpsideDown" {
let t1: CGAffineTransform = CGAffineTransform(translationX: videoTrack.naturalSize.height, y: -(videoTrack.naturalSize.width - videoTrack.naturalSize.height)/2)
let t2: CGAffineTransform = t1.rotated(by: -CGFloat(Double.pi/2))
let finalTransform: CGAffineTransform = t2
videoCompositionTrack.preferredTransform = finalTransform
}
//start exporting video
var name = ""
var url: URL!
if self.state == .Left {
url = URL(fileURLWithPath: "\(NSTemporaryDirectory())TrimmedMovie1.mp4")
name = "TrimmedMovie1.mp4"
}else if state == .Right {
url = URL(fileURLWithPath: "\(NSTemporaryDirectory())TrimmedMovie3.mp4")
name = "TrimmedMovie3.mp4"
}else if state == .Center {
url = URL(fileURLWithPath: "\(NSTemporaryDirectory())TrimmedMovie2.mp4")
name = "TrimmedMovie2.mp4"
}
try? FileManager.default.removeItem(at: url)
let exportSession = AVAssetExportSession(asset: assetComposition, presetName: AVAssetExportPresetHighestQuality)
if UIDevice.current.userInterfaceIdiom == .phone {
exportSession?.outputFileType = AVFileTypeQuickTimeMovie
}else {
exportSession?.outputFileType = AVFileTypeQuickTimeMovie
}
exportSession?.shouldOptimizeForNetworkUse = true
exportSession?.outputURL = url
exportSession?.exportAsynchronously(completionHandler: {
DispatchQueue.main.async {
if let url = exportSession?.outputURL, exportSession?.status == .completed {
let asset = AVAsset(url: url)
print(asset.duration)
var thump: UIImage?
var vData: Data?
if let img = asset.videoThumbnail {
thump = img
if recordType == "UIInterfaceOrientationPortrait" {
if thump != nil {
let img = UIImage(cgImage: thump!.cgImage!, scale: CGFloat(1.0), orientation: .right)
thump = img
thump = thump?.fixedOrientation()
}
}else if recordType == "UIInterfaceOrientationLandscapeRight" {
if thump != nil {
let img = UIImage(cgImage: thump!.cgImage!, scale: CGFloat(1.0), orientation: .down)
thump = img
thump = thump?.fixedOrientation()
}
}else if recordType == "UIInterfaceOrientationPortraitUpsideDown" {
if thump != nil {
let img = UIImage(cgImage: thump!.cgImage!, scale: CGFloat(1.0), orientation: .left)
thump = img
thump = thump?.fixedOrientation()
}
}
}
if let videoData = NSData(contentsOf: url) {
vData = videoData as Data
}
if let delegate = self.delegate {
self.playbackTimeCheckerTimer?.invalidate()
self.playButton.setImage(#imageLiteral(resourceName: "play"), for: .normal)
self.playbackTimeCheckerTimer = nil
let size = CGSize(width: 1280, height: 720)
if let videoData = NSData(contentsOf: url) {
vData = videoData as Data
}
let directoryURL: URL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let folderPath: URL = directoryURL.appendingPathComponent(name, isDirectory: true)
do {
try vData?.write(to: folderPath, options: [])
}
catch {
print(error.localizedDescription)
}
self.compress(fileName:name,videoPath: folderPath.path, exportVideoPath: folderPath.path, renderSize: size, completion: {res in
if res {
OperationQueue.main.addOperation {
self.topActivity.isHidden = true
self.topActivity.stopAnimating()
self.confirmButton.isUserInteractionEnabled = true
delegate.setVideoFromPath(path: folderPath.path, thump: thump, videoData: vData)
self.dismiss(animated: true, completion: nil)
return
}
}else {
print("can not compress")
}
})
}
} else {
self.topActivity.isHidden = true
self.topActivity.stopAnimating()
self.confirmButton.isUserInteractionEnabled = true
let error = exportSession?.error
print("error exporting video \(String(describing: error))")
}
}
})
}
private func existsFileAtUrl(url:String,name:String) -> Bool {
let path = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0] as String
let url = URL(fileURLWithPath: path)
let filePath = url.appendingPathComponent(name).path
let fileManager = FileManager.default
if fileManager.fileExists(atPath: filePath) {
return true
} else {
return false
}
}
//MARK: Compress
func compress(fileName:String,videoPath : String, exportVideoPath : String, renderSize : CGSize, completion : @escaping (Bool) -> ()) {
let videoUrl = URL(fileURLWithPath: videoPath)
if (!existsFileAtUrl(url: videoUrl.absoluteString,name:fileName)) {
completion(false)
return
}
let videoAssetUrl = AVURLAsset(url: videoUrl)
let videoTrackArray = videoAssetUrl.tracks(withMediaType: AVMediaTypeVideo)
if videoTrackArray.count < 1 {
completion(false)
return
}
let videoAssetTrack = videoTrackArray[0]
let audioTrackArray = videoAssetUrl.tracks(withMediaType: AVMediaTypeAudio)
if audioTrackArray.count < 1 {
completion(false)
return
}
let audioAssetTrack = audioTrackArray[0]
let outputUrl = URL(fileURLWithPath: exportVideoPath)
var videoWriter = try? AVAssetWriter(url: outputUrl, fileType: AVFileTypeQuickTimeMovie)
videoWriter?.shouldOptimizeForNetworkUse = true
let vSetting = videoSettings(size: renderSize)
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: vSetting)
videoWriterInput.expectsMediaDataInRealTime = false
videoWriterInput.transform = videoAssetTrack.preferredTransform
videoWriter?.add(videoWriterInput)
// output readers
let videoReaderSettings : [String : Int] = [kCVPixelBufferPixelFormatTypeKey as String : Int(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange)]
let videoReaderOutput = AVAssetReaderTrackOutput(track: videoAssetTrack, outputSettings: videoReaderSettings)
let videoReader = try! AVAssetReader(asset: videoAssetUrl)
videoReader.add(videoReaderOutput)
videoWriter?.startWriting()
videoReader.startReading()
videoWriter?.startSession(atSourceTime: kCMTimeZero)
let processingVideoQueue = DispatchQueue(label: "processingVideoCompressionQueue")
videoWriterInput.requestMediaDataWhenReady(on: processingVideoQueue, using: {
while(videoWriterInput.isReadyForMoreMediaData){
let sampleVideoBuffer = videoReaderOutput.copyNextSampleBuffer()
if (videoReader.status == .reading && sampleVideoBuffer != nil) {
videoWriterInput.append(sampleVideoBuffer!)
}else {
videoWriterInput.markAsFinished()
if (videoReader.status == .completed) {
videoWriter?.finishWriting(completionHandler: {
videoWriter = nil
completion(true)
})
}
}
}
})
}
//MARK: Setting
func videoSettings(size : CGSize) -> [String : AnyObject] {
var compressionSettings = [String : AnyObject]()
compressionSettings[AVVideoAverageBitRateKey] = 5 as AnyObject // AVVideoAverageBitRateKey is specified in bits per second
var settings = [String : AnyObject]()
settings[AVVideoCompressionPropertiesKey] = compressionSettings as AnyObject
settings[AVVideoCodecKey] = AVVideoCodecH264 as AnyObject?
settings[AVVideoHeightKey] = size.height as AnyObject?
settings[AVVideoWidthKey] = size.width as AnyObject?
return settings
}

I found the issue: the problem is the while statement. When I dismiss the view controller, this statement keeps getting called and I get this error. Now, when I want to dismiss the view controller, I stop the while loop with a break and everything works fine.
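For reference, a rough sketch of that kind of fix inside the compress function's requestMediaDataWhenReady loop (the isCancelled flag is hypothetical; it would be set when the view controller is dismissed):
videoWriterInput.requestMediaDataWhenReady(on: processingVideoQueue, using: {
while videoWriterInput.isReadyForMoreMediaData {
// Hypothetical flag, set on dismissal, so the loop stops feeding samples.
if self.isCancelled {
videoReader.cancelReading()
videoWriterInput.markAsFinished()
videoWriter?.cancelWriting()
break
}
if let sampleBuffer = videoReaderOutput.copyNextSampleBuffer(), videoReader.status == .reading {
videoWriterInput.append(sampleBuffer)
} else {
// No more samples: finish exactly once and leave the loop.
videoWriterInput.markAsFinished()
if videoReader.status == .completed {
videoWriter?.finishWriting(completionHandler: {
videoWriter = nil
completion(true)
})
}
break
}
}
})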

Related

Swift - Add watermark to a video is very slow

Here is my code that adds image and text overlays to a local video. The problem is that it's extremely SLOW. Any ideas how to fix it?
Also, I would appreciate it if you could suggest third-party libraries that can do watermarking.
public func addWatermark(
fromVideoAt videoURL: URL,
watermark: Watermark,
fileName: String,
onSuccess: @escaping (URL) -> Void,
onFailure: @escaping ((Error?) -> Void)
) {
let asset = AVURLAsset(url: videoURL)
let composition = AVMutableComposition()
guard
let compositionTrack = composition.addMutableTrack(
withMediaType: .video,
preferredTrackID: kCMPersistentTrackID_Invalid
),
let assetTrack = asset.tracks(withMediaType: .video).first
else {
onFailure(nil)
return
}
do {
let timeRange = CMTimeRange(start: .zero, duration: assetTrack.timeRange.duration)
try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
if let audioAssetTrack = asset.tracks(withMediaType: .audio).first,
let compositionAudioTrack = composition.addMutableTrack(
withMediaType: .audio,
preferredTrackID: kCMPersistentTrackID_Invalid
) {
try compositionAudioTrack.insertTimeRange(
timeRange,
of: audioAssetTrack,
at: .zero
)
}
} catch {
onFailure(error)
return
}
compositionTrack.preferredTransform = assetTrack.preferredTransform
let videoInfo = orientation(from: assetTrack.preferredTransform)
let videoSize: CGSize
if videoInfo.isPortrait {
videoSize = CGSize(
width: assetTrack.naturalSize.height,
height: assetTrack.naturalSize.width
)
} else {
videoSize = assetTrack.naturalSize
}
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: videoSize)
let overlayLayer = CALayer()
overlayLayer.frame = CGRect(origin: .zero, size: videoSize)
videoLayer.frame = CGRect(x: 0, y: 0, width: videoSize.width, height: videoSize.height)
let imageFrame = watermark.calculateImageFrame(parentSize: videoSize)
addImage(watermark.image, to: overlayLayer, frame: imageFrame)
let textOrigin = CGPoint(x: imageFrame.minX + 4, y: imageFrame.minY)
if let text = watermark.text {
addText(
text,
to: overlayLayer,
origin: textOrigin,
textAttributes: Watermark.textAttributes(type: watermark.type)
)
}
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(overlayLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = videoSize
videoComposition.frameDuration = CMTime(value: 1, timescale: 60)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
postProcessingAsVideoLayer: videoLayer,
in: outputLayer
)
videoComposition.colorPrimaries = AVVideoColorPrimaries_ITU_R_709_2
videoComposition.colorTransferFunction = "sRGB"
videoComposition.colorYCbCrMatrix = nil
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(start: .zero, duration: composition.duration)
videoComposition.instructions = [instruction]
let layerInstruction = compositionLayerInstruction(
for: compositionTrack,
assetTrack: assetTrack
)
instruction.layerInstructions = [layerInstruction]
guard let export = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPresetHighestQuality
)
else {
onFailure(nil)
return
}
let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
.appendingPathComponent(fileName)
.appendingPathExtension("mov")
export.videoComposition = videoComposition
export.outputFileType = .mov
export.outputURL = exportURL
export.exportAsynchronously {
DispatchQueue.main.async {
switch export.status {
case .completed:
onSuccess(exportURL)
default:
onFailure(export.error)
}
}
}
}
Watermark is the wrapper struct. It contains image/text, text attributes, size and other similar helpful information.
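The struct itself isn't shown; purely for illustration, a hypothetical shape that would be consistent with how it is used above might be:
import UIKit
struct Watermark {
enum Kind { case standard } // hypothetical type, only used to pick text attributes
let image: UIImage
let text: String?
let type: Kind
// Hypothetical placement: anchor the image near a corner at ~20% of the parent width.
func calculateImageFrame(parentSize: CGSize) -> CGRect {
let width = parentSize.width * 0.2
let height = width * image.size.height / image.size.width
return CGRect(x: 16, y: 16, width: width, height: height)
}
static func textAttributes(type: Kind) -> [NSAttributedString.Key: Any] {
return [.font: UIFont.systemFont(ofSize: 36), .foregroundColor: UIColor.white]
}
}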
I've tried the following, without any luck:
- export.shouldOptimizeForNetworkUse = true. It did not work.
- AVAssetExportPresetPassthrough instead of AVAssetExportPresetHighestQuality. It removed the overlays.
I have the following code, which is relatively fast. It watermarks an 8 second video in about 2.56 seconds. When I ran it under the Metal System Trace instrument it appeared to be well balanced and GPU-accelerated the whole time. You just call exportIt().
As a side matter, this code uses async/await wrappers around the AVKit functions and migrates off any interfaces deprecated as of iOS 16.
A tidied up and working sample app with resource files is https://github.com/faisalmemon/watermark
The core code is as follows:
//
// WatermarkHelper.swift
// watermark
//
// Created by Faisal Memon on 09/02/2023.
//
import Foundation
import AVKit
struct WatermarkHelper {
enum WatermarkError: Error {
case cannotLoadResources
case cannotAddTrack
case cannotLoadVideoTrack(Error?)
case cannotCopyOriginalAudioVideo(Error?)
case noVideoTrackPresent
case exportSessionCannotBeCreated
}
func compositionAddMediaTrack(_ composition: AVMutableComposition, withMediaType mediaType: AVMediaType) throws -> AVMutableCompositionTrack {
guard let compositionTrack = composition.addMutableTrack(
withMediaType: mediaType,
preferredTrackID: kCMPersistentTrackID_Invalid) else {
throw WatermarkError.cannotAddTrack
}
return compositionTrack
}
func loadTrack(inputVideo: AVAsset, withMediaType mediaType: AVMediaType) async throws -> AVAssetTrack? {
return try await withCheckedThrowingContinuation({
(continuation: CheckedContinuation<AVAssetTrack?, Error>) in
inputVideo.loadTracks(withMediaType: mediaType) { tracks, error in
if let tracks = tracks {
continuation.resume(returning: tracks.first)
} else {
continuation.resume(throwing: WatermarkError.cannotLoadVideoTrack(error))
}
}
})
}
func bringOverVideoAndAudio(inputVideo: AVAsset, assetTrack: AVAssetTrack, compositionTrack: AVMutableCompositionTrack, composition: AVMutableComposition) async throws {
do {
let timeRange = await CMTimeRange(start: .zero, duration: try inputVideo.load(.duration))
try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
if let audioAssetTrack = try await loadTrack(inputVideo: inputVideo, withMediaType: .audio) {
let compositionAudioTrack = try compositionAddMediaTrack(composition, withMediaType: .audio)
try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
}
} catch {
print(error)
throw WatermarkError.cannotCopyOriginalAudioVideo(error)
}
}
private func orientation(from transform: CGAffineTransform) -> (orientation: UIImage.Orientation, isPortrait: Bool) {
var assetOrientation = UIImage.Orientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
func preferredTransformAndSize(compositionTrack: AVMutableCompositionTrack, assetTrack: AVAssetTrack) async throws -> (preferredTransform: CGAffineTransform, videoSize: CGSize) {
let transform = try await assetTrack.load(.preferredTransform)
let videoInfo = orientation(from: transform)
let videoSize: CGSize
let naturalSize = try await assetTrack.load(.naturalSize)
if videoInfo.isPortrait {
videoSize = CGSize(
width: naturalSize.height,
height: naturalSize.width)
} else {
videoSize = naturalSize
}
return (transform, videoSize)
}
private func compositionLayerInstruction(for track: AVCompositionTrack, assetTrack: AVAssetTrack, preferredTransform: CGAffineTransform) -> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
instruction.setTransform(preferredTransform, at: .zero)
return instruction
}
private func addImage(to layer: CALayer, watermark: UIImage, videoSize: CGSize) {
let imageLayer = CALayer()
let aspect: CGFloat = watermark.size.width / watermark.size.height
let width = videoSize.width
let height = width / aspect
imageLayer.frame = CGRect(
x: 0,
y: -height * 0.15,
width: width,
height: height)
imageLayer.contents = watermark.cgImage
layer.addSublayer(imageLayer)
}
func composeVideo(composition: AVMutableComposition, videoComposition: AVMutableVideoComposition, compositionTrack: AVMutableCompositionTrack, assetTrack: AVAssetTrack, preferredTransform: CGAffineTransform) {
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(
start: .zero,
duration: composition.duration)
videoComposition.instructions = [instruction]
let layerInstruction = compositionLayerInstruction(
for: compositionTrack,
assetTrack: assetTrack, preferredTransform: preferredTransform)
instruction.layerInstructions = [layerInstruction]
}
func exportSession(composition: AVMutableComposition, videoComposition: AVMutableVideoComposition, outputURL: URL) throws -> AVAssetExportSession {
guard let export = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPresetHighestQuality)
else {
print("Cannot create export session.")
throw WatermarkError.exportSessionCannotBeCreated
}
export.videoComposition = videoComposition
export.outputFileType = .mp4
export.outputURL = outputURL
return export
}
func executeSession(_ session: AVAssetExportSession) async throws -> AVAssetExportSession.Status {
return try await withCheckedThrowingContinuation({
(continuation: CheckedContinuation<AVAssetExportSession.Status, Error>) in
session.exportAsynchronously {
DispatchQueue.main.async {
if let error = session.error {
continuation.resume(throwing: error)
} else {
continuation.resume(returning: session.status)
}
}
}
})
}
func addWatermarkTopDriver(inputVideo: AVAsset, outputURL: URL, watermark: UIImage) async throws -> AVAssetExportSession.Status {
let composition = AVMutableComposition()
let compositionTrack = try compositionAddMediaTrack(composition, withMediaType: .video)
guard let videoAssetTrack = try await loadTrack(inputVideo: inputVideo, withMediaType: .video) else {
throw WatermarkError.noVideoTrackPresent
}
try await bringOverVideoAndAudio(inputVideo: inputVideo, assetTrack: videoAssetTrack, compositionTrack: compositionTrack, composition: composition)
let transformAndSize = try await preferredTransformAndSize(compositionTrack: compositionTrack, assetTrack: videoAssetTrack)
compositionTrack.preferredTransform = transformAndSize.preferredTransform
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: transformAndSize.videoSize)
let overlayLayer = CALayer()
overlayLayer.frame = CGRect(origin: .zero, size: transformAndSize.videoSize)
addImage(to: overlayLayer, watermark: watermark, videoSize: transformAndSize.videoSize)
let outputLayer = CALayer()
outputLayer.frame = CGRect(origin: .zero, size: transformAndSize.videoSize)
outputLayer.addSublayer(videoLayer)
outputLayer.addSublayer(overlayLayer)
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = transformAndSize.videoSize
videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(
postProcessingAsVideoLayer: videoLayer,
in: outputLayer)
composeVideo(composition: composition, videoComposition: videoComposition, compositionTrack: compositionTrack, assetTrack: videoAssetTrack, preferredTransform: transformAndSize.preferredTransform)
let session = try exportSession(composition: composition, videoComposition: videoComposition, outputURL: outputURL)
return try await executeSession(session)
}
/// Creates a watermarked movie and saves it to the documents directory.
///
/// For an 8 second video (251 frames), this code takes 2.56 seconds on iPhone 11 producing a high quality video at 30 FPS.
/// - Returns: Time interval taken for processing.
public func exportIt() async throws -> TimeInterval {
let timeStart = Date()
guard
let filePath = Bundle.main.path(forResource: "donut-spinning", ofType: "mp4"),
let docUrl = try? FileManager.default.url(for: .documentDirectory, in: .userDomainMask, appropriateFor: nil, create: true),
let watermarkImage = UIImage(systemName: "seal") else {
throw WatermarkError.cannotLoadResources
}
let videoAsset = AVAsset(url: URL(filePath: filePath))
let outputURL = docUrl.appending(component: "watermark-donut-spinning.mp4")
try? FileManager.default.removeItem(at: outputURL)
print(outputURL)
let result = try await addWatermarkTopDriver(inputVideo: videoAsset, outputURL: outputURL, watermark: watermarkImage)
let timeEnd = Date()
let duration = timeEnd.timeIntervalSince(timeStart)
print(result)
return duration
}
}
Use the method below to add a watermark to a video very quickly:
func addWatermark(inputURL: URL, outputURL: URL, handler:@escaping (_ exportSession: AVAssetExportSession?)-> Void) {
let mixComposition = AVMutableComposition()
let asset = AVAsset(url: inputURL)
let videoTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
let timerange = CMTimeRangeMake(start: CMTime.zero, duration: asset.duration)
let compositionVideoTrack:AVMutableCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))!
do {
try compositionVideoTrack.insertTimeRange(timerange, of: videoTrack, at: CMTime.zero)
compositionVideoTrack.preferredTransform = videoTrack.preferredTransform
} catch {
print(error)
}
let watermarkFilter = CIFilter(name: "CISourceOverCompositing")!
let watermarkImage = CIImage(image: UIImage(named: "waterMark")!)
let videoComposition = AVVideoComposition(asset: asset) { (filteringRequest) in
let source = filteringRequest.sourceImage.clampedToExtent()
watermarkFilter.setValue(source, forKey: "inputBackgroundImage")
let transform = CGAffineTransform(translationX: filteringRequest.sourceImage.extent.width - (watermarkImage?.extent.width)! - 2, y: 0)
watermarkFilter.setValue(watermarkImage?.transformed(by: transform), forKey: "inputImage")
filteringRequest.finish(with: watermarkFilter.outputImage!, context: nil)
}
guard let exportSession = AVAssetExportSession(asset: asset, presetName: AVAssetExportPreset640x480) else {
handler(nil)
return
}
exportSession.outputURL = outputURL
exportSession.outputFileType = AVFileType.mp4
exportSession.shouldOptimizeForNetworkUse = true
exportSession.videoComposition = videoComposition
exportSession.exportAsynchronously { () -> Void in
handler(exportSession)
}
}
Call this method when you want to add the watermark:
let outputURL = NSURL.fileURL(withPath: "TempPath")
let inputURL = NSURL.fileURL(withPath: "VideoWithWatermarkPath")
addWatermark(inputURL: inputURL, outputURL: outputURL, handler: { (exportSession) in
guard let session = exportSession else {
// Error
return
}
switch session.status {
case .completed:
guard NSData(contentsOf: outputURL) != nil else {
// Error
return
}
// Now you can find the video with the watermark in the location outputURL
default:
// Error
break
}
})
Have you checked out Apple's documentation? It shows how to add a title layer (CALayer) on top of an existing AVMutableComposition or AVAsset. It's a legacy doc from iOS 6, so you'll need to refactor a bit, but it should be fast on today's hardware.
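Purely as a rough modern-Swift sketch of that title-layer idea (it assumes a videoComposition and videoSize like the ones built in the answers above; values are placeholders):
let titleLayer = CATextLayer()
titleLayer.string = "My Title"
titleLayer.fontSize = 60
titleLayer.alignmentMode = .center
titleLayer.frame = CGRect(x: 0, y: videoSize.height * 0.8, width: videoSize.width, height: 100)
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: videoSize)
let parentLayer = CALayer()
parentLayer.frame = CGRect(origin: .zero, size: videoSize)
parentLayer.addSublayer(videoLayer)
parentLayer.addSublayer(titleLayer)
// The tool renders each video frame into videoLayer and composites the whole parentLayer tree into the output.
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)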

AVAssetExportSession export video AVFoundationErrorDomain Code=-11841 error

I'm trying to export a video using the following code. It works fine for the first 3 attempts and then fails after that. I am trying to add recorded voices over a video. I am pretty new to all these concepts, so any help will be appreciated.
open func generate(video url: URL, with frame: CGRect? = nil, byApplying transformation: CGAffineTransform? = nil, in previewArea: CGRect? = nil, previewCornerRadius: Float = 0, overlayImage: UIImage? = nil, setOverlayAsBackground: Bool = false, gifLayer: CALayer? = nil, audioUrl: URL? = nil, muteAudio: Bool = false, success: @escaping ((URL) -> Void), failure: @escaping ((Error) -> Void)) {
let mixComposition: AVMutableComposition = AVMutableComposition()
var mutableCompositionVideoTrack: AVMutableCompositionTrack? = nil
var mutableCompositionOriginalAudioTrack: AVMutableCompositionTrack? = nil
var mutableCompositionAudioTrack: AVMutableCompositionTrack? = nil
let totalVideoCompositionInstruction : AVMutableVideoCompositionInstruction = AVMutableVideoCompositionInstruction()
let aVideoAsset: AVAsset = AVAsset(url: url)
var aAudioAsset: AVAsset? = nil
if let url = audioUrl {
aAudioAsset = AVAsset(url: url)
}
if let videoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid) {
mutableCompositionVideoTrack = videoTrack
if aAudioAsset != nil, let audioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
mutableCompositionAudioTrack = audioTrack
}
if !muteAudio, aVideoAsset.hasAudio, let originalAudioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
/// If original audio present
mutableCompositionOriginalAudioTrack = originalAudioTrack
}
}
do {
var originalVideoSize: CGSize = self.prefferedVideoSize
let ratio = self.prefferedVideoSize.width / Utility.get9By16ScreenSize().width
if let aVideoAssetTrack: AVAssetTrack = aVideoAsset.tracks(withMediaType: .video).first {
originalVideoSize = aVideoAssetTrack.naturalSize
var transforms = aVideoAssetTrack.preferredTransform
if aVideoAsset.videoOrientation().orientation == .landscapeLeft || aVideoAsset.videoOrientation().orientation == .landscapeRight {
let ratio = self.prefferedVideoSize.width / originalVideoSize.width
let centerY: CGFloat = (self.prefferedVideoSize.height - (originalVideoSize.height * ratio)) / 2
transforms = transforms.concatenating(CGAffineTransform(translationX: 0, y: centerY).scaledBy(x: ratio, y: ratio))
}
try mutableCompositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration), of: aVideoAssetTrack, at: CMTime.zero)
if !muteAudio, aVideoAsset.hasAudio, let audioAssetTrack: AVAssetTrack = aVideoAsset.tracks(withMediaType: .audio).first {
try mutableCompositionOriginalAudioTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: audioAssetTrack.timeRange.duration), of: audioAssetTrack, at: CMTime.zero)
}
if let audioAsset = aAudioAsset, let aAudioAssetTrack: AVAssetTrack = audioAsset.tracks(withMediaType: .audio).first {
try mutableCompositionAudioTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: aAudioAssetTrack.timeRange.duration), of: aAudioAssetTrack, at: CMTime.zero)
}
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration)
let mixVideoTrack = mixComposition.tracks(withMediaType: AVMediaType.video)[0]
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: mixVideoTrack)
layerInstruction.setTransform(transforms, at: CMTime.zero)
totalVideoCompositionInstruction.layerInstructions = [layerInstruction]
}
let mutableVideoComposition: AVMutableVideoComposition = AVMutableVideoComposition()
mutableVideoComposition.frameDuration = CMTimeMake(value: 1, timescale: 12)
mutableVideoComposition.renderSize = self.prefferedVideoSize
mutableVideoComposition.instructions = [totalVideoCompositionInstruction]
let parentLayer = CALayer()
parentLayer.frame = self.prefferedVideoRect
parentLayer.isGeometryFlipped = true
let videoLayer = CALayer()
videoLayer.contentsGravity = .resizeAspect
videoLayer.contentsScale = 1
videoLayer.frame = self.prefferedVideoRect
if let frame = frame {
let scalledFrame = frame.scale(by: ratio)
videoLayer.frame = scalledFrame
let videoContainerLayer = CALayer()
parentLayer.frame = self.prefferedVideoRect
parentLayer.addSublayer(videoContainerLayer)
videoContainerLayer.addSublayer(videoLayer)
if let transformation = transformation {
if let previewFrame = previewArea {
let maskLayer = CALayer()
maskLayer.backgroundColor = UIColor.black.cgColor
let scalledMaskFrame = previewFrame.scale(by: ratio)
maskLayer.frame = scalledMaskFrame
maskLayer.cornerRadius = previewCornerRadius.cgFloat
maskLayer.masksToBounds = true
videoContainerLayer.mask = maskLayer
}
videoLayer.transform = CATransform3DMakeAffineTransform(transformation)
}
} else {
parentLayer.addSublayer(videoLayer)
}
/// Add overlay if overlay image present
if let image = overlayImage {
let imageLayer = CALayer()
imageLayer.contents = image.cgImage
imageLayer.frame = self.prefferedVideoRect
imageLayer.masksToBounds = true
if setOverlayAsBackground {
parentLayer.insertSublayer(imageLayer, at: 0)
} else {
parentLayer.addSublayer(imageLayer)
}
}
/// Add overlay if overlay image present
if let overlay = gifLayer {
overlay.frame = CGRect(origin: CGPoint(x: (self.prefferedVideoSize.width - overlay.frame.width) / 2, y: (self.prefferedVideoSize.height - overlay.frame.height) / 2), size: overlay.frame.size)
overlay.transform = CATransform3DMakeAffineTransform(CGAffineTransform(scaleX: ratio, y: ratio))
parentLayer.addSublayer(overlay)
}
mutableVideoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)
let outputURL = URL(fileURLWithPath: NSTemporaryDirectory()).appendingPathComponent("tmp-rendered-video-R6S9K2B4.m4v")
self.exportVideo(from: mixComposition, toFile: outputURL, with: mutableVideoComposition, success: success, failure: failure)
} catch{
DCDebug.print(error)
failure(error)
}
}
func exportVideo(from composition: AVComposition, toFile output: URL, with videoComposition: AVVideoComposition? = nil, success: @escaping ((URL) -> Void), failure: @escaping ((Error) -> Void)) {
do {
if FileManager.default.fileExists(atPath: output.path) {
try FileManager.default.removeItem(at: output)
}
if let exportSession = AVAssetExportSession(asset: composition, presetName: self.presetName ?? AVAssetExportPresetHighestQuality) {
exportSession.outputURL = output
exportSession.outputFileType = AVFileType.mp4
exportSession.shouldOptimizeForNetworkUse = true
if let videoComposition = videoComposition {
exportSession.videoComposition = videoComposition
}
/// try to export the file and handle the status cases
exportSession.exportAsynchronously(completionHandler: {
DispatchQueue.main.async {
switch exportSession.status {
case .completed:
success(output)
case .failed:
if let _error = exportSession.error {
failure(_error)
}
case .cancelled:
if let _error = exportSession.error {
failure(_error)
}
default:
success(output)
}
}
})
} else {
failure(VideoMakerError(error: .kFailedToStartAssetExportSession))
}
} catch {
DCDebug.print(error)
failure(error)
}
}
I am getting the following error:
Domain=AVFoundationErrorDomain Code=-11841 "Operation Stopped" UserInfo={NSLocalizedDescription=Operation Stopped, NSLocalizedFailureReason=The video could not be composed.}
The following are the time range values when the export fails:
(lldb) po aVideoAsset.tracks(withMediaType: .audio).first?.timeRange.duration
▿ Optional<CMTime>
▿ some : CMTime
- value : 1852
- timescale : 600
▿ flags : CMTimeFlags
- rawValue : 1
- epoch : 0
(lldb) po aVideoAsset.tracks(withMediaType: .video).first?.timeRange.duration
▿ Optional<CMTime>
▿ some : CMTime
- value : 1800
- timescale : 600
▿ flags : CMTimeFlags
- rawValue : 1
- epoch : 0
I solved this issue by replacing the following line
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration)
with
if let originalAudioTrack = mutableCompositionOriginalAudioTrack, originalAudioTrack.timeRange.duration > aVideoAssetTrack.timeRange.duration, !muteAudio, aVideoAsset.hasAudio {
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: originalAudioTrack.timeRange.duration)
} else {
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: aVideoAssetTrack.timeRange.duration)
}
This solved my issue, but I am not sure if this is the correct solution to the problem or just a hack, so if anyone can provide a proper explanation of this issue and a valid solution other than this, the bounty is yours.
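One way to express the same idea more generally (just a sketch, not verified against the project above) is to size the instruction to the composition itself after all tracks have been inserted, so it always covers the longest track:
// An AVMutableComposition's duration is the end time of its longest track,
// so an instruction sized to it cannot end before the audio track does.
totalVideoCompositionInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: mixComposition.duration)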

Swift: Output file of AssetWriter not available to AVAsset on first creation

I am creating a video out of an image and then need to access it right away. I am creating it by adding an imageBuffer of the image to an AVAssetWriter. The video is created here:
while !adaptor.assetWriterInput.isReadyForMoreMediaData { usleep(10) }
let first = adaptor.append(buffer, withPresentationTime: startFrameTime)
while !adaptor.assetWriterInput.isReadyForMoreMediaData { usleep(10) }
let second = adaptor.append(buffer, withPresentationTime: endFrameTime)
videoWriterInput.markAsFinished()
videoWriter.finishWriting {
completion(videoWriter.error)
}
Both first and second return true (successful appends) and there is no error on videoWriter. The video has successfully been created. I can successfully "retrieve" the asset upon its completion with
makeVideo(image, urlDestination) { error in
guard error == nil else { return }
let imageAsset = AVAsset(url: url)
guard
let imageTrack = self.composition.addMutableTrack(
withMediaType: .video,
preferredTrackID: kCMPersistentTrackID_Invalid),
let imageVideoTrack = imageAsset.tracks(withMediaType: .video).first else {
assertionFailure()
return
}
try! imageTrack.insertTimeRange(
CMTimeRangeMake(start: .zero, duration: self.duration),
of: imageVideoTrack,
at: .zero
)
let imageVideoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: imageTrack)
}
However, the first time I try to access it I don't get any video (there is a video track with the right duration, but with no video of any sort being displayed). If I add it to a PreviewController I get nothing. However, if I dismiss the PreviewController, and access the asset a second time, then it is successful.
My first thought was that this is a potential timing issue, but even if I add a delay it fails the first time.
Any thoughts? Keep in mind that this code works when the file at the url already exists; just not right after it is made.
Edit:
The above are the parts of code I think are pertinent to this question. Fuller code is as below:
private func filePath() -> URL {
let fileManager = FileManager.default
let urls = fileManager.urls(for: .documentDirectory, in: .userDomainMask)
guard let documentDirectory = urls.first else {
fatalError("documentDir Error")
}
return documentDirectory
}
class VideoComposer {
let composition = AVMutableComposition()
let mainInstruction = AVMutableVideoCompositionInstruction()
let duration: CMTime
let videoSize: CGSize
var viewSizeMultiplier: CGFloat = 5.0
init(view: UIView) {
videoSize = CGSize(width: 1772.0, height: 3840.0)
viewSizeMultiplier = 1772.0 / view.frame.width
self.duration = CMTime(seconds: 15, preferredTimescale: 600)
mainInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: self.duration)
view.subviews.reversed().forEach { subview in
if let imageView = subview as? UIImageView {
addImage(of: imageView)
}
else {
print("unhandled view type")
}
}
}
func createVideo(completion: @escaping (AVAssetExportSession) -> Void) {
// make video composition
let videoComposition = AVMutableVideoComposition()
videoComposition.instructions = [mainInstruction]
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 60)
videoComposition.renderSize = videoSize
export(videoComposition: videoComposition) { (session) in
completion(session)
}
}
private func export(videoComposition: AVMutableVideoComposition, completion: @escaping (AVAssetExportSession) -> Void) {
// export
let url = filePath().appendingPathComponent("output.mov")
let fileManager = FileManager.default
if fileManager.fileExists(atPath: url.path) {
try! fileManager.removeItem(at: url)
}
guard let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else {
assertionFailure()
return
}
exporter.videoComposition = videoComposition
exporter.outputFileType = .mov
exporter.outputURL = url
exporter.exportAsynchronously {
DispatchQueue.main.async {
completion(exporter)
}
}
}
private func addImage(of imageView: UIImageView) {
guard let image = imageView.image else {
assertionFailure("no image")
return
}
let movieLength = TimeInterval(duration.seconds)
let url = filePath().appendingPathComponent("image.mov")
ImageVideoCreator.writeSingleImageToMovie(image: image, movieLength: movieLength, outputFileURL: url) { [weak self] success in
guard let `self` = self else {
return
}
let imageAsset = AVAsset(url: url)
let keys = ["playable", "readable", "composable", "tracks", "exportable"]
var error: NSError? = nil
imageAsset.loadValuesAsynchronously(forKeys: keys, completionHandler: {
DispatchQueue.main.async {
keys.forEach({ key in
let status = imageAsset.statusOfValue(forKey: key, error: &error)
switch status {
case .loaded:
print("loaded. \(error)")
case .loading:
print("loading. \(error)")
case .failed:
print("failed. \(error)")
case .cancelled:
print("cancelled. \(error)")
case .unknown:
print("unknown. \(error)")
}
})
guard
let imageTrack = self.composition.addMutableTrack(
withMediaType: .video,
preferredTrackID: kCMPersistentTrackID_Invalid),
let imageVideoTrack = imageAsset.tracks(withMediaType: .video).first
else {
assertionFailure()
return
}
try! imageTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: self.duration), of: imageVideoTrack, at: .zero)
let imageVideoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: imageTrack)
print("image")
self.setTransform(on: imageVideoLayerInstruction, of: imageView, andOf: imageVideoTrack)
self.mainInstruction.layerInstructions.append(imageVideoLayerInstruction)
}
})
}
}
}
class ViewController: UIViewController {
var composer: VideoComposer?
let player = AVPlayerViewController()
override func viewDidLoad() {
super.viewDidLoad()
guard let pathUrl = Bundle.main.url(forResource: "SampleVideo_1280x720_1mb", withExtension: "mp4") else {
assertionFailure()
return
}
let image = UIImage(named: "image")
let imageView = UIImageView(image: image)
view.addSubview(imageView)
imageView.translatesAutoresizingMaskIntoConstraints = false
imageView.topAnchor.constraint(equalTo: view.topAnchor, constant: 0).isActive = true
imageView.leadingAnchor.constraint(equalTo: view.leadingAnchor, constant: 0).isActive = true
imageView.widthAnchor.constraint(equalToConstant: image!.size.width / 4).isActive = true
imageView.heightAnchor.constraint(equalToConstant: image!.size.height / 4).isActive = true
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
composer = VideoComposer(view: view)
composer?.createVideo() { exporter in
self.didFinish(session: exporter)
}
}
func didFinish(session: AVAssetExportSession) {
guard let url = session.outputURL else {
assertionFailure()
return
}
self.showVideo(videoUrl: url)
}
func showVideo(videoUrl: URL) {
let videoPlayer = AVPlayer(url: videoUrl)
player.player = videoPlayer
self.present(player, animated: true) {
self.player.player?.play()
}
}
}
class ImageVideoCreator {
private static func pixelBuffer(fromImage image: CGImage, size: CGSize) -> CVPixelBuffer? {
let options: CFDictionary = [kCVPixelBufferCGImageCompatibilityKey as String: true, kCVPixelBufferCGBitmapContextCompatibilityKey as String: true] as CFDictionary
var pxbuffer: CVPixelBuffer? = nil
let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(size.width), Int(size.height), kCVPixelFormatType_32ARGB, options, &pxbuffer)
guard let buffer = pxbuffer, status == kCVReturnSuccess else {
return nil
}
CVPixelBufferLockBaseAddress(buffer, [])
guard let pxdata = CVPixelBufferGetBaseAddress(buffer) else {
return nil
}
let bytesPerRow = CVPixelBufferGetBytesPerRow(buffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
guard let context = CGContext(data: pxdata, width: Int(size.width), height: Int(size.height), bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue) else {
return nil
}
context.concatenate(CGAffineTransform(rotationAngle: 0))
context.draw(image, in: CGRect(x: 0, y: 0, width: size.width, height: size.height))
CVPixelBufferUnlockBaseAddress(buffer, [])
return buffer
}
static func writeSingleImageToMovie(image: UIImage, movieLength: TimeInterval, outputFileURL: URL, completion: @escaping (Bool) -> ()) {
let fileManager = FileManager.default
if fileManager.fileExists(atPath: outputFileURL.path) {
try! fileManager.removeItem(at: outputFileURL)
}
do {
let imageSize = image.size
let videoWriter = try AVAssetWriter(outputURL: outputFileURL, fileType: AVFileType.mov)
let videoSettings: [String: Any] = [AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoWidthKey: imageSize.width,
AVVideoHeightKey: imageSize.height]
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
let adaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: nil)
if !videoWriter.canAdd(videoWriterInput) {
completion(false)
return
}
videoWriterInput.expectsMediaDataInRealTime = true
videoWriter.add(videoWriterInput)
videoWriter.startWriting()
let timeScale: Int32 = 600 // recommended in CMTime for movies.
let startFrameTime = CMTimeMake(value: 0, timescale: 600)
let endFrameTime = CMTimeMakeWithSeconds(movieLength, preferredTimescale: timeScale)
videoWriter.startSession(atSourceTime: startFrameTime)
guard let cgImage = image.cgImage else {
completion(false)
return
}
let buffer: CVPixelBuffer = self.pixelBuffer(fromImage: cgImage, size: imageSize)!
while !adaptor.assetWriterInput.isReadyForMoreMediaData { usleep(10) }
let first = adaptor.append(buffer, withPresentationTime: startFrameTime)
while !adaptor.assetWriterInput.isReadyForMoreMediaData { usleep(10) }
let second = adaptor.append(buffer, withPresentationTime: endFrameTime)
videoWriterInput.markAsFinished()
videoWriter.finishWriting {
completion(true)
}
} catch {
completion(false)
}
}
}

Swift Merge AVAsset-Videos array

I want to merge the AVAssets in arrayVideos into one single video and save it to the camera roll. Raywenderlich.com has a great tutorial where two videos are merged into one. I've created the following code, however the video that I get after exporting to the camera roll includes only the first and the last video from the array (excluding the rest of the videos in the middle of arrayVideos). Am I missing something here?
var arrayVideos = [AVAsset]() //Videos Array
var atTimeM: CMTime = CMTimeMake(0, 0)
var lastAsset: AVAsset!
var layerInstructionsArray = [AVVideoCompositionLayerInstruction]()
var completeTrackDuration: CMTime = CMTimeMake(0, 1)
var videoSize: CGSize = CGSize(width: 0.0, height: 0.0)
func mergeVideoArray(){
let mixComposition = AVMutableComposition()
for videoAsset in arrayVideos{
let videoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
if videoAsset == arrayVideos.first{
atTimeM = kCMTimeZero
} else{
atTimeM = lastAsset!.duration
}
try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0], at: atTimeM)
videoSize = videoTrack.naturalSize
} catch let error as NSError {
print("error: \(error)")
}
completeTrackDuration = CMTimeAdd(completeTrackDuration, videoAsset.duration)
let videoInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
if videoAsset != arrayVideos.last{
videoInstruction.setOpacity(0.0, at: videoAsset.duration)
}
layerInstructionsArray.append(videoInstruction)
lastAsset = videoAsset
}
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, completeTrackDuration)
mainInstruction.layerInstructions = layerInstructionsArray
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(1, 30)
mainComposition.renderSize = CGSize(width: videoSize.width, height: videoSize.height)
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: NSDate() as Date)
let savePath = (documentDirectory as NSString).appendingPathComponent("mergeVideo-\(date).mov")
let url = NSURL(fileURLWithPath: savePath)
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter!.outputURL = url as URL
exporter!.outputFileType = AVFileTypeQuickTimeMovie
exporter!.shouldOptimizeForNetworkUse = true
exporter!.videoComposition = mainComposition
exporter!.exportAsynchronously {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: exporter!.outputURL!)
}) { saved, error in
if saved {
let alertController = UIAlertController(title: "Your video was successfully saved", message: nil, preferredStyle: .alert)
let defaultAction = UIAlertAction(title: "OK", style: .default, handler: nil)
alertController.addAction(defaultAction)
self.present(alertController, animated: true, completion: nil)
} else{
print("video erro: \(error)")
}
}
}
}
You need to track the total time for all of the assets and update it for each video.
The code in your question was overwriting atTimeM with just the previous asset's duration rather than the running total. That's why only the first and last got included.
It will look something like this:
...
var totalTime : CMTime = CMTimeMake(0, 0)
func mergeVideoArray() {
let mixComposition = AVMutableComposition()
for videoAsset in arrayVideos {
let videoTrack =
mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
if videoAsset == arrayVideos.first {
atTimeM = kCMTimeZero
} else {
atTimeM = totalTime // <-- Use the total time for all the videos seen so far.
}
try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration),
of: videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0],
at: atTimeM)
videoSize = videoTrack.naturalSize
} catch let error as NSError {
print("error: \(error)")
}
totalTime += videoAsset.duration // <-- Update the total time for all videos.
...
You can remove the use of lastAsset.
Swift 4
Use it like this:
MeargeVide.mergeVideoArray(arrayVideos: arrayAsset) { (urlMeargeVide, error) in
debugPrint("url",urlMeargeVide ?? "")
debugPrint("error",error ?? "")
}
Complete class that handles orientation and merges multiple clips into a single video.
class MeargeVide {
static func orientationFromTransform(_ transform: CGAffineTransform)
-> (orientation: UIImageOrientation, isPortrait: Bool) {
var assetOrientation = UIImageOrientation.up
var isPortrait = false
if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
assetOrientation = .right
isPortrait = true
} else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
assetOrientation = .left
isPortrait = true
} else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
assetOrientation = .up
} else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
assetOrientation = .down
}
return (assetOrientation, isPortrait)
}
static func videoCompositionInstruction(_ track: AVCompositionTrack, asset: AVAsset)
-> AVMutableVideoCompositionLayerInstruction {
let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
let assetTrack = asset.tracks(withMediaType: .video)[0]
let transform = assetTrack.preferredTransform
let assetInfo = orientationFromTransform(transform)
var scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.width
if assetInfo.isPortrait {
scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor), at: kCMTimeZero)
} else {
let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
var concat = assetTrack.preferredTransform.concatenating(scaleFactor)
.concatenating(CGAffineTransform(translationX: 0, y: UIScreen.main.bounds.width / 2))
if assetInfo.orientation == .down {
let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
let windowBounds = UIScreen.main.bounds
let yFix = assetTrack.naturalSize.height + windowBounds.height
let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: yFix)
concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
}
instruction.setTransform(concat, at: kCMTimeZero)
}
return instruction
}
class func mergeVideoArray(arrayVideos:[AVAsset], callBack:@escaping (_ urlGet:URL?,_ errorGet:Error?) -> Void){
var atTimeM: CMTime = CMTimeMake(0, 0)
var lastAsset: AVAsset!
var layerInstructionsArray = [AVVideoCompositionLayerInstruction]()
var completeTrackDuration: CMTime = CMTimeMake(0, 1)
var videoSize: CGSize = CGSize(width: 0.0, height: 0.0)
var totalTime : CMTime = CMTimeMake(0, 0)
let mixComposition = AVMutableComposition.init()
for videoAsset in arrayVideos{
let videoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
if videoAsset == arrayVideos.first {
atTimeM = kCMTimeZero
} else {
atTimeM = totalTime // <-- Use the total time for all the videos seen so far.
}
try videoTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration),
of: videoAsset.tracks(withMediaType: AVMediaType.video)[0],
at: completeTrackDuration)
videoSize = (videoTrack?.naturalSize)!
} catch let error as NSError {
print("error: \(error)")
}
totalTime = CMTimeAdd(totalTime, videoAsset.duration)
completeTrackDuration = CMTimeAdd(completeTrackDuration, videoAsset.duration)
let firstInstruction = self.videoCompositionInstruction(videoTrack!, asset: videoAsset)
firstInstruction.setOpacity(0.0, at: videoAsset.duration)
layerInstructionsArray.append(firstInstruction)
lastAsset = videoAsset
}
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.layerInstructions = layerInstructionsArray
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, completeTrackDuration)
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(1, 30)
mainComposition.renderSize = CGSize(width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
let dateFormatter = DateFormatter()
dateFormatter.dateStyle = .long
dateFormatter.timeStyle = .short
let date = dateFormatter.string(from: NSDate() as Date)
let savePath = (documentDirectory as NSString).appendingPathComponent("mergeVideo-\(date).mov")
let url = NSURL(fileURLWithPath: savePath)
let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter!.outputURL = url as URL
exporter!.outputFileType = AVFileType.mp4
exporter!.shouldOptimizeForNetworkUse = true
exporter!.videoComposition = mainComposition
exporter!.exportAsynchronously {
DispatchQueue.main.async {
callBack(exporter?.outputURL, exporter?.error)
}
}
}
}
You don't need atTimeM at all: since you are simply marching completeTrackDuration along, it is where the next piece should be added. So replace
if videoAsset == arrayVideos.first{
atTimeM = kCMTimeZero
} else{
atTimeM = lastAsset!.duration
}
try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0], at: atTimeM)
with
try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0], at: completeTrackDuration)

iOS swift memory issue during a while-loop inside a closure: AVAssetWriter

My app makes a video file from many images that are produced by code.
When my code has finished making an image and put it in myImage, it toggles isImageReady to true. And when self.i is set (or changed), a property observer starts making another image. Finally, self.iReset is set to true when there are no more images to be produced.
But the app is terminated due to a memory issue during the while-loop. I have commented out the if-statement that actually assembles video frames, and it still has a memory issue, so I think the problem lives in the while-loop inside the requestMediaDataWhenReadyOnQueue:usingBlock closure.
I have no idea how to solve the problem. Please help me.
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
let fps: Int32 = 30
let frameDuration = CMTimeMake(1, fps)
var lastFrameTime:CMTime = CMTime()
var presentationTime:CMTime = CMTime()
while (self.iReset != true) {
if videoWriterInput.readyForMoreMediaData && self.isImageReady {
lastFrameTime = CMTimeMake(Int64(self.i), fps)
presentationTime = self.i == 1 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
//commented out for tracking
/* if !self.appendPixelBufferForImage(self.myImage, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
error = NSError(
domain: kErrorDomain,
code: kFailedToAppendPixelBufferError,
userInfo: [
"description": "AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer",
"rawError": videoWriter.error ?? "(none)"])
break
} */
self.isImageReady = false
self.i++
}// if ..&&..
} //while loop ends
// Finish writing
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
if error == nil {
print("Finished Making a Movie !!")
success(videoOutputURL)
}
self.videoWriter = nil
}
}) // requestMediaDataWhenReadyOnQueue ends
}
Probably too late, but I had a similar issue (images in a loop) which caused me a major headache. My solution was to put an autoreleasepool within the loop, which solved the issue.
As @C. Carter suggested, you can use autoreleasepool to free the used memory like below:
autoreleasepool {
/* your code */
}
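Applied to the loop from the question, it might look roughly like this (a sketch that keeps the same self.i / isImageReady bookkeeping and only wraps each pass in a pool):
while self.iReset != true {
autoreleasepool {
if videoWriterInput.readyForMoreMediaData && self.isImageReady {
lastFrameTime = CMTimeMake(Int64(self.i), fps)
presentationTime = self.i == 1 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
// the appendPixelBufferForImage(...) call would go here, as in the original code
self.isImageReady = false
self.i += 1
}
} // temporaries created during this pass are released here instead of accumulating
}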
Other than that here is a code from which I am making movie using UIImages and Audio which works perfectly fine.
func build(chosenPhotos: [UIImage], audioURL: NSURL, completion: (NSURL) -> ()) {
showLoadingIndicator(appDel.window!.rootViewController!.view)
let outputSize = CGSizeMake(640, 480)
var choosenPhotos = chosenPhotos
let fileManager = NSFileManager.defaultManager()
let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
guard let documentDirectory: NSURL = urls.first else {
fatalError("documentDir Error")
}
let videoOutputURL = documentDirectory.URLByAppendingPathComponent("OutputVideo.mp4")
print("Video Output URL \(videoOutputURL)")
if NSFileManager.defaultManager().fileExistsAtPath(videoOutputURL.path!) {
do {
try NSFileManager.defaultManager().removeItemAtPath(videoOutputURL.path!)
} catch {
fatalError("Unable to delete file: \(error) : \(#function).")
}
}
guard let videoWriter = try? AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeMPEG4) else {
fatalError("AVAssetWriter error")
}
let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(float: Float(outputSize.width)), AVVideoHeightKey : NSNumber(float: Float(outputSize.height))]
guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(float: Float(outputSize.width)), kCVPixelBufferHeightKey as String: NSNumber(float: Float(outputSize.height))]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if videoWriter.canAddInput(videoWriterInput) {
videoWriter.addInput(videoWriterInput)
}
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
let fps: Int32 = 1
let frameDuration = CMTimeMake(1, fps)
var frameCount: Int64 = 0
var appendSucceeded = true
while (!choosenPhotos.isEmpty) {
if (videoWriterInput.readyForMoreMediaData) {
let nextPhoto = choosenPhotos.removeAtIndex(0)
let lastFrameTime = CMTimeMake(frameCount, fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer where status == 0 {
let managedPixelBuffer = pixelBuffer
CVPixelBufferLockBaseAddress(managedPixelBuffer, 0)
let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGBitmapContextCreate(data, Int(outputSize.width), Int(outputSize.height), 8, CVPixelBufferGetBytesPerRow(managedPixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)
CGContextClearRect(context, CGRectMake(0, 0, CGFloat(outputSize.width), CGFloat(outputSize.height)))
let horizontalRatio = CGFloat(outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(outputSize.height) / nextPhoto.size.height
//aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize:CGSize = CGSizeMake(nextPhoto.size.width * aspectRatio, nextPhoto.size.height * aspectRatio)
let x = newSize.width < outputSize.width ? (outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < outputSize.height ? (outputSize.height - newSize.height) / 2 : 0
CGContextDrawImage(context, CGRectMake(x, y, newSize.width, newSize.height), nextPhoto.CGImage)
CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0)
appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
} else {
print("Failed to allocate pixel buffer")
appendSucceeded = false
}
}
if !appendSucceeded {
break
}
frameCount += 1
}
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
print("FINISHED!!!!!")
self.compileToMakeMovie(videoOutputURL, audioOutPutURL: audioURL, completion: { url in
completion(url)
})
}
})
}
}
func compileToMakeMovie(videoOutputURL: NSURL, audioOutPutURL: NSURL, completion: (NSURL) -> ()){
let mixComposition = AVMutableComposition()
let fileManager = NSFileManager.defaultManager()
let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
guard let documentDirectory: NSURL = urls.first else {
fatalError("documentDir Error")
}
let actualVideoURl = documentDirectory.URLByAppendingPathComponent("OutputVideoMusic.mp4")
print("Video Output URL \(actualVideoURl)")
if NSFileManager.defaultManager().fileExistsAtPath(actualVideoURl.path!) {
do {
try NSFileManager.defaultManager().removeItemAtPath(actualVideoURl.path!)
} catch {
fatalError("Unable to delete file: \(error) : \(#function).")
}
}
let nextClipStartTime = kCMTimeZero
let videoAsset = AVURLAsset(URL: videoOutputURL)
let video_timeRange = CMTimeRangeMake(kCMTimeZero,videoAsset.duration)
let a_compositionVideoTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
try! a_compositionVideoTrack.insertTimeRange(video_timeRange, ofTrack: videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0], atTime: nextClipStartTime)
let audioAsset = AVURLAsset(URL: audioOutPutURL)
let audio_timeRange = CMTimeRangeMake(kCMTimeZero,audioAsset.duration)
let b_compositionAudioTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try b_compositionAudioTrack.insertTimeRange(audio_timeRange, ofTrack: audioAsset.tracksWithMediaType(AVMediaTypeAudio)[0], atTime: nextClipStartTime)
}catch _ {}
let assetExport = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
assetExport?.outputFileType = "com.apple.quicktime-movie"
assetExport?.outputURL = actualVideoURl
assetExport?.exportAsynchronouslyWithCompletionHandler({
completion(actualVideoURl)
})
}
Let me know if you're still facing the issue.
