Missing Some Videos When Stitching Using AVFoundation - iOS

I'm trying to merge multiple videos using AVFoundation. It works fine when I add only two videos, but when I try to add three, the second video's track is blank even though the total duration of the exported video is correct.
Here is my sample code. First, the code for adding videos:
func addVideo(videoAsset: AVAsset, isLast: Bool = false)
{
    // Add a video track for this asset
    guard let videoTrack = self.mixComposition.addMutableTrack(
        withMediaType: .video,
        preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
    else { return }
    do
    {
        try videoTrack.insertTimeRange(
            CMTimeRangeMake(start: .zero, duration: videoAsset.duration),
            of: videoAsset.tracks(withMediaType: .video)[0],
            at: totalVideoTrackDuration)

        // Add the layer instruction for this track
        // (VideoHelper fixes the video orientation)
        let instruction = VideoHelper.videoCompositionInstruction(videoTrack, asset: videoAsset)
        if !isLast
        {
            // hide this clip when it's done rendering
            instruction.setOpacity(0.0, at: videoAsset.duration)
        }
        // add to the layer instructions
        self.instructions.append(instruction)

        // keep a running sum of all added track durations
        self.totalVideoTrackDuration = CMTimeAdd(self.totalVideoTrackDuration, videoAsset.duration)
    }
    catch
    {
        print("Failed to load track")
        return
    }
}
Here is the code for exporting videos:
func export()
{
    let mainInstruction = AVMutableVideoCompositionInstruction()
    // set the time range to cover all inserted clips
    mainInstruction.timeRange = CMTimeRangeMake(
        start: .zero,
        duration: self.totalVideoTrackDuration
    )
    mainInstruction.layerInstructions = self.instructions

    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: self.timeScale)
    mainComposition.renderSize = self.videoSize

    guard let documentDirectory = FileManager.default.urls(
        for: .documentDirectory,
        in: .userDomainMask).first
    else { return }

    // build a date-stamped file name
    // (format(_:) is a custom Date extension that returns a formatted string)
    let date = Date().format(format: "MM-dd-yy-HH-mm-ss")!
    let url = documentDirectory.appendingPathComponent("mergeVideo-\(date).mov")

    guard let exporter = AVAssetExportSession(
        asset: self.mixComposition,
        presetName: AVAssetExportPresetHighestQuality)
    else { return }
    exporter.outputURL = url
    exporter.outputFileType = AVFileType.mov
    exporter.shouldOptimizeForNetworkUse = true
    exporter.videoComposition = mainComposition
    exporter.exportAsynchronously {
        PHPhotoLibrary.shared().performChanges({
            PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: exporter.outputURL!)
        }) { saved, error in
            if saved
            {
                print("Export successful")
            }
            else
            {
                print("video error: \(String(describing: error))")
            }
        }
    }
}
Here is how I call the addVideo function:
// 4-second video
self.addVideo(videoAsset: asset1)
// 3-second video
self.addVideo(videoAsset: asset2)
// 4-second video
self.addVideo(videoAsset: asset3, isLast: true)
// export
self.export()
// The total duration of the exported video is 11 seconds, but the middle part is blank.

It turns out the problematic line is:
if !isLast
{
    // hide this clip when it's done rendering
    instruction.setOpacity(0.0, at: videoAsset.duration)
}
// get the sum of all added track durations
self.totalVideoTrackDuration = CMTimeAdd(self.totalVideoTrackDuration, videoAsset.duration)
It should be:
// get the sum of all added track durations
self.totalVideoTrackDuration = CMTimeAdd(self.totalVideoTrackDuration, videoAsset.duration)
if !isLast
{
    // hide this clip at its end time on the composition's timeline
    instruction.setOpacity(0.0, at: self.totalVideoTrackDuration)
}
The documentation for setOpacity(_:at:) is a little confusing. I thought the at parameter was a time within that specific clip's time range, but it is actually a time on the overall composition's timeline.
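As a minimal sketch of that timing rule (my illustration, not from the original post): the cut point for each clip is its cumulative end time, i.e. the running sum of all durations inserted so far.

import AVFoundation

// Returns, for each asset, the composition-timeline time at which that clip
// ends; this is the time to pass to setOpacity(0.0, at:) so the clip
// disappears exactly when the next one starts.
func opacityCutTimes(for assets: [AVAsset]) -> [CMTime] {
    var cursor = CMTime.zero
    return assets.map { asset in
        cursor = CMTimeAdd(cursor, asset.duration)
        return cursor
    }
}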

Related

Swift - Workaround/Alternative to m3u8 to play MP4 segments or merge segments into MP4

I used AVAssetExportSession to download a session URL, but the problem is that you can't download a live stream. To get around that, the live stream is split into 10-second MP4 segments whose URLs are built from an m3u8 playlist. I then use AVAssetExportSession to merge those MP4 segments.
I can merge the clips one by one into a single MP4 file, which is what I want, but the bigger the file gets, the longer each merge takes; since I am dealing with thousands of segments, this becomes impractical.
I thought about using AVPlayerLooper, but then I cannot scrub, rewind, or fast-forward through the MP4 segments as if they were a single video.
Is there a way to combine the MP4 clips so they play as one video, the way the m3u8 does, without merging? Or is there a faster way to merge videos?
Note: The server uses FFmpeg, but I am not allowed to use FFmpeg or pods in the app.
Below is the function that merges the videos:
var mp4Array: [AVAsset] = []
var avAssetExportSession: AVAssetExportSession?
var firstAsset: AVAsset?
var secondAsset: AVAsset?

func mergeVideos() {
    firstAsset = mp4Array.first
    secondAsset = mp4Array[1]
    guard let firstAsset = firstAsset, let secondAsset = secondAsset else { return }
    let mixComposition = AVMutableComposition()

    // First video track
    guard let firstTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
    do {
        try firstTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: firstAsset.duration),
                                       of: firstAsset.tracks(withMediaType: .video)[0],
                                       at: CMTime.zero)
    } catch {
        print("Couldn't load track 1")
        return
    }

    // Second video track, appended after the first
    guard let secondTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
    do {
        try secondTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: secondAsset.duration),
                                        of: secondAsset.tracks(withMediaType: .video)[0],
                                        at: firstAsset.duration)
    } catch {
        print("Couldn't load track 2")
        return
    }

    // Composition instruction spanning both clips
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: CMTimeAdd(firstAsset.duration, secondAsset.duration))
    let firstAssetInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstTrack)
    firstAssetInstruction.setOpacity(0.0, at: firstAsset.duration) // hide clip 1 when clip 2 starts
    let secondAssetInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: secondTrack)
    mainInstruction.layerInstructions = [firstAssetInstruction, secondAssetInstruction]

    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    mainComposition.renderSize = firstTrack.naturalSize

    // Export the merged pair
    guard let documentDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else { return }
    let url = documentDirectory.appendingPathComponent("MergedVideos/mergeVideo\(videoInt).mp4")
    guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
    exporter.outputURL = url
    exporter.outputFileType = AVFileType.mp4
    exporter.shouldOptimizeForNetworkUse = true
    exporter.videoComposition = mainComposition
    exporter.exportAsynchronously {
        if exporter.status == .completed {
            let avasset = AVAsset(url: url)
            self.mergeUrl = avasset
            if self.mp4Array.count > 1 {
                print("This adds the merged video to the front of the mp4Array")
                self.mp4Array.remove(at: 1)
                self.mp4Array.removeFirst()
                self.videoInt += 1
                self.mp4Array.append(self.mergeUrl!)
                self.mp4Array.bringToFront(item: self.mp4Array.last!) // bringToFront is a custom Array extension
            }
            if self.mp4Array.count > 1 {
                if self.mergeUrl != nil {
                    // Recurse: merge the next segment into the running result
                    self.mergeVideos()
                }
            } else {
                // Clean up the intermediate merge files
                var numberOfVideosDeleted = 0
                while numberOfVideosDeleted < self.videoInt - 1 {
                    do {
                        print("deleting")
                        let url = documentDirectory.appendingPathComponent("MergedVideos/mergeVideo\(numberOfVideosDeleted).mp4")
                        try FileManager.default.removeItem(at: url)
                        numberOfVideosDeleted += 1
                    } catch {
                        print("Error removing videos")
                    }
                }
                self.deleteCurrentSegementsInFolder()
            }
        }
    }
}
I ended up using FFmpeg Mobile to concatenate the videos, and it works really well: it takes around 1 minute to concatenate a 3 GB movie file. Link to the CocoaPod below:
https://github.com/tanersener/mobile-ffmpeg
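For reference, a minimal sketch of the concat approach with that pod (the MobileFFmpeg.execute entry point and the mobileffmpeg module name are as that project documents them; verify against the version you install). FFmpeg's concat demuxer remuxes the segments with -c copy instead of re-encoding them, which is why it stays fast even for very large files:

import mobileffmpeg

// Concatenate already-encoded MP4 segments without re-encoding.
func concatSegments(_ segmentURLs: [URL], into outputURL: URL) {
    // Write a concat list file: one "file '<path>'" line per segment.
    let listBody = segmentURLs.map { "file '\($0.path)'" }.joined(separator: "\n")
    let listURL = FileManager.default.temporaryDirectory.appendingPathComponent("concat.txt")
    try? listBody.write(to: listURL, atomically: true, encoding: .utf8)

    // "-c copy" copies the streams instead of re-encoding them.
    let returnCode = MobileFFmpeg.execute("-f concat -safe 0 -i \(listURL.path) -c copy \(outputURL.path)")
    print(returnCode == RETURN_CODE_SUCCESS ? "Concat finished" : "Concat failed with code \(returnCode)")
}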

Getting the first video track multiple times while trying to combine three different video tracks in the same frame using AVFoundation

I want to combine multiple videos and their audio into one video frame, and for that I am using the AVFoundation framework.
I have created a method that accepts an array of assets; for now I am passing three different videos' assets.
So far I have combined their audio, but the problem is with the video frame, in which only the first asset's video repeats in every slot.
I am using the code below. It combines all three videos' audio perfectly, but the first video in the input array repeats three times, which is the main issue:
I want all three different videos in the frame.
func merge(Videos aArrAssets: [AVAsset]) {
    let mixComposition = AVMutableComposition()

    // Adds one asset's video and audio to the composition and returns the source video track
    func setup(asset aAsset: AVAsset, WithComposition aComposition: AVMutableComposition) -> AVAssetTrack {
        let aMutableCompositionVideoTrack = aComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
        let aMutableCompositionAudioTrack = aComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
        let aVideoAssetTrack: AVAssetTrack = aAsset.tracks(withMediaType: .video)[0]
        let aAudioAssetTrack: AVAssetTrack = aAsset.tracks(withMediaType: .audio)[0]
        do {
            try aMutableCompositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: aAsset.duration), of: aVideoAssetTrack, at: .zero)
            try aMutableCompositionAudioTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: aAsset.duration), of: aAudioAssetTrack, at: .zero)
        } catch {}
        return aVideoAssetTrack
    }

    let aArrVideoTracks = aArrAssets.map { setup(asset: $0, WithComposition: mixComposition) }
    var aArrLayerInstructions: [AVMutableVideoCompositionLayerInstruction] = []

    // Transform every video so the clips stack vertically
    var aNewHeight: CGFloat = 0
    for (aIndex, aTrack) in aArrVideoTracks.enumerated() {
        aNewHeight += aIndex > 0 ? aArrVideoTracks[aIndex - 1].naturalSize.height : 0
        let aLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: aTrack)
        let aFristTransform = CGAffineTransform(translationX: 0, y: aNewHeight)
        aLayerInstruction.setTransform(aFristTransform, at: .zero)
        aArrLayerInstructions.append(aLayerInstruction)
    }

    let aTotalTime = aArrVideoTracks.map { $0.timeRange.duration }.max()
    let aInstruction = AVMutableVideoCompositionInstruction()
    aInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: aTotalTime!)
    aInstruction.layerInstructions = aArrLayerInstructions

    let aVideoComposition = AVMutableVideoComposition()
    aVideoComposition.instructions = [aInstruction]
    aVideoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    let aTotalWidth = aArrVideoTracks.map { $0.naturalSize.width }.max()!
    let aTotalHeight = aArrVideoTracks.map { $0.naturalSize.height }.reduce(0) { $0 + $1 }
    aVideoComposition.renderSize = CGSize(width: aTotalWidth, height: aTotalHeight)

    saveVideo(WithAsset: mixComposition, videoComp: aVideoComposition) { (aError, aUrl) in
        print("Location : \(String(describing: aUrl))")
    }
}
private func saveVideo(WithAsset aAsset: AVAsset, videoComp: AVVideoComposition, completion: @escaping (_ error: Error?, _ url: URL?) -> Void) {
    let dateFormatter = DateFormatter()
    dateFormatter.dateFormat = "ddMMyyyy_HHmm"
    let date = dateFormatter.string(from: Date())

    // Exporting
    let savePathUrl = URL(fileURLWithPath: NSHomeDirectory() + "/Documents/newVideo_\(date).mov")
    do {
        // Delete any old video at this path
        try FileManager.default.removeItem(at: savePathUrl)
    } catch {
        print(error.localizedDescription)
    }

    let assetExport = AVAssetExportSession(asset: aAsset, presetName: AVAssetExportPresetMediumQuality)!
    assetExport.outputFileType = .mov
    assetExport.outputURL = savePathUrl
    // assetExport.shouldOptimizeForNetworkUse = true
    assetExport.videoComposition = videoComp
    assetExport.exportAsynchronously { () -> Void in
        switch assetExport.status {
        case .completed:
            print("success")
            completion(nil, savePathUrl)
        case .failed:
            print("failed \(assetExport.error?.localizedDescription ?? "error nil")")
            completion(assetExport.error, nil)
        case .cancelled:
            print("cancelled \(assetExport.error?.localizedDescription ?? "error nil")")
            completion(assetExport.error, nil)
        default:
            print("complete")
            completion(assetExport.error, nil)
        }
    }
}
I know I'm doing something wrong in the code but couldn't figure out where, so I need some help finding it.
Thanks in advance.
Your issue is that when you construct your AVMutableVideoCompositionLayerInstruction, the aTrack reference is a reference to the track of the original asset, which you are setting with
let aVideoAssetTrack: AVAssetTrack = aAsset.tracks(withMediaType: .video)[0]
Its trackID is 1, because it is the first track in its source AVAsset. Accordingly, when you inspect your aArrLayerInstructions, you will see that the trackIDs of your instructions are all 1, which is why you're getting the first video three times:
(lldb) p aArrLayerInstructions[0].trackID
(CMPersistentTrackID) $R8 = 1
(lldb) p aArrLayerInstructions[1].trackID
(CMPersistentTrackID) $R10 = 1
...
The solution is to enumerate not your source tracks but the tracks of your composition when constructing the layer instructions:
let tracks = mixComposition.tracks(withMediaType: .video)
for (aIndex,aTrack) in tracks.enumerated(){
...
If you do it like that, you will get the correct trackIDs for your layer instructions:
(lldb) p aArrLayerInstructions[0].trackID
(CMPersistentTrackID) $R2 = 1
(lldb) p aArrLayerInstructions[1].trackID
(CMPersistentTrackID) $R4 = 3
...
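Putting it together, a minimal sketch of the corrected loop (adapted from the question's code, with the stacking math unchanged; only the enumerated collection differs). The layer instructions are now built from the composition's own tracks, so each instruction carries the composition track's unique trackID:

// Enumerate the composition's tracks, not the source assets' tracks.
let aArrCompositionTracks = mixComposition.tracks(withMediaType: .video)
var aArrLayerInstructions: [AVMutableVideoCompositionLayerInstruction] = []
var aNewHeight: CGFloat = 0

for (aIndex, aTrack) in aArrCompositionTracks.enumerated() {
    aNewHeight += aIndex > 0 ? aArrCompositionTracks[aIndex - 1].naturalSize.height : 0
    let aLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: aTrack)
    aLayerInstruction.setTransform(CGAffineTransform(translationX: 0, y: aNewHeight), at: .zero)
    aArrLayerInstructions.append(aLayerInstruction)
}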

Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could not be completed" {Error Domain=NSOSStatusErrorDomain Code=-16976 "(null)"}

I am working on a video application in Swift 3 on iOS. Basically I have to merge video assets and audio into one video with a fade effect and save it to the iPhone gallery. To achieve this, I am using the method below:
private func doMerge(arrayVideos: [AVAsset], arrayAudios: [AVAsset], animation: Bool, completion: @escaping Completion) -> Void {
    var insertTime = kCMTimeZero
    var audioInsertTime = kCMTimeZero
    var arrayLayerInstructions: [AVMutableVideoCompositionLayerInstruction] = []
    var outputSize = CGSize(width: 0, height: 0)

    // Determine video output size
    for videoAsset in arrayVideos {
        let videoTrack = videoAsset.tracks(withMediaType: AVMediaTypeVideo)[0]
        let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
        var videoSize = videoTrack.naturalSize
        if assetInfo.isPortrait == true {
            videoSize.width = videoTrack.naturalSize.height
            videoSize.height = videoTrack.naturalSize.width
        }
        outputSize = videoSize
    }

    // Init composition
    let mixComposition = AVMutableComposition()
    for index in 0..<arrayVideos.count {
        // Get video track
        guard let videoTrack = arrayVideos[index].tracks(withMediaType: AVMediaTypeVideo).first else { continue }

        // Get audio track
        var audioTrack: AVAssetTrack?
        if index < arrayAudios.count {
            if arrayAudios[index].tracks(withMediaType: AVMediaTypeAudio).count > 0 {
                audioTrack = arrayAudios[index].tracks(withMediaType: AVMediaTypeAudio).first
            }
        }

        // Init video & audio composition tracks
        let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
        let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
        do {
            let startTime = kCMTimeZero
            let duration = arrayVideos[index].duration

            // Add video track to video composition at specific time
            try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(startTime, duration), of: videoTrack, at: insertTime)

            // Add audio track to audio composition at specific time
            var audioDuration = kCMTimeZero
            if index < arrayAudios.count {
                audioDuration = arrayAudios[index].duration
            }
            if let audioTrack = audioTrack {
                do {
                    try audioCompositionTrack.insertTimeRange(CMTimeRangeMake(startTime, audioDuration), of: audioTrack, at: audioInsertTime)
                } catch {
                    print(error.localizedDescription)
                }
            }

            // Add instruction for video track
            let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack, asset: arrayVideos[index], standardSize: outputSize, atTime: insertTime)

            // Hide video track before changing to new track
            let endTime = CMTimeAdd(insertTime, duration)
            if animation {
                let timeScale = arrayVideos[index].duration.timescale
                let durationAnimation = CMTime(seconds: 1, preferredTimescale: timeScale)
                layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange(start: endTime, duration: durationAnimation))
            } else {
                layerInstruction.setOpacity(0, at: endTime)
            }
            arrayLayerInstructions.append(layerInstruction)

            // Increase the insert time
            audioInsertTime = CMTimeAdd(audioInsertTime, audioDuration)
            insertTime = CMTimeAdd(insertTime, duration)
        } catch {
            print("Load track error")
        }
    }

    // Main video composition instruction
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, insertTime)
    mainInstruction.layerInstructions = arrayLayerInstructions

    // Main video composition
    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(1, 30)
    mainComposition.renderSize = outputSize

    // Export to file
    let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
    let exportURL = URL(fileURLWithPath: path)

    // Remove file if it already existed
    FileManager.default.removeItemIfExisted(exportURL)

    // Init exporter
    let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter?.outputURL = exportURL
    exporter?.outputFileType = AVFileTypeQuickTimeMovie // AVFileType.mp4
    exporter?.shouldOptimizeForNetworkUse = false // true
    exporter?.videoComposition = mainComposition

    // Do export
    exporter?.exportAsynchronously(completionHandler: {
        DispatchQueue.main.async {
            self.exportDidFinish(exporter: exporter, videoURL: exportURL, completion: completion)
        }
    })
}
fileprivate func exportDidFinish(exporter: AVAssetExportSession?, videoURL: URL, completion: @escaping Completion) -> Void {
    if exporter?.status == AVAssetExportSessionStatus.completed {
        print("Exported file: \(videoURL.absoluteString)")
        completion(videoURL, nil)
    } else if exporter?.status == AVAssetExportSessionStatus.failed {
        completion(videoURL, exporter?.error)
        print(exporter?.error as Any)
    }
}
Problem: In my exportDidFinish method, the AVAssetExportSessionStatus comes back as failed with the error message below:
Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could
not be completed" UserInfo={NSLocalizedFailureReason=An unknown error
occurred (-16976), NSLocalizedDescription=The operation could not be
completed, NSUnderlyingError=0x1c065fb30 {Error
Domain=NSOSStatusErrorDomain Code=-16976 "(null)"}}
Can anyone advise me on this?
I had the exact same error, and only on the iPhone 5S simulator running iOS 11. I fixed it by changing the quality setting on the export operation from "Highest" (AVAssetExportPresetHighestQuality) to "Passthrough" (AVAssetExportPresetPassthrough), which keeps the original quality:
/// Try to start an export session and set the path and file type
if let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetPassthrough) { /* AVAssetExportPresetHighestQuality */
    exportSession.outputURL = videoOutputURL
    exportSession.outputFileType = AVFileType.mp4
    exportSession.shouldOptimizeForNetworkUse = true
    exportSession.exportAsynchronously(completionHandler: {
        switch exportSession.status {
        case .failed:
            if let _error = exportSession.error {
                // Used to fail here with the 11800/-16976 codes when using
                // AVAssetExportPresetHighestQuality; works fine with
                // AVAssetExportPresetPassthrough.
                failure(_error)
            }
        ....
Hope this helps someone, because that error code and message don't provide any information; it's just an "unknown error." Besides changing the quality setting, I would try changing other settings and stripping down the export operation to identify the specific component that may be failing (some specific image, audio, or video asset). When you have such a general error message, it's good to use the process of elimination, cutting the code in half each time, to get to the problem in logarithmic time.
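A related diagnostic worth adding here (my suggestion, not part of the original answer): AVFoundation can report which export presets are compatible with a given composition before you export, which helps separate a preset problem from an asset problem.

import AVFoundation

// List the presets AVFoundation claims to support for this composition,
// then verify one preset/file-type pair explicitly before exporting.
func checkExportOptions(for composition: AVAsset) {
    let compatible = AVAssetExportSession.exportPresets(compatibleWith: composition)
    print("Compatible presets: \(compatible)")

    AVAssetExportSession.determineCompatibility(
        ofExportPreset: AVAssetExportPresetHighestQuality,
        with: composition,
        outputFileType: .mp4
    ) { isCompatible in
        print("HighestQuality -> mp4 compatible: \(isCompatible)")
    }
}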

Swift AVFoundation stitching multiple videos together and keep preferred transform

I'm trying to stitch multiple video clips together. If I stitch each AVAsset into one AVMutableCompositionTrack, it works, but it loses the transformation on the first asset when I append another one recorded with mirroring enabled for the front-facing camera. Can I somehow use multiple AVMutableCompositionTracks of type video in one AVMutableComposition?
// create mix composition
let mixComposition = AVMutableComposition()

// insert video track
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)

// keep track of total duration
var totalDuration = kCMTimeZero

// for each video clip, add it to the mutable composition and transform each video layer
for (index, videoClip) in videoClips.enumerated() {
    if let videoAsset = videoClip.asset, let videoAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first {
        // insert current video track into the composition
        try videoCompositionTrack!.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: videoAssetTrack, at: totalDuration)
        videoCompositionTrack?.preferredTransform = videoAssetTrack.preferredTransform

        // shift duration to the next clip
        totalDuration = CMTimeAdd(totalDuration, videoAsset.duration)
    }
}

// Use AVAssetExportSession to export the video
let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset1920x1080)
assetExport?.outputFileType = AVFileType.mp4

// get the save URL to write the video to
let movieDestinationUrl = self.getRecommendedSaveUrl()

// set up the asset export session
assetExport?.outputURL = movieDestinationUrl
assetExport?.shouldOptimizeForNetworkUse = true

// export video to the file system asynchronously
assetExport?.exportAsynchronously(completionHandler: {
    assetExport?.cancelExport()
    switch assetExport!.status {
    case AVAssetExportSessionStatus.failed:
        break
    case AVAssetExportSessionStatus.cancelled:
        break
    default:
        DispatchQueue.main.async {
            completion?(movieDestinationUrl, nil)
        }
    }
    if assetExport?.error != nil {
        AppDelegate.logger.error("Could not create user video: \((assetExport?.error)!)")
        DispatchQueue.main.async {
            completion?(nil, assetExport?.error)
        }
    }
})
I'm trying to use something like the code below, with multiple AVMutableCompositionTracks and different CGAffineTransform objects.
// create mix composition
let mixComposition = AVMutableComposition()

// keep track of total duration
var totalDuration = kCMTimeZero

// for each video clip, add it to the mutable composition and transform each video layer
for (index, videoClip) in videoClips.enumerated() {
    if let videoAsset = videoClip.asset, let videoAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first {
        // insert a separate video track per clip
        let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID(index))

        // insert current video track into the composition
        try videoCompositionTrack!.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: videoAssetTrack, at: totalDuration)
        videoCompositionTrack?.preferredTransform = videoAssetTrack.preferredTransform

        // shift duration to the next clip
        totalDuration = CMTimeAdd(totalDuration, videoAsset.duration)
    }
}

// Use AVAssetExportSession to export the video
let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset1920x1080)
assetExport?.outputFileType = AVFileType.mp4

// get the save URL to write the video to
let movieDestinationUrl = self.getRecommendedSaveUrl()

// set up the asset export session
assetExport?.outputURL = movieDestinationUrl
assetExport?.shouldOptimizeForNetworkUse = true

// export video to the file system asynchronously
assetExport?.exportAsynchronously(completionHandler: {
    assetExport?.cancelExport()
    switch assetExport!.status {
    case AVAssetExportSessionStatus.failed:
        break
    case AVAssetExportSessionStatus.cancelled:
        break
    default:
        DispatchQueue.main.async {
            completion?(movieDestinationUrl, nil)
        }
    }
    if assetExport?.error != nil {
        AppDelegate.logger.error("Could not create user video: \((assetExport?.error)!)")
        DispatchQueue.main.async {
            completion?(nil, assetExport?.error)
        }
    }
})
In the case above I'm not able to get any usable video: it is much shorter than it should be. I'm trying to avoid using AVMutableVideoCompositionInstruction because it takes too long to process, but it would still be an option if it worked for any resolution, and especially with mirroring support.
// create mix composition
let mixComposition = AVMutableComposition()

// keep track of total duration
var totalDuration = kCMTimeZero

// keeps all layer transformations for each video asset
var videoCompositionLayerInstructions = [AVMutableVideoCompositionLayerInstruction]()

// for each video clip, add it to the mutable composition and transform each video layer
for (index, videoClip) in videoClips.enumerated() {
    if let videoAsset = videoClip.asset {
        // use the first video asset track for settings like height and width
        let videoAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first!

        // insert video track
        let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID(index))

        // insert current video track into the composition
        try videoCompositionTrack!.insertTimeRange(CMTimeRangeMake(totalDuration, videoAssetTrack.timeRange.duration), of: videoAssetTrack, at: totalDuration)
        videoCompositionTrack?.preferredTransform = videoAssetTrack.preferredTransform

        let videoCompositionLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoAssetTrack)
        videoCompositionLayerInstruction.setTransform((videoCompositionTrack?.preferredTransform)!, at: totalDuration)
        videoCompositionLayerInstruction.setOpacity(0.0, at: videoAsset.duration)

        // apply instruction
        videoCompositionLayerInstructions.append(videoCompositionLayerInstruction)

        // shift duration to the next clip
        totalDuration = CMTimeAdd(totalDuration, videoAsset.duration)
    }
}

let videoCompositionInstruction = AVMutableVideoCompositionInstruction()
videoCompositionInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, totalDuration)
videoCompositionInstruction.layerInstructions = videoCompositionLayerInstructions

let mainComposition = AVMutableVideoComposition()
mainComposition.renderSize = CGSize(width: 1080, height: 1920)
mainComposition.frameDuration = CMTimeMake(1, 30)
mainComposition.instructions = [videoCompositionInstruction]

// Use AVAssetExportSession to export the video
let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset1920x1080)
assetExport?.outputFileType = AVFileType.mp4

// get the save URL to write the video to
let movieDestinationUrl = self.getRecommendedSaveUrl()

// set up the asset export session
assetExport?.outputURL = movieDestinationUrl
assetExport?.shouldOptimizeForNetworkUse = true
assetExport?.videoComposition = mainComposition
Does anybody have an idea how to implement this functionality?
Note: I don't need to care about audio at all.
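No answer was posted here, but note that the last snippet repeats the timing bug fixed in the first question on this page: setOpacity(_:at:) takes a time on the composition's timeline, so hiding each clip at videoAsset.duration blanks everything after the first clip's length, and the layer instruction is also built against the source track rather than the composition track. A minimal sketch of the per-clip loop with cumulative times and composition-track instructions (names follow the question's code; this is untested against the mirroring case):

var totalDuration = kCMTimeZero
var videoCompositionLayerInstructions = [AVMutableVideoCompositionLayerInstruction]()

for videoClip in videoClips {
    guard let videoAsset = videoClip.asset,
          let videoAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first,
          let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
                                                                     preferredTrackID: kCMPersistentTrackID_Invalid)
    else { continue }

    do {
        try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration),
                                                  of: videoAssetTrack, at: totalDuration)
    } catch { continue }

    // Build the instruction against the composition track (see the trackID answer
    // earlier on this page) and key both the transform and the opacity cut to
    // times on the composition's timeline.
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoCompositionTrack)
    instruction.setTransform(videoAssetTrack.preferredTransform, at: totalDuration)
    totalDuration = CMTimeAdd(totalDuration, videoAsset.duration)
    instruction.setOpacity(0.0, at: totalDuration) // hide at this clip's end, not at videoAsset.duration
    videoCompositionLayerInstructions.append(instruction)
}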

What's causing AVMutableComposition to increase the size of video drastically? - iOS, Swift, AVFoundation

Assuming that we have two video assets (AVAsset objects), let's call them blank and main, where main is a video of random limited length (say 2 to 5 minutes) and blank is always a 4-second video, we want to merge the videos in the following order:
blank - main - blank
// Create an AVMutableComposition object. This object will hold our multiple AVMutableCompositionTracks.
let mixComposition = AVMutableComposition()
let assets = [blank, main, blank]
var totalTime: CMTime = CMTimeMake(0, 0)
var atTimeM: CMTime = CMTimeMake(0, 0)
Utils.log([blank.duration, main.duration])

// VIDEO TRACK
let videoTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
for (index, asset) in assets.enumerated() {
    do {
        if index == 0 {
            atTimeM = kCMTimeZero
        } else {
            atTimeM = totalTime // <-- Use the total time for all the videos seen so far.
        }
        try videoTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, asset.duration), of: asset.tracks(withMediaType: AVMediaTypeVideo)[0], at: atTimeM)
    } catch let error as NSError {
        Utils.log("error: \(error)")
    }
    totalTime = CMTimeAdd(totalTime, asset.duration)
}

// AUDIO TRACK
let audioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
    try audioTrack.insertTimeRange(CMTimeRangeMake(kCMTimeZero, main.duration), of: main.tracks(withMediaType: AVMediaTypeAudio)[0], at: blank.duration)
} catch _ {
    completionHandler(nil, ErrorType(rawValue: "Unable to add audio in composition."))
    return
}

let outputURL = mainVideoObject.getDirectoryURL()?.appendingPathComponent("video-with-blank.mp4")
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset1280x720) else {
    completionHandler(nil, ErrorType(rawValue: "Unable to create export session."))
    return
}

let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeAdd(blank.duration, CMTimeAdd(main.duration, blank.duration)))

// Fixing orientation
let firstLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let firstAssetTrack = blank.tracks(withMediaType: AVMediaTypeVideo)[0]
firstLayerInstruction.setTransform(firstAssetTrack.preferredTransform, at: kCMTimeZero)
firstLayerInstruction.setOpacity(0.0, at: blank.duration)

let secondLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let secondAssetTrack = main.tracks(withMediaType: AVMediaTypeVideo)[0]
var isSecondAssetPortrait = false
let secondTransform = secondAssetTrack.preferredTransform
if secondTransform.a == 0 && secondTransform.b == 1.0 && secondTransform.c == -1.0 && secondTransform.d == 0 {
    isSecondAssetPortrait = true
}
if secondTransform.a == 0 && secondTransform.b == -1.0 && secondTransform.c == 1.0 && secondTransform.d == 0 {
    isSecondAssetPortrait = true
}
secondLayerInstruction.setTransform(secondAssetTrack.preferredTransform, at: blank.duration)
secondLayerInstruction.setOpacity(0.0, at: CMTimeAdd(blank.duration, main.duration))

let thirdLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
let thirdAssetTrack = blank.tracks(withMediaType: AVMediaTypeVideo)[0]
thirdLayerInstruction.setTransform(thirdAssetTrack.preferredTransform, at: CMTimeAdd(blank.duration, main.duration))

mainInstruction.layerInstructions = [firstLayerInstruction, secondLayerInstruction, thirdLayerInstruction]

var naturalSize = CGSize()
if isSecondAssetPortrait {
    naturalSize = CGSize(width: secondAssetTrack.naturalSize.height, height: secondAssetTrack.naturalSize.width)
} else {
    naturalSize = secondAssetTrack.naturalSize
}

let renderWidth = naturalSize.width
let renderHeight = naturalSize.height

let mainCompositionInst = AVMutableVideoComposition()
mainCompositionInst.instructions = [mainInstruction]
mainCompositionInst.frameDuration = CMTimeMake(1, 30)
mainCompositionInst.renderSize = CGSize(width: renderWidth, height: renderHeight)

exporter.outputURL = outputURL
exporter.outputFileType = AVFileTypeMPEG4
exporter.videoComposition = mainCompositionInst
//exporter.shouldOptimizeForNetworkUse = true
exporter.exportAsynchronously {
    if exporter.status == .completed {
        completionHandler(AVAsset(url: outputURL!), nil)
    } else {
        completionHandler(nil, ErrorType(rawValue: "Unable to export video."))
        if let error = exporter.error {
            Utils.log("Unable to export video. \(error)")
        }
    }
}
Assuming that the original video, recorded for 5 minutes at 720p quality, takes around 200 MB of space, adding the 4-second blank video at the beginning and end of the main video should not drastically change the size, and processing should finish very fast.
The result, however, is a video that's 2 to 2.5x the size of the original (so 400 to 500 MB) and takes too long to process.
Please advise.
Thanks
Here I have prepared a custom class where you can just pass the names of your videos and keep those videos in the bundle. Once you run the app, it will generate a new video file as per your requirement and drop it into the application's documents directory.
I prepared this demo using Swift 4.
//
//  ViewController.swift
//  SOVideoMergingDemo
//
//  Created by iOS Test User on 03/01/18.
//  Copyright © 2018 Test User. Ltd. All rights reserved.
//

import UIKit
import AVFoundation
import MediaPlayer
import Photos
import AssetsLibrary
import AVKit

class ViewController: UIViewController {

    //--------------------------------------------------
    //MARK: - Properties
    //--------------------------------------------------

    var videoUrls: [URL] = []
    var arrVideoAsset: [AVAsset] = []
    let video1 = "1"
    let video2 = "2"
    let outPutVideo = "MergedVideo.mp4"
    let semaphore = DispatchSemaphore(value: 1)

    //--------------------------------------------------
    //MARK: - Custom Methods
    //--------------------------------------------------

    func getVideoURL(forVideo: String) -> URL {
        let videoPath = Bundle.main.path(forResource: forVideo, ofType: "mp4")
        let vidURL = URL(fileURLWithPath: videoPath!)
        return vidURL
    }

    //--------------------------------------------------

    func mergeVideos(arrVideoAsset: [AVAsset]) {
        let mixComposition = AVMutableComposition()

        // Tracks to insert into the composition for merging
        let firstTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
        let secondTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
        let thirdTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)

        do {
            try firstTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, arrVideoAsset[0].duration), of: arrVideoAsset[0].tracks(withMediaType: .video)[0], at: kCMTimeZero)
        } catch _ {
            print("Failed to load first track")
        }
        do {
            try secondTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, arrVideoAsset[1].duration), of: arrVideoAsset[1].tracks(withMediaType: .video)[0], at: arrVideoAsset[0].duration)
        } catch _ {
            print("Failed to load second track")
        }
        do {
            // Insert the third clip at the combined duration of the first two,
            // not at arrVideoAsset[1].duration, so the clips don't overlap.
            try thirdTrack?.insertTimeRange(CMTimeRangeMake(kCMTimeZero, arrVideoAsset[0].duration), of: arrVideoAsset[0].tracks(withMediaType: .video)[0], at: CMTimeAdd(arrVideoAsset[0].duration, arrVideoAsset[1].duration))
        } catch _ {
            print("Failed to load third track")
        }

        // This instruction is created for merging the video tracks
        let compositionInstruction = AVMutableVideoCompositionInstruction()
        compositionInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeAdd(arrVideoAsset[0].duration, CMTimeAdd(arrVideoAsset[1].duration, arrVideoAsset[2].duration)))

        // Creating layer instructions for the videos. Opacity cuts are on the
        // composition's timeline, so each clip is hidden at its cumulative end time.
        let firstInstruction = videoCompositionInstructionForTrack(firstTrack!, asset: arrVideoAsset[0])
        firstInstruction.setOpacity(0.0, at: arrVideoAsset[0].duration)
        let secondInstruction = videoCompositionInstructionForTrack(secondTrack!, asset: arrVideoAsset[1])
        secondInstruction.setOpacity(0.0, at: CMTimeAdd(arrVideoAsset[0].duration, arrVideoAsset[1].duration))
        let thirdInstruction = videoCompositionInstructionForTrack(thirdTrack!, asset: arrVideoAsset[2])
        compositionInstruction.layerInstructions = [firstInstruction, secondInstruction, thirdInstruction]

        // By changing this height and width you affect the size of the merged
        // video. Calculate it carefully and as per your needs.
        let height = (Float((firstTrack?.naturalSize.height)!) < Float((secondTrack?.naturalSize.height)!)) ? firstTrack?.naturalSize.height : secondTrack?.naturalSize.height
        let width = (Float((firstTrack?.naturalSize.width)!) < Float((secondTrack?.naturalSize.width)!)) ? firstTrack?.naturalSize.width : secondTrack?.naturalSize.width

        let mainComposition = AVMutableVideoComposition()
        mainComposition.instructions = [compositionInstruction]
        mainComposition.frameDuration = CMTimeMake(1, 30)
        mainComposition.renderSize = CGSize(width: width!, height: height!)

        let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
        exporter?.outputURL = URL(fileURLWithPath: getDocumentDirectoryPath() + "/" + outPutVideo)
        exporter?.outputFileType = AVFileType.mp4
        exporter?.shouldOptimizeForNetworkUse = true
        exporter?.videoComposition = mainComposition
        print(self.getDocumentDirectoryPath())

        exporter?.exportAsynchronously(completionHandler: {
            DispatchQueue.main.async {
                if exporter?.status == AVAssetExportSessionStatus.completed {
                    do {
                        let videoData = try Data(contentsOf: exporter!.outputURL!)
                        try videoData.write(to: URL(fileURLWithPath: self.getDocumentDirectoryPath() + "/" + self.outPutVideo), options: Data.WritingOptions.atomic)
                    } catch {
                        print("Failed to save video ===>>> \(error.localizedDescription)")
                    }

                    // Uncomment this if you want to save the video to the Photos library
                    // PHPhotoLibrary.shared().performChanges({
                    //     PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: (exporter?.outputURL)!)
                    // }, completionHandler: { (success, error) in
                    //     if success {
                    //         let fetchOptions = PHFetchOptions()
                    //         fetchOptions.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)]
                    //         _ = PHAsset.fetchAssets(with: .video, options: fetchOptions).firstObject
                    //     } else {
                    //         print("Error saving file to Photos library -> \(String(describing: error?.localizedDescription))")
                    //     }
                    // })
                } else {
                    print("Error -> \(String(describing: exporter?.error?.localizedDescription))")
                }
            }
        })
    }

    //--------------------------------------------------
    // This method is used to make a layer instruction for a particular video
    func videoCompositionInstructionForTrack(_ track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
        let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
        let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
        let scale = CGAffineTransform(scaleX: 1, y: 1)
        instruction.setTransform(assetTrack.preferredTransform.concatenating(scale), at: kCMTimeZero)
        return instruction
    }

    //--------------------------------------------------

    func getDocumentDirectoryPath() -> String {
        let arrPaths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
        return arrPaths[0]
    }

    //--------------------------------------------------
    //MARK: - View Life Cycle Methods
    //--------------------------------------------------

    override func viewDidLoad() {
        super.viewDidLoad()
        // Prepare video assets
        arrVideoAsset.append(AVAsset(url: getVideoURL(forVideo: video1)))
        arrVideoAsset.append(AVAsset(url: getVideoURL(forVideo: video2)))
        arrVideoAsset.append(AVAsset(url: getVideoURL(forVideo: video1)))
        // Merge these videos
        mergeVideos(arrVideoAsset: arrVideoAsset)
    }
}
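On the size question itself (my note, not part of the answer above): AVAssetExportPresetHighestQuality re-encodes the whole composition at a system-chosen high bitrate, which can easily produce a file 2x the size of an already-compressed 720p recording. A hedged sketch of two knobs worth trying: a lower preset, and AVAssetExportSession's fileLengthLimit (the export fails rather than exceeds the limit, so leave headroom):

import AVFoundation

// Build an exporter that trades some quality for a predictable file size.
func makeSizeConsciousExporter(for composition: AVAsset, sourceSizeInBytes: Int64) -> AVAssetExportSession? {
    guard let exporter = AVAssetExportSession(asset: composition,
                                              presetName: AVAssetExportPreset1280x720) else {
        return nil
    }
    // Allow roughly 25% growth over the source; the encoder must stay under
    // this limit or the export fails.
    exporter.fileLengthLimit = sourceSizeInBytes + sourceSizeInBytes / 4
    return exporter
}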
