I have successfully merged the video clips into a single video, but the final merged video shows a white frame after the end of every clip. I have tried a lot to remove this but haven't had any success. Please review my code below.
func merge(arrayVideos: [AVAsset], completion: @escaping (_ exporter: AVAssetExportSession) -> ()) {
    let mainComposition = AVMutableComposition()
    let compositionVideoTrack = mainComposition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
    compositionVideoTrack?.preferredTransform = CGAffineTransform(rotationAngle: .pi / 2)
    let soundtrackTrack = mainComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)

    var time: Double = 0.0
    for videoAsset in arrayVideos {
        let atTime = CMTime(seconds: time, preferredTimescale: 1)
        try! compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: videoAsset.duration), of: videoAsset.tracks(withMediaType: .video)[0], at: atTime)
        try! soundtrackTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: videoAsset.duration), of: videoAsset.tracks(withMediaType: .audio)[0], at: atTime)
        time += videoAsset.duration.seconds
    }

    let outputFileURL = URL(fileURLWithPath: NSTemporaryDirectory() + "merge.mp4")
    print("final URL: \(outputFileURL)")

    // Remove any leftover file from a previous export.
    let fileManager = FileManager()
    do {
        try fileManager.removeItem(at: outputFileURL)
    } catch let error as NSError {
        print("Error: \(error.domain)")
    }

    let exporter = AVAssetExportSession(asset: mainComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter?.outputURL = outputFileURL
    exporter?.outputFileType = AVFileType.mp4
    exporter?.shouldOptimizeForNetworkUse = true
    exporter?.exportAsynchronously {
        DispatchQueue.main.async {
            completion(exporter!)
        }
    }
}
Don't use a Double to track the insertion time; rounding errors can leave tiny gaps between clips, which is likely the white frame you're seeing. And don't use a preferredTimescale of 1 when converting seconds: that effectively rounds every insertion point to whole seconds (1000 would be a more common timescale for this).

Instead, track the insertion time with a CMTime initialized to .zero, and use CMTimeAdd to advance it.

And one more thing: video and audio tracks can have different durations, particularly when recorded. To keep things in sync, you may want to use CMTimeRangeGetIntersection to get the common time range of the audio and video in each asset, and then use the result for the insertion into the composition, as sketched below.
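Putting those three fixes together, the insertion loop might look roughly like this (a minimal sketch; compositionVideoTrack and soundtrackTrack as in your code, with the setup and export code unchanged):

var insertTime = CMTime.zero

for videoAsset in arrayVideos {
    guard let videoTrack = videoAsset.tracks(withMediaType: .video).first,
          let audioTrack = videoAsset.tracks(withMediaType: .audio).first else { continue }

    // Insert only the time range that audio and video have in common,
    // so the two composition tracks stay exactly the same length.
    let commonRange = CMTimeRangeGetIntersection(videoTrack.timeRange, otherRange: audioTrack.timeRange)

    try! compositionVideoTrack?.insertTimeRange(commonRange, of: videoTrack, at: insertTime)
    try! soundtrackTrack?.insertTimeRange(commonRange, of: audioTrack, at: insertTime)

    // Advance with CMTimeAdd instead of accumulating a Double.
    insertTime = CMTimeAdd(insertTime, commonRange.duration)
}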
Related
I am trying to merge two videos together with AVFoundation.
I am using AVMutableComposition and I add both tracks to the composition, but the resulting video has the first video with its audio, followed by the second video's audio with no video.
How can I get the audio and video of both assets?
Thank you
let composition = AVMutableComposition()
let audioTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)!
let videoTrack: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)!
let audioTrack2: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)!
let videoTrack2: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)!

var outputURL = documentDirectory.appendingPathComponent("output-temp")
do {
    try audioTrack.insertTimeRange(CMTimeRangeFromTimeToTime(start: startTime, end: endTime), of: asset.tracks(withMediaType: AVMediaType.audio)[0], at: CMTime.zero)
    try videoTrack.insertTimeRange(CMTimeRangeFromTimeToTime(start: startTime, end: endTime), of: asset.tracks(withMediaType: AVMediaType.video)[0], at: CMTime.zero)
    try audioTrack2.insertTimeRange(CMTimeRangeFromTimeToTime(start: startTime, end: asset2.duration), of: asset2.tracks(withMediaType: AVMediaType.audio)[0], at: CMTime.invalid)
    try videoTrack2.insertTimeRange(CMTimeRangeFromTimeToTime(start: startTime, end: asset2.duration), of: asset2.tracks(withMediaType: AVMediaType.video)[0], at: CMTime.invalid)

    try manager.createDirectory(at: outputURL, withIntermediateDirectories: true, attributes: nil)
    let id = "id-\(Int.random(in: 0...199))"
    let mediaType = "mp4"
    outputURL = outputURL.appendingPathComponent("preVideo-\(id).\(mediaType)")
} catch let error {
    print(error)
}
The problem is that you are adding a second video track to the composition; without layer instructions, only one video track is rendered. You need to insert both videos into the same video track. Just delete your videoTrack2 and go from there.
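For example, inside the same do block as above (a sketch using the question's startTime, endTime, asset, and asset2):

let range1 = CMTimeRangeFromTimeToTime(start: startTime, end: endTime)
try audioTrack.insertTimeRange(range1, of: asset.tracks(withMediaType: .audio)[0], at: .zero)
try videoTrack.insertTimeRange(range1, of: asset.tracks(withMediaType: .video)[0], at: .zero)

// Append the second asset to the SAME tracks; passing CMTime.invalid
// for "at:" appends at the end of the track.
let range2 = CMTimeRange(start: .zero, duration: asset2.duration)
try audioTrack.insertTimeRange(range2, of: asset2.tracks(withMediaType: .audio)[0], at: CMTime.invalid)
try videoTrack.insertTimeRange(range2, of: asset2.tracks(withMediaType: .video)[0], at: CMTime.invalid)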
How can I trim milliseconds from a video URL when using AVAssetExportSession? I'm using the code below, which gives the final video a duration like 15.233333334 or 17.9333333334 seconds, depending on the number of assets and their time ranges. Once they are all added together, I want to trim the mixComposition to 15 seconds, 17 seconds, etc.
AVMutableComposition:
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let soundtrackTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))

var insertTime = CMTime.zero
for videoAsset in videoAssets {
    do {
        try compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: videoAsset.duration), of: videoAsset.tracks(withMediaType: .video)[0], at: insertTime)
        try soundtrackTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: videoAsset.duration), of: videoAsset.tracks(withMediaType: .audio)[0], at: insertTime)
        insertTime = CMTimeAdd(insertTime, videoAsset.duration)
    } catch {
        print("Error inserting time range: \(error)")
    }
}
AVAssetExportSession:
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
let start = CMTimeMakeWithSeconds(.zero, preferredTimescale: 600)
let videoDuration = CMTimeMakeWithSeconds(CMTimeGetSeconds(mixComposition.duration), preferredTimescale: 600)
let range = CMTimeRangeMake(start: start, duration: videoDuration)
exporter.timeRange = range
// ...
I found an easy way to do this. There is a built-in function named trunc() that drops the fractional part and leaves only the whole number.

If using an AVMutableComposition():

let mixComposition = AVMutableComposition()
// ...
let videoDuration = CMTimeGetSeconds(mixComposition.duration) // already a Double (Float64)
let durationTruncated = trunc(videoDuration)                  // e.g. 15.233333334 -> 15.0
print(".......truncate: ", durationTruncated)

// 600 is a common video timescale: it is evenly divisible by 24, 25, and 30 fps.
let duration = CMTimeMakeWithSeconds(durationTruncated, preferredTimescale: 600)
let start = CMTimeMakeWithSeconds(.zero, preferredTimescale: 600)
let range = CMTimeRangeMake(start: start, duration: duration)
exporter.timeRange = range
If using a URL from the photo library:

let asset = AVURLAsset(url: yourLibraryUrl) // if this isn't a library URL, load the "duration" key via asset.loadValuesAsynchronously first
let videoDuration = CMTimeGetSeconds(asset.duration)
let durationTruncated = trunc(videoDuration)
print(".......truncate: ", durationTruncated)

let duration = CMTimeMakeWithSeconds(durationTruncated, preferredTimescale: 600)
let start = CMTimeMakeWithSeconds(.zero, preferredTimescale: 600)
let range = CMTimeRangeMake(start: start, duration: duration)
exporter.timeRange = range
I am attempting to merge videos with scaleTimeRange (to make them slo-mo or sped-up); however, it is not working as desired: only the first video gets the time-range effect, not the rest.

The work is done in the merge function; it is pretty simple, but I am not sure why the scaling works only for the first video and not for the ones after it.
This is a test project to test with, it has my current code: https://github.com/meyesyesme/creationMergeProj
This is the merge function I use, with my current attempt at the time-range scaling marked below (you can comment it out to see the merge work without the speed effect):
func mergeVideosTestSQ(arrayVideos: [VideoSegment], completion: @escaping (URL?, Error?) -> ()) {
    let mixComposition = AVMutableComposition()
    var instructions: [AVMutableVideoCompositionLayerInstruction] = []
    var insertTime = CMTime(seconds: 0, preferredTimescale: 1)
    print(arrayVideos, "<- arrayVideos")

    /// For each URL, add the video and audio tracks and their duration to the composition.
    for videoSegment in arrayVideos {
        let sourceAsset = AVAsset(url: videoSegment.videoURL!)
        let frameRange = CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1), duration: sourceAsset.duration)
        guard
            let nthVideoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
            let nthAudioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
            let assetVideoTrack = sourceAsset.tracks(withMediaType: .video).first
        else {
            print("didn't work")
            return
        }

        let assetAudioTrack = sourceAsset.tracks(withMediaType: .audio).first

        do {
            try nthVideoTrack.insertTimeRange(frameRange, of: assetVideoTrack, at: insertTime)
            try nthAudioTrack.insertTimeRange(frameRange, of: assetAudioTrack!, at: insertTime)

            // MY CURRENT SPEED ATTEMPT:
            let newDuration = CMTimeMultiplyByFloat64(frameRange.duration, multiplier: videoSegment.videoSpeed)
            nthVideoTrack.scaleTimeRange(frameRange, toDuration: newDuration)
            nthAudioTrack.scaleTimeRange(frameRange, toDuration: newDuration)

            // Instructions:
            let nthInstruction = ViewController.videoCompositionInstruction(nthVideoTrack, asset: sourceAsset)
            nthInstruction.setOpacity(0.0, at: CMTimeAdd(insertTime, newDuration))
            instructions.append(nthInstruction)

            insertTime = insertTime + newDuration
        } catch {
            DispatchQueue.main.async {
                print("didn't work")
            }
        }
    }

    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1), duration: insertTime)
    mainInstruction.layerInstructions = instructions

    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    mainComposition.renderSize = CGSize(width: 1080, height: 1920)

    let outputFileURL = URL(fileURLWithPath: NSTemporaryDirectory() + "merge.mp4")

    // Clear any previous merged video from the temp folder.
    let fileManager = FileManager()
    try? fileManager.removeItem(at: outputFileURL)

    print("now will export")

    /// Try to start an export session and set the path and file type.
    if let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) { // does NOT work with AVAssetExportPresetPassthrough
        exportSession.outputFileType = .mov
        exportSession.outputURL = outputFileURL
        exportSession.videoComposition = mainComposition
        exportSession.shouldOptimizeForNetworkUse = true

        /// Try to export the file and handle the status cases.
        exportSession.exportAsynchronously {
            if let url = exportSession.outputURL {
                completion(url, nil)
            }
            if let error = exportSession.error {
                completion(nil, error)
            }
        }
    }
}
You'll see this behavior: the first video plays correctly, but the following ones don't, and their opacity instructions fire at the wrong times. I have tried different combinations and this is the closest I've gotten. I've been stuck on this for a while!
After you scale a clip, the duration of the composition is recalculated, so you need to append the next part according to the scaled duration. Replace
insertTime = insertTime + duration
with
insertTime = insertTime + newDuration
You also need to update the time passed to setOpacity(_:at:). I'd advise moving that line after the insertTime update and using the new value, which also removes the duplicated CMTimeAdd.

Also, the scale is applied on the composition's timeline, not the source asset's, so you need to use a range relative to the composition:
let currentRange = CMTimeRange(start: insertTime, duration: frameRange.duration)
nthVideoTrack.scaleTimeRange(currentRange, toDuration: newDuration)
nthAudioTrack.scaleTimeRange(currentRange, toDuration: newDuration)
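Put together, the body of your do block becomes (a sketch under the same names as your code):

try nthVideoTrack.insertTimeRange(frameRange, of: assetVideoTrack, at: insertTime)
try nthAudioTrack.insertTimeRange(frameRange, of: assetAudioTrack!, at: insertTime)

// Scale the range the clip occupies in the COMPOSITION, not in the source asset.
let newDuration = CMTimeMultiplyByFloat64(frameRange.duration, multiplier: videoSegment.videoSpeed)
let currentRange = CMTimeRange(start: insertTime, duration: frameRange.duration)
nthVideoTrack.scaleTimeRange(currentRange, toDuration: newDuration)
nthAudioTrack.scaleTimeRange(currentRange, toDuration: newDuration)

// Advance first, then hide the clip exactly where it now ends.
insertTime = CMTimeAdd(insertTime, newDuration)
let nthInstruction = ViewController.videoCompositionInstruction(nthVideoTrack, asset: sourceAsset)
nthInstruction.setOpacity(0.0, at: insertTime)
instructions.append(nthInstruction)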
I used AVAssetExportSession to download a session URL, but you can't download a live stream that way. To get around this, the live stream is split into 10-second mp4 segments that are downloaded using an m3u8 to build the URLs. I then use AVAssetExportSession to merge those mp4 segments.

I can merge the clips one by one into a single mp4 file, which is what I want, but as the file grows the merge takes longer and longer; since I am dealing with thousands of segments, this becomes impractical.

I thought about using AVPlayerLooper, but it doesn't let me scrub, rewind, or fast-forward through the mp4 segments like a single video.

Is there a way to combine the mp4 clips so they play as one video, the way the m3u8 does, without merging? Or is there a fast way to merge videos?

Note: the server uses FFmpeg, but I am not allowed to use FFmpeg or pods in the app.

Below is the function that merges the videos:
var mp4Array: [AVAsset] = []
var avAssetExportSession: AVAssetExportSession?
var firstAsset: AVAsset?
var secondAsset: AVAsset?

func mergeVideos() {
    firstAsset = mp4Array.first
    secondAsset = mp4Array[1]
    guard let firstAsset = firstAsset, let secondAsset = secondAsset else { return }
    let mixComposition = AVMutableComposition()

    guard let firstTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
    do {
        try firstTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: firstAsset.duration),
                                       of: firstAsset.tracks(withMediaType: .video)[0],
                                       at: CMTime.zero)
    } catch {
        print("Couldn't load track 1")
        return
    }

    guard let secondTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
    do {
        try secondTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: secondAsset.duration),
                                        of: secondAsset.tracks(withMediaType: .video)[0],
                                        at: firstAsset.duration)
    } catch {
        print("Couldn't load track 2")
        return
    }

    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: CMTimeAdd(firstAsset.duration, secondAsset.duration))

    let firstAssetInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstTrack)
    firstAssetInstruction.setOpacity(0.0, at: firstAsset.duration)
    let secondAssetInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: secondTrack)
    mainInstruction.layerInstructions = [firstAssetInstruction, secondAssetInstruction]

    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    mainComposition.renderSize = firstTrack.naturalSize

    guard let documentDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else { return }
    let url = documentDirectory.appendingPathComponent("MergedVideos/mergeVideo\(videoInt).mp4")

    guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
    exporter.outputURL = url
    exporter.outputFileType = AVFileType.mp4
    exporter.shouldOptimizeForNetworkUse = true
    exporter.videoComposition = mainComposition
    exporter.exportAsynchronously {
        if exporter.status == .completed {
            let avasset = AVAsset(url: url)
            self.mergeUrl = avasset
            if self.mp4Array.count > 1 {
                print("This adds the merged video to the front of the mp4Array")
                self.mp4Array.remove(at: 1)
                self.mp4Array.removeFirst()
                self.videoInt = self.videoInt + 1
                self.mp4Array.append(self.mergeUrl!)
                self.mp4Array.bringToFront(item: self.mp4Array.last!)
            }
            if self.mp4Array.count > 1 {
                if self.mergeUrl != nil {
                    self.mergeVideos()
                }
            } else {
                var numberOfVideosDeleted = 0
                while numberOfVideosDeleted < self.videoInt - 1 {
                    do {
                        print("deleting")
                        let url = documentDirectory.appendingPathComponent("MergedVideos/mergeVideo\(numberOfVideosDeleted).mp4")
                        try FileManager.default.removeItem(at: url)
                        numberOfVideosDeleted = numberOfVideosDeleted + 1
                    } catch {
                        print("Error removing videos")
                    }
                }
                self.deleteCurrentSegementsInFolder()
            }
        }
    }
}
I ended up using FFmpeg Mobile to concatenate the videos, and it works really well: it takes around 1 minute to concatenate a 3 GB movie file.
Link below to the CocoaPod:
https://github.com/tanersener/mobile-ffmpeg
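For reference, the concatenation itself can go through FFmpeg's concat demuxer. Below is a minimal sketch using mobile-ffmpeg's MobileFFmpeg.execute API; segmentsListPath and outputPath are hypothetical paths, and you should check the pod's README for the exact API of the version you install:

import mobileffmpeg // module name per the mobile-ffmpeg pod

// segments.txt lists the clips, one "file '/path/to/segmentN.mp4'" line each.
// "-c copy" rewrites the container without re-encoding, which is why this is
// so much faster than exporting through AVAssetExportSession.
let command = "-f concat -safe 0 -i \(segmentsListPath) -c copy \(outputPath)"
let returnCode = MobileFFmpeg.execute(command)
if returnCode == RETURN_CODE_SUCCESS {
    print("Concatenation finished: \(outputPath)")
} else {
    print("FFmpeg failed with return code \(returnCode)")
}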
The code below exports a video using AVMutableComposition. In the exported video, if you want an image to display for 3 seconds after the source video finishes, is there a way to do that with AVMutableCompositionTrack, or do you need to add an image layer and animate its appearance after the video ends?

Eventually, the goal is to merge an arbitrary number of images and videos into one master video.

Unfortunately, during testing it seems like AVVideoCompositionCoreAnimationTool severely slows down the export (from under 1 second to 10-20 seconds), so the goal is to avoid AVVideoCompositionCoreAnimationTool if possible.
// Create composition object
let composition = AVMutableComposition()
let compositionVideoTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)
let compositionAudioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
var insertTime = CMTime.zero

// Extract tracks from the slice video
let videoURL = URL(fileURLWithPath: videoPath)
let videoAsset = AVURLAsset(url: videoURL, options: nil)
let sourceVideoTrack = videoAsset.tracks(withMediaType: .video)[0]
let sourceAudioTrack = videoAsset.tracks(withMediaType: .audio)[0]
do {
    try compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: videoAsset.duration), of: sourceVideoTrack, at: .zero)
    try compositionAudioTrack?.insertTimeRange(CMTimeRangeMake(start: .zero, duration: videoAsset.duration), of: sourceAudioTrack, at: .zero)
} catch {
    print("Error with insertTimeRange while exporting video: \(error)")
}

// Export composition to video
let outputURL = getFilePath(getUniqueFilename(gMP4File))
let exporter = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality)
exporter!.outputURL = URL(fileURLWithPath: outputURL)
exporter!.outputFileType = AVFileType.mp4
exporter!.exportAsynchronously {
    self.exportDidFinish(exporter!)
}
After consulting others on SO and doing more web research, it seems this is not possible. Merging an image with a video into a master video that is playable outside the app seems to require AVVideoCompositionCoreAnimationTool.
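If you do accept the AVVideoCompositionCoreAnimationTool cost, a minimal sketch of the overlay setup might look like the following. Here videoComposition, videoAsset, and image are hypothetical stand-ins for your own objects, and the composition itself still has to be extended 3 seconds past the video (e.g. with insertEmptyTimeRange on the composition track) so the image has frames to appear on:

// Build the layer hierarchy the animation tool composites with.
let videoLayer = CALayer()
let imageLayer = CALayer()
let parentLayer = CALayer()
parentLayer.frame = CGRect(origin: .zero, size: videoComposition.renderSize)
videoLayer.frame = parentLayer.frame
imageLayer.frame = parentLayer.frame
imageLayer.contents = image.cgImage // the still to show after the video
imageLayer.opacity = 0.0

// Flip the image layer on at the moment the source video ends.
// (A beginTime of 0 means "now" in Core Animation, so for t = 0 you
// would use AVCoreAnimationBeginTimeAtZero instead.)
let showImage = CABasicAnimation(keyPath: "opacity")
showImage.fromValue = 0.0
showImage.toValue = 1.0
showImage.duration = 0.01
showImage.beginTime = CMTimeGetSeconds(videoAsset.duration)
showImage.isRemovedOnCompletion = false
showImage.fillMode = .forwards
imageLayer.add(showImage, forKey: "showImage")

parentLayer.addSublayer(videoLayer)
parentLayer.addSublayer(imageLayer)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)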