Swift AVFoundation: stitching multiple videos together while keeping the preferred transform (iOS)

I'm trying to stitch multiple video clips together. If I insert each AVAsset into a single AVMutableCompositionTrack, the stitching works, but appending the next asset drops the transform of the previous one, for example the mirroring applied to clips recorded with the front-facing camera. Can I somehow use multiple video AVMutableCompositionTracks in one AVMutableComposition?
// create mix composition
let mixComposition = AVMutableComposition()
// insert a single video track
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
// keep track of the total duration
var totalDuration = kCMTimeZero
// append each video clip to the mutable composition
for videoClip in videoClips {
    if let videoAsset = videoClip.asset, let videoAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first {
        // insert the current clip's video track into the composition
        try videoCompositionTrack!.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: videoAssetTrack, at: totalDuration)
        // this overwrites the transform set for the previous clip
        videoCompositionTrack?.preferredTransform = videoAssetTrack.preferredTransform
        // shift the insertion point to the end of the clip
        totalDuration = CMTimeAdd(totalDuration, videoAsset.duration)
    }
}
// use AVAssetExportSession to export the video
let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset1920x1080)
assetExport?.outputFileType = AVFileType.mp4
// get the recommended URL to save the video to
let movieDestinationUrl = self.getRecommendedSaveUrl()
// set up the asset export session
assetExport?.outputURL = movieDestinationUrl
assetExport?.shouldOptimizeForNetworkUse = true
// export the video to the file system asynchronously
assetExport?.exportAsynchronously(completionHandler: {
    switch assetExport!.status {
    case AVAssetExportSessionStatus.failed:
        break
    case AVAssetExportSessionStatus.cancelled:
        break
    default:
        DispatchQueue.main.async {
            completion?(movieDestinationUrl, nil)
        }
    }
    if assetExport?.error != nil {
        AppDelegate.logger.error("Could not create user video: \((assetExport?.error)!)")
        DispatchQueue.main.async {
            completion?(nil, assetExport?.error)
        }
    }
})
I tried something like the following instead, using multiple AVMutableCompositionTracks with a different CGAffineTransform on each.
// create mix composition
let mixComposition = AVMutableComposition()
// keep track of the total duration
var totalDuration = kCMTimeZero
// give each video clip its own track in the mutable composition
for (index, videoClip) in videoClips.enumerated() {
    if let videoAsset = videoClip.asset, let videoAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first {
        // insert a new video track for this clip
        let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID(index))
        // insert the current clip's video track into the composition
        try videoCompositionTrack!.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration), of: videoAssetTrack, at: totalDuration)
        videoCompositionTrack?.preferredTransform = videoAssetTrack.preferredTransform
        // shift the insertion point to the end of the clip
        totalDuration = CMTimeAdd(totalDuration, videoAsset.duration)
    }
}
// use AVAssetExportSession to export the video
let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset1920x1080)
assetExport?.outputFileType = AVFileType.mp4
// get the recommended URL to save the video to
let movieDestinationUrl = self.getRecommendedSaveUrl()
// set up the asset export session
assetExport?.outputURL = movieDestinationUrl
assetExport?.shouldOptimizeForNetworkUse = true
// export the video to the file system asynchronously
assetExport?.exportAsynchronously(completionHandler: {
    switch assetExport!.status {
    case AVAssetExportSessionStatus.failed:
        break
    case AVAssetExportSessionStatus.cancelled:
        break
    default:
        DispatchQueue.main.async {
            completion?(movieDestinationUrl, nil)
        }
    }
    if assetExport?.error != nil {
        AppDelegate.logger.error("Could not create user video: \((assetExport?.error)!)")
        DispatchQueue.main.async {
            completion?(nil, assetExport?.error)
        }
    }
})
With the code above I don't get any usable video: the result is much shorter than it should be. I'm trying to avoid AVMutableVideoCompositionInstruction because it takes too long to process, but it would still be an option if it worked for any resolution, and especially with mirroring support.
// create mix composition
let mixComposition = AVMutableComposition()
// keep track of the total duration
var totalDuration = kCMTimeZero
// keeps the layer transformation for each video asset
var videoCompositionLayerInstructions = [AVMutableVideoCompositionLayerInstruction]()
// add each video clip to the mutable composition and transform each video layer
for (index, videoClip) in videoClips.enumerated() {
    if let videoAsset = videoClip.asset {
        // use the first video asset track for settings like height and width
        let videoAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first!
        // insert a new video track for this clip
        let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: CMPersistentTrackID(index))
        // insert the current clip's video track into the composition
        try videoCompositionTrack!.insertTimeRange(CMTimeRangeMake(totalDuration, videoAssetTrack.timeRange.duration), of: videoAssetTrack, at: totalDuration)
        videoCompositionTrack?.preferredTransform = videoAssetTrack.preferredTransform
        let videoCompositionLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoAssetTrack)
        videoCompositionLayerInstruction.setTransform((videoCompositionTrack?.preferredTransform)!, at: totalDuration)
        videoCompositionLayerInstruction.setOpacity(0.0, at: videoAsset.duration)
        // collect the instruction
        videoCompositionLayerInstructions.append(videoCompositionLayerInstruction)
        // shift the insertion point to the end of the clip
        totalDuration = CMTimeAdd(totalDuration, videoAsset.duration)
    }
}
let videoCompositionInstruction = AVMutableVideoCompositionInstruction()
videoCompositionInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, totalDuration)
videoCompositionInstruction.layerInstructions = videoCompositionLayerInstructions
let mainComposition = AVMutableVideoComposition()
mainComposition.renderSize = CGSize(width: 1080, height: 1920)
mainComposition.frameDuration = CMTimeMake(1, 30)
mainComposition.instructions = [videoCompositionInstruction]
// use AVAssetExportSession to export the video
let assetExport = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPreset1920x1080)
assetExport?.outputFileType = AVFileType.mp4
// get the recommended URL to save the video to
let movieDestinationUrl = self.getRecommendedSaveUrl()
// set up the asset export session
assetExport?.outputURL = movieDestinationUrl
assetExport?.shouldOptimizeForNetworkUse = true
assetExport?.videoComposition = mainComposition
Does anybody have an idea how to implement this functionality?
Note: I don't need to care about audio at all.
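For what it's worth, the related answers below point at a common fix for this kind of setup: give each clip its own composition track, carry the source track's preferredTransform into an AVMutableVideoCompositionLayerInstruction built on the composition track (not on the source asset track), and switch each clip's opacity at its cumulative end time rather than at its own duration. A minimal sketch of that direction, reusing the question's videoClips array and 1080x1920 render size; the VideoClip type and the function shape are assumptions, not the original project's code:
import AVFoundation

func makeComposition(videoClips: [VideoClip]) throws -> (AVMutableComposition, AVMutableVideoComposition) {
    let mixComposition = AVMutableComposition()
    var layerInstructions = [AVMutableVideoCompositionLayerInstruction]()
    var totalDuration = kCMTimeZero
    for videoClip in videoClips {
        guard let videoAsset = videoClip.asset,
              let videoAssetTrack = videoAsset.tracks(withMediaType: .video).first,
              let track = mixComposition.addMutableTrack(withMediaType: .video,
                                                         preferredTrackID: kCMPersistentTrackID_Invalid)
        else { continue }
        try track.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoAsset.duration),
                                  of: videoAssetTrack, at: totalDuration)
        // apply the source transform through a layer instruction on the
        // composition track instead of overwriting preferredTransform
        let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
        instruction.setTransform(videoAssetTrack.preferredTransform, at: totalDuration)
        totalDuration = CMTimeAdd(totalDuration, videoAsset.duration)
        // hide the clip at its cumulative end time, not at its own duration
        instruction.setOpacity(0.0, at: totalDuration)
        layerInstructions.append(instruction)
    }
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, totalDuration)
    mainInstruction.layerInstructions = layerInstructions
    let videoComposition = AVMutableVideoComposition()
    videoComposition.renderSize = CGSize(width: 1080, height: 1920) // from the question
    videoComposition.frameDuration = CMTimeMake(1, 30)
    videoComposition.instructions = [mainInstruction]
    return (mixComposition, videoComposition)
}
Compared with the third attempt above, the layer instruction is built from the composition track rather than the source asset track, and the opacity switch uses the cumulative time; assigning the returned video composition to the export session should then render each clip with its own transform, mirroring included.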

Related

Trouble applying scaleTimeRange on multiple videos in a AVMutableComposition video

I am attempting to merge videos with scaleTimeRange (to make them slow motion or sped up); however, it is not working as desired: only the first video gets the time-range effect, not the rest of them.
The work is done in the merge-videos function; it is pretty simple, but I am not sure why the scaling of the time range works for the first video only and not for the subsequent ones.
This is a test project to experiment with; it has my current code: https://github.com/meyesyesme/creationMergeProj
This is the merge function I use, with the time-range scaling currently commented out (you can uncomment it to see it not working):
func mergeVideosTestSQ(arrayVideos: [VideoSegment], completion: @escaping (URL?, Error?) -> ()) {
    let mixComposition = AVMutableComposition()
    var instructions: [AVMutableVideoCompositionLayerInstruction] = []
    var insertTime = CMTime(seconds: 0, preferredTimescale: 1)
    print(arrayVideos, "<- arrayVideos")
    /// for each URL add the video and audio tracks and their duration to the composition
    for videoSegment in arrayVideos {
        let sourceAsset = AVAsset(url: videoSegment.videoURL!)
        let frameRange = CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1), duration: sourceAsset.duration)
        guard
            let nthVideoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
            let nthAudioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)), // 0 used to be kCMPersistentTrackID_Invalid
            let assetVideoTrack = sourceAsset.tracks(withMediaType: .video).first
        else {
            print("didn't work")
            return
        }
        var assetAudioTrack: AVAssetTrack?
        assetAudioTrack = sourceAsset.tracks(withMediaType: .audio).first
        print(assetAudioTrack, "<-- assetAudioTrack???", assetAudioTrack?.asset, "<-- its asset", sourceAsset)
        do {
            try nthVideoTrack.insertTimeRange(frameRange, of: assetVideoTrack, at: insertTime)
            try nthAudioTrack.insertTimeRange(frameRange, of: assetAudioTrack!, at: insertTime)
            // MY CURRENT SPEED ATTEMPT:
            let newDuration = CMTimeMultiplyByFloat64(frameRange.duration, multiplier: videoSegment.videoSpeed)
            nthVideoTrack.scaleTimeRange(frameRange, toDuration: newDuration)
            nthAudioTrack.scaleTimeRange(frameRange, toDuration: newDuration)
            print(insertTime.value, "<-- insertTime, newDuration -->", newDuration.value, "sourceAsset duration -->", sourceAsset.duration.value, "frameRange.duration -->", frameRange.duration.value)
            // instructions:
            let nthInstruction = ViewController.videoCompositionInstruction(nthVideoTrack, asset: sourceAsset)
            nthInstruction.setOpacity(0.0, at: CMTimeAdd(insertTime, newDuration)) // sourceAsset.duration
            instructions.append(nthInstruction)
            insertTime = insertTime + newDuration // sourceAsset.duration
        } catch {
            DispatchQueue.main.async {
                print("didn't work (2)")
            }
        }
    }
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1), duration: insertTime)
    mainInstruction.layerInstructions = instructions
    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    mainComposition.renderSize = CGSize(width: 1080, height: 1920)
    let outputFileURL = URL(fileURLWithPath: NSTemporaryDirectory() + "merge.mp4")
    // below clears the old video from the documents folder for the new one...
    let fileManager = FileManager()
    try? fileManager.removeItem(at: outputFileURL)
    print("<now will export: πŸ”₯πŸ”₯πŸ”₯")
    /// try to start an export session and set the path and file type
    if let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) { // DOES NOT WORK WITH AVAssetExportPresetPassthrough
        exportSession.outputFileType = .mov
        exportSession.outputURL = outputFileURL
        exportSession.videoComposition = mainComposition
        exportSession.shouldOptimizeForNetworkUse = true
        /// try to export the file and handle the status cases
        exportSession.exportAsynchronously {
            if let error = exportSession.error {
                completion(nil, error)
            } else {
                completion(exportSession.outputURL, nil)
            }
        }
    }
}
You'll see this behavior: the first video works well, but the subsequent ones do not, and there are issues with when their opacity gets set, etc. I have tried different combinations and this is the closest I have gotten.
I've been stuck on this for a while!
After you scale a clip, the duration of the composition gets recalculated, so you need to append the next part according to the scaled duration. Replace
insertTime = insertTime + duration
with
insertTime = insertTime + newDuration
You also need to update the time passed to setOpacity(_:at:): I'd advise moving that line after the insertTime update and using the new value, to avoid duplicating the addition.
When you're applying the scale, it's applied to the new composition, so you need to use a range relative to the composition:
let currentRange = CMTimeRange(start: insertTime, duration: frameRange.duration)
nthVideoTrack.scaleTimeRange(currentRange, toDuration: newDuration)
nthAudioTrack.scaleTimeRange(currentRange, toDuration: newDuration)
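Putting the three fixes together, the body of the question's loop might look like this sketch (same hypothetical VideoSegment type and videoCompositionInstruction helper as in the question):
try nthVideoTrack.insertTimeRange(frameRange, of: assetVideoTrack, at: insertTime)
try nthAudioTrack.insertTimeRange(frameRange, of: assetAudioTrack!, at: insertTime)
let newDuration = CMTimeMultiplyByFloat64(frameRange.duration, multiplier: videoSegment.videoSpeed)
// scale the range as it sits in the composition, i.e. starting at insertTime
let currentRange = CMTimeRange(start: insertTime, duration: frameRange.duration)
nthVideoTrack.scaleTimeRange(currentRange, toDuration: newDuration)
nthAudioTrack.scaleTimeRange(currentRange, toDuration: newDuration)
let nthInstruction = ViewController.videoCompositionInstruction(nthVideoTrack, asset: sourceAsset)
// advance by the scaled duration first, then hide the clip at its new end time
insertTime = insertTime + newDuration
nthInstruction.setOpacity(0.0, at: insertTime)
instructions.append(nthInstruction)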

Missing Some Videos When Stitching Using AVFoundation

I'm trying to merge multiple videos using AVFoundation. It works fine when I only add two videos, but when I try to add three, the second video's track is empty, although the total duration of the exported video is correct.
Here is my sample code:
The code for adding videos:
func addVideo(videoAsset: AVAsset, isLast: Bool = false)
{
    // Add video track
    guard let videoTrack = self.mixComposition.addMutableTrack(
        withMediaType: .video,
        preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
    else { return }
    do
    {
        try videoTrack.insertTimeRange(
            CMTimeRangeMake(start: .zero, duration: videoAsset.duration),
            of: videoAsset.tracks(withMediaType: .video)[0],
            at: totalVideoTrackDuration)
        // Add instruction for this track
        // (the video helper fixes the video orientation)
        let instruction = VideoHelper.videoCompositionInstruction(videoTrack, asset: videoAsset)
        if !isLast
        {
            // hide this clip when its done rendering
            instruction.setOpacity(0.0, at: videoAsset.duration)
        }
        // add to layer instructions
        self.instructions.append(instruction)
        // get the sum of all added track durations
        self.totalVideoTrackDuration = CMTimeAdd(self.totalVideoTrackDuration, videoAsset.duration)
    }
    catch
    {
        print("Failed to load track")
        return
    }
}
Here is the code for exporting videos:
func export()
{
    // 6
    let mainInstruction = AVMutableVideoCompositionInstruction()
    // set the time range
    mainInstruction.timeRange = CMTimeRangeMake(
        start: .zero,
        duration: self.totalVideoTrackDuration
    )
    mainInstruction.layerInstructions = self.instructions
    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: self.timeScale)
    mainComposition.renderSize = self.videoSize
    guard let documentDirectory = FileManager.default.urls(
        for: .documentDirectory,
        in: .userDomainMask).first
    else { return }
    // file name
    let dateFormatter = DateFormatter()
    dateFormatter.dateStyle = .long
    dateFormatter.timeStyle = .short
    let date = Date().format(format: "mm-dd-yy-HH-mm-ss")!
    let url = documentDirectory.appendingPathComponent("mergeVideo-\(date).mov")
    guard let exporter = AVAssetExportSession(
        asset: self.mixComposition,
        presetName: AVAssetExportPresetHighestQuality)
    else { return }
    exporter.outputURL = url
    exporter.outputFileType = AVFileType.mov
    exporter.shouldOptimizeForNetworkUse = true
    exporter.videoComposition = mainComposition
    exporter.exportAsynchronously {
        PHPhotoLibrary.shared().performChanges({
            PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: exporter.outputURL!)
        }) { saved, error in
            if saved
            {
                print("Export successful")
            }
            else
            {
                print("video error: \(error)")
            }
        }
    }
}
Here is how I call the addVideo function:
// 4-second video
self.addVideo(videoAsset: asset1)
// 3-second video
self.addVideo(videoAsset: asset2)
// 4-second video
self.addVideo(videoAsset: asset3, isLast: true)
// export
self.export()
// the total duration of the exported video is 11 seconds, but the middle part is blank
It turns out the problematic line is:
if !isLast
{
    // hide this clip when its done rendering
    instruction.setOpacity(0.0, at: videoAsset.duration)
}
// get the sum of all added track durations
self.totalVideoTrackDuration = CMTimeAdd(self.totalVideoTrackDuration, videoAsset.duration)
It should be:
// get the sum of all added track durations
self.totalVideoTrackDuration = CMTimeAdd(self.totalVideoTrackDuration, videoAsset.duration)
if !isLast
{
    instruction.setOpacity(0.0, at: self.totalVideoTrackDuration)
}
The documentation for setOpacity is a little bit confusing: I thought the at parameter was a time within that specific video's time range, but it is a time on the overall composition timeline.
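Concretely, with the question's 4-, 3-, and 4-second clips, the switches have to land on the composition timeline like this (instruction1/instruction2 stand in for the per-clip layer instructions):
// clips occupy [0, 4), [4, 7), [7, 11) on the composition timeline
instruction1.setOpacity(0.0, at: CMTimeMake(value: 4, timescale: 1)) // end of clip 1
instruction2.setOpacity(0.0, at: CMTimeMake(value: 7, timescale: 1)) // 4 + 3, not 3
// the last clip needs no opacity switch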

Swift - Workaround/Alternative to M3u8 to play mp4 segment or merge segments into mp4

I used AVAssetExportSession to download the contents of a session URL, but the issue is that you can't download a live stream. To get around it, the live stream is split into 10-second MP4 segments whose URLs come from an m3u8 playlist, and I then use AVAssetExportSession to merge those MP4 segments.
I can merge the clips one by one into one MP4 file, which is what I want, but as the file gets bigger the merge takes longer, and since I am dealing with thousands of segments this becomes impractical.
I thought about using AVPlayerLooper, but I cannot scrub, rewind, or fast-forward through the MP4 segments as if they were a single video.
Is there a way to combine the MP4 clips to play as one video, as the m3u8 does, without merging? Or is there a fast way to merge videos?
Note: the server uses FFmpeg, but I am not allowed to use FFmpeg or pods in the app.
Below is the function to merge videos:
var mp4Array: [AVAsset] = []
var avAssetExportSession: AVAssetExportSession?
var firstAsset: AVAsset?
var secondAsset: AVAsset?

func mergeVideos() {
    firstAsset = mp4Array.first
    secondAsset = mp4Array[1]
    guard let firstAsset = firstAsset, let secondAsset = secondAsset else { return }
    let mixComposition = AVMutableComposition()
    guard let firstTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
    do {
        try firstTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: firstAsset.duration),
                                       of: firstAsset.tracks(withMediaType: .video)[0],
                                       at: CMTime.zero)
    } catch {
        print("Couldn't load track 1")
        return
    }
    guard let secondTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
    do {
        try secondTrack.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: secondAsset.duration),
                                        of: secondAsset.tracks(withMediaType: .video)[0],
                                        at: firstAsset.duration)
    } catch {
        print("Couldn't load track 2")
        return
    }
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: CMTimeAdd(firstAsset.duration, secondAsset.duration))
    let firstAssetInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: firstTrack)
    firstAssetInstruction.setOpacity(0.0, at: firstAsset.duration)
    let secondAssetInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: secondTrack)
    mainInstruction.layerInstructions = [firstAssetInstruction, secondAssetInstruction]
    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    mainComposition.renderSize = firstTrack.naturalSize
    guard let documentDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else { return }
    let url = documentDirectory.appendingPathComponent("MergedVideos/mergeVideo\(videoInt).mp4")
    guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
    exporter.outputURL = url
    exporter.outputFileType = AVFileType.mp4
    exporter.shouldOptimizeForNetworkUse = true
    exporter.videoComposition = mainComposition
    exporter.exportAsynchronously {
        if exporter.status == .completed {
            let avasset = AVAsset(url: url)
            self.mergeUrl = avasset
            if self.mp4Array.count > 1 {
                print("This adds the merged video to the front of the mp4Array")
                self.mp4Array.remove(at: 1)
                self.mp4Array.removeFirst()
                self.videoInt = self.videoInt + 1
                self.mp4Array.append(self.mergeUrl!)
                self.mp4Array.bringToFront(item: self.mp4Array.last!)
            }
            if self.mp4Array.count > 1 {
                if self.mergeUrl != nil {
                    self.mergeVideos()
                }
            } else {
                var numberOfVideosDeleted = 0
                while numberOfVideosDeleted < self.videoInt - 1 {
                    do {
                        print("deleting")
                        let url = documentDirectory.appendingPathComponent("MergedVideos/mergeVideo\(numberOfVideosDeleted).mp4")
                        try FileManager.default.removeItem(at: url)
                        numberOfVideosDeleted = numberOfVideosDeleted + 1
                    } catch {
                        print("Error removing videos")
                    }
                }
                self.deleteCurrentSegementsInFolder()
            }
        }
    }
}
I ended up using FFmpeg Mobile to concatenate the videos and it works really well; it takes around one minute to concatenate a 3 GB movie file.
Link to the CocoaPod below:
https://github.com/tanersener/mobile-ffmpeg
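If the goal is only playback, it's worth noting that the merge step can be skipped entirely: an AVMutableComposition can be handed straight to an AVPlayerItem, and the segments then play, scrub, and seek as one video with no export. A rough sketch under that assumption (segmentURLs is a hypothetical array of local MP4 segment URLs):
import AVFoundation

// build one composition from all segments and play it directly, no export
func makePlayerItem(segmentURLs: [URL]) throws -> AVPlayerItem {
    let composition = AVMutableComposition()
    guard let videoTrack = composition.addMutableTrack(withMediaType: .video,
                                                       preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
    else { throw NSError(domain: "merge", code: -1) }
    var cursor = CMTime.zero
    for url in segmentURLs {
        let asset = AVAsset(url: url)
        guard let sourceTrack = asset.tracks(withMediaType: .video).first else { continue }
        try videoTrack.insertTimeRange(CMTimeRange(start: .zero, duration: asset.duration),
                                       of: sourceTrack, at: cursor)
        cursor = CMTimeAdd(cursor, asset.duration)
    }
    // AVPlayerItem accepts the composition directly; seeking works across segment boundaries
    return AVPlayerItem(asset: composition)
}

// usage: let player = AVPlayer(playerItem: try makePlayerItem(segmentURLs: urls))
An export is only needed when a standalone file has to leave the app; for in-app playback the composition itself is enough.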

Setting multiple Volumes to each Video tracks using AudioMixInputParameters AVFoundation is not working in Swift iOS

I am working on a video-based application in Swift. As per the requirements, I have to select multiple videos from the device gallery, set up a different CIFilter effect and volume for each video asset, and then merge all the videos and save the final video. As an output, when I play the final video, the sound volume should change accordingly per clip.
I have already merged all the selected video assets into one, with a different CIFilter effect per clip, but my problem is that when I try to set a volume for each video clip it's not working: I get the default volume for my final video. Here is my code:
func addFilerEffectAndVolumeToIndividualVideoClip(_ assetURL: URL, video: VideoFileModel, completion: ((_ session: AVAssetExportSession?, _ outputURL: URL?) -> ())?) {
    let videoFilteredAsset = AVAsset(url: assetURL)
    print(videoFilteredAsset)
    createVideoComposition(myAsset: videoFilteredAsset, videos: video)
    let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
    let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("\(video.fileID)_\("FilterVideo").mov")
    let filePath = url.path
    let fileManager = FileManager.default
    do {
        if fileManager.fileExists(atPath: filePath) {
            print("FILE AVAILABLE")
            try fileManager.removeItem(atPath: filePath)
        } else {
            print("FILE NOT AVAILABLE")
        }
    } catch _ {
    }
    let composition: AVMutableComposition = AVMutableComposition()
    let compositionVideo: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())
    let compositionAudioVideo: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())
    // Add video to the final record
    do {
        try compositionVideo.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoFilteredAsset.duration), of: videoFilteredAsset.tracks(withMediaType: AVMediaTypeVideo)[0], at: kCMTimeZero)
    } catch _ {
    }
    // Extract audio from the video and the music
    let audioMix: AVMutableAudioMix = AVMutableAudioMix()
    var audioMixParam: [AVMutableAudioMixInputParameters] = []
    let assetVideoTrack: AVAssetTrack = videoFilteredAsset.tracks(withMediaType: AVMediaTypeAudio)[0]
    let videoParam: AVMutableAudioMixInputParameters = AVMutableAudioMixInputParameters(track: assetVideoTrack)
    videoParam.trackID = compositionAudioVideo.trackID
    // Set the final volume of the audio record and the music
    videoParam.setVolume(video.videoClipVolume, at: kCMTimeZero)
    // Add setting
    audioMixParam.append(videoParam)
    // Add audio to the final record
    do {
        try compositionAudioVideo.insertTimeRange(CMTimeRangeMake(kCMTimeZero, videoFilteredAsset.duration), of: assetVideoTrack, at: kCMTimeZero)
    } catch _ {
        assertionFailure()
    }
    // Fading volume out for background music
    let durationInSeconds = CMTimeGetSeconds(videoFilteredAsset.duration)
    let firstSecond = CMTimeRangeMake(CMTimeMakeWithSeconds(0, 1), CMTimeMakeWithSeconds(1, 1))
    let lastSecond = CMTimeRangeMake(CMTimeMakeWithSeconds(durationInSeconds - 1, 1), CMTimeMakeWithSeconds(1, 1))
    videoParam.setVolumeRamp(fromStartVolume: 0, toEndVolume: video.videoClipVolume, timeRange: firstSecond)
    videoParam.setVolumeRamp(fromStartVolume: video.videoClipVolume, toEndVolume: 0, timeRange: lastSecond)
    // Add parameters
    audioMix.inputParameters = audioMixParam
    // Export part, left for facility
    let exporter = AVAssetExportSession(asset: videoFilteredAsset, presetName: AVAssetExportPresetHighestQuality)!
    exporter.videoComposition = videoFilterComposition
    exporter.outputURL = url
    exporter.outputFileType = AVFileTypeQuickTimeMovie
    exporter.audioMix = audioMix
    exporter.exportAsynchronously(completionHandler: { () -> Void in
        completion!(exporter, url)
    })
}
After that, I use another method to merge all the video clips with AVAssetExportSession; there I am not setting any AudioMixInputParameters.
Note: when I set the volume in the final merging method using the AVAssetExportSession's audio mix, the volume changes for the full video.
My question: is it possible to set a separate volume for each video clip? Please suggest. Thank you!
Here is the working solution for my question:
func addVolumeToIndividualVideoClip(_ assetURL: URL, video: VideoFileModel, completion: ((_ session: AVAssetExportSession?, _ outputURL: URL?) -> ())?) {
    // Create an asset from the URL
    let filteredVideoAsset: AVAsset = AVAsset(url: assetURL)
    video.fileID = String(video.videoID)
    // Get the path of the app's Documents directory
    let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
    let url = URL(fileURLWithPath: documentDirectory).appendingPathComponent("\(video.fileID)_\("FilterVideo").mov")
    let filePath = url.path
    let fileManager = FileManager.default
    do {
        if fileManager.fileExists(atPath: filePath) {
            print("FILE AVAILABLE")
            try fileManager.removeItem(atPath: filePath)
        } else {
            print("FILE NOT AVAILABLE")
        }
    } catch _ {
    }
    let composition: AVMutableComposition = AVMutableComposition()
    let compositionVideo: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID())
    let compositionAudioVideo: AVMutableCompositionTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID())
    // Add video to the final record
    do {
        try compositionVideo.insertTimeRange(CMTimeRangeMake(kCMTimeZero, filteredVideoAsset.duration), of: filteredVideoAsset.tracks(withMediaType: AVMediaTypeVideo)[0], at: kCMTimeZero)
    } catch _ {
    }
    // Extract audio from the video and the music
    let audioMix: AVMutableAudioMix = AVMutableAudioMix()
    var audioMixParam: [AVMutableAudioMixInputParameters] = []
    let assetVideoTrack: AVAssetTrack = filteredVideoAsset.tracks(withMediaType: AVMediaTypeAudio)[0]
    let videoParam: AVMutableAudioMixInputParameters = AVMutableAudioMixInputParameters(track: assetVideoTrack)
    videoParam.trackID = compositionAudioVideo.trackID
    // Set the final volume of the audio record and the music
    videoParam.setVolume(video.videoVolume, at: kCMTimeZero)
    // Add setting
    audioMixParam.append(videoParam)
    // Add audio to the final record
    // First: the audio of the record, second: the music
    do {
        try compositionAudioVideo.insertTimeRange(CMTimeRangeMake(kCMTimeZero, filteredVideoAsset.duration), of: assetVideoTrack, at: kCMTimeZero)
    } catch _ {
        assertionFailure()
    }
    // Fading volume out for background music
    let durationInSeconds = CMTimeGetSeconds(filteredVideoAsset.duration)
    let firstSecond = CMTimeRangeMake(CMTimeMakeWithSeconds(0, 1), CMTimeMakeWithSeconds(1, 1))
    let lastSecond = CMTimeRangeMake(CMTimeMakeWithSeconds(durationInSeconds - 1, 1), CMTimeMakeWithSeconds(1, 1))
    videoParam.setVolumeRamp(fromStartVolume: 0, toEndVolume: video.videoVolume, timeRange: firstSecond)
    videoParam.setVolumeRamp(fromStartVolume: video.videoVolume, toEndVolume: 0, timeRange: lastSecond)
    // Add parameters
    audioMix.inputParameters = audioMixParam
    // Remove the previous temp video if it exists
    let filemgr = FileManager.default
    do {
        if filemgr.fileExists(atPath: "\(video.fileID)_\("FilterVideo").mov") {
            try filemgr.removeItem(atPath: "\(video.fileID)_\("FilterVideo").mov")
        } else {
        }
    } catch _ {
    }
    // Export the final record
    let exporter: AVAssetExportSession = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality)!
    exporter.outputURL = url
    exporter.outputFileType = AVFileTypeMPEG4
    exporter.audioMix = audioMix
    exporter.exportAsynchronously(completionHandler: { () -> Void in
        completion!(exporter, url)
        // self.saveVideoToLibrary(from: filePath)
    })
}
I found that exporting an asset with the AVAssetExportPresetPassthrough preset has no impact on the output volume. When I tried AVAssetExportPresetLowQuality, the volume change was applied successfully.
I wish this were better documented somewhere :(
The working code:
// Assume we have:
let composition: AVMutableComposition
var inputParameters = [AVAudioMixInputParameters]()

// We add a track
let trackComposition = composition.addMutableTrack(...)

// Configure the volume for this track
let inputParameter = AVMutableAudioMixInputParameters(track: trackComposition)
inputParameter.setVolume(desiredVolume, at: startTime)

// It works even without setting the `trackID`
// inputParameter.trackID = trackComposition.trackID

inputParameters.append(inputParameter)

// Apply the gathered `inputParameters` before exporting
let audioMix = AVMutableAudioMix()
audioMix.inputParameters = inputParameters

// I found it's not working if using `AVAssetExportPresetPassthrough`,
// so try `AVAssetExportPresetLowQuality` first
let export = AVAssetExportSession(..., presetName: AVAssetExportPresetLowQuality)
export.audioMix = audioMix
I tested this with multiple asset-track insertions into the same composition track, setting a different volume for each insertion. It seems to work.
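For reference, here is that pattern in one sketch: clips inserted back-to-back into a single composition audio track, with a volume step at each clip boundary (the clips array of asset/volume pairs is hypothetical):
import AVFoundation

func makeAudioMix(clips: [(asset: AVAsset, volume: Float)],
                  composition: AVMutableComposition) throws -> AVMutableAudioMix {
    guard let audioTrack = composition.addMutableTrack(withMediaType: .audio,
                                                       preferredTrackID: kCMPersistentTrackID_Invalid)
    else { throw NSError(domain: "audioMix", code: -1) }
    let params = AVMutableAudioMixInputParameters(track: audioTrack)
    var cursor = CMTime.zero
    for clip in clips {
        guard let sourceAudio = clip.asset.tracks(withMediaType: .audio).first else { continue }
        try audioTrack.insertTimeRange(CMTimeRange(start: .zero, duration: clip.asset.duration),
                                       of: sourceAudio, at: cursor)
        // each setVolume takes effect from its `at:` time until the next change
        params.setVolume(clip.volume, at: cursor)
        cursor = CMTimeAdd(cursor, clip.asset.duration)
    }
    let audioMix = AVMutableAudioMix()
    audioMix.inputParameters = [params]
    return audioMix
}
As noted above, pick an export preset that actually re-encodes the audio (i.e. not AVAssetExportPresetPassthrough), otherwise the mix appears to be ignored.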

How to export arbitrary segment of video? "Operation Stopped" error when exporting last X seconds of video

The goal is to export an arbitrary segment of some video (e.g., middle third, last half), but AVAssetExportSession only succeeds if the starting point is the start of the video.
If cmStartTime is not 0, AVAssetExportSession fails with this error:
Failed: Optional(Error Domain=AVFoundationErrorDomain Code=-11841
"Operation Stopped" UserInfo=0x175872d00
{NSLocalizedDescription=Operation Stopped,
NSLocalizedFailureReason=The video could not be composed.}).
// Create main composition & its tracks
let mainComposition = AVMutableComposition()
let compositionVideoTrack = mainComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
let compositionAudioTrack = mainComposition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: CMPersistentTrackID(kCMPersistentTrackID_Invalid))
// Get source video & audio tracks
let videoURL = NSURL(fileURLWithPath: fileURL)
let videoAsset = AVURLAsset(URL: videoURL, options: nil)
let sourceVideoTrack = videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0]
let sourceAudioTrack = videoAsset.tracksWithMediaType(AVMediaTypeAudio)[0]
// Define time values for video
let timescale = Int32(600)
let cmStartTime = CMTimeMake(Int64(CGFloat(0.5) * CGFloat(timescale)), timescale)
let cmEndTime = CMTimeMake(10, 1)
let timeRange = CMTimeRangeMake(cmStartTime, cmEndTime)
// Add source tracks to composition
do {
    try compositionVideoTrack.insertTimeRange(timeRange, ofTrack: sourceVideoTrack, atTime: cmStartTime)
    try compositionAudioTrack.insertTimeRange(timeRange, ofTrack: sourceAudioTrack, atTime: cmStartTime)
} catch {
    printError("Error with insertTimeRange while exporting video: \(error)")
}
// Create video composition
let renderSize = compositionVideoTrack.naturalSize
let videoComposition = AVMutableVideoComposition()
videoComposition.renderSize = renderSize
videoComposition.frameDuration = CMTimeMake(Int64(1), Int32(frameRate))
// Add layer instruction to video composition
...
// Apply effects to video
...
// Define export URL
let exportPath = getUniqueTempPath(gMP4File)
let exportURL = NSURL(fileURLWithPath: exportPath)
// Create exporter
let exporter = AVAssetExportSession(asset: mainComposition, presetName: AVAssetExportPresetHighestQuality)!
exporter.videoComposition = videoComposition
exporter.outputFileType = AVFileTypeMPEG4
exporter.outputURL = exportURL
exporter.shouldOptimizeForNetworkUse = true
exporters.append(exporter)
// Export video
exporter.exportAsynchronouslyWithCompletionHandler() {
    // Finish stuff
}
The problem arose from not understanding CMTimeRangeMake and insertTimeRange.
The second value of CMTimeRangeMake should be the clip's duration, not its end time. So if your start time is the 5-second mark and the clip lasts 10 seconds, the second value should be 10, not 15.
The atTime parameter of insertTimeRange should be kCMTimeZero, since the goal is to create a new clip. In other words, this value says where in the new track to insert the clip from the source track.
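Applied to the question's code, the corrected time math might look like this sketch (same Swift 2-era syntax; the 5-second start and 10-second duration are illustrative):
// range in the SOURCE asset: starts at 5 s, lasts 10 s (duration, not end time)
let cmStartTime = CMTimeMakeWithSeconds(5, timescale)
let cmDuration = CMTimeMakeWithSeconds(10, timescale)
let timeRange = CMTimeRangeMake(cmStartTime, cmDuration)
// insert at the start of the new composition track, not at cmStartTime
try compositionVideoTrack.insertTimeRange(timeRange, ofTrack: sourceVideoTrack, atTime: kCMTimeZero)
try compositionAudioTrack.insertTimeRange(timeRange, ofTrack: sourceAudioTrack, atTime: kCMTimeZero)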
