AVAudioMix audio ducking not working - iOS

I'm trying to do audio editing on an AVMutableComposition that I have built.
var commentaryTimeRange = CMTimeRange(start: commentaryItem.startTimeInTimeline, duration: commentaryItem.timeRange.duration)
if CMTimeCompare(CMTimeRangeGetEnd(commentaryTimeRange), composition.duration) == 1 {
    commentaryTimeRange.duration = CMTimeSubtract(composition.duration, commentaryTimeRange.start)
    commentaryItem.timeRange = commentaryTimeRange
}

// Add the commentary track
let compositionCommentaryTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
let track = commentaryItem.asset.tracks(withMediaType: AVMediaTypeAudio).first!
try! compositionCommentaryTrack.insertTimeRange(CMTimeRange(start: kCMTimeZero, duration: commentaryTimeRange.duration), of: track, at: commentaryTimeRange.start)

let tracksToDuck = composition.tracks(withMediaType: AVMediaTypeAudio)

var trackMixArray = [AVMutableAudioMixInputParameters]()
let rampDuration = CMTime(seconds: 1, preferredTimescale: 2)
for track in tracksToDuck {
    let trackMix = AVMutableAudioMixInputParameters(track: track)
    trackMix.setVolumeRamp(fromStartVolume: 1.0, toEndVolume: 0.2, timeRange: CMTimeRange(start: CMTimeSubtract(commentaryTimeRange.start, rampDuration), duration: CMTimeSubtract(commentaryTimeRange.duration, rampDuration)))
    trackMix.setVolumeRamp(fromStartVolume: 0.2, toEndVolume: 1.0, timeRange: CMTimeRange(start: CMTimeRangeGetEnd(commentaryTimeRange), duration: rampDuration))
    trackMixArray.append(trackMix)
}

let audioMix = AVMutableAudioMix()
audioMix.inputParameters = trackMixArray
Basically I'm trying to add a commentary track over a video by ducking the original audio volume.
The audio is correctly mixed into the output, but the audio mix instructions seem to be ignored.
Of course the audio mix is passed to the AVPlayerItem; from debugging I can see that all the instructions are there and correctly handed over to it.
func makePlayable() -> AVPlayerItem {
    let playerItem = AVPlayerItem(asset: composition.copy() as! AVAsset, automaticallyLoadedAssetKeys: NewsPlayerViewController.assetKeysRequiredToPlay)
    playerItem.videoComposition = videoComposition
    playerItem.audioMix = audioMix?.copy() as! AVAudioMix?
    if let overlayLayer = overlayLayer {
        let syncLayer = AVSynchronizedLayer(playerItem: playerItem)
        syncLayer.addSublayer(overlayLayer)
        playerItem.syncLayer = syncLayer
    }
    return playerItem
}
I've found some answers that point to missing track identifiers as the cause, or to a mismatch between a composition that has one and a track that doesn't.
My composition doesn't use any track IDs, and the AVEdit sample code from Apple doesn't use them either, yet it works.

The solution was simply to collect the tracks to duck BEFORE adding the commentary track; otherwise tracksToDuck also includes the freshly added commentary track, and the volume ramps end up applied to the commentary itself.
let tracksToDuck = composition.tracks(withMediaType: AVMediaTypeAudio) // <- MOVED HERE, TO THE TOP

var commentaryTimeRange = CMTimeRange(start: commentaryItem.startTimeInTimeline, duration: commentaryItem.timeRange.duration)
if CMTimeCompare(CMTimeRangeGetEnd(commentaryTimeRange), composition.duration) == 1 {
    commentaryTimeRange.duration = CMTimeSubtract(composition.duration, commentaryTimeRange.start)
    commentaryItem.timeRange = commentaryTimeRange
}

// Add the commentary track
let compositionCommentaryTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
let track = commentaryItem.asset.tracks(withMediaType: AVMediaTypeAudio).first!
try! compositionCommentaryTrack.insertTimeRange(CMTimeRange(start: kCMTimeZero, duration: commentaryTimeRange.duration), of: track, at: commentaryTimeRange.start)

var trackMixArray = [AVMutableAudioMixInputParameters]()
let rampDuration = CMTime(seconds: 1, preferredTimescale: 2)
for track in tracksToDuck {
    let trackMix = AVMutableAudioMixInputParameters(track: track)
    trackMix.setVolumeRamp(fromStartVolume: 1.0, toEndVolume: 0.2, timeRange: CMTimeRange(start: CMTimeSubtract(commentaryTimeRange.start, rampDuration), duration: CMTimeSubtract(commentaryTimeRange.duration, rampDuration)))
    trackMix.setVolumeRamp(fromStartVolume: 0.2, toEndVolume: 1.0, timeRange: CMTimeRange(start: CMTimeRangeGetEnd(commentaryTimeRange), duration: rampDuration))
    trackMixArray.append(trackMix)
}

let audioMix = AVMutableAudioMix()
audioMix.inputParameters = trackMixArray
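For completeness: the same mix object also works at export time, not just on a player item. A minimal sketch, assuming the composition and audioMix built above and a hypothetical outputURL:
if let export = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) {
    export.audioMix = audioMix               // same object the player item uses
    export.outputURL = outputURL             // hypothetical destination URL
    export.outputFileType = AVFileTypeQuickTimeMovie
    export.exportAsynchronously {
        // inspect export.status / export.error here
    }
}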

Related

AVAssetExportSession - how to trim milliseconds from video duration

How can I trim milliseconds from a video URL when using AVAssetExportSession? I'm using the code below, which gives the final video a duration like 15.233333334 seconds or 17.9333333334 seconds, depending on the number of assets and their time frames. Once they are all added together, I want to trim the mixComposition to 15 seconds, 17 seconds, etc.
AVMutableComposition:
let mixComposition = AVMutableComposition()
let compositionVideoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let soundtrackTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
var insertTime = CMTime.zero
for videoAsset in videoAssets {
    do {
        let videoTrack = videoAsset.tracks(withMediaType: .video)[0]
        try compositionVideoTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: videoAsset.duration), of: videoTrack, at: insertTime)
        let audioTrack = videoAsset.tracks(withMediaType: .audio)[0]
        try soundtrackTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: videoAsset.duration), of: audioTrack, at: insertTime)
        insertTime = CMTimeAdd(insertTime, videoAsset.duration)
    } catch {
        // Insertion failed; ignored in this example.
    }
}
AVAssetExportSession:
guard let exporter = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) else { return }
let start = CMTimeMakeWithSeconds(.zero, preferredTimescale: 600)
let videoDuration = CMTimeMakeWithSeconds(CMTimeGetSeconds(mixComposition.duration), preferredTimescale: 600) // duration is a CMTime; convert to seconds first
let range = CMTimeRangeMake(start: start, duration: videoDuration)
exporter.timeRange = range
// ...
I found an easy way to do this. There is a built-in function named trunc() that truncates the fractional part and leaves only a whole number.
If using a AVMutableComposition():
let mixComposition = AVMutableComposition()
// ...
let videoDuration = CMTimeGetSeconds(mixComposition.duration)
let dub = Double(videoDuration)
let durationTruncated = trunc(dub)
print(".......truncate: ", durationTruncated)
let duration = CMTimeMakeWithSeconds(durationTruncated, preferredTimescale: 600)
let start = CMTimeMakeWithSeconds(.zero, preferredTimescale: 600)
let range = CMTimeRangeMake(start: start, duration: duration)
exporter.timeRange = range
If using a url from the photoLibrary:
let asset = AVURLAsset(url: yourLibraryUrl) // if this isn't a libraryUrl you will need to run it through asset.loadValuesAsynchronously and use the "duration" asset key to get the duration first
let videoDuration = CMTimeGetSeconds(asset.duration)
let dub = Double(videoDuration)
let durationTruncated = trunc(dub)
print(".......truncate: ", durationTruncated)
let duration = CMTimeMakeWithSeconds(durationTruncated, preferredTimescale: 600)
let start = CMTimeMakeWithSeconds(.zero, preferredTimescale: 600)
let range = CMTimeRangeMake(start: start, duration: duration)
exporter.timeRange = range
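An equivalent way to get the same result without the intermediate Double variables, staying closer to CMTime (a sketch under the same assumptions, using the exporter from above):
// CMTimeGetSeconds already returns a Float64, so trunc() can be applied directly.
let wholeSeconds = trunc(CMTimeGetSeconds(mixComposition.duration))
let duration = CMTimeMakeWithSeconds(wholeSeconds, preferredTimescale: 600)
exporter.timeRange = CMTimeRangeMake(start: .zero, duration: duration)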

Trouble applying scaleTimeRange on multiple videos in an AVMutableComposition video

I am attempting to merge videos with scaleTimeRange (to make them slo-mo or sped-up); however, it is not working as desired. Only the first video gets the time-range effect, not all of them.
The work is done in the merge videos function; it is pretty simple. However, I am not sure why the time-range scaling works for the first video only and not for the following ones.
This is a test project to test with, it has my current code: https://github.com/meyesyesme/creationMergeProj
This is the merge function I use, with the time range scaling currently commented out (you can uncomment to see it not working):
func mergeVideosTestSQ(arrayVideos: [VideoSegment], completion: @escaping (URL?, Error?) -> ()) {
    let mixComposition = AVMutableComposition()
    var instructions: [AVMutableVideoCompositionLayerInstruction] = []
    var insertTime = CMTime(seconds: 0, preferredTimescale: 1)
    print(arrayVideos, "<- arrayVideos")
    /// For each URL add the video and audio tracks and their duration to the composition.
    for videoSegment in arrayVideos {
        let sourceAsset = AVAsset(url: videoSegment.videoURL!)
        let frameRange = CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1), duration: sourceAsset.duration)
        guard
            let nthVideoTrack = mixComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
            let nthAudioTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)),
            let assetVideoTrack = sourceAsset.tracks(withMediaType: .video).first
        else {
            print("didn't work")
            return
        }
        var assetAudioTrack: AVAssetTrack?
        assetAudioTrack = sourceAsset.tracks(withMediaType: .audio).first
        print(assetAudioTrack, "<-- assetAudioTrack", assetAudioTrack?.asset, "<-- its asset", sourceAsset)
        do {
            try nthVideoTrack.insertTimeRange(frameRange, of: assetVideoTrack, at: insertTime)
            try nthAudioTrack.insertTimeRange(frameRange, of: assetAudioTrack!, at: insertTime)
            // MY CURRENT SPEED ATTEMPT:
            let newDuration = CMTimeMultiplyByFloat64(frameRange.duration, multiplier: videoSegment.videoSpeed)
            nthVideoTrack.scaleTimeRange(frameRange, toDuration: newDuration)
            nthAudioTrack.scaleTimeRange(frameRange, toDuration: newDuration)
            print(insertTime.value, "<-- insertTime, newDuration -->", newDuration.value, "sourceAsset duration -->", sourceAsset.duration.value, "frameRange.duration -->", frameRange.duration.value)
            // Instructions:
            let nthInstruction = ViewController.videoCompositionInstruction(nthVideoTrack, asset: sourceAsset)
            nthInstruction.setOpacity(0.0, at: CMTimeAdd(insertTime, newDuration)) // was sourceAsset.duration
            instructions.append(nthInstruction)
            insertTime = insertTime + newDuration // was sourceAsset.duration
        } catch {
            DispatchQueue.main.async {
                print("didn't work 2")
            }
        }
    }
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1), duration: insertTime)
    mainInstruction.layerInstructions = instructions
    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
    mainComposition.renderSize = CGSize(width: 1080, height: 1920)
    let outputFileURL = URL(fileURLWithPath: NSTemporaryDirectory() + "merge.mp4")
    // Clear any previously merged video from the temporary directory.
    let fileManager = FileManager()
    try? fileManager.removeItem(at: outputFileURL)
    print("<now will export: 🔥>")
    /// Try to start an export session and set the path and file type.
    if let exportSession = AVAssetExportSession(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality) { // does NOT work with AVAssetExportPresetPassthrough
        exportSession.outputFileType = .mov
        exportSession.outputURL = outputFileURL
        exportSession.videoComposition = mainComposition
        exportSession.shouldOptimizeForNetworkUse = true
        /// Try to export the file and handle the status cases.
        exportSession.exportAsynchronously {
            if let url = exportSession.outputURL {
                completion(url, nil)
            }
            if let error = exportSession.error {
                completion(nil, error)
            }
        }
    }
}
You'll see this behavior: the first video works well, but the following ones do not, and their setOpacity times are off, etc. I have tried different combinations and this is the closest one yet.
I've been stuck on this for a while!
After you scale the video, the duration of the composition gets recalculated, so you need to append the next part according to this change. Replace
insertTime = insertTime + duration
with
insertTime = insertTime + newDuration
You also need to update the setOpacity at: value. I'd advise you to move that line after the insertTime update and use the new value, to remove the duplicated work here.
When you apply the scale, it's applied to the new composition, so you need to use a range relative to the composition:
let currentRange = CMTimeRange(start: insertTime, duration: frameRange.duration)
nthVideoTrack.scaleTimeRange(currentRange, toDuration: newDuration)
nthAudioTrack.scaleTimeRange(currentRange, toDuration: newDuration)
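Putting all three changes together, the body of the do block becomes something like this (a sketch against the question's code; videoSegment.videoSpeed is assumed to be the duration multiplier):
try nthVideoTrack.insertTimeRange(frameRange, of: assetVideoTrack, at: insertTime)
try nthAudioTrack.insertTimeRange(frameRange, of: assetAudioTrack!, at: insertTime)

// Scale the range just inserted into the composition, i.e. the range starting at insertTime.
let newDuration = CMTimeMultiplyByFloat64(frameRange.duration, multiplier: videoSegment.videoSpeed)
let currentRange = CMTimeRange(start: insertTime, duration: frameRange.duration)
nthVideoTrack.scaleTimeRange(currentRange, toDuration: newDuration)
nthAudioTrack.scaleTimeRange(currentRange, toDuration: newDuration)

// Advance the cursor by the scaled duration, then hide this track at its new end time.
insertTime = CMTimeAdd(insertTime, newDuration)
let nthInstruction = ViewController.videoCompositionInstruction(nthVideoTrack, asset: sourceAsset)
nthInstruction.setOpacity(0.0, at: insertTime)
instructions.append(nthInstruction)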

Loop audio for a certain duration on iOS

I am wondering what the best solution is for looping audio for a defined duration on iOS.
I am currently playing around with
AVAudioPlayer (where I can define a repeat count but can't define an end time)
AVPlayer (where I can define a forwardPlaybackEndTime but not a loop count)
AVPlayerLooper (which I don't yet fully understand)
So what I need is to define a duration for which a certain sound file is repeated. For example, I have an 8-second MP3 and want to play it for, say, one minute.
What would also be suuuuper great is if I could cross-fade when it starts over again.
You were on the right track with AVPlayerLooper.
This is how you set up an AVPlayerLooper:
var playerLooper: AVPlayerLooper!
var player: AVQueuePlayer!

func play(_ url: URL) {
    let asset = AVAsset(url: url)
    let playerItem = AVPlayerItem(asset: asset)
    player = AVQueuePlayer(playerItem: playerItem)
    playerLooper = AVPlayerLooper(player: player, templateItem: playerItem)
    player.play()
}
To stop the loop after a set amount of time you can use addBoundaryTimeObserver(forTimes:queue:using:)
For example:
let assetDuration = CMTimeGetSeconds(asset.duration)
let maxDuration = 60.0 // Define max duration
let maxLoops = floor(maxDuration / assetDuration)
let lastLoopDuration = maxDuration - (assetDuration * maxLoops)
let boundaryTime = CMTimeMakeWithSeconds(lastLoopDuration, preferredTimescale: 1)
let boundaryTimeValue = NSValue(time: boundaryTime)
player.addBoundaryTimeObserver(forTimes: [boundaryTimeValue], queue: DispatchQueue.main) { [weak self] in
    if self?.playerLooper.loopCount == Int(maxLoops) {
        self?.player.pause()
    }
}
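If frame-accurate stopping isn't required, a simpler alternative is to pause after a wall-clock delay (a sketch, reusing player and maxDuration from above):
// Stop playback roughly maxDuration seconds after starting.
DispatchQueue.main.asyncAfter(deadline: .now() + maxDuration) { [weak self] in
    self?.player.pause()
}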
For fading in/out you have to set the audioMix property on your AVPlayerItem instance before using it.
let introRange = CMTimeRangeMake(start: CMTimeMakeWithSeconds(0, preferredTimescale: 1), duration: CMTimeMakeWithSeconds(1, preferredTimescale: 1))
let endingSecond = CMTimeRangeMake(start: CMTimeMakeWithSeconds(assetDuration - 1, preferredTimescale: 1), duration: CMTimeMakeWithSeconds(1, preferredTimescale: 1))
let inputParams = AVMutableAudioMixInputParameters(track: asset.tracks.first! as AVAssetTrack)
inputParams.setVolumeRamp(fromStartVolume: 0, toEndVolume: 1, timeRange: introRange)
inputParams.setVolumeRamp(fromStartVolume: 1, toEndVolume: 0, timeRange: endingSecond)
let audioMix = AVMutableAudioMix()
audioMix.inputParameters = [inputParams]
playerItem.audioMix = audioMix
Complete function:
func play(_ url: URL) {
    let asset = AVAsset(url: url)
    let playerItem = AVPlayerItem(asset: asset)
    let assetDuration = CMTimeGetSeconds(asset.duration)

    let introRange = CMTimeRangeMake(start: CMTimeMakeWithSeconds(0, preferredTimescale: 1), duration: CMTimeMakeWithSeconds(1, preferredTimescale: 1))
    let endingSecond = CMTimeRangeMake(start: CMTimeMakeWithSeconds(assetDuration - 1, preferredTimescale: 1), duration: CMTimeMakeWithSeconds(1, preferredTimescale: 1))
    let inputParams = AVMutableAudioMixInputParameters(track: asset.tracks.first! as AVAssetTrack)
    inputParams.setVolumeRamp(fromStartVolume: 0, toEndVolume: 1, timeRange: introRange)
    inputParams.setVolumeRamp(fromStartVolume: 1, toEndVolume: 0, timeRange: endingSecond)

    let audioMix = AVMutableAudioMix()
    audioMix.inputParameters = [inputParams]
    playerItem.audioMix = audioMix

    player = AVQueuePlayer(playerItem: playerItem)
    playerLooper = AVPlayerLooper(player: player, templateItem: playerItem)
    player.play()

    let maxDuration = 60.0 // Define max duration
    let maxLoops = floor(maxDuration / assetDuration)
    let lastLoopDuration = maxDuration - (assetDuration * maxLoops)
    let boundaryTime = CMTimeMakeWithSeconds(lastLoopDuration, preferredTimescale: 1)
    let boundaryTimeValue = NSValue(time: boundaryTime)
    player.addBoundaryTimeObserver(forTimes: [boundaryTimeValue], queue: DispatchQueue.main) { [weak self] in
        if self?.playerLooper.loopCount == Int(maxLoops) {
            self?.player.pause()
        }
    }
}
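Called with the sound file to loop, matching the question's example of an 8-second MP3 (hypothetical file name):
if let url = Bundle.main.url(forResource: "loop", withExtension: "mp3") {
    play(url) // loops until the 60-second boundary above is reached
}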

Exporting a mirrored video with AVMutableComposition causes resizing issues

Everything works as expected if I turn off mirroring on the front camera. However, if I turn it on, my final exported video has serious resizing problems:
This is how I currently manage the mirroring for my videos:
if currentDevice == frontCamera {
    if let connection = output.connections.first {
        if connection.isVideoMirroringSupported {
            connection.automaticallyAdjustsVideoMirroring = false
            connection.isVideoMirrored = true // if true, this bug occurs
        }
    }
} else {
    // Disabling mirroring on the back camera.
    if let connection = output.connections.first {
        if connection.isVideoMirroringSupported {
            connection.automaticallyAdjustsVideoMirroring = false
            connection.isVideoMirrored = false
        }
    }
}
And this is how I export the video:
/// Create AVMutableComposition object. This object will hold the AVMutableCompositionTrack instances.
let mainMutableComposition = AVMutableComposition()

/// Creating an empty video track.
let videoTrack = mainMutableComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
let videoAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
do {
    // Adding the video track.
    try videoTrack?.insertTimeRange(CMTimeRange(start: kCMTimeZero, duration: videoAsset.duration), of: videoAsset.tracks(withMediaType: AVMediaType.video).first!, at: kCMTimeZero)
} catch {
    completion(false, nil)
}

/// Adding audio if the user wants to.
if withAudio {
    do {
        // Adding the audio track.
        let audio = videoAsset.tracks(withMediaType: AVMediaType.audio).first
        if audio != nil {
            let audioTrack = mainMutableComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)
            try audioTrack?.insertTimeRange(CMTimeRange(start: kCMTimeZero, duration: videoAsset.duration), of: audio!, at: kCMTimeZero)
        }
    } catch {
        completion(false, nil)
    }
}

// MARK: - Composition is ready

// Create AVMutableVideoCompositionInstruction.
let compositionInstructions = AVMutableVideoCompositionInstruction()
compositionInstructions.timeRange = CMTimeRange(start: kCMTimeZero, duration: videoAsset.duration)

// Create an AVMutableVideoCompositionLayerInstruction.
let videoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack!)
videoLayerInstruction.setTransform(videoAssetTrack.preferredTransform, at: kCMTimeZero)
compositionInstructions.layerInstructions = [videoLayerInstruction]

// Add instructions.
let videoComposition = AVMutableVideoComposition()
let naturalSize: CGSize = videoAssetTrack.naturalSize

/// Rendering size of the video.
let renderWidth = naturalSize.width
let renderHeight = naturalSize.height

// Assigning instructions and rendering size.
videoComposition.renderSize = CGSize(width: renderWidth, height: renderHeight)
videoComposition.instructions = [compositionInstructions]
videoComposition.frameDuration = CMTime(value: 1, timescale: Int32((videoTrack?.nominalFrameRate)!))

// Applying image to instruction.
self.applyVideoImage(to: videoComposition, withSize: naturalSize, image: image)

// Getting the output path.
let documentsURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first
let outputPath = documentsURL?.appendingPathComponent("lastEditedVideo.mp4")
if FileManager.default.fileExists(atPath: (outputPath?.path)!) {
    do {
        try FileManager.default.removeItem(atPath: (outputPath?.path)!)
    } catch {
        completion(false, nil)
    }
}

// Create exporter.
let exporter = NextLevelSessionExporter(withAsset: mainMutableComposition)
exporter.outputURL = outputPath
exporter.outputFileType = AVFileType.mp4
exporter.videoComposition = videoComposition

let compressionDict: [String: Any] = [
    AVVideoAverageBitRateKey: NSNumber(integerLiteral: 2300000),
    AVVideoProfileLevelKey: AVVideoProfileLevelH264BaselineAutoLevel as String
]
exporter.videoOutputConfiguration = [
    AVVideoCodecKey: AVVideoCodecType.h264,
    AVVideoWidthKey: NSNumber(integerLiteral: Int(naturalSize.width)),
    AVVideoHeightKey: NSNumber(integerLiteral: Int(naturalSize.height)),
    AVVideoCompressionPropertiesKey: compressionDict
]
exporter.audioOutputConfiguration = [
    AVFormatIDKey: kAudioFormatMPEG4AAC,
    AVEncoderBitRateKey: NSNumber(integerLiteral: 128000),
    AVNumberOfChannelsKey: NSNumber(integerLiteral: 2),
    AVSampleRateKey: NSNumber(value: Float(44100))
]
completion(true, exporter)
}
I'm using NextLevelSessionExporter to export the video. It doesn't matter whether I use the default exporter or not; the resizing problems still occur.
There is an active bug that prevents you from exporting mirrored videos correctly. You need a few workarounds:
Turn off the mirroring on the movieOutputFile
Manually flip the video horizontally when needed:
if needsMirroring == true {
    var transform: CGAffineTransform = CGAffineTransform(scaleX: -1.0, y: 1.0)
    transform = transform.translatedBy(x: -naturalSize.width, y: 0.0)
    transform = transform.rotated(by: CGFloat(Double.pi / 2))
    transform = transform.translatedBy(x: 0.0, y: -naturalSize.width)
    videoTransform = transform
}
It took me days to figure this out, hope it helps.
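To connect this with the export code above: the computed videoTransform would replace preferredTransform in the layer instruction. A minimal sketch, assuming needsMirroring is your own flag for front-camera clips:
let videoLayerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack!)
if needsMirroring {
    videoLayerInstruction.setTransform(videoTransform, at: kCMTimeZero) // manually flipped
} else {
    videoLayerInstruction.setTransform(videoAssetTrack.preferredTransform, at: kCMTimeZero)
}
compositionInstructions.layerInstructions = [videoLayerInstruction]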

How to add a Fade In and Fade Out effect on Video AVAsset in Swift3 iOS

I am developing a video application in Swift 3, where I have to convert text to video, then add a fade-in and fade-out effect, and post the faded video to a server. I must not use any third-party library for the fade effect.
I am able to convert my text to a video; my problem is how to add a fade in and fade out to the video AVAsset.
Can anyone suggest how to achieve this? I cannot find any recent answers to this problem. Thanks for any help!
Fade Out effect
let parentLayer = CALayer()
let fadeOut = CABasicAnimation(keyPath: "opacity")
fadeOut.fromValue = 1.0
fadeOut.toValue = 0.0
fadeOut.duration = 5.0 // This should be the video duration
fadeOut.setValue("video", forKey: "fadeOut")
fadeOut.isRemovedOnCompletion = false
fadeOut.fillMode = CAMediaTimingFillMode.forwards
parentLayer.add(fadeOut, forKey: "opacity")
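Note that a CALayer animation only reaches an exported file if the layer is wired into the video composition through AVVideoCompositionCoreAnimationTool; a minimal sketch, assuming a videoComposition built elsewhere (its renderSize drives the layer frames):
// beginTime 0 means "now" to Core Animation, so exported animations need this constant.
fadeOut.beginTime = AVCoreAnimationBeginTimeAtZero
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: videoComposition.renderSize)
parentLayer.frame = videoLayer.frame
parentLayer.addSublayer(videoLayer)
videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentLayer)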
Fade in effect
let fadeIn = CABasicAnimation(keyPath: "opacity")
fadeIn.fromValue = 0.0
fadeIn.toValue = 1.0
Add to your player
self.playerView?.playerLayer?.add(fadeOut, forKey: nil)
Add to your assets
var startTime = CMTime.zero
var timeDuration = CMTimeMake(value: 3, timescale: 1)
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
//MARK: Fade in effect
layerInstruction.setOpacityRamp(fromStartOpacity: 0.0, toEndOpacity: 1.0, timeRange: CMTimeRange(start: startTime, duration: timeDuration))
//MARK: Fade out effect
startTime = CMTimeSubtract(mutableComposition.duration, CMTimeMake(value: 3, timescale: 1))
timeDuration = CMTimeMake(value: 3, timescale: 1)
layerInstruction.setOpacityRamp(
fromStartOpacity: 1.0,
toEndOpacity: 0.0,
timeRange: CMTimeRangeMake(start: startTime, duration: timeDuration)
)
AVVideoCompositionLayerInstruction
Specifies how video frames from a source track should be layered and composed; a composition instruction holds an array of these.
AVMutableVideoCompositionInstruction
An AVVideoComposition object maintains an array of these instructions to perform its composition.
Example Swift4:
I merged videos with a fade-in and fade-out effect and changed the sequence of the audio:
func doMerge(arrayVideos: [AVAsset], arrayAudios: [AVAsset], animation: Bool, completion: @escaping Completion) -> Void {
    var insertTime = kCMTimeZero
    var audioInsertTime = kCMTimeZero
    var arrayLayerInstructions: [AVMutableVideoCompositionLayerInstruction] = []
    var outputSize = CGSize.init(width: 0, height: 0)

    // Determine video output size
    for videoAsset in arrayVideos {
        let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
        let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
        var videoSize = videoTrack.naturalSize
        if assetInfo.isPortrait == true {
            videoSize.width = videoTrack.naturalSize.height
            videoSize.height = videoTrack.naturalSize.width
        }
        outputSize = videoSize
    }

    // Init composition
    let mixComposition = AVMutableComposition.init()

    for index in 0..<arrayVideos.count {
        // Get video track
        guard let videoTrack = arrayVideos[index].tracks(withMediaType: AVMediaType.video).first else { continue }

        // Get audio track
        var audioTrack: AVAssetTrack?
        if index < arrayAudios.count {
            if arrayAudios[index].tracks(withMediaType: AVMediaType.audio).count > 0 {
                audioTrack = arrayAudios[index].tracks(withMediaType: AVMediaType.audio).first
            }
        }

        // Init video & audio composition track
        let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
        let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))

        do {
            let startTime = kCMTimeZero
            let duration = arrayVideos[index].duration

            // Add video track to video composition at specific time
            try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, duration), of: videoTrack, at: insertTime)

            // Add audio track to audio composition at specific time
            var audioDuration = kCMTimeZero
            if index < arrayAudios.count {
                audioDuration = arrayAudios[index].duration
            }
            if let audioTrack = audioTrack {
                do {
                    try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, audioDuration), of: audioTrack, at: audioInsertTime)
                } catch {
                    print(error.localizedDescription)
                }
            }

            // Add instruction for video track
            let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!, asset: arrayVideos[index], standardSize: outputSize, atTime: insertTime)

            // Hide video track before changing to new track
            let endTime = CMTimeAdd(insertTime, duration)
            if animation {
                let timeScale = arrayVideos[index].duration.timescale
                let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
                layerInstruction.setOpacityRamp(fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
            } else {
                layerInstruction.setOpacity(0, at: endTime)
            }
            arrayLayerInstructions.append(layerInstruction)

            // Increase the insert time
            audioInsertTime = CMTimeAdd(audioInsertTime, audioDuration)
            insertTime = CMTimeAdd(insertTime, duration)
        } catch {
            print("Load track error")
        }
    }

    // Main video composition instruction
    let mainInstruction = AVMutableVideoCompositionInstruction()
    mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, insertTime)
    mainInstruction.layerInstructions = arrayLayerInstructions

    // Main video composition
    let mainComposition = AVMutableVideoComposition()
    mainComposition.instructions = [mainInstruction]
    mainComposition.frameDuration = CMTimeMake(1, 30)
    mainComposition.renderSize = outputSize

    // Export to file
    let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
    let exportURL = URL.init(fileURLWithPath: path)

    // Remove file if existed
    FileManager.default.removeItemIfExisted(exportURL)

    // Init exporter
    let exporter = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
    exporter?.outputURL = exportURL
    exporter?.outputFileType = AVFileType.mp4
    exporter?.shouldOptimizeForNetworkUse = true
    exporter?.videoComposition = mainComposition

    // Do export
    exporter?.exportAsynchronously(completionHandler: {
        DispatchQueue.main.async {
            self.exportDidFinish(exporter: exporter, videoURL: exportURL, completion: completion)
        }
    })
}
