I have a local mp3 audio file that I would like to break up into segments or chapters. I want to present all the segments in a list and highlight which segment is currently playing.
This is what I'm trying:
let asset = AVAsset(url: Bundle.main.url(...)!)
let item = AVPlayerItem(asset: asset)
let meta1 = AVMutableMetadataItem()
meta1.key = AVMetadataKey.quickTimeUserDataKeyChapter as NSCopying & NSObjectProtocol
meta1.value = "Chapter 1" as NSCopying & NSObjectProtocol
let start1 = CMTime(seconds: 0, preferredTimescale: 1)
let end1 = CMTime(seconds: 3, preferredTimescale: 1)
let range1 = CMTimeRange(start: start1, end: end1)
let segment1 = AVTimedMetadataGroup(items: [meta1], timeRange: range1)
let meta2 = AVMutableMetadataItem()
meta2.key = AVMetadataKey.quickTimeUserDataKeyChapter as NSCopying & NSObjectProtocol
meta2.value = "Chapter 2" as NSCopying & NSObjectProtocol
let start2 = CMTime(seconds: 3, preferredTimescale: 1)
let end2 = CMTime(seconds: 8, preferredTimescale: 1)
let range2 = CMTimeRange(start: start2, end: end2)
let segment2 = AVTimedMetadataGroup(items: [meta2], timeRange: range2)
// TODO: Add segments to asset
let player = AVPlayer(playerItem: item)
How do I associate the AVTimedMetadataGroup objects with my AVPlayerItem? Is this the correct approach?
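For the list-highlighting part, here is a minimal sketch of one possible alternative (an assumption on my part, not necessarily the canonical AVTimedMetadataGroup route): keep the chapter ranges in your own model and use a periodic time observer to find the segment that contains the current playback time. The Chapter struct, the file name, and the observation interval below are hypothetical, for illustration only.
import AVFoundation
struct Chapter {
    let title: String
    let timeRange: CMTimeRange
}
// Hypothetical chapter model mirroring the ranges from the question.
let chapters = [
    Chapter(title: "Chapter 1",
            timeRange: CMTimeRange(start: CMTime(seconds: 0, preferredTimescale: 1),
                                   end: CMTime(seconds: 3, preferredTimescale: 1))),
    Chapter(title: "Chapter 2",
            timeRange: CMTimeRange(start: CMTime(seconds: 3, preferredTimescale: 1),
                                   end: CMTime(seconds: 8, preferredTimescale: 1)))
]
// "audio.mp3" is a placeholder file name.
let asset = AVAsset(url: Bundle.main.url(forResource: "audio", withExtension: "mp3")!)
let player = AVPlayer(playerItem: AVPlayerItem(asset: asset))
// Poll twice a second; keep the returned token so the observer can be removed later.
let interval = CMTime(seconds: 0.5, preferredTimescale: 600)
let timeObserverToken = player.addPeriodicTimeObserver(forInterval: interval, queue: .main) { time in
    if let index = chapters.firstIndex(where: { $0.timeRange.containsTime(time) }) {
        // Highlight the row at `index` in the segment list here.
        print("Currently playing: \(chapters[index].title)")
    }
}
player.play()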
Related
I am trying to merge images and video clips together. I have an option to add an animation in between the videos and images. There are a few options like fade-in, fade-out, rotate, slide-up, slide-down, left, right, etc. For images I am able to add the animation, but how do I add an animation for video? Specifically, when one video clip finishes and another is about to start, I want to add an animation at that point. My merging functionality is already working well; all that is left is adding the animation in between the videos.
I have tried with:
instruction.setOpacityRamp(fromStartOpacity: <#T##Float#>, toEndOpacity: <#T##Float#>, timeRange: <#T##CMTimeRange#>)
but this option only gives a fade-in/fade-out effect. Where and how do I add the other custom animation options?
Here is my source code for merging. There are many dependent functions in the code, but I have posted only the merging functionality. I have added the comment //HERE TO ADD THE ANIMATION so that you can jump directly to the point where I am trying to add the animations.
func merge(allAssets: [MovieAssetPresentable], isHDR: Bool, success: @escaping (URL?) -> (Void), progress: @escaping (CGFloat) -> (Void), failed: @escaping (String?) -> (Void)) {
cancelExport()
let defaultSize = isHDR ? self.videoOutputResolution.HD : self.videoOutputResolution.lowQuality
let videoPresetName = self.getPresetName(resolution: defaultSize)
self.mergeSuccess = success
self.mergeError = failed
self.mergeProgress = progress
let mixComposition = AVMutableComposition()
let mainInstruction = AVMutableVideoCompositionInstruction()
var layerInstructions = [AVMutableVideoCompositionLayerInstruction]()
guard let urlVideoForBackground = Bundle.main.url(forResource: "black", withExtension: "mov") else {
self.mergeError("Need black background video !")
return
}
let assetForBackground = AVAsset(url: urlVideoForBackground)
let trackForBackground = assetForBackground.tracks(withMediaType: AVMediaType.video).first
//Set output size
var outputSize = CGSize.zero
for asset in allAssets.filter({$0.assetType! == .video}) {
guard let videoAsset = asset.asset else { continue }
// Get video track
guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else { continue }
let assetInfo = self.orientationFromTransform(videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
if videoSize.height > outputSize.height {
outputSize = CGSize(width: defaultSize.width, height: ((videoSize.height / videoSize.width) * defaultSize.width))
}
}
if outputSize == CGSize.zero {
outputSize = defaultSize
}
debugPrint("OUTPUT SIZE: \(outputSize)")
let layerContentsGravity = VideoSettings.shared.fetchVideoFitClips()
var layerImages = [CALayer]()
var insertTime = CMTime.zero
var audioMixInputParameters = [AVMutableAudioMixInputParameters]()
// Init Video layer
let videoLayer = CALayer()
videoLayer.frame = CGRect.init(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
videoLayer.contentsGravity = layerContentsGravity
let parentlayer = CALayer()
parentlayer.frame = CGRect.init(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
parentlayer.addSublayer(videoLayer)
for asset in allAssets.filter({$0.assetType! == .image || $0.assetType! == .video}) {
//Video speed level
let videoSpeed = Double(asset.videoSpeedLevel!)
if asset.assetType! == .video {
//Video asset
let ast = asset.asset!
let duration = asset.endTime! - asset.beginTime! //ast.duration
//Create AVMutableCompositionTrack object
guard let track = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
self.mergeError("Unable to create track.")
continue
}
//Add original video sound track
let originalSoundTrack: AVMutableCompositionTrack?
if asset.asset!.tracks(withMediaType: .audio).count > 0 {
originalSoundTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try originalSoundTrack?.insertTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), of: ast.tracks(withMediaType: AVMediaType.audio)[0], at: insertTime)
} catch {
self.mergeError("Unable to create original audio track.")
continue
}
//Set video original sound track speed
originalSoundTrack?.scaleTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), toDuration: CMTime(value: CMTimeValue(Double(duration.value) * videoSpeed), timescale: duration.timescale))
let audioInputParams = AVMutableAudioMixInputParameters(track: originalSoundTrack)
audioInputParams.setVolume(asset.videoOriginalVolume!, at: CMTime.zero)
audioInputParams.trackID = originalSoundTrack?.trackID ?? kCMPersistentTrackID_Invalid
audioMixInputParameters.append(audioInputParams)
}
//Set time range
do {
try track.insertTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration),
of: ast.tracks(withMediaType: AVMediaType.video)[0],
at: insertTime)
} catch let err {
self.mergeError("Failed to load track: \(err.localizedDescription)")
continue
}
//Set video speed
track.scaleTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), toDuration: CMTime(value: CMTimeValue(Double(duration.value) * videoSpeed), timescale: duration.timescale))
insertTime = CMTimeAdd(insertTime, duration)
let instruction = self.videoCompositionInstruction(track, asset: ast, outputSize: outputSize)
// let instruction = videoCompositionInstructionForTrack(track: t, asset: ast, standardSize: outputSize, atTime: insertTime)
instruction.setOpacity(0.0, at: insertTime)
//HERE TO ADD THE ANIMATION
layerInstructions.append(instruction)
} else {
//Image data
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let defaultImageTime = CMTimeGetSeconds(asset.endTime!) - CMTimeGetSeconds(asset.beginTime!)
let duration = CMTime.init(seconds:defaultImageTime, preferredTimescale: assetForBackground.duration.timescale)
do {
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: duration),
of: trackForBackground!,
at: insertTime)
}
catch {
self.mergeError("Background time range error")
}
guard let image = UIImage(data: asset.imageData!) else { continue }
// Create Image layer
let imageLayer = CALayer()
imageLayer.frame = CGRect.init(origin: CGPoint.zero, size: outputSize)
imageLayer.contents = image.cgImage
imageLayer.opacity = 0
imageLayer.contentsGravity = layerContentsGravity
self.setOrientation(image: image, onLayer: imageLayer)
// Add Fade in & Fade out animation
let fadeInAnimation = CABasicAnimation.init(keyPath: "opacity")
fadeInAnimation.duration = 1
fadeInAnimation.fromValue = NSNumber(value: 0)
fadeInAnimation.toValue = NSNumber(value: 1)
fadeInAnimation.isRemovedOnCompletion = false
fadeInAnimation.beginTime = CMTimeGetSeconds(insertTime) == 0 ? 0.05: CMTimeGetSeconds(insertTime)
fadeInAnimation.fillMode = CAMediaTimingFillMode.forwards
imageLayer.add(fadeInAnimation, forKey: "opacityIN")
let fadeOutAnimation = CABasicAnimation.init(keyPath: "opacity")
fadeOutAnimation.duration = 1
fadeOutAnimation.fromValue = NSNumber(value: 1)
fadeOutAnimation.toValue = NSNumber(value: 0)
fadeOutAnimation.isRemovedOnCompletion = false
fadeOutAnimation.beginTime = CMTimeGetSeconds(CMTimeAdd(insertTime, duration))
fadeOutAnimation.fillMode = CAMediaTimingFillMode.forwards
imageLayer.add(fadeOutAnimation, forKey: "opacityOUT")
layerImages.append(imageLayer)
// Increase the insert time
insertTime = CMTimeAdd(insertTime, duration)
}
}
// Add Image layers
for layer in layerImages {
parentlayer.addSublayer(layer)
}
//Add Water mark if Subscription not activated
if !AddManager.shared.hasActiveSubscription {
let imglogo = UIImage(named: "watermark")
let waterMarklayer = CALayer()
waterMarklayer.contents = imglogo?.cgImage
let sizeOfWaterMark = Utility.getWaterMarkSizeWithVideoSize(videoSize: outputSize, defaultSize: waterMarkSize)
debugPrint("sizeOfWaterMark=\(sizeOfWaterMark)")
waterMarklayer.frame = CGRect(x: outputSize.width - (sizeOfWaterMark.width+10), y: 5, width: sizeOfWaterMark.width, height: sizeOfWaterMark.height)
waterMarklayer.contentsGravity = .resizeAspect
waterMarklayer.opacity = 1.0
parentlayer.addSublayer(waterMarklayer)
}
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: insertTime)
mainInstruction.layerInstructions = layerInstructions
mainInstruction.backgroundColor = VideoSettings.shared.fetchVideoBackgroundColor().color.cgColor
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mainComposition.renderSize = outputSize
mainComposition.renderScale = 1.0
mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentlayer)
for audioAsset in allAssets.filter({$0.assetType! == .audio}) {
//NOTE: If you need to increase/decrease the audio fade-in/fade-out effect time, modify the fadeInFadeOutEffectTiming variable (in seconds)
let fadeInFadeOutEffectTiming = Double(3) //seconds
let volumeLevel = audioAsset.audioVolumeLevel!
let isFadeIn = audioAsset.audioFadeInEffect!
let isFadeOut = audioAsset.audioFadeOutEffect!
var audioBeginTime = audioAsset.beginTime!
var audioEndTime = audioAsset.endTime!
var audioTrackTime = audioAsset.audioTrackStartTime!
var trimmedAudioDuration = CMTimeSubtract(audioEndTime, audioBeginTime)
//If the audio starting position (in seconds) is greater than or equal to zero (relative to the video length)
if CMTimeGetSeconds(CMTimeAdd(audioTrackTime, audioBeginTime)) >= 0 {
//If the audio starting position (in seconds) is beyond the video length, i.e. the total video length is 20 seconds but the audio starts at 24 seconds, we should not add the audio
if CMTimeCompare(CMTimeAdd(audioTrackTime, audioBeginTime), insertTime) == 1 {
trimmedAudioDuration = CMTime.zero
} else {
//If the audio start position (seconds) + crop length exceeds the total video length, we should add only the part that falls within the video
if CMTimeCompare(CMTimeAdd(CMTimeAdd(audioTrackTime, audioBeginTime), trimmedAudioDuration), insertTime) == 1 {
audioTrackTime = CMTimeAdd(audioTrackTime, audioBeginTime)
trimmedAudioDuration = CMTimeSubtract(insertTime, audioTrackTime)
} else {
audioTrackTime = CMTimeAdd(audioTrackTime, audioBeginTime)
}
}
}
//If the audio start time is negative (in seconds)
else {
//If the audio crop length is negative (in seconds)
if CMTimeCompare(CMTimeAdd(CMTimeAdd(audioTrackTime, audioBeginTime), trimmedAudioDuration), CMTime.zero) == -1 {
trimmedAudioDuration = CMTime.zero
} else {
audioBeginTime = CMTime(seconds: abs(CMTimeGetSeconds(audioTrackTime)), preferredTimescale: audioTrackTime.timescale)
audioTrackTime = CMTime.zero
trimmedAudioDuration = CMTimeSubtract(audioEndTime, audioBeginTime)
if CMTimeCompare(trimmedAudioDuration, insertTime) == 1 {
trimmedAudioDuration = insertTime
}
}
}
if trimmedAudioDuration != CMTime.zero {
audioEndTime = CMTimeAdd(audioTrackTime, trimmedAudioDuration)
let audioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try audioTrack?.insertTimeRange(CMTimeRangeMake(start: audioBeginTime , duration: trimmedAudioDuration),
of: audioAsset.asset!.tracks(withMediaType: AVMediaType.audio)[0] ,
at: audioTrackTime)
let audioInputParams = AVMutableAudioMixInputParameters(track: audioTrack)
var effectTime = CMTime(seconds: fadeInFadeOutEffectTiming, preferredTimescale: 600)
if CMTimeCompare(trimmedAudioDuration, CMTimeMultiply(effectTime, multiplier: 2)) == -1 {
effectTime = CMTime(seconds: CMTimeGetSeconds(trimmedAudioDuration) / 2, preferredTimescale: 600)
}
//Fade in effect
audioInputParams.setVolumeRamp(fromStartVolume: isFadeIn ? 0 : volumeLevel, toEndVolume: volumeLevel, timeRange: CMTimeRange(start: audioTrackTime, duration: effectTime))
//Fade out effect
audioInputParams.setVolumeRamp(fromStartVolume: volumeLevel, toEndVolume: isFadeOut ? 0 : volumeLevel, timeRange: CMTimeRange(start: CMTimeSubtract(audioEndTime, effectTime), duration: effectTime))
audioInputParams.trackID = audioTrack?.trackID ?? kCMPersistentTrackID_Invalid
audioMixInputParameters.append(audioInputParams)
} catch {
print("Failed to load Audio track")
}
}
}
// 4 - Get path
guard let url = Utility.createFileAtDocumentDirectory(name: "mergeVideo-\(Date().timeIntervalSince1970).mp4") else {
debugPrint("Unable to file at document directory")
return
}
// 5 - Create Exporter
self.exporter = AVAssetExportSession(asset: mixComposition, presetName: videoPresetName)
guard let exp = self.exporter else {
debugPrint("Unable to export.")
return
}
let audioMix = AVMutableAudioMix()
audioMix.inputParameters = audioMixInputParameters
exp.outputURL = url
exp.outputFileType = AVFileType.mp4
exp.shouldOptimizeForNetworkUse = true
exp.videoComposition = mainComposition
exp.audioMix = audioMix
//self.viewPieProgress.setProgress(0.0, animated: false)
//viewPieProgress.isHidden = isHDR
//timer for progress
self.timer = Timer.scheduledTimer(timeInterval: 1.0, target: self, selector: #selector(self.updateExportingProgress(timer:)), userInfo: exp, repeats: true)
// 6 - Perform the Export
exp.exportAsynchronously() {
DispatchQueue.main.async {
self.exportDidFinish(exp)
}
}
}
I have tried many options but nothing meets my requirement. Please help me out.
If you require any other information from me, please feel free to comment on this post.
Thanks in advance.
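For reference, a hedged sketch of the general idea at the //HERE TO ADD THE ANIMATION point (not drop-in code for this merge function): a cross-dissolve needs the outgoing and incoming clips to overlap in the composition, and the two layer instructions then get opposing opacity ramps over that overlap. All of the names below (addCrossDissolve, outgoing, incoming, overlapStart, transitionDuration) are hypothetical.
import AVFoundation
// Hedged sketch: assumes the incoming clip was inserted so that it overlaps the
// outgoing clip by `transitionDuration`, starting at `overlapStart` in the composition.
func addCrossDissolve(outgoing: AVMutableVideoCompositionLayerInstruction,
                      incoming: AVMutableVideoCompositionLayerInstruction,
                      overlapStart: CMTime,
                      transitionDuration: CMTime) {
    let overlapRange = CMTimeRange(start: overlapStart, duration: transitionDuration)
    // Fade the outgoing clip's layer out while the incoming clip's layer fades in.
    outgoing.setOpacityRamp(fromStartOpacity: 1, toEndOpacity: 0, timeRange: overlapRange)
    incoming.setOpacityRamp(fromStartOpacity: 0, toEndOpacity: 1, timeRange: overlapRange)
    // For slide/zoom style transitions, the analogous call is a transform ramp, e.g.:
    // outgoing.setTransformRamp(fromStart: .identity,
    //                           toEnd: CGAffineTransform(translationX: -1920, y: 0),
    //                           timeRange: overlapRange)
}
Note that the sequential insertTime logic in the question butts the clips end-to-end, so the incoming clip would have to be inserted transitionDuration earlier for any overlap to exist; the answer below walks through a full three-instruction cross-fade between two assets.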
I have to apply opacity to a video, starting one second before the end of the video. I am using "firstInstruction" to get the total duration of the video; however, when I call the "firstInstruction.setOpacityRamp" method I cannot subtract that one second from the time range.
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, CMTimeAdd(firstAsset.duration, secondAsset.duration))
let firstInstruction = VideoHelper.videoCompositionInstruction(firstTrack, asset: firstAsset)
firstInstruction.setOpacityRamp(fromStartOpacity: 1, toEndOpacity: 0.1, timeRange: mainInstruction.timeRange)
I would use three instructions to apply the cross-fade:
A “pass-through” instruction that shows only the first video track, until one second before the end of the first asset.
A cross-fade instruction that simultaneously shows the last second of the first video track and the first second of the second video track, with opacity ramps.
A “pass-through” instruction that shows only the second video track, starting from one second into the second video track.
So, first, let's get the tracks:
import AVFoundation
import CoreVideo
func crossFade(asset0: AVAsset, asset1: AVAsset, crossFadeDuration: CMTime, to outputUrl: URL) throws {
guard
let asset0Track = asset0.tracks(withMediaType: .video).first,
let asset1Track = asset1.tracks(withMediaType: .video).first,
case let composition = AVMutableComposition(),
case let compositionTrack0Id = composition.unusedTrackID(),
let compositionTrack0 = composition.addMutableTrack(
withMediaType: .video, preferredTrackID: compositionTrack0Id),
case let compositionTrack1Id = composition.unusedTrackID(),
let compositionTrack1 = composition.addMutableTrack(
withMediaType: .video, preferredTrackID: compositionTrack1Id)
else { return }
Now let's compute all of the times we need. First, the entire range of asset0Track in the composition, including both the pass-through and cross-fade periods:
// When does asset0Track start, in the composition?
let asset0TrackStartTime = CMTime.zero
// When does asset0Track end, in the composition?
let asset0TrackEndTime = asset0TrackStartTime + asset0Track.timeRange.duration
Next, the cross-fade's time range:
// When does the cross-fade end, in the composition?
// It should end exactly at the end of asset0's video track.
let crossFadeEndTime = asset0TrackEndTime
// When does the cross-fade start, in the composition?
let crossFadeStartTime = crossFadeEndTime - crossFadeDuration
// What is the entire time range of the cross-fade, in the composition?
let crossFadeTimeRange = CMTimeRangeMake(
start: crossFadeStartTime,
duration: crossFadeDuration)
Next, the entire range of asset1Track in the composition, including both the cross-fade and pass-through periods:
// When does asset1Track start, in the composition?
// It should start exactly at the start of the cross-fade.
let asset1TrackStartTime = crossFadeStartTime
// When does asset1Track end, in the composition?
let asset1TrackEndTime = asset1TrackStartTime + asset1Track.timeRange.duration
And finally, the two pass-through time ranges:
// What is the time range during which only asset0 is visible, in the composition?
let compositionTrack0PassThroughTimeRange = CMTimeRangeMake(
start: asset0TrackStartTime,
duration: crossFadeStartTime - asset0TrackStartTime)
// What is the time range during which only asset1 is visible, in the composition?
let compositionTrack1PassThroughTimeRange = CMTimeRangeMake(
start: crossFadeEndTime,
duration: asset1TrackEndTime - crossFadeEndTime)
Now we can insert the input tracks into the composition's tracks:
// Put asset0Track into compositionTrack0.
try compositionTrack0.insertTimeRange(
asset0Track.timeRange, of: asset0Track, at: asset0TrackStartTime)
// Put asset1Track into compositionTrack1.
try compositionTrack1.insertTimeRange(
asset1Track.timeRange, of: asset1Track, at: asset1TrackStartTime)
That is all we need to do for the AVMutableComposition. But we also need to make an AVMutableVideoComposition:
let videoComposition = AVMutableVideoComposition()
videoComposition.frameDuration =
min(asset0Track.minFrameDuration, asset1Track.minFrameDuration)
videoComposition.renderSize = CGSize(
width: max(asset0Track.naturalSize.width, asset1Track.naturalSize.width),
height: max(asset0Track.naturalSize.height, asset1Track.naturalSize.height))
We need to set the video composition's instructions. The first instruction is to pass through just compositionTrack0 for the appropriate time range:
// I'm using a helper function defined below.
let compositionTrack0PassThroughInstruction = AVMutableVideoCompositionInstruction.passThrough(
trackId: compositionTrack0Id, timeRange: compositionTrack0PassThroughTimeRange)
The second instruction is for the cross-fade, so it's more complicated. It needs two child instructions, one for each layer in the cross-fade. Each layer instruction, and the overall cross-fade instruction, use the same time range:
let crossFadeLayer0Instruction = AVMutableVideoCompositionLayerInstruction()
crossFadeLayer0Instruction.trackID = compositionTrack0Id
crossFadeLayer0Instruction.setOpacityRamp(fromStartOpacity: 1, toEndOpacity: 0, timeRange: crossFadeTimeRange)
let crossFadeLayer1Instruction = AVMutableVideoCompositionLayerInstruction()
crossFadeLayer1Instruction.trackID = compositionTrack1Id
crossFadeLayer1Instruction.setOpacityRamp(fromStartOpacity: 0, toEndOpacity: 1, timeRange: crossFadeTimeRange)
let crossFadeInstruction = AVMutableVideoCompositionInstruction()
crossFadeInstruction.timeRange = crossFadeTimeRange
crossFadeInstruction.layerInstructions = [crossFadeLayer0Instruction, crossFadeLayer1Instruction]
The third instruction is to pass through just compositionTrack1 for the appropriate time range:
let compositionTrack1PassThroughInstruction = AVMutableVideoCompositionInstruction.passThrough(
trackId: compositionTrack1Id, timeRange: compositionTrack1PassThroughTimeRange)
Now that we have all three instructions, we can give them to the video composition:
videoComposition.instructions = [compositionTrack0PassThroughInstruction, crossFadeInstruction, compositionTrack1PassThroughInstruction]
And now we can use composition and videoComposition together, for example to export a new movie file:
let export = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetMediumQuality)!
export.outputURL = outputUrl
export.videoComposition = videoComposition
export.exportAsynchronously {
exit(0)
}
}
Here's the helper I used to create the pass-through instructions:
extension AVMutableVideoCompositionInstruction {
static func passThrough(trackId: CMPersistentTrackID, timeRange: CMTimeRange) -> AVMutableVideoCompositionInstruction {
let layerInstruction = AVMutableVideoCompositionLayerInstruction()
layerInstruction.trackID = trackId
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = timeRange
instruction.layerInstructions = [layerInstruction]
return instruction
}
}
And here's my test code. I used a macOS command-line app for testing:
let asset0 = AVURLAsset(url: URL(fileURLWithPath: "/tmp/asset0.mp4"))
let asset1 = AVURLAsset(url: URL(fileURLWithPath: "/tmp/asset1.mp4"))
let outputUrl = URL(fileURLWithPath: "/tmp/output.mp4")
try! crossFade(asset0: asset0, asset1: asset1, crossFadeDuration: CMTimeMake(value: 1, timescale: 1), to: outputUrl)
dispatchMain()
Result:
Note that I had to make the animation tiny and low color because of Stack Overflow's limit on image file size.
Input videos courtesy of Jeffrey Beach.
I am wondering what the best solution is for looping audio for a defined duration on iOS.
I am currently playing around with
AVAudioPlayer (where I can define a repeat count but can't define an end-time)
AVPlayer (where I can define a forwardPlaybackEndTime but not a loop count)
AVPlayerLooper (that I don't yet fully understand)
So what I need is to define a duration for which a certain sound file is repeated. For example, I have an 8-second mp3 and want to play it for, say, one minute.
What would also be suuuuper great is if I could cross-fade when it starts over again.
You were on the right track with AVPlayerLooper.
This is how you set up AVPlayerLooper:
var playerLooper: AVPlayerLooper!
var player: AVQueuePlayer!
func play(_ url: URL) {
let asset = AVAsset(url: url)
let playerItem = AVPlayerItem(asset: asset)
player = AVQueuePlayer(playerItem: playerItem)
playerLooper = AVPlayerLooper(player: player, templateItem: playerItem)
player.play()
}
To stop the loop after a set amount of time you can use addBoundaryTimeObserver(forTimes:queue:using:)
For example:
let assetDuration = CMTimeGetSeconds(asset.duration)
let maxDuration = 60.0 // Define max duration
let maxLoops = floor(maxDuration / assetDuration)
let lastLoopDuration = maxDuration - (assetDuration * maxLoops)
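// e.g. with an 8-second file and a 60-second cap: maxLoops = 7 and lastLoopDuration = 4 seconds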
let boundaryTime = CMTimeMakeWithSeconds(lastLoopDuration, preferredTimescale: 1)
let boundaryTimeValue = NSValue(time: boundaryTime)
player.addBoundaryTimeObserver(forTimes: [boundaryTimeValue], queue: DispatchQueue.main) { [weak self] in
if self?.playerLooper.loopCount == Int(maxLoops) {
self?.player.pause()
}
}
For fading in/out, you have to set the audioMix property on your AVPlayerItem instance before using it.
let introRange = CMTimeRangeMake(start: CMTimeMakeWithSeconds(0, preferredTimescale: 1), duration: CMTimeMakeWithSeconds(1, preferredTimescale: 1))
let endingSecond = CMTimeRangeMake(start: CMTimeMakeWithSeconds(assetDuration - 1, preferredTimescale: 1), duration: CMTimeMakeWithSeconds(1, preferredTimescale: 1))
let inputParams = AVMutableAudioMixInputParameters(track: asset.tracks.first! as AVAssetTrack)
inputParams.setVolumeRamp(fromStartVolume: 0, toEndVolume: 1, timeRange: introRange)
inputParams.setVolumeRamp(fromStartVolume: 1, toEndVolume: 0, timeRange: endingSecond)
let audioMix = AVMutableAudioMix()
audioMix.inputParameters = [inputParams]
playerItem.audioMix = audioMix
Complete function:
func play(_ url: URL) {
let asset = AVAsset(url: url)
let playerItem = AVPlayerItem(asset: asset)
let assetDuration = CMTimeGetSeconds(asset.duration)
let introRange = CMTimeRangeMake(start: CMTimeMakeWithSeconds(0, preferredTimescale: 1), duration: CMTimeMakeWithSeconds(1, preferredTimescale: 1))
let endingSecond = CMTimeRangeMake(start: CMTimeMakeWithSeconds(assetDuration - 1, preferredTimescale: 1), duration: CMTimeMakeWithSeconds(1, preferredTimescale: 1))
let inputParams = AVMutableAudioMixInputParameters(track: asset.tracks.first! as AVAssetTrack)
inputParams.setVolumeRamp(fromStartVolume: 0, toEndVolume: 1, timeRange: introRange)
inputParams.setVolumeRamp(fromStartVolume: 1, toEndVolume: 0, timeRange: endingSecond)
let audioMix = AVMutableAudioMix()
audioMix.inputParameters = [inputParams]
playerItem.audioMix = audioMix
player = AVQueuePlayer(playerItem: playerItem)
playerLooper = AVPlayerLooper(player: player, templateItem: playerItem)
player.play()
let maxDuration = 60.0 // Define max duration
let maxLoops = floor(maxDuration / assetDuration)
let lastLoopDuration = maxDuration - (assetDuration * maxLoops)
let boundaryTime = CMTimeMakeWithSeconds(lastLoopDuration, preferredTimescale: 1)
let boundaryTimeValue = NSValue(time: boundaryTime)
player.addBoundaryTimeObserver(forTimes: [boundaryTimeValue], queue: DispatchQueue.main) { [weak self] in
if self?.playerLooper.loopCount == Int(maxLoops) {
self?.player.pause()
}
}
}
I am trying to play videos with AVPlayer using the following code, but I get nothing as a result because it shows the duration as NaN.
func setUpPlayer(fileURL:URL){
let playerItem:AVPlayerItem = AVPlayerItem(url: fileURL)
player = AVPlayer(playerItem: playerItem)
let playerLayer=AVPlayerLayer(player: player!)
playerLayer.frame=CGRect(x:self.videoContainer.frame.origin.x, y:self.videoContainer.frame.origin.y+20, width:self.videoContainer.frame.size.width, height:self.videoContainer.frame.size.height-40)
player?.addObserver(
self, forKeyPath:"currentItem", options:.initial, context:nil)
self.view.layer.addSublayer(playerLayer)
rangeSlider.setVideoURL(videoURL:fileURL)
rangeSlider.delegate = self
self.endTime = CMTimeGetSeconds((player?.currentItem?.duration)!)
let timeInterval: CMTime = CMTimeMakeWithSeconds(0.01, 100)
// let asset:AVURLAsset = AVURLAsset.init(url:videoURL)
// let videoDuration:CMTime = asset.duration;
//
//
// let timeInterval: CMTime = CMTimeMakeWithSeconds(videoDuration,100)
//CMTimeGetSeconds(videoDuration)
timeObserver = player?.addPeriodicTimeObserver(forInterval: timeInterval,
queue: DispatchQueue.main) { (elapsedTime: CMTime) -> Void in
self.observeTime(elapsedTime: elapsedTime)
} as AnyObject!
}
I am doing this for the first time. Kindly give me a solution to resolve this problem. Thanks in advance!
You forgot to call player.play() in your code.
let timeRange = self.avPlayer.currentItem!.loadedTimeRanges[0].timeRangeValue
let duration = CMTimeGetSeconds(timeRange.duration)
Try this; it will definitely be helpful.
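To expand on that with a hedged sketch (the statusObservation property, its placement, and the use of block-based KVO are my assumptions, not the asker's code; it reuses the player and endTime from setUpPlayer(fileURL:)): the duration reads as NaN because it is requested before the item has finished loading, so wait for the item to report .readyToPlay before reading it, and start playback explicitly.
// Keep the observation in a stored property (hypothetical name) so it isn't deallocated.
var statusObservation: NSKeyValueObservation?
statusObservation = player?.currentItem?.observe(\.status, options: [.initial, .new]) { [weak self] item, _ in
    if item.status == .readyToPlay {
        // The duration is a real value here, not NaN.
        self?.endTime = CMTimeGetSeconds(item.duration)
    }
}
player?.play()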
I'm trying to do audio editing on an AVMutableComposition that I have built.
var commentaryTimeRange = CMTimeRange(start: commentaryItem.startTimeInTimeline, duration: commentaryItem.timeRange.duration)
if CMTimeCompare(CMTimeRangeGetEnd(commentaryTimeRange), composition.duration) == 1 {
commentaryTimeRange.duration = CMTimeSubtract(composition.duration, commentaryTimeRange.start);
commentaryItem.timeRange = commentaryTimeRange
}
// Add the commentary track
let compositionCommentaryTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
let track = commentaryItem.asset.tracks(withMediaType: AVMediaTypeAudio).first!
try! compositionCommentaryTrack.insertTimeRange(CMTimeRange(start: kCMTimeZero, duration:commentaryTimeRange.duration), of: track, at: commentaryTimeRange.start)
let tracksToDuck = composition.tracks(withMediaType: AVMediaTypeAudio)
var trackMixArray = [AVMutableAudioMixInputParameters]()
let rampDuration = CMTime(seconds: 1, preferredTimescale: 2)
for track in tracksToDuck {
let trackMix = AVMutableAudioMixInputParameters(track: track)
trackMix.setVolumeRamp(fromStartVolume: 1.0, toEndVolume: 0.2, timeRange: CMTimeRange(start: CMTimeSubtract(commentaryTimeRange.start, rampDuration), duration: CMTimeSubtract(commentaryTimeRange.duration, rampDuration)))
trackMix.setVolumeRamp(fromStartVolume: 0.2, toEndVolume: 1.0, timeRange: CMTimeRange(start: CMTimeRangeGetEnd(commentaryTimeRange), duration: rampDuration))
trackMixArray.append(trackMix)
}
let audioMix = AVMutableAudioMix()
audioMix.inputParameters = trackMixArray
Basically, I'm trying to add a commentary to a video track while ducking the original volume.
The audio is correctly mixed in the output, but the audio mix instructions seem to be ignored.
Of course the audio mix is passed to the AVPlayerItem; from debugging I can see that all the instructions are there and correctly passed to it.
func makePlayable() -> AVPlayerItem {
let playerItem = AVPlayerItem(asset: composition.copy() as! AVAsset, automaticallyLoadedAssetKeys: NewsPlayerViewController.assetKeysRequiredToPlay)
playerItem.videoComposition = videoComposition
playerItem.audioMix = audioMix?.copy() as! AVAudioMix?
if let overlayLayer = overlayLayer {
let syncLayer = AVSynchronizedLayer(playerItem: playerItem)
syncLayer.addSublayer(overlayLayer)
playerItem.syncLayer = syncLayer
}
return playerItem
}
I've found some answers that point to missing track identifiers as the cause, or to some sort of mismatch between a composition that has one and a track that doesn't. My composition doesn't use any track IDs, and Apple's AVEdit sample code doesn't use them either, yet it works.
The solution was simply to collect the tracks to duck BEFORE adding the commentary track.
let tracksToDuck = composition.tracks(withMediaType: AVMediaTypeAudio)// <- MOVE HERE, AT THE TOP
var commentaryTimeRange = CMTimeRange(start: commentaryItem.startTimeInTimeline, duration: commentaryItem.timeRange.duration)
if CMTimeCompare(CMTimeRangeGetEnd(commentaryTimeRange), composition.duration) == 1 {
commentaryTimeRange.duration = CMTimeSubtract(composition.duration, commentaryTimeRange.start);
commentaryItem.timeRange = commentaryTimeRange
}
// Add the commentary track
let compositionCommentaryTrack = composition.addMutableTrack(withMediaType: AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
let track = commentaryItem.asset.tracks(withMediaType: AVMediaTypeAudio).first!
try! compositionCommentaryTrack.insertTimeRange(CMTimeRange(start: kCMTimeZero, duration:commentaryTimeRange.duration), of: track, at: commentaryTimeRange.start)
var trackMixArray = [AVMutableAudioMixInputParameters]()
let rampDuration = CMTime(seconds: 1, preferredTimescale: 2)
for track in tracksToDuck {
let trackMix = AVMutableAudioMixInputParameters(track: track)
trackMix.setVolumeRamp(fromStartVolume: 1.0, toEndVolume: 0.2, timeRange: CMTimeRange(start: CMTimeSubtract(commentaryTimeRange.start, rampDuration), duration: CMTimeSubtract(commentaryTimeRange.duration, rampDuration)))
trackMix.setVolumeRamp(fromStartVolume: 0.2, toEndVolume: 1.0, timeRange: CMTimeRange(start: CMTimeRangeGetEnd(commentaryTimeRange), duration: rampDuration))
trackMixArray.append(trackMix)
}
let audioMix = AVMutableAudioMix()
audioMix.inputParameters = trackMixArray