I have a Swift class that takes an image and creates a 10-second video of that image.
I create the video using AVAssetWriter.
It outputs a ten-second video. This all works as expected.
func setup(){
//Setup video size
self.videoSize = self.getVideoSizeForImage(self.videoImage!)
//Setup temp video path
self.tempVideoPath = self.getTempVideoPath()
//Setup video writer
var videoWriter = AVAssetWriter(
URL: NSURL(fileURLWithPath: self.tempVideoPath!),
fileType: AVFileTypeMPEG4,
error: nil)
//Setup video writer input
let videoSettings = [
AVVideoCodecKey: AVVideoCodecH264,
AVVideoWidthKey: self.videoSize!.width,
AVVideoHeightKey: self.videoSize!.height
]
let videoWriterInput = AVAssetWriterInput(
mediaType: AVMediaTypeVideo,
outputSettings: videoSettings as [NSObject : AnyObject]
)
videoWriterInput.expectsMediaDataInRealTime = true
//Setup video writer adaptor
let adaptor = AVAssetWriterInputPixelBufferAdaptor(
assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: [
kCVPixelBufferPixelFormatTypeKey : kCVPixelFormatType_32ARGB,
]
)
videoWriter.addInput(videoWriterInput)
//Setup frame time
self.currentFrameTime = CMTimeMake(0, 10000)
//Start video writing session
videoWriter.startWriting()
videoWriter.startSessionAtSourceTime(self.currentFrameTime!)
self.currentVideoWriter = videoWriter
self.currentVideoWriterAdaptor = adaptor
}
func addImageToVideoWriter(image:UIImage, duration:CGFloat){
//Get image pixel buffer
let buffer = self.pixelBufferFromImage(image)
var frameTime = self.currentFrameTime!
//Add pixel buffer to video
let adaptor = self.currentVideoWriterAdaptor!
while(!adaptor.assetWriterInput.readyForMoreMediaData){}
if adaptor.assetWriterInput.readyForMoreMediaData{
adaptor.appendPixelBuffer(
buffer,
withPresentationTime: frameTime
)
let seconds = CGFloat(CMTimeGetSeconds(frameTime))+duration
var timescale = frameTime.timescale
var value = CGFloat(timescale) * seconds
frameTime.value = Int64(value)
frameTime.timescale = Int32(timescale)
self.currentFrameTime = frameTime
self.lastImage = image
}
}
func pixelBufferFromImage(image:UIImage) -> CVPixelBufferRef{
let size = image.size
var pixelBuffer: Unmanaged<CVPixelBuffer>?
let bufferOptions:CFDictionary = [
"kCVPixelBufferCGBitmapContextCompatibilityKey": NSNumber(bool: true),
"kCVPixelBufferCGImageCompatibilityKey": NSNumber(bool: true)
]
let status:CVReturn = CVPixelBufferCreate(
nil,
Int(size.width),
Int(size.height),
OSType(kCVPixelFormatType_32ARGB),
bufferOptions,
&pixelBuffer
)
let managedPixelBuffer = pixelBuffer!.takeRetainedValue()
let lockStatus = CVPixelBufferLockBaseAddress(managedPixelBuffer, 0)
let pixelData = CVPixelBufferGetBaseAddress(managedPixelBuffer)
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGBitmapContextCreate(
pixelData,
Int(size.width),
Int(size.height),
8,
Int(4 * size.width),
rgbColorSpace,
bitmapInfo
)
CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), image.CGImage)
CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0)
return managedPixelBuffer
}
func saveVideoWriterToDisk(){
self.addImageToVideoWriter(self.lastImage!, duration: 0)
self.currentVideoWriter?.endSessionAtSourceTime(self.currentFrameTime!)
self.currentVideoWriterAdaptor?.assetWriterInput.markAsFinished()
//Write video to disk
let semaphore = dispatch_semaphore_create(0)
self.currentVideoWriter?.finishWritingWithCompletionHandler({
dispatch_semaphore_signal(semaphore)
})
dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER)
}
I then use AVAssetExportSession and AVMutableComposition to add music to that video. This works most of the time.
func exportVideo(sourceFilePath:String, destinationFilePath:String) -> Bool{
let fileManager = NSFileManager()
var success = false
//Compile audio and video together
let composition = AVMutableComposition()
/*
//Setup audio track
let trackAudio:AVMutableCompositionTrack = composition.addMutableTrackWithMediaType(
AVMediaTypeAudio,
preferredTrackID: CMPersistentTrackID()
)
//Add audio file to audio track
let audioFileAsset = AVURLAsset(URL:self.audioFileURL, options:nil)
let audioFileAssetTrack = audioFileAsset.tracksWithMediaType(AVMediaTypeAudio).last as! AVAssetTrack
trackAudio.insertTimeRange(
audioFileAssetTrack.timeRange,
ofTrack: audioFileAssetTrack,
atTime: kCMTimeZero,
error: nil
)*/
//Setup video track
let trackVideo:AVMutableCompositionTrack = composition.addMutableTrackWithMediaType(
AVMediaTypeVideo,
preferredTrackID: CMPersistentTrackID()
)
//Add video file to video track
let videoFileAsset = AVURLAsset(URL: NSURL(fileURLWithPath: sourceFilePath), options: nil)
let videoFileAssetTracks = videoFileAsset.tracksWithMediaType(AVMediaTypeVideo)
if videoFileAssetTracks.count > 0{
let videoFileAssetTrack:AVAssetTrack = videoFileAssetTracks[0] as! AVAssetTrack
trackVideo.insertTimeRange(
videoFileAssetTrack.timeRange,
ofTrack: videoFileAssetTrack,
atTime: kCMTimeZero,
error: nil
)
}
//Export compiled video to disk
if fileManager.fileExistsAtPath(destinationFilePath){
fileManager.removeItemAtPath(destinationFilePath, error: nil)
}
let exporter = AVAssetExportSession(
asset: composition,
presetName: AVAssetExportPresetHighestQuality
)
exporter.outputFileType = AVFileTypeMPEG4
exporter.outputURL = NSURL(fileURLWithPath: destinationFilePath)
let semaphore = dispatch_semaphore_create(0)
exporter.exportAsynchronouslyWithCompletionHandler({
success = true
dispatch_semaphore_signal(semaphore)
})
dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER)
//Delete file at source path
fileManager.removeItemAtPath(sourceFilePath, error: nil)
return success
}
However, when the image is quite large (in terms of resolution, e.g. 1600×1600 and larger), the video created with AVAssetExportSession is 8 seconds long instead of 10. The time difference only occurs when using large images.
It's not a huge problem, as it is highly unlikely that people will use this class with images that large. However, I would still like to know what's going on and how to fix it.
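One way to narrow down where the missing two seconds go is to compare the duration of the intermediate file the writer produced with the duration of the file the export session produced. A rough diagnostic sketch (assuming the temp path from setup() and the destinationFilePath passed to exportVideo, run after the export semaphore is signalled):
//If the temp file is already ~8 seconds, the truncation happens in the writer path
//(addImageToVideoWriter / saveVideoWriterToDisk); if it is 10 seconds and only the
//exported file is 8, the composition/export step is dropping the tail.
let writtenAsset = AVURLAsset(URL: NSURL(fileURLWithPath: self.tempVideoPath!), options: nil)
println("Writer output duration: \(CMTimeGetSeconds(writtenAsset.duration)) s")
let exportedAsset = AVURLAsset(URL: NSURL(fileURLWithPath: destinationFilePath), options: nil)
println("Exported duration: \(CMTimeGetSeconds(exportedAsset.duration)) s")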
Related
I have code that applies CIFilters to a video and plays it once they are applied, but for some reason there is a noticeable lag compared to playing the video without a filter.
func addComposition(editItem: EditItem, addCrop: Bool = true) {
pause()
let renderSize = editItem.getVideoRenderSize(track: track, editItem: editItem, addCrop: addCrop)
let videoComposition = AVMutableVideoComposition(asset: asset) { [weak self] (request) in
guard let self = self else { return }
let input = request.sourceImage.clampedToExtent()
let frameCounter = Float(CMTimeGetSeconds(request.compositionTime)) * self.fps
if !editItem.defersPreview {
let outputImage = editItem.executeVideoFilterPipeline(on: input, excludedTypes: addCrop ? [] : [.crop], frameIndex: frameCounter, request: request, renderSize: renderSize)
if let output = outputImage {
request.finish(with: output, context: self.context)
}
}
}
videoComposition.renderSize = renderSize
videoComposition.sourceTrackIDForFrameTiming = kCMPersistentTrackID_Invalid
let frameRateTooHighForPhone = CMTime(value: 1, timescale: 1000)
videoComposition.frameDuration = frameRateTooHighForPhone
player.currentItem?.videoComposition = videoComposition
composition = videoComposition
play()
}
I already tried updating the frameDuration, hoping it would do the trick.
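One thing that might be worth trying (a sketch, not a guaranteed fix, assuming track is the AVAssetTrack being played and videoComposition is the one built above): ask the compositor for the source frame rate instead of 1000 fps, so the filter pipeline only has to run once per source frame. Setting sourceTrackIDForFrameTiming to the video track's trackID instead of kCMPersistentTrackID_Invalid would have a similar effect by letting the source drive frame timing.
let nominalFPS = track.nominalFrameRate
if nominalFPS > 0 {
    // e.g. a 30 fps source gives a frameDuration of 1/30 instead of 1/1000
    videoComposition.frameDuration = CMTime(value: 1, timescale: CMTimeScale(nominalFPS.rounded()))
}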
I am trying to merge images and video clips together. I provide an option to add an animation between videos and images, with a few choices such as fade-in, fade-out, rotate, slide-up, slide-down, slide-left, slide-right, etc. For images I am able to add the animation, but how do I add an animation for a video? Specifically, when one video clip ends and the next video clip is about to start, that is where I want to add the animation. My merging functionality is working well; only the animation between the videos is missing.
I have tried with:
instruction.setOpacityRamp(fromStartOpacity: <#T##Float#>, toEndOpacity: <#T##Float#>, timeRange: <#T##CMTimeRange#>)
but this option only gives a fade-in/fade-out effect. Where and how do I add the other custom animation effects?
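Besides the opacity ramp, AVMutableVideoCompositionLayerInstruction also offers setTransformRamp(fromStart:toEnd:timeRange:), which can be used for slide-style transitions. A sketch of a slide-in from the left, written against the merge code posted below (assumed names: instruction, outputSize and insertTime from the merge function; if your videoCompositionInstruction helper already applies an orientation/scaling transform, concatenate it with the off-screen offset instead of ending at .identity):
let transitionDuration = CMTime(seconds: 1, preferredTimescale: 600)
let transitionRange = CMTimeRange(start: insertTime, duration: transitionDuration)
// Start fully off-screen to the left, end at the clip's normal position.
let offscreenLeft = CGAffineTransform(translationX: -outputSize.width, y: 0)
instruction.setTransformRamp(fromStart: offscreenLeft, toEnd: .identity, timeRange: transitionRange)
// A cross-fade over the same range can be layered on top of the slide.
instruction.setOpacityRamp(fromStartOpacity: 0.0, toEndOpacity: 1.0, timeRange: transitionRange)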
Here is my source code for merging. There are many dependent functions in the code, but I have posted only the merging functionality. I have marked the spot with //HERE TO ADD THE ANIMATION, so you can jump directly to the point where I am trying to add the animations.
func merge(allAssets: [MovieAssetPresentable], isHDR: Bool, success: @escaping (URL?) -> (Void), progress: @escaping (CGFloat) -> (Void), failed: @escaping (String?) -> (Void)) {
cancelExport()
let defaultSize = isHDR ? self.videoOutputResolution.HD : self.videoOutputResolution.lowQuality
let videoPresetName = self.getPresetName(resolution: defaultSize)
self.mergeSuccess = success
self.mergeError = failed
self.mergeProgress = progress
let mixComposition = AVMutableComposition()
let mainInstruction = AVMutableVideoCompositionInstruction()
var layerInstructions = [AVMutableVideoCompositionLayerInstruction]()
guard let urlVideoForBackground = Bundle.main.url(forResource: "black", withExtension: "mov") else {
self.mergeError("Need black background video !")
return
}
let assetForBackground = AVAsset(url: urlVideoForBackground)
let trackForBackground = assetForBackground.tracks(withMediaType: AVMediaType.video).first
//Set output size
var outputSize = CGSize.zero
for asset in allAssets.filter({$0.assetType! == .video}) {
guard let videoAsset = asset.asset else { continue }
// Get video track
guard let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video).first else { continue }
let assetInfo = self.orientationFromTransform(videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
if videoSize.height > outputSize.height {
outputSize = CGSize(width: defaultSize.width, height: ((videoSize.height / videoSize.width) * defaultSize.width))
}
}
if outputSize == CGSize.zero {
outputSize = defaultSize
}
debugPrint("OUTPUT SIZE: \(outputSize)")
let layerContentsGravity = VideoSettings.shared.fetchVideoFitClips()
var layerImages = [CALayer]()
var insertTime = CMTime.zero
var audioMixInputParameters = [AVMutableAudioMixInputParameters]()
// Init Video layer
let videoLayer = CALayer()
videoLayer.frame = CGRect.init(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
videoLayer.contentsGravity = layerContentsGravity
let parentlayer = CALayer()
parentlayer.frame = CGRect.init(x: 0, y: 0, width: outputSize.width, height: outputSize.height)
parentlayer.addSublayer(videoLayer)
for asset in allAssets.filter({$0.assetType! == .image || $0.assetType! == .video}) {
//Video speed level
let videoSpeed = Double(asset.videoSpeedLevel!)
if asset.assetType! == .video {
//Video asset
let ast = asset.asset!
let duration = asset.endTime! - asset.beginTime! //ast.duration
//Create AVMutableCompositionTrack object
guard let track = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else {
self.mergeError("Unable to create track.")
continue
}
//Add original video sound track
let originalSoundTrack: AVMutableCompositionTrack?
if asset.asset!.tracks(withMediaType: .audio).count > 0 {
originalSoundTrack = mixComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try originalSoundTrack?.insertTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), of: ast.tracks(withMediaType: AVMediaType.audio)[0], at: insertTime)
} catch {
self.mergeError("Unable to create original audio track.")
continue
}
//Set video original sound track speed
originalSoundTrack?.scaleTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), toDuration: CMTime(value: CMTimeValue(Double(duration.value) * videoSpeed), timescale: duration.timescale))
let audioInputParams = AVMutableAudioMixInputParameters(track: originalSoundTrack)
audioInputParams.setVolume(asset.videoOriginalVolume!, at: CMTime.zero)
audioInputParams.trackID = originalSoundTrack?.trackID ?? kCMPersistentTrackID_Invalid
audioMixInputParameters.append(audioInputParams)
}
//Set time range
do {
try track.insertTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration),
of: ast.tracks(withMediaType: AVMediaType.video)[0],
at: insertTime)
} catch let err {
self.mergeError("Failed to load track: \(err.localizedDescription)")
continue
}
//Set video speed
track.scaleTimeRange(CMTimeRange(start: asset.beginTime ?? CMTime.zero, duration: duration), toDuration: CMTime(value: CMTimeValue(Double(duration.value) * videoSpeed), timescale: duration.timescale))
insertTime = CMTimeAdd(insertTime, duration)
let instruction = self.videoCompositionInstruction(track, asset: ast, outputSize: outputSize)
// let instruction = videoCompositionInstructionForTrack(track: t, asset: ast, standardSize: outputSize, atTime: insertTime)
instruction.setOpacity(0.0, at: insertTime)
//HERE TO ADD THE ANIMATION
layerInstructions.append(instruction)
} else {
//Image data
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video,
preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let defaultImageTime = CMTimeGetSeconds(asset.endTime!) - CMTimeGetSeconds(asset.beginTime!)
let duration = CMTime.init(seconds:defaultImageTime, preferredTimescale: assetForBackground.duration.timescale)
do {
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(start: CMTime.zero, duration: duration),
of: trackForBackground!,
at: insertTime)
}
catch {
self.mergeError("Background time range error")
}
guard let image = UIImage(data: asset.imageData!) else { continue }
// Create Image layer
let imageLayer = CALayer()
imageLayer.frame = CGRect.init(origin: CGPoint.zero, size: outputSize)
imageLayer.contents = image.cgImage
imageLayer.opacity = 0
imageLayer.contentsGravity = layerContentsGravity
self.setOrientation(image: image, onLayer: imageLayer)
// Add Fade in & Fade out animation
let fadeInAnimation = CABasicAnimation.init(keyPath: "opacity")
fadeInAnimation.duration = 1
fadeInAnimation.fromValue = NSNumber(value: 0)
fadeInAnimation.toValue = NSNumber(value: 1)
fadeInAnimation.isRemovedOnCompletion = false
fadeInAnimation.beginTime = CMTimeGetSeconds(insertTime) == 0 ? 0.05: CMTimeGetSeconds(insertTime)
fadeInAnimation.fillMode = CAMediaTimingFillMode.forwards
imageLayer.add(fadeInAnimation, forKey: "opacityIN")
let fadeOutAnimation = CABasicAnimation.init(keyPath: "opacity")
fadeOutAnimation.duration = 1
fadeOutAnimation.fromValue = NSNumber(value: 1)
fadeOutAnimation.toValue = NSNumber(value: 0)
fadeOutAnimation.isRemovedOnCompletion = false
fadeOutAnimation.beginTime = CMTimeGetSeconds(CMTimeAdd(insertTime, duration))
fadeOutAnimation.fillMode = CAMediaTimingFillMode.forwards
imageLayer.add(fadeOutAnimation, forKey: "opacityOUT")
layerImages.append(imageLayer)
// Increase the insert time
insertTime = CMTimeAdd(insertTime, duration)
}
}
// Add Image layers
for layer in layerImages {
parentlayer.addSublayer(layer)
}
//Add Water mark if Subscription not activated
if !AddManager.shared.hasActiveSubscription {
let imglogo = UIImage(named: "watermark")
let waterMarklayer = CALayer()
waterMarklayer.contents = imglogo?.cgImage
let sizeOfWaterMark = Utility.getWaterMarkSizeWithVideoSize(videoSize: outputSize, defaultSize: waterMarkSize)
debugPrint("sizeOfWaterMark=\(sizeOfWaterMark)")
waterMarklayer.frame = CGRect(x: outputSize.width - (sizeOfWaterMark.width+10), y: 5, width: sizeOfWaterMark.width, height: sizeOfWaterMark.height)
waterMarklayer.contentsGravity = .resizeAspect
waterMarklayer.opacity = 1.0
parentlayer.addSublayer(waterMarklayer)
}
mainInstruction.timeRange = CMTimeRangeMake(start: CMTime.zero, duration: insertTime)
mainInstruction.layerInstructions = layerInstructions
mainInstruction.backgroundColor = VideoSettings.shared.fetchVideoBackgroundColor().color.cgColor
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
mainComposition.renderSize = outputSize
mainComposition.renderScale = 1.0
mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: parentlayer)
for audioAsset in allAssets.filter({$0.assetType! == .audio}) {
//NOTE: If you need to increase/decrease the audio fade-in/fade-out effect time, modify the fadeInFadeOutEffectTiming variable (in seconds)
let fadeInFadeOutEffectTiming = Double(3) //seconds
let volumeLevel = audioAsset.audioVolumeLevel!
let isFadeIn = audioAsset.audioFadeInEffect!
let isFadeOut = audioAsset.audioFadeOutEffect!
var audioBeginTime = audioAsset.beginTime!
var audioEndTime = audioAsset.endTime!
var audioTrackTime = audioAsset.audioTrackStartTime!
var trimmedAudioDuration = CMTimeSubtract(audioEndTime, audioBeginTime)
//If audio starting position (second) is greater than equals to zero (in order to video length)
if CMTimeGetSeconds(CMTimeAdd(audioTrackTime, audioBeginTime)) >= 0 {
//If the audio starting position (in seconds) is beyond the video length (e.g. the total video length is 20 seconds but the audio starts at 24 seconds), we should not add the audio
if CMTimeCompare(CMTimeAdd(audioTrackTime, audioBeginTime), insertTime) == 1 {
trimmedAudioDuration = CMTime.zero
} else {
//If audio start position (seconds) + crop length is exceed total video length, we should add only the part within the video
if CMTimeCompare(CMTimeAdd(CMTimeAdd(audioTrackTime, audioBeginTime), trimmedAudioDuration), insertTime) == 1 {
audioTrackTime = CMTimeAdd(audioTrackTime, audioBeginTime)
trimmedAudioDuration = CMTimeSubtract(insertTime, audioTrackTime)
} else {
audioTrackTime = CMTimeAdd(audioTrackTime, audioBeginTime)
}
}
}
//If audio start time is in negative (second)
else {
//If audio crop length is in negative (second)
if CMTimeCompare(CMTimeAdd(CMTimeAdd(audioTrackTime, audioBeginTime), trimmedAudioDuration), CMTime.zero) == -1 {
trimmedAudioDuration = CMTime.zero
} else {
audioBeginTime = CMTime(seconds: abs(CMTimeGetSeconds(audioTrackTime)), preferredTimescale: audioTrackTime.timescale)
audioTrackTime = CMTime.zero
trimmedAudioDuration = CMTimeSubtract(audioEndTime, audioBeginTime)
if CMTimeCompare(trimmedAudioDuration, insertTime) == 1 {
trimmedAudioDuration = insertTime
}
}
}
if trimmedAudioDuration != CMTime.zero {
audioEndTime = CMTimeAdd(audioTrackTime, trimmedAudioDuration)
let audioTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try audioTrack?.insertTimeRange(CMTimeRangeMake(start: audioBeginTime , duration: trimmedAudioDuration),
of: audioAsset.asset!.tracks(withMediaType: AVMediaType.audio)[0] ,
at: audioTrackTime)
let audioInputParams = AVMutableAudioMixInputParameters(track: audioTrack)
var effectTime = CMTime(seconds: fadeInFadeOutEffectTiming, preferredTimescale: 600)
if CMTimeCompare(trimmedAudioDuration, CMTimeMultiply(effectTime, multiplier: 2)) == -1 {
effectTime = CMTime(seconds: CMTimeGetSeconds(trimmedAudioDuration) / 2, preferredTimescale: 600)
}
//Fade in effect
audioInputParams.setVolumeRamp(fromStartVolume: isFadeIn ? 0 : volumeLevel, toEndVolume: volumeLevel, timeRange: CMTimeRange(start: audioTrackTime, duration: effectTime))
//Fade out effect
audioInputParams.setVolumeRamp(fromStartVolume: volumeLevel, toEndVolume: isFadeOut ? 0 : volumeLevel, timeRange: CMTimeRange(start: CMTimeSubtract(audioEndTime, effectTime), duration: effectTime))
audioInputParams.trackID = audioTrack?.trackID ?? kCMPersistentTrackID_Invalid
audioMixInputParameters.append(audioInputParams)
} catch {
print("Failed to load Audio track")
}
}
}
// 4 - Get path
guard let url = Utility.createFileAtDocumentDirectory(name: "mergeVideo-\(Date().timeIntervalSince1970).mp4") else {
debugPrint("Unable to file at document directory")
return
}
// 5 - Create Exporter
self.exporter = AVAssetExportSession(asset: mixComposition, presetName: videoPresetName)
guard let exp = self.exporter else {
debugPrint("Unable to export.")
return
}
let audioMix = AVMutableAudioMix()
audioMix.inputParameters = audioMixInputParameters
exp.outputURL = url
exp.outputFileType = AVFileType.mp4
exp.shouldOptimizeForNetworkUse = true
exp.videoComposition = mainComposition
exp.audioMix = audioMix
//self.viewPieProgress.setProgress(0.0, animated: false)
//viewPieProgress.isHidden = isHDR
//timer for progress
self.timer = Timer.scheduledTimer(timeInterval: 1.0, target: self, selector: #selector(self.updateExportingProgress(timer:)), userInfo: exp, repeats: true)
// 6 - Perform the Export
exp.exportAsynchronously() {
DispatchQueue.main.async {
self.exportDidFinish(exp)
}
}
}
I have tried many options but nothing meets my requirement. Please help me out.
If you require any other information from me, please feel free to comment on this post.
Thanks in advance.
Everything works as expected if I turn off the mirroring on the front camera. However, if I turn it on, my final exported video has serious resizing problems.
This is how I currently manage the mirroring for my videos:
if currentDevice == frontCamera {
if let connection = output.connections.first {
if connection.isVideoMirroringSupported {
connection.automaticallyAdjustsVideoMirroring = false
connection.isVideoMirrored = true //if true, this bug occurs.
}
}
}else {
//disabling photo mirroring on backCamera
if let connection = output.connections.first {
if connection.isVideoMirroringSupported {
connection.automaticallyAdjustsVideoMirroring = false
connection.isVideoMirrored = false
}
}
}
And this is how I export the video:
/// Create AVMutableComposition object. This object will hold the AVMutableCompositionTrack instances.
let mainMutableComposition = AVMutableComposition()
/// Creating an empty video track
let videoTrack = mainMutableComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: kCMPersistentTrackID_Invalid)
let videoAssetTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
do {
//Adding the video track
try videoTrack?.insertTimeRange(CMTimeRange(start: kCMTimeZero, duration: videoAsset.duration), of: videoAsset.tracks(withMediaType: AVMediaType.video).first!, at: kCMTimeZero)
} catch {
completion(false, nil)
}
/// Adding audio if user wants to.
if withAudio {
do {
//Adding the video track
let audio = videoAsset.tracks(withMediaType: AVMediaType.audio).first
if audio != nil {
let audioTrack = mainMutableComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: kCMPersistentTrackID_Invalid)
try audioTrack?.insertTimeRange(CMTimeRange(start: kCMTimeZero, duration: videoAsset.duration), of: audio!, at: kCMTimeZero)
}
} catch {
completion(false, nil)
}
}
// * MARK - Composition is ready ----------
// Create AVMutableVideoCompositionInstruction
let compositionInstructions = AVMutableVideoCompositionInstruction()
compositionInstructions.timeRange = CMTimeRange(start: kCMTimeZero, duration: videoAsset.duration)
// Create an AVMutableVideoCompositionLayerInstruction
let videoLayerInstruction = AVMutableVideoCompositionLayerInstruction.init(assetTrack: videoTrack!)
videoLayerInstruction.setTransform(videoAssetTrack.preferredTransform, at: kCMTimeZero)
compositionInstructions.layerInstructions = [videoLayerInstruction]
//Add instructions
let videoComposition = AVMutableVideoComposition()
let naturalSize : CGSize = videoAssetTrack.naturalSize
///Rendering image into video
let renderWidth = naturalSize.width
let renderHeight = naturalSize.height
//Assigning instructions and rendering size
videoComposition.renderSize = CGSize(width: renderWidth, height: renderHeight)
videoComposition.instructions = [compositionInstructions]
videoComposition.frameDuration = CMTime(value: 1, timescale: Int32((videoTrack?.nominalFrameRate)!))
//Applying image to instruction
self.applyVideoImage(to: videoComposition, withSize: naturalSize, image: image)
// Getting the output path
let documentsURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first
let outputPath = documentsURL?.appendingPathComponent("lastEditedVideo.mp4")
if FileManager.default.fileExists(atPath: (outputPath?.path)!) {
do {
try FileManager.default.removeItem(atPath: (outputPath?.path)!)
}
catch {
completion(false, nil)
}
}
// Create exporter
let exporter = NextLevelSessionExporter(withAsset: mainMutableComposition)
exporter.outputURL = outputPath
exporter.outputFileType = AVFileType.mp4
exporter.videoComposition = videoComposition
let compressionDict: [String: Any] = [
AVVideoAverageBitRateKey: NSNumber(integerLiteral: 2300000),
AVVideoProfileLevelKey: AVVideoProfileLevelH264BaselineAutoLevel as String
]
exporter.videoOutputConfiguration = [
AVVideoCodecKey: AVVideoCodecType.h264,
AVVideoWidthKey: NSNumber(integerLiteral: Int(naturalSize.width)),
AVVideoHeightKey: NSNumber(integerLiteral: Int(naturalSize.height)),
AVVideoCompressionPropertiesKey: compressionDict
]
exporter.audioOutputConfiguration = [
AVFormatIDKey: kAudioFormatMPEG4AAC,
AVEncoderBitRateKey: NSNumber(integerLiteral: 128000),
AVNumberOfChannelsKey: NSNumber(integerLiteral: 2),
AVSampleRateKey: NSNumber(value: Float(44100))
]
completion(true, exporter)
}
I'm using the NextLevelSessionExporter to export the video. It doesn't matter if I use the default exporter or not, the resizing problems still occur.
There is an active bug that prevents you from exporting mirrored videos correctly. You need a few workarounds:
Turn off the mirroring on the movieOutputFile
Manually flip the video horizontally when needed:
if needsMirroring == true {
var transform:CGAffineTransform = CGAffineTransform(scaleX: -1.0, y: 1.0)
transform = transform.translatedBy(x: -naturalSize.width, y: 0.0)
transform = transform.rotated(by: CGFloat(Double.pi/2))
transform = transform.translatedBy(x: 0.0, y: -naturalSize.width)
videoTransform = transform
}
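Then use that transform in place of the preferredTransform when building the layer instruction, e.g. (a sketch, assuming the same videoLayerInstruction and videoTransform names as above):
videoLayerInstruction.setTransform(videoTransform, at: kCMTimeZero)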
It took me days to figure this out, hope it helps.
I am developing a video application in Swift 3 where I have to convert text to a video, add a fade-in and fade-out effect, and then post the video with the fade effect to a server. I do not want to use any third-party library for the fade effect.
I am able to convert my text to a video; my problem is how to add a fade in and a fade out to the video AVAsset.
Can anyone suggest how to achieve this? I cannot find any recent answers to this problem. Thanks for any help!
Fade Out effect
let parentLayer = CALayer()
let fadeOut = CABasicAnimation(keyPath: "opacity")
fadeOut.fromValue = 1.0
fadeOut.toValue = 0.0
fadeOut.duration = 5.0 // this should be the video duration
fadeOut.setValue("video", forKey:"fadeOut")
fadeOut.isRemovedOnCompletion = false
fadeOut.fillMode = CAMediaTimingFillMode.forwards
parentLayer.add(fadeOut, forKey: "opacity")
Fade in effect
let fadeIn = CABasicAnimation(keyPath: "opacity")
fadeIn.fromValue = 0.0
fadeIn.toValue = 1.0
Add to your player
self.playerView?.playerLayer?.add(fadeOut, forKey: nil)
Add to your assets
var startTime = CMTime.zero
var timeDuration = CMTimeMake(value: 3, timescale: 1)
let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
//MARK: Fade in effect
layerInstruction.setOpacityRamp(fromStartOpacity: 0.0, toEndOpacity: 1.0, timeRange: CMTimeRange(start: startTime, duration: timeDuration))
//MARK: Fade out effect
startTime = CMTimeSubtract(mutableComposition.duration, CMTimeMake(value: 3, timescale: 1))
timeDuration = CMTimeMake(value: 3, timescale: 1)
layerInstruction.setOpacityRamp(
fromStartOpacity: 1.0,
toEndOpacity: 0.0,
timeRange: CMTimeRangeMake(start: startTime, duration: timeDuration)
)
AVVideoCompositionLayerInstruction
Specifies how video frames from a source track should be layered and composed; an instruction's layerInstructions property holds an array of these objects.
AVMutableVideoCompositionInstruction
An AVVideoComposition object maintains an array of these instructions to perform its composition.
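To actually bake the opacity ramps into an exported file, the layer instruction has to be wrapped in an AVMutableVideoCompositionInstruction and attached to an AVMutableVideoComposition on the export session. A minimal sketch (assuming mutableComposition, videoTrack and layerInstruction from the snippet above):
let instruction = AVMutableVideoCompositionInstruction()
instruction.timeRange = CMTimeRange(start: .zero, duration: mutableComposition.duration)
instruction.layerInstructions = [layerInstruction]

let videoComposition = AVMutableVideoComposition()
videoComposition.instructions = [instruction]
videoComposition.frameDuration = CMTimeMake(value: 1, timescale: 30)
videoComposition.renderSize = videoTrack.naturalSize

let exporter = AVAssetExportSession(asset: mutableComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.videoComposition = videoComposition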
Example (Swift 4):
I merged videos with fade-in and fade-out effects and changed the sequence of the audio.
func doMerge(arrayVideos:[AVAsset], arrayAudios:[AVAsset], animation:Bool, completion:@escaping Completion) -> Void {
var insertTime = kCMTimeZero
var audioInsertTime = kCMTimeZero
var arrayLayerInstructions:[AVMutableVideoCompositionLayerInstruction] = []
var outputSize = CGSize.init(width: 0, height: 0)
// Determine video output size
for videoAsset in arrayVideos {
let videoTrack = videoAsset.tracks(withMediaType: AVMediaType.video)[0]
let assetInfo = orientationFromTransform(transform: videoTrack.preferredTransform)
var videoSize = videoTrack.naturalSize
if assetInfo.isPortrait == true {
videoSize.width = videoTrack.naturalSize.height
videoSize.height = videoTrack.naturalSize.width
}
outputSize = videoSize
}
// Init composition
let mixComposition = AVMutableComposition.init()
for index in 0..<arrayVideos.count {
// Get video track
guard let videoTrack = arrayVideos[index].tracks(withMediaType: AVMediaType.video).first else { continue }
// Get audio track
var audioTrack:AVAssetTrack?
if index < arrayAudios.count {
if arrayAudios[index].tracks(withMediaType: AVMediaType.audio).count > 0 {
audioTrack = arrayAudios[index].tracks(withMediaType: AVMediaType.audio).first
}
}
// Init video & audio composition track
let videoCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
let audioCompositionTrack = mixComposition.addMutableTrack(withMediaType: AVMediaType.audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid))
do {
let startTime = kCMTimeZero
let duration = arrayVideos[index].duration
// Add video track to video composition at specific time
try videoCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, duration), of: videoTrack, at: insertTime)
// Add audio track to audio composition at specific time
var audioDuration = kCMTimeZero
if index < arrayAudios.count {
audioDuration = arrayAudios[index].duration
}
if let audioTrack = audioTrack {
do {
try audioCompositionTrack?.insertTimeRange(CMTimeRangeMake(startTime, audioDuration), of: audioTrack, at: audioInsertTime)
}
catch {
print(error.localizedDescription)
}
}
// Add instruction for video track
let layerInstruction = videoCompositionInstructionForTrack(track: videoCompositionTrack!, asset: arrayVideos[index], standardSize: outputSize, atTime: insertTime)
// Hide video track before changing to new track
let endTime = CMTimeAdd(insertTime, duration)
if animation {
let timeScale = arrayVideos[index].duration.timescale
let durationAnimation = CMTime.init(seconds: 1, preferredTimescale: timeScale)
layerInstruction.setOpacityRamp (fromStartOpacity: 1.0, toEndOpacity: 0.0, timeRange: CMTimeRange.init(start: endTime, duration: durationAnimation))
}
else {
layerInstruction.setOpacity(0, at: endTime)
}
arrayLayerInstructions.append(layerInstruction)
// Increase the insert time
audioInsertTime = CMTimeAdd(audioInsertTime, audioDuration)
insertTime = CMTimeAdd(insertTime, duration)
}
catch {
print("Load track error")
}
}
// Main video composition instruction
let mainInstruction = AVMutableVideoCompositionInstruction()
mainInstruction.timeRange = CMTimeRangeMake(kCMTimeZero, insertTime)
mainInstruction.layerInstructions = arrayLayerInstructions
// Main video composition
let mainComposition = AVMutableVideoComposition()
mainComposition.instructions = [mainInstruction]
mainComposition.frameDuration = CMTimeMake(1, 30)
mainComposition.renderSize = outputSize
// Export to file
let path = NSTemporaryDirectory().appending("mergedVideo.mp4")
let exportURL = URL.init(fileURLWithPath: path)
// Remove file if existed
FileManager.default.removeItemIfExisted(exportURL)
// Init exporter
let exporter = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
exporter?.outputURL = exportURL
exporter?.outputFileType = AVFileType.mp4
exporter?.shouldOptimizeForNetworkUse = true
exporter?.videoComposition = mainComposition
// Do export
exporter?.exportAsynchronously(completionHandler: {
DispatchQueue.main.async {
self.exportDidFinish(exporter: exporter, videoURL: exportURL, completion: completion)
}
})
}
My app makes a video file from many images that are produced by code.
When my code has finished making an image and put it in myImage, it toggles isImageReady to true. And when self.i is set (or changed), a property observer starts making another image. Finally, self.iReset is set to true when there are no more images to be produced.
But the app is terminated due to a memory issue during the while loop. I have commented out the if statement that actually assembles the video frames, and it still has a memory issue, so I think the problem lives in the while loop inside the requestMediaDataWhenReadyOnQueue:usingBlock: closure.
I have no idea how to solve the problem. Please help me.
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
let fps: Int32 = 30
let frameDuration = CMTimeMake(1, fps)
var lastFrameTime:CMTime = CMTime()
var presentationTime:CMTime = CMTime()
while (self.iReset != true) {
if videoWriterInput.readyForMoreMediaData && self.isImageReady {
lastFrameTime = CMTimeMake(Int64(self.i), fps)
presentationTime = self.i == 1 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
//commented out for tracking
/* if !self.appendPixelBufferForImage(self.myImage, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
error = NSError(
domain: kErrorDomain,
code: kFailedToAppendPixelBufferError,
userInfo: [
"description": "AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer",
"rawError": videoWriter.error ?? "(none)"])
break
} */
self.isImageReady = false
self.i++
}// if ..&&..
} //while loop ends
// Finish writing
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
if error == nil {
print("Finished Making a Movie !!")
success(videoOutputURL)
}
self.videoWriter = nil
}
}) // requestMediaDataWhenReadyOnQueue ends
}
Probably too late, but I had a similar issue to this (images in a loop) which caused me a major headache. My solution was to put an autoreleasepool inside the loop, which solved the issue.
As @C. Carter suggested, you can use autoreleasepool to free the used memory, like below:
autoreleasepool {
/* your code */
}
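Applied to the loop from the question, that could look roughly like this (a sketch using the same names as in the question's requestMediaDataWhenReadyOnQueue block: self.i, self.isImageReady, self.iReset, videoWriterInput, fps, frameDuration):
while self.iReset != true {
    autoreleasepool {
        if videoWriterInput.readyForMoreMediaData && self.isImageReady {
            lastFrameTime = CMTimeMake(Int64(self.i), fps)
            presentationTime = self.i == 1 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
            // append the pixel buffer here, as in the original code
            self.isImageReady = false
            self.i += 1
        }
    }
}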
Other than that, here is code I use to make a movie from UIImages and audio, which works perfectly fine.
func build(chosenPhotos: [UIImage], audioURL: NSURL, completion: (NSURL) -> ()) {
showLoadingIndicator(appDel.window!.rootViewController!.view)
let outputSize = CGSizeMake(640, 480)
var choosenPhotos = chosenPhotos
let fileManager = NSFileManager.defaultManager()
let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
guard let documentDirectory: NSURL = urls.first else {
fatalError("documentDir Error")
}
let videoOutputURL = documentDirectory.URLByAppendingPathComponent("OutputVideo.mp4")
print("Video Output URL \(videoOutputURL)")
if NSFileManager.defaultManager().fileExistsAtPath(videoOutputURL.path!) {
do {
try NSFileManager.defaultManager().removeItemAtPath(videoOutputURL.path!)
} catch {
fatalError("Unable to delete file: \(error) : \(#function).")
}
}
guard let videoWriter = try? AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeMPEG4) else {
fatalError("AVAssetWriter error")
}
let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(float: Float(outputSize.width)), AVVideoHeightKey : NSNumber(float: Float(outputSize.height))]
guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
let sourcePixelBufferAttributesDictionary = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(unsignedInt: kCVPixelFormatType_32ARGB), kCVPixelBufferWidthKey as String: NSNumber(float: Float(outputSize.width)), kCVPixelBufferHeightKey as String: NSNumber(float: Float(outputSize.height))]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if videoWriter.canAddInput(videoWriterInput) {
videoWriter.addInput(videoWriterInput)
}
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
let fps: Int32 = 1
let frameDuration = CMTimeMake(1, fps)
var frameCount: Int64 = 0
var appendSucceeded = true
while (!choosenPhotos.isEmpty) {
if (videoWriterInput.readyForMoreMediaData) {
let nextPhoto = choosenPhotos.removeAtIndex(0)
let lastFrameTime = CMTimeMake(frameCount, fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer where status == 0 {
let managedPixelBuffer = pixelBuffer
CVPixelBufferLockBaseAddress(managedPixelBuffer, 0)
let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGBitmapContextCreate(data, Int(outputSize.width), Int(outputSize.height), 8, CVPixelBufferGetBytesPerRow(managedPixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)
CGContextClearRect(context, CGRectMake(0, 0, CGFloat(outputSize.width), CGFloat(outputSize.height)))
let horizontalRatio = CGFloat(outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(outputSize.height) / nextPhoto.size.height
//aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize:CGSize = CGSizeMake(nextPhoto.size.width * aspectRatio, nextPhoto.size.height * aspectRatio)
let x = newSize.width < outputSize.width ? (outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < outputSize.height ? (outputSize.height - newSize.height) / 2 : 0
CGContextDrawImage(context, CGRectMake(x, y, newSize.width, newSize.height), nextPhoto.CGImage)
CVPixelBufferUnlockBaseAddress(managedPixelBuffer, 0)
appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
} else {
print("Failed to allocate pixel buffer")
appendSucceeded = false
}
}
if !appendSucceeded {
break
}
frameCount += 1
}
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
print("FINISHED!!!!!")
self.compileToMakeMovie(videoOutputURL, audioOutPutURL: audioURL, completion: { url in
completion(url)
})
}
})
}
}
func compileToMakeMovie(videoOutputURL: NSURL, audioOutPutURL: NSURL, completion: (NSURL) -> ()){
let mixComposition = AVMutableComposition()
let fileManager = NSFileManager.defaultManager()
let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
guard let documentDirectory: NSURL = urls.first else {
fatalError("documentDir Error")
}
let actualVideoURl = documentDirectory.URLByAppendingPathComponent("OutputVideoMusic.mp4")
print("Video Output URL \(actualVideoURl)")
if NSFileManager.defaultManager().fileExistsAtPath(actualVideoURl.path!) {
do {
try NSFileManager.defaultManager().removeItemAtPath(actualVideoURl.path!)
} catch {
fatalError("Unable to delete file: \(error) : \(#function).")
}
}
let nextClipStartTime = kCMTimeZero
let videoAsset = AVURLAsset(URL: videoOutputURL)
let video_timeRange = CMTimeRangeMake(kCMTimeZero,videoAsset.duration)
let a_compositionVideoTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeVideo, preferredTrackID: kCMPersistentTrackID_Invalid)
try! a_compositionVideoTrack.insertTimeRange(video_timeRange, ofTrack: videoAsset.tracksWithMediaType(AVMediaTypeVideo)[0], atTime: nextClipStartTime)
let audioAsset = AVURLAsset(URL: audioOutPutURL)
let audio_timeRange = CMTimeRangeMake(kCMTimeZero,audioAsset.duration)
let b_compositionAudioTrack = mixComposition.addMutableTrackWithMediaType(AVMediaTypeAudio, preferredTrackID: kCMPersistentTrackID_Invalid)
do {
try b_compositionAudioTrack.insertTimeRange(audio_timeRange, ofTrack: audioAsset.tracksWithMediaType(AVMediaTypeAudio)[0], atTime: nextClipStartTime)
}catch _ {}
let assetExport = AVAssetExportSession.init(asset: mixComposition, presetName: AVAssetExportPresetHighestQuality)
assetExport?.outputFileType = "com.apple.quicktime-movie"
assetExport?.outputURL = actualVideoURl
assetExport?.exportAsynchronouslyWithCompletionHandler({
completion(actualVideoURl)
})
}
Let me know if you're still facing the issue.