I'm trying to convert an array of UIImages to video, but the resulting file contains a lot of black frames (for example, 4 black frames at the beginning, then 3 good frames, then 3 black frames and 2 good frames, with this pattern repeating until the end of the video).
My code is based on this solution, but I believe the main source of the problem is in this part of the code:
func build(progress: (NSProgress -> Void), success: (NSURL -> Void), failure: (NSError -> Void)) {
//videosizes and path to temp output file
let inputSize = CGSize(width: 568, height: 320)
let outputSize = CGSize(width: 568, height: 320)
var error: NSError?
let documentsPath = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0] as! NSString
let videoOutputURL = NSURL(fileURLWithPath: documentsPath.stringByAppendingPathComponent("TempVideo.mov"))!
NSFileManager.defaultManager().removeItemAtURL(videoOutputURL, error: nil)
videoWriter = AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeMPEG4, error: &error)
if let videoWriter = videoWriter {
let videoSettings: [NSObject : AnyObject] = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : outputSize.width,
AVVideoHeightKey : outputSize.height,
]
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(
assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: [
kCVPixelBufferPixelFormatTypeKey : kCVPixelFormatType_32ARGB,
kCVPixelBufferWidthKey : inputSize.width,
kCVPixelBufferHeightKey : inputSize.height,
]
)
assert(videoWriter.canAddInput(videoWriterInput))
videoWriter.addInput(videoWriterInput)
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
let fps: Int32 = 30
let frameDuration = CMTimeMake(1, fps)
let currentProgress = NSProgress(totalUnitCount: Int64(self.photoURLs.count))
var frameCount: Int64 = 0
for var i = 0; i < self.photoURLs.count - 1; i++ {
var currentFrame = self.photoURLs[i]
var lastFrameTime = CMTimeMake(Int64(i), fps)
var presentationTime = CMTimeAdd(lastFrameTime, frameDuration)
//this one is needed because sometimes videoWriter is not ready, and we have to wait for a while
while videoWriterInput.readyForMoreMediaData == false {
var maxDate = NSDate(timeIntervalSinceNow: 0.5)
var currentRunLoop = NSRunLoop()
currentRunLoop.runUntilDate(maxDate)
}
self.appendPixelBufferForImageAtURL(currentFrame, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime)
frameCount++
currentProgress.completedUnitCount = frameCount
progress(currentProgress)
}
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
if error == nil {
success(videoOutputURL)
}
}
})
} else {
error = NSError(
domain: kErrorDomain,
code: kFailedToStartAssetWriterError,
userInfo: ["description": "AVAssetWriter failed to start writing"]
)
}
}
if let error = error {
failure(error)
}
}
Obviously I'm doing something wrong, but what? I think the problem is here, because some of the images convert without any issues. For completeness, here are the two other functions that handle the pixel buffer:
func appendPixelBufferForImageAtURL(image: UIImage, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
var appendSucceeded = true
autoreleasepool {
var pixelBuffer: Unmanaged<CVPixelBuffer>?
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(
kCFAllocatorDefault,
pixelBufferAdaptor.pixelBufferPool,
&pixelBuffer
)
if let pixelBuffer = pixelBuffer where status == 0 {
let managedPixelBuffer = pixelBuffer.takeRetainedValue()
fillPixelBufferFromImage(image, pixelBuffer: managedPixelBuffer)
appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(
managedPixelBuffer,
withPresentationTime: presentationTime
)
} else {
NSLog("error: Failed to allocate pixel buffer from pool")
}
}
return appendSucceeded
}
func fillPixelBufferFromImage(image: UIImage, pixelBuffer: CVPixelBufferRef) {
let imageData = CGDataProviderCopyData(CGImageGetDataProvider(image.CGImage))
let lockStatus:UInt8 = UInt8(CVPixelBufferLockBaseAddress(pixelBuffer, 0))
let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer)
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGBitmapContextCreate(
pixelData,
Int(568),
Int(320),
8,
Int(8 * 320),
rgbColorSpace,
bitmapInfo
)
var imageDataProvider = CGDataProviderCreateWithCFData(imageData)
var imageRef = CGImageCreateWithJPEGDataProvider(imageDataProvider, nil, true, kCGRenderingIntentDefault)
CGContextDrawImage(context, CGRectMake(0, 0, 568, 320), imageRef)
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0)
}
So I was able to solve this by rewriting fillPixelBufferFromImage, using an example I found here: CVPixelBufferPool Error (kCVReturnInvalidArgument/-6661)
Here's the Swift 2 - Xcode 7 GM solution that's working for me:
public func build(progress: (NSProgress -> Void), success: (NSURL -> Void), failure: (NSError -> Void)) {
let inputSize = CGSize(width: 600, height: 600)
let outputSize = CGSize(width: 600, height: 600)
var error: NSError?
let fileManager = NSFileManager.defaultManager()
let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
guard let documentDirectory: NSURL = urls.first else {
fatalError("documentDir Error")
}
let videoOutputURL = documentDirectory.URLByAppendingPathComponent("AssembledVideo.mov")
if NSFileManager.defaultManager().fileExistsAtPath(videoOutputURL.path!) {
do {
try NSFileManager.defaultManager().removeItemAtPath(videoOutputURL.path!)
}catch{
fatalError("Unable to delete file: \(error) : \(__FUNCTION__).")
}
}
guard let videoWriter = try? AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeQuickTimeMovie) else{
fatalError("AVAssetWriter error")
}
let outputSettings = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : NSNumber(float: Float(outputSize.width)),
AVVideoHeightKey : NSNumber(float: Float(outputSize.height)),
]
guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
let sourcePixelBufferAttributesDictionary = [
kCVPixelBufferPixelFormatTypeKey as String: NSNumber(unsignedInt: kCVPixelFormatType_32ARGB),
kCVPixelBufferWidthKey as String: NSNumber(float: Float(inputSize.width)),
kCVPixelBufferHeightKey as String: NSNumber(float: Float(inputSize.height)),
]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(
assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary
)
assert(videoWriter.canAddInput(videoWriterInput))
videoWriter.addInput(videoWriterInput)
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
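// Note: pixelBufferAdaptor.pixelBufferPool is nil until startWriting() has been called, which is why the assert below comes after it.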
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
let fps: Int32 = 1
let frameDuration = CMTimeMake(1, fps)
let currentProgress = NSProgress(totalUnitCount: Int64(self.photoURLs.count))
var frameCount: Int64 = 0
var remainingPhotoURLs = [String](self.photoURLs)
while (videoWriterInput.readyForMoreMediaData && !remainingPhotoURLs.isEmpty) {
let nextPhotoURL = remainingPhotoURLs.removeAtIndex(0)
let lastFrameTime = CMTimeMake(frameCount, fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
if !self.appendPixelBufferForImageAtURL(nextPhotoURL, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
error = NSError(domain: kErrorDomain, code: kFailedToAppendPixelBufferError,
userInfo: [
"description": "AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer",
"rawError": videoWriter.error ?? "(none)"
])
break
}
frameCount++
currentProgress.completedUnitCount = frameCount
progress(currentProgress)
}
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
if error == nil {
success(videoOutputURL)
}
}
})
} else {
error = NSError(domain: kErrorDomain, code: kFailedToStartAssetWriterError,
userInfo: ["description": "AVAssetWriter failed to start writing"]
)
}
if let error = error {
failure(error)
}
}
public func appendPixelBufferForImageAtURL(urlString: String, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
var appendSucceeded = true
autoreleasepool {
if let image = UIImage(contentsOfFile: urlString) {
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer where status == 0 {
let managedPixelBuffer = pixelBuffer
fillPixelBufferFromImage(image.CGImage!, pixelBuffer: managedPixelBuffer)
appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
} else {
NSLog("error: Failed to allocate pixel buffer from pool")
}
}
}
return appendSucceeded
}
func fillPixelBufferFromImage(image: CGImage, pixelBuffer: CVPixelBuffer){
let frameSize = CGSizeMake(CGFloat(CGImageGetWidth(image)), CGFloat(CGImageGetHeight(image)))
CVPixelBufferLockBaseAddress(pixelBuffer, 0)
let data = CVPixelBufferGetBaseAddress(pixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGBitmapContextCreate(data, Int(frameSize.width), Int(frameSize.height), 8, CVPixelBufferGetBytesPerRow(pixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)
CGContextDrawImage(context, CGRectMake(0, 0, CGFloat(CGImageGetWidth(image)), CGFloat(CGImageGetHeight(image))), image)
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0)
}
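For reference, here is a hedged usage sketch of the build function above (Swift 2 style; TimeLapseBuilder is a hypothetical name for whatever class owns photoURLs and these methods). Note that the progress and success closures are invoked on the background media queue, so dispatch to the main queue before touching any UI:
let builder = TimeLapseBuilder(photoURLs: photoURLs) // hypothetical type and initializer
builder.build(
    { progress in
        print("Frames appended: \(progress.completedUnitCount)/\(progress.totalUnitCount)")
    },
    success: { url in
        print("Video written to \(url)")
    },
    failure: { error in
        print("Video build failed: \(error.localizedDescription)")
    }
)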
Working project files here:
https://github.com/justinlevi/imagesToVideo
I have an app where I get all the frames from the camera using AVFoundation and process them using the code below. I was wondering if there is a way to make this part multithreaded so it can run faster: maybe putting each frame into a queue on one thread, having another thread process that queue, and a third queue to show the processed output of each frame? I don't know if this can be done, but I ask because processing each frame can take a while, and the output image freezes as a result.
This is the code for the CaptureManager class:
class CaptureManager: NSObject {
internal static let shared = CaptureManager()
weak var delegate: CaptureManagerDelegate?
var session: AVCaptureSession?
var isBackCamera = true
override init() {
super.init()
session = AVCaptureSession()
session?.sessionPreset = .high
//setup input
var device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
let defaults = UserDefaults.standard
if let stringOne = defaults.string(forKey: defaultsKeys.rememberCamera) {
if(stringOne != "back"){
device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)
}
}else{
defaults.set("back", forKey: defaultsKeys.rememberCamera)
}
if(device != nil){
device?.set(frameRate: 30)
let input = try! AVCaptureDeviceInput(device: device!)
session?.addInput(input)
//setup output
let output = AVCaptureVideoDataOutput()
output.alwaysDiscardsLateVideoFrames = true
output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
session?.addOutput(output)
}else{
print("no camera")
}
}
func startSession() {
session?.startRunning()
}
func stopSession() {
session?.stopRunning()
}
func switchCamera(){
//Remove existing input
guard let currentCameraInput: AVCaptureInput = session?.inputs.first else {
return
}
//Indicate that some changes will be made to the session
session?.beginConfiguration()
session?.removeInput(currentCameraInput)
let defaults = UserDefaults.standard
if let stringOne = defaults.string(forKey: defaultsKeys.rememberCamera) {
if(stringOne == "back"){
defaults.set("front", forKey: defaultsKeys.rememberCamera)
}else{
defaults.set("back", forKey: defaultsKeys.rememberCamera)
}
}
//Get new input
var newCamera: AVCaptureDevice! = nil
if let input = currentCameraInput as? AVCaptureDeviceInput {
if (input.device.position == .back) {
newCamera = cameraWithPosition(position: .front)
} else {
newCamera = cameraWithPosition(position: .back)
}
}
newCamera.set(frameRate: 30)
//Add input to session
var err: NSError?
var newVideoInput: AVCaptureDeviceInput!
do {
newVideoInput = try AVCaptureDeviceInput(device: newCamera)
} catch let err1 as NSError {
err = err1
newVideoInput = nil
}
if newVideoInput == nil || err != nil {
print("Error creating capture device input: \(err?.localizedDescription)")
} else {
session?.addInput(newVideoInput)
}
isBackCamera.toggle()
//Commit all the configuration changes at once
session?.commitConfiguration()
}
func cameraWithPosition(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .unspecified)
for device in discoverySession.devices {
if device.position == position {
return device
}
}
return nil
}
func getImageFromSampleBuffer(sampleBuffer: CMSampleBuffer) ->UIImage? {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return nil
}
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else {
return nil
}
guard let cgImage = context.makeImage() else {
return nil
}
var image: UIImage
let defaults = UserDefaults.standard
if let stringOne = defaults.string(forKey: defaultsKeys.rememberCamera) {
if(stringOne == "back"){
image = UIImage(cgImage: cgImage, scale: 1, orientation:.right)
}else{
image = UIImage(cgImage: cgImage, scale: 1, orientation:.leftMirrored)
}
}else{
image = UIImage(cgImage: cgImage, scale: 1, orientation:.right)
}
CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
return image
}
}
This is the extension that processes each frame:
extension CaptureManager: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else {
return
}
delegate?.processCapturedImage(image: outputImage)
}
}
Process function:
extension ViewController: CaptureManagerDelegate {
func processCapturedImage(image: UIImage) {
self.imageView.image = ...
//process image
}
}
And this is how it's called in the ViewController:
CaptureManager.shared.startSession()
I fear that your question mentions queues more often than your code samples do... but you don't need to fear anymore, we've got this!
Before we modify any code, let's agree on this: the camera itself deserves its own queue. Not DispatchQueue.main, never.
Let's create a queue for our camera, something like:
var ourCameraQueue = DispatchQueue(label: "our-camera-queue-label")
Then use this queue throughout the code you shared, wrapping the body of each function like this:
func oneOfTheFuncs() {
ourCameraQueue.async {
...
}
}
and this should make things a tiny bit faster.
One note: you might want to initialize (or better, inject, but we'll come to that later, maybe...) ourCameraQueue as the first thing in the init method. After initialization, make sure to wrap all the remaining code in init into ourCameraQueue.async {} as well.
Also, skip the ViewController when wrapping, and then read about code injection; it will help you further along in this implementation.
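For example, here is a sketch of what this could look like in the CaptureManager above. The single biggest win is delivering sample buffers on the camera queue instead of DispatchQueue.main (which the current setSampleBufferDelegate call uses), and hopping back to the main queue only for the UI update:
let ourCameraQueue = DispatchQueue(label: "our-camera-queue-label")

// In init(): deliver frames on the camera queue, not the main queue.
output.setSampleBufferDelegate(self, queue: ourCameraQueue)

// In the delegate: the heavy buffer-to-UIImage conversion now runs off the
// main thread; only the UIKit-facing delegate call hops back to main.
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else {
        return
    }
    DispatchQueue.main.async {
        self.delegate?.processCapturedImage(image: outputImage)
    }
}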
I have an AVAsset, and I use AVAssetReaderAudioMixOutput to get CMSampleBuffers. I want to use these CMSampleBuffers to create AVAudioPCMBuffers that an AVAudioPlayerNode can play via scheduleBuffer.
How can I do this? Can anyone help?
This might help:
https://developer.apple.com/documentation/speech/sfspeechaudiobufferrecognitionrequest/1649395-appendaudiosamplebuffer
func appendAudioSampleBuffer(_ sampleBuffer: CMSampleBuffer)
extension AVAudioPCMBuffer {
static func create(from sampleBuffer: CMSampleBuffer) -> AVAudioPCMBuffer? {
guard let description: CMFormatDescription = CMSampleBufferGetFormatDescription(sampleBuffer),
let sampleRate: Float64 = description.audioStreamBasicDescription?.mSampleRate,
let channelsPerFrame: UInt32 = description.audioStreamBasicDescription?.mChannelsPerFrame /*,
let numberOfChannels = description.audioChannelLayout?.numberOfChannels */
else { return nil }
guard let blockBuffer: CMBlockBuffer = CMSampleBufferGetDataBuffer(sampleBuffer) else {
return nil
}
let samplesCount = CMSampleBufferGetNumSamples(sampleBuffer)
//let length: Int = CMBlockBufferGetDataLength(blockBuffer)
let audioFormat = AVAudioFormat(commonFormat: .pcmFormatFloat32, sampleRate: sampleRate, channels: AVAudioChannelCount(1), interleaved: false)
let buffer = AVAudioPCMBuffer(pcmFormat: audioFormat!, frameCapacity: AVAudioFrameCount(samplesCount))!
buffer.frameLength = buffer.frameCapacity
// GET BYTES
var dataPointer: UnsafeMutablePointer<Int8>?
CMBlockBufferGetDataPointer(blockBuffer, atOffset: 0, lengthAtOffsetOut: nil, totalLengthOut: nil, dataPointerOut: &dataPointer)
guard var channel: UnsafeMutablePointer<Float> = buffer.floatChannelData?[0],
let data = dataPointer else { return nil }
var data16 = UnsafeRawPointer(data).assumingMemoryBound(to: Int16.self)
for _ in 0...samplesCount - 1 {
channel.pointee = Float32(data16.pointee) / Float32(Int16.max)
channel += 1
for _ in 0...channelsPerFrame - 1 {
data16 += 1
}
}
return buffer
}
}
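With that extension in place, a minimal playback sketch could look like the following (the engine and node names are my own, and error handling is omitted; in a real app you would attach and connect the node once during setup):
import AVFoundation

let engine = AVAudioEngine()
let playerNode = AVAudioPlayerNode()

func schedule(sampleBuffer: CMSampleBuffer) throws {
    guard let pcmBuffer = AVAudioPCMBuffer.create(from: sampleBuffer) else { return }
    if !engine.isRunning {
        engine.attach(playerNode)
        engine.connect(playerNode, to: engine.mainMixerNode, format: pcmBuffer.format)
        try engine.start()
        playerNode.play()
    }
    // AVAudioPlayerNode queues scheduled buffers and plays them back in order.
    playerNode.scheduleBuffer(pcmBuffer, completionHandler: nil)
}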
I have already searched a lot and run many experiments, but I haven't found a proper solution.
I am trying to convert UIImages to video. I have an array of 250+ images, and I try to convert those images to a video at 60 FPS.
I put the render code inside autoreleasepool and added autoreleasepool in some other places as well, but it had no effect.
Code:
import AVFoundation
import UIKit
import Photos
import AVKit
var tempurl = ""
struct RenderSettings {
var width: CGFloat = UIScreen.main.bounds.width * UIScreen.main.scale
var height: CGFloat = UIScreen.main.bounds.width * UIScreen.main.scale
var fps: Int32 = 60 //frames per second
var avCodecKey = AVVideoCodecType.h264
var videoFilename = "ImageToVideo"
var videoFilenameExt = "mp4"
var size: CGSize {
return CGSize(width: width, height: height)
}
var outputURL: URL {
let fileManager = FileManager.default
if let tmpDirURL = try? fileManager.url(for: .cachesDirectory, in: .userDomainMask, appropriateFor: nil, create: true) {
return tmpDirURL.appendingPathComponent(videoFilename).appendingPathExtension(videoFilenameExt) as URL
}
fatalError("URLForDirectory() failed")
}
}
class VideoWriter {
let renderSettings: RenderSettings
var videoWriter: AVAssetWriter!
var videoWriterInput: AVAssetWriterInput!
var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor!
var isReadyForData: Bool {
return videoWriterInput?.isReadyForMoreMediaData ?? false
}
class func pixelBufferFromImage(image: UIImage, pixelBufferPool: CVPixelBufferPool, size: CGSize) -> CVPixelBuffer {
autoreleasepool {
var pixelBufferOut: CVPixelBuffer?
let status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool, &pixelBufferOut)
if status != kCVReturnSuccess {
fatalError("CVPixelBufferPoolCreatePixelBuffer() failed")
}
let pixelBuffer = pixelBufferOut!
CVPixelBufferLockBaseAddress(pixelBuffer, [])
let data = CVPixelBufferGetBaseAddress(pixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: data, width: Int(size.width), height: Int(size.height),
bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
context!.clear(CGRect(x: 0, y: 0, width: size.width, height: size.height))
let horizontalRatio = size.width / image.size.width
let verticalRatio = size.height / image.size.height
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize = CGSize(width: image.size.width * aspectRatio, height: image.size.height * aspectRatio)
let x = newSize.width < size.width ? (size.width - newSize.width) / 2 : 0
let y = newSize.height < size.height ? (size.height - newSize.height) / 2 : 0
context!.concatenate(CGAffineTransform.identity)
context!.draw(image.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
CVPixelBufferUnlockBaseAddress(pixelBuffer, [])
return pixelBuffer
}
}
init(renderSettings: RenderSettings) {
self.renderSettings = renderSettings
}
func start() {
let avOutputSettings: [String: AnyObject] = [
AVVideoCodecKey: renderSettings.avCodecKey as AnyObject,
AVVideoWidthKey: NSNumber(value: Float(renderSettings.width)),
AVVideoHeightKey: NSNumber(value: Float(renderSettings.height))
]
func createPixelBufferAdaptor() {
let sourcePixelBufferAttributesDictionary = [
kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32ARGB),
kCVPixelBufferWidthKey as String: NSNumber(value: Float(renderSettings.width)),
kCVPixelBufferHeightKey as String: NSNumber(value: Float(renderSettings.height))
]
pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
}
func createAssetWriter(outputURL: URL) -> AVAssetWriter {
guard let assetWriter = try? AVAssetWriter(outputURL: outputURL, fileType: AVFileType.mp4) else {
fatalError("AVAssetWriter() failed")
}
guard assetWriter.canApply(outputSettings: avOutputSettings, forMediaType: AVMediaType.video) else {
fatalError("canApplyOutputSettings() failed")
}
return assetWriter
}
videoWriter = createAssetWriter(outputURL: renderSettings.outputURL)
videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: avOutputSettings)
if videoWriter.canAdd(videoWriterInput) {
videoWriter.add(videoWriterInput)
}
else {
fatalError("canAddInput() returned false")
}
createPixelBufferAdaptor()
if videoWriter.startWriting() == false {
fatalError("startWriting() failed")
}
videoWriter.startSession(atSourceTime: CMTime.zero)
precondition(pixelBufferAdaptor.pixelBufferPool != nil, "nil pixelBufferPool")
}
func render(appendPixelBuffers: @escaping (VideoWriter)->Bool, completion: @escaping ()->Void) {
autoreleasepool {
precondition(videoWriter != nil, "Call start() to initialze the writer")
let queue = DispatchQueue(label: "mediaInputQueue")
videoWriterInput.requestMediaDataWhenReady(on: queue) {
let isFinished = appendPixelBuffers(self)
if isFinished {
self.videoWriterInput.markAsFinished()
self.videoWriter.finishWriting() {
DispatchQueue.main.async {
completion()
}
}
}
}
}
}
func addImage(image: UIImage, withPresentationTime presentationTime: CMTime) -> Bool {
autoreleasepool {
precondition(pixelBufferAdaptor != nil, "Call start() to initialze the writer")
let pixelBuffer = VideoWriter.pixelBufferFromImage(image: image, pixelBufferPool: pixelBufferAdaptor.pixelBufferPool!, size: renderSettings.size)
return pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
}
}
}
class ImageAnimator {
static let kTimescale: Int32 = 600
let settings: RenderSettings
let videoWriter: VideoWriter
var images: [UIImage]!
var frameNum = 0
class func removeFileAtURL(fileURL: URL) {
do {
try FileManager.default.removeItem(atPath: fileURL.path)
}
catch _ as NSError {
//
}
}
init(renderSettings: RenderSettings,imagearr: [UIImage]) {
settings = renderSettings
videoWriter = VideoWriter(renderSettings: settings)
images = imagearr
}
func render(completion: @escaping ()->Void) {
// The VideoWriter will fail if a file exists at the URL, so clear it out first.
ImageAnimator.removeFileAtURL(fileURL: settings.outputURL)
videoWriter.start()
videoWriter.render(appendPixelBuffers: appendPixelBuffers) {
let s: String = self.settings.outputURL.path
tempurl = s
completion()
}
}
func appendPixelBuffers(writer: VideoWriter) -> Bool {
let frameDuration = CMTimeMake(value: Int64(ImageAnimator.kTimescale / settings.fps), timescale: ImageAnimator.kTimescale)
while !images.isEmpty {
if writer.isReadyForData == false {
return false
}
let image = images.removeFirst()
let presentationTime = CMTimeMultiply(frameDuration, multiplier: Int32(frameNum))
let success = videoWriter.addImage(image: image, withPresentationTime: presentationTime)
if success == false {
fatalError("addImage() failed")
}
frameNum=frameNum+1
}
return true
}
}
Memory usage: (screenshot omitted)
I get the images using this code:
@objc public class Recorder: NSObject {
public var view : UIView?
var displayLink : CADisplayLink?
var referenceDate : NSDate?
var imageArray = [UIImage]()
public func start() {
self.imageArray.removeAll()
if (view == nil) {
NSException(name: NSExceptionName(rawValue: "No view set"), reason: "You must set a view before calling start.", userInfo: nil).raise()
}else {
displayLink = CADisplayLink(target: self, selector: #selector(self.handleDisplayLink(displayLink:)))
displayLink!.add(to: RunLoop.main, forMode: RunLoop.Mode.common)
referenceDate = NSDate()
}
}
@objc func handleDisplayLink(displayLink : CADisplayLink) {
if (view != nil) {
createImageFromView(captureView: view!)
}
}
func createImageFromView(captureView : UIView) {
UIGraphicsBeginImageContextWithOptions(captureView.bounds.size, false, 0)
captureView.drawHierarchy(in: captureView.bounds, afterScreenUpdates: false)
let image = UIGraphicsGetImageFromCurrentImageContext();
if let img = image {
self.imageArray.append(img)
}
UIGraphicsEndImageContext();
}
public func stop(completion: @escaping (_ saveURL: String) -> Void) {
displayLink?.invalidate()
let seconds = referenceDate?.timeIntervalSinceNow
if (seconds != nil) {
print("Image Count : \(self.imageArray.count)")
DispatchQueue.main.async {
let settings = RenderSettings()
let imageAnimator = ImageAnimator(renderSettings: settings,imagearr: self.imageArray)
imageAnimator.render() {
let u: String = tempurl
completion(u)
//self.saveVideoInPhotos()
}
}
}
}
}
Thanks in advance.
I want to make a custom video recorder in my app.
For now I can record the video and save it, but I want to apply filters to the video while it is recording and save the filtered video to the photos album. This is my code to record the video and save it:
let captureSession = AVCaptureSession()
let fileOutput = AVCaptureMovieFileOutput()
func initVideoRecording() {
do {
try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryRecord)
try AVAudioSession.sharedInstance().setActive(true)
}catch {
print("error in audio")
}
let session = AVCaptureSession()
session.beginConfiguration()
session.sessionPreset = AVCaptureSessionPresetMedium
let videoLayer = AVCaptureVideoPreviewLayer(session: session)
videoLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
videoLayer.frame = myImage.bounds
myImage.layer.addSublayer(videoLayer)
let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
let audio = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeAudio)
do
{
let input = try AVCaptureDeviceInput(device: backCamera)
let audioInput = try AVCaptureDeviceInput(device: audio)
session.addInput(input)
session.addInput(audioInput)
}
catch
{
print("can't access camera")
return
}
session.addOutput(fileOutput)
session.commitConfiguration()
session.startRunning()
}
@IBAction func recordFunc() {
if fileOutput.recording {
myButton.setTitle("record", forState: .Normal)
fileOutput.stopRecording()
}else{
let fileUrl = NSURL(fileURLWithPath: NSTemporaryDirectory()).URLByAppendingPathComponent("\(getCurrentDate())-capturedvideo.mp4")
fileOutput.startRecordingToOutputFileURL(fileUrl, recordingDelegate: self)
myButton.setTitle("stop", forState: .Normal)
}
}
func captureOutput(captureOutput: AVCaptureFileOutput!, didFinishRecordingToOutputFileAtURL outputFileURL: NSURL!, fromConnections connections: [AnyObject]!, error: NSError!) {
//to save record video to photos album
UISaveVideoAtPathToSavedPhotosAlbum(outputFileURL.path!, self, "video:didFinishSavingWithError:contextInfo:", nil)
}
I tried to use AVCaptureVideoDataOutput, and in its delegate I use this code:
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
connection.videoOrientation = AVCaptureVideoOrientation.Portrait
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let cameraImage = CIImage(CVPixelBuffer: pixelBuffer!)
let comicEffect = CIFilter(name: "CIComicEffect")
comicEffect!.setValue(cameraImage, forKey: kCIInputImageKey)
let filteredImage = UIImage(CIImage: comicEffect!.valueForKey(kCIOutputImageKey) as! CIImage!)
dispatch_async(dispatch_get_main_queue())
{
self.myImage.image = filteredImage
}
}
With this code it just displays the filtered image, but it does not record it.
=======================/ This is the solution to my question \================
Please note that this code uses Swift 2 and Xcode 7.3.
let captureSession = AVCaptureSession()
let videoOutput = AVCaptureVideoDataOutput()
let audioOutput = AVCaptureAudioDataOutput()
var adapter:AVAssetWriterInputPixelBufferAdaptor!
var record = false
var videoWriter:AVAssetWriter!
var writerInput:AVAssetWriterInput!
var audioWriterInput:AVAssetWriterInput!
var lastPath = ""
var starTime = kCMTimeZero
var outputSize = CGSizeMake(UIScreen.mainScreen().bounds.width, UIScreen.mainScreen().bounds.height)
override func viewDidAppear(animated: Bool) {
super.viewDidAppear(animated)
video()
}
func video() {
do {
try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryRecord)
try AVAudioSession.sharedInstance().setActive(true)
}catch {
print("error in audio")
}
captureSession.beginConfiguration()
captureSession.sessionPreset = AVCaptureSessionPresetMedium
let videoLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
//videoLayer.frame = myImage.bounds
//myImage.layer.addSublayer(videoLayer)
view.layer.addSublayer(videoLayer)
let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
let audio = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeAudio)
do
{
let input = try AVCaptureDeviceInput(device: backCamera)
let audioInput = try AVCaptureDeviceInput(device: audio)
captureSession.addInput(input)
captureSession.addInput(audioInput)
}
catch
{
print("can't access camera")
return
}
let queue = dispatch_queue_create("sample buffer delegate", DISPATCH_QUEUE_SERIAL)
videoOutput.setSampleBufferDelegate(self,queue: queue)
audioOutput.setSampleBufferDelegate(self, queue: queue)
captureSession.addOutput(videoOutput)
captureSession.addOutput(audioOutput)
captureSession.commitConfiguration()
captureSession.startRunning()
}
@IBAction func recordFunc() {
if record {
myButton.setTitle("record", forState: .Normal)
record = false
self.writerInput.markAsFinished()
audioWriterInput.markAsFinished()
self.videoWriter.finishWritingWithCompletionHandler { () -> Void in
print("FINISHED!!!!!")
UISaveVideoAtPathToSavedPhotosAlbum(self.lastPath, self, "video:didFinishSavingWithError:contextInfo:", nil)
}
}else{
let fileUrl = NSURL(fileURLWithPath: NSTemporaryDirectory()).URLByAppendingPathComponent("\(getCurrentDate())-capturedvideo.MP4")
lastPath = fileUrl.path!
videoWriter = try? AVAssetWriter(URL: fileUrl, fileType: AVFileTypeMPEG4)
let outputSettings = [AVVideoCodecKey : AVVideoCodecH264, AVVideoWidthKey : NSNumber(float: Float(outputSize.width)), AVVideoHeightKey : NSNumber(float: Float(outputSize.height))]
writerInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
writerInput.expectsMediaDataInRealTime = true
audioWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: DejalActivityView.getAudioDictionary() as? [String:AnyObject])
videoWriter.addInput(writerInput)
videoWriter.addInput(audioWriterInput)
adapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: writerInput, sourcePixelBufferAttributes: DejalActivityView.getAdapterDictionary() as? [String:AnyObject])
videoWriter.startWriting()
videoWriter.startSessionAtSourceTime(starTime)
record = true
myButton.setTitle("stop", forState: .Normal)
}
}
func getCurrentDate()->String{
let format = NSDateFormatter()
format.dateFormat = "dd-MM-yyyy hh:mm:ss"
format.locale = NSLocale(localeIdentifier: "en")
let date = format.stringFromDate(NSDate())
return date
}
extension newCustomCameraViewController:AVCaptureVideoDataOutputSampleBufferDelegate,AVCaptureAudioDataOutputSampleBufferDelegate{
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
starTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
if captureOutput == videoOutput {
connection.videoOrientation = AVCaptureVideoOrientation.Portrait
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let cameraImage = CIImage(CVPixelBuffer: pixelBuffer!)
let comicEffect = CIFilter(name: "CIHexagonalPixellate")
comicEffect!.setValue(cameraImage, forKey: kCIInputImageKey)
let filteredImage = UIImage(CIImage: comicEffect!.valueForKey(kCIOutputImageKey) as! CIImage!)
//let filteredImage = UIImage(CIImage: cameraImage)
if self.record == true{
dispatch_sync(dispatch_queue_create("sample buffer append", DISPATCH_QUEUE_SERIAL), {
if self.record == true{
if self.writerInput.readyForMoreMediaData {
let bo = self.adapter.appendPixelBuffer(DejalActivityView.pixelBufferFromCGImage(self.convertCIImageToCGImage(comicEffect!.valueForKey(kCIOutputImageKey) as! CIImage!)).takeRetainedValue() as CVPixelBufferRef, withPresentationTime: self.starTime)
print("video is \(bo)")
}
}
})
}
dispatch_async(dispatch_get_main_queue())
{
self.myImage.image = filteredImage
}
}else if captureOutput == audioOutput{
if self.record == true{
let bo = audioWriterInput.appendSampleBuffer(sampleBuffer)
print("audio is \(bo)")
}
}
}
func convertCIImageToCGImage(inputImage: CIImage) -> CGImage! {
let context:CIContext? = CIContext(options: nil)
if context != nil {
return context!.createCGImage(inputImage, fromRect: inputImage.extent)
}
return nil
}
func video(videoPath: NSString, didFinishSavingWithError error: NSError?, contextInfo info: AnyObject) {
var title = "Success"
var message = "Video was saved"
if let saveError = error {
title = "Error"
message = "Video failed to save"
}
let alert = UIAlertController(title: title, message: message, preferredStyle: .Alert)
alert.addAction(UIAlertAction(title: "OK", style: UIAlertActionStyle.Cancel, handler: nil))
presentViewController(alert, animated: true, completion: nil)
}
}
These methods are from DejalActivityView and are in Objective-C; I couldn't convert them to Swift, so if anyone can convert them, please edit my code and do so:
+ (CVPixelBufferRef )pixelBufferFromCGImage:(CGImageRef)image size:(CGSize)size
{
NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithBool:YES], kCVPixelBufferCGImageCompatibilityKey,
[NSNumber numberWithBool:YES], kCVPixelBufferCGBitmapContextCompatibilityKey, nil];
CVPixelBufferRef pxbuffer = NULL;
CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, size.width, size.height, kCVPixelFormatType_32ARGB, (__bridge CFDictionaryRef) options, &pxbuffer);
// CVReturn status = CVPixelBufferPoolCreatePixelBuffer(NULL, adaptor.pixelBufferPool, &pxbuffer);
NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
CVPixelBufferLockBaseAddress(pxbuffer, 0);
void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
NSParameterAssert(pxdata != NULL);
CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(pxdata, size.width, size.height, 8, 4*size.width, rgbColorSpace, kCGImageAlphaPremultipliedFirst);
NSParameterAssert(context);
CGContextDrawImage(context, CGRectMake(0, 0, CGImageGetWidth(image), CGImageGetHeight(image)), image);
CGColorSpaceRelease(rgbColorSpace);
CGContextRelease(context);
CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
return pxbuffer;
}
+(NSDictionary *)getAdapterDictionary{
NSDictionary *sourcePixelBufferAttributesDictionary = [NSDictionary dictionaryWithObjectsAndKeys:
[NSNumber numberWithInt:kCVPixelFormatType_32ARGB], kCVPixelBufferPixelFormatTypeKey, nil];
return sourcePixelBufferAttributesDictionary;
}
+(NSDictionary *) getAudioDictionary{
AudioChannelLayout acl;
bzero( &acl, sizeof(acl));
acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono;
NSDictionary* audioOutputSettings = nil;
audioOutputSettings = [ NSDictionary dictionaryWithObjectsAndKeys:
[ NSNumber numberWithInt: kAudioFormatMPEG4AAC ], AVFormatIDKey,
//[ NSNumber numberWithInt: 16 ], AVEncoderBitDepthHintKey,
[ NSNumber numberWithFloat: 44100.0 ], AVSampleRateKey,
[ NSNumber numberWithInt: 1 ], AVNumberOfChannelsKey,
[ NSData dataWithBytes: &acl length: sizeof( acl ) ], AVChannelLayoutKey,
nil ];
// NSDictionary* audioOutputSettings = nil;
// audioOutputSettings = [ NSDictionary dictionaryWithObjectsAndKeys:
// [ NSNumber numberWithInt: kAudioFormatMPEG4AAC_HE_V2 ], AVFormatIDKey,
// [ NSNumber numberWithFloat: 44100.0], AVSampleRateKey,
// [ NSData dataWithBytes: &acl length: sizeof( acl ) ], AVChannelLayoutKey,
// nil ];
return audioOutputSettings;
}
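Since the question asks for a Swift version of those DejalActivityView helpers, here is a rough translation sketch. It uses modern Swift syntax (the thread itself is Swift 2, so some adaptation may be needed), and it substitutes CVPixelBufferGetBytesPerRow for the hard-coded 4 * width:
import AVFoundation
import CoreVideo

func pixelBufferFromCGImage(image: CGImage, size: CGSize) -> CVPixelBuffer? {
    let options: [String: Any] = [
        kCVPixelBufferCGImageCompatibilityKey as String: true,
        kCVPixelBufferCGBitmapContextCompatibilityKey as String: true
    ]
    var pxbuffer: CVPixelBuffer?
    let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(size.width), Int(size.height),
                                     kCVPixelFormatType_32ARGB, options as CFDictionary, &pxbuffer)
    guard status == kCVReturnSuccess, let buffer = pxbuffer else { return nil }

    CVPixelBufferLockBaseAddress(buffer, [])
    defer { CVPixelBufferUnlockBaseAddress(buffer, []) }

    guard let context = CGContext(data: CVPixelBufferGetBaseAddress(buffer),
                                  width: Int(size.width), height: Int(size.height),
                                  bitsPerComponent: 8,
                                  bytesPerRow: CVPixelBufferGetBytesPerRow(buffer),
                                  space: CGColorSpaceCreateDeviceRGB(),
                                  bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue) else { return nil }
    context.draw(image, in: CGRect(x: 0, y: 0, width: CGFloat(image.width), height: CGFloat(image.height)))
    return buffer
}

func getAdapterDictionary() -> [String: Any] {
    return [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32ARGB]
}

func getAudioDictionary() -> [String: Any] {
    var acl = AudioChannelLayout() // zero-initialized, as bzero did in the Objective-C version
    acl.mChannelLayoutTag = kAudioChannelLayoutTag_Mono
    return [
        AVFormatIDKey: kAudioFormatMPEG4AAC,
        AVSampleRateKey: 44100.0,
        AVNumberOfChannelsKey: 1,
        AVChannelLayoutKey: Data(bytes: &acl, count: MemoryLayout<AudioChannelLayout>.size)
    ]
}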
You need to add an AVAssetWriter:
var videoRecorder: AVAssetWriter?
Then in your delegate callback:
let timeStamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
if videoRecorder?.status == .Unknown {
startRecordingTime = timeStamp
videoRecorder?.startWriting()
videoRecorder?.startSessionAtSourceTime(timeStamp)
}
You will need to configure the recorder for each recording you wish to do, and you will also need to add your inputs to the recorder.
You may start to encounter issues, as you don't seem to have any queues set up yet (which you will need). For reference, this GitHub repo is a very good resource:
https://github.com/waleedka/rosywriterswift
EDIT: Additional Info
You need to init() the writer, then add AVAssetWriterInput inputs for video and audio, as in the sketch below.
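To make that concrete, a minimal setup sketch (Swift 2 era syntax to match the thread; the dimensions and audio settings are placeholder values, not taken from the question):
func setUpVideoRecorder(outputURL: NSURL) throws {
    videoRecorder = try AVAssetWriter(URL: outputURL, fileType: AVFileTypeMPEG4)

    let videoSettings: [String: AnyObject] = [
        AVVideoCodecKey: AVVideoCodecH264,
        AVVideoWidthKey: 1280,
        AVVideoHeightKey: 720
    ]
    let videoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
    videoInput.expectsMediaDataInRealTime = true // important for live capture

    let audioSettings: [String: AnyObject] = [
        AVFormatIDKey: Int(kAudioFormatMPEG4AAC),
        AVSampleRateKey: 44100,
        AVNumberOfChannelsKey: 1
    ]
    let audioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioSettings)
    audioInput.expectsMediaDataInRealTime = true

    if videoRecorder?.canAddInput(videoInput) == true { videoRecorder?.addInput(videoInput) }
    if videoRecorder?.canAddInput(audioInput) == true { videoRecorder?.addInput(audioInput) }
}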
I've found export [UIImage] as movie, but it's all in Objective-C and I can't figure it out for Swift.
I need to create a video from [UIImage].
I'm working from Zoul's answer at the above link, part 1) Wire the writer.
So far I have:
let paths = NSFileManager.defaultManager().URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
let documentsURL = paths[0] as! NSURL
let videoWriter:AVAssetWriter = AVAssetWriter(URL: documentsURL, fileType: AVFileTypeQuickTimeMovie, error: nil)
var videoSettings: NSDictionary = NSDictionary(
I can't figure out the correct Swift version of his
NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
AVVideoCodecH264, AVVideoCodecKey,
[NSNumber numberWithInt:640], AVVideoWidthKey,
[NSNumber numberWithInt:480], AVVideoHeightKey,
nil];
I converted the Objective-C code posted by @Cameron E to Swift 3, and it's working. The answer's link: @Cameron E's CEMovieMaker.
The following is the CXEImagesToVideo class:
//
// CXEImagesToVideo.swift
// VideoAPPTest
//
// Created by Wulei on 16/12/14.
// Copyright © 2016 wulei. All rights reserved.
//
import Foundation
import AVFoundation
import UIKit
typealias CXEMovieMakerCompletion = (URL) -> Void
typealias CXEMovieMakerUIImageExtractor = (AnyObject) -> UIImage?
public class CXEImagesToVideo: NSObject{
var assetWriter:AVAssetWriter!
var writeInput:AVAssetWriterInput!
var bufferAdapter:AVAssetWriterInputPixelBufferAdaptor!
var videoSettings:[String : Any]!
var frameTime:CMTime!
var fileURL:URL!
var completionBlock: CXEMovieMakerCompletion?
var movieMakerUIImageExtractor:CXEMovieMakerUIImageExtractor?
public class func videoSettings(codec:String, width:Int, height:Int) -> [String: Any]{
if(Int(width) % 16 != 0){
print("warning: video settings width must be divisible by 16")
}
let videoSettings:[String: Any] = [AVVideoCodecKey: AVVideoCodecH264,
AVVideoWidthKey: width,
AVVideoHeightKey: height]
return videoSettings
}
public init(videoSettings: [String: Any]) {
super.init()
let paths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
let tempPath = paths[0] + "/exportvideo.mp4"
if(FileManager.default.fileExists(atPath: tempPath)){
guard (try? FileManager.default.removeItem(atPath: tempPath)) != nil else {
print("remove path failed")
return
}
}
self.fileURL = URL(fileURLWithPath: tempPath)
self.assetWriter = try! AVAssetWriter(url: self.fileURL, fileType: AVFileTypeQuickTimeMovie)
self.videoSettings = videoSettings
self.writeInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
assert(self.assetWriter.canAdd(self.writeInput), "add failed")
self.assetWriter.add(self.writeInput)
let bufferAttributes:[String: Any] = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32ARGB)]
self.bufferAdapter = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: self.writeInput, sourcePixelBufferAttributes: bufferAttributes)
self.frameTime = CMTimeMake(1, 10)
}
func createMovieFrom(urls: [URL], withCompletion: @escaping CXEMovieMakerCompletion){
self.createMovieFromSource(images: urls as [AnyObject], extractor:{(inputObject:AnyObject) ->UIImage? in
return UIImage(data: try! Data(contentsOf: inputObject as! URL))}, withCompletion: withCompletion)
}
func createMovieFrom(images: [UIImage], withCompletion: @escaping CXEMovieMakerCompletion){
self.createMovieFromSource(images: images, extractor: {(inputObject:AnyObject) -> UIImage? in
return inputObject as? UIImage}, withCompletion: withCompletion)
}
func createMovieFromSource(images: [AnyObject], extractor: @escaping CXEMovieMakerUIImageExtractor, withCompletion: @escaping CXEMovieMakerCompletion){
self.completionBlock = withCompletion
self.assetWriter.startWriting()
self.assetWriter.startSession(atSourceTime: kCMTimeZero)
let mediaInputQueue = DispatchQueue(label: "mediaInputQueue")
var i = 0
let frameNumber = images.count
self.writeInput.requestMediaDataWhenReady(on: mediaInputQueue){
while(true){
if(i >= frameNumber){
break
}
if (self.writeInput.isReadyForMoreMediaData){
var sampleBuffer:CVPixelBuffer?
autoreleasepool{
if let img = extractor(images[i]){
sampleBuffer = self.newPixelBufferFrom(cgImage: img.cgImage!)
}else{
// Skip frames that could not be extracted instead of force-unwrapping nil.
i += 1
print("Warning: could not extract one of the frames")
}
}
if (sampleBuffer != nil){
if(i == 0){
self.bufferAdapter.append(sampleBuffer!, withPresentationTime: kCMTimeZero)
}else{
let value = i - 1
let lastTime = CMTimeMake(Int64(value), self.frameTime.timescale)
let presentTime = CMTimeAdd(lastTime, self.frameTime)
self.bufferAdapter.append(sampleBuffer!, withPresentationTime: presentTime)
}
i = i + 1
}
}
}
self.writeInput.markAsFinished()
self.assetWriter.finishWriting {
DispatchQueue.main.sync {
self.completionBlock!(self.fileURL)
}
}
}
}
func newPixelBufferFrom(cgImage:CGImage) -> CVPixelBuffer?{
let options:[String: Any] = [kCVPixelBufferCGImageCompatibilityKey as String: true, kCVPixelBufferCGBitmapContextCompatibilityKey as String: true]
var pxbuffer:CVPixelBuffer?
let frameWidth = self.videoSettings[AVVideoWidthKey] as! Int
let frameHeight = self.videoSettings[AVVideoHeightKey] as! Int
let status = CVPixelBufferCreate(kCFAllocatorDefault, frameWidth, frameHeight, kCVPixelFormatType_32ARGB, options as CFDictionary?, &pxbuffer)
assert(status == kCVReturnSuccess && pxbuffer != nil, "newPixelBuffer failed")
CVPixelBufferLockBaseAddress(pxbuffer!, CVPixelBufferLockFlags(rawValue: 0))
let pxdata = CVPixelBufferGetBaseAddress(pxbuffer!)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: pxdata, width: frameWidth, height: frameHeight, bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pxbuffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)
assert(context != nil, "context is nil")
context!.concatenate(CGAffineTransform.identity)
context!.draw(cgImage, in: CGRect(x: 0, y: 0, width: cgImage.width, height: cgImage.height))
CVPixelBufferUnlockBaseAddress(pxbuffer!, CVPixelBufferLockFlags(rawValue: 0))
return pxbuffer
}
}
Usage:
var uiImages = [UIImage]()
/** add image to uiImages */
let settings = CXEImagesToVideo.videoSettings(codec: AVVideoCodecH264, width: (uiImages[0].cgImage?.width)!, height: (uiImages[0].cgImage?.height)!)
let movieMaker = CXEImagesToVideo(videoSettings: settings)
movieMaker.createMovieFrom(images: uiImages){ (fileURL:URL) in
let video = AVAsset(url: fileURL)
let playerItem = AVPlayerItem(asset: video)
let avPlayer = AVPlayer(playerItem: playerItem)
let playerLayer = AVPlayerLayer(player: avPlayer)
playerLayer.frame = CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.width * 3.0 / 4.0)
self.view.layer.addSublayer(playerLayer)
avPlayer.play()
}
Export or play the video with fileURL.
There are two variants, async and sync. Gist: https://gist.github.com/Willib/b97b08d8d877ca5d875ff14abb4c3f1a
Constructing a Dictionary literal is straightforward:
import AVFoundation
let videoSettings = [
AVVideoCodecKey: AVVideoCodecH264,
AVVideoWidthKey: 640,
AVVideoHeightKey: 480
]
As for everything else, I would encourage you to read through Apple's The Swift Programming Language to establish fundamentals first, rather than relying on SO or tutorials that happen to cover what you want to do. "Teach a man to fish", as they say.
**For Swift 4.2:** generate a video from images and save it manually. The images come from the previous controller.
// VideoMakerViewController.swift
// VideoMaker
//Created by ISHA PATEL on 05/10/18.
// Copyright © 2018 Isha Patel. All rights reserved.
import AVFoundation
import UIKit
import Photos
import AVKit
var tempurl=""
class VideoMakerViewController: UIViewController {
var images:[UIImage]=[]
@IBOutlet weak var videoview: UIView!
override func viewDidLoad() {
super.viewDidLoad()
DispatchQueue.main.async {
let settings = RenderSettings()
let imageAnimator = ImageAnimator(renderSettings: settings,imagearr: self.images)
imageAnimator.render() {
self.displayVideo()
}
}
}
func displayVideo()
{
let u:String=tempurl
let player = AVPlayer(url: URL(fileURLWithPath: u))
let playerController = AVPlayerViewController()
playerController.player = player
self.addChild(playerController)
videoview.addSubview(playerController.view)
playerController.view.frame.size=(videoview.frame.size)
playerController.view.contentMode = .scaleAspectFit
playerController.view.backgroundColor=UIColor.clear
videoview.backgroundColor=UIColor.clear
player.play()
}
@IBAction func save(_ sender: UIBarButtonItem) {
PHPhotoLibrary.requestAuthorization { status in
guard status == .authorized else { return }
let u:String=tempurl
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: URL(fileURLWithPath: u) as URL)
}) { success, error in
if !success {
print("Could not save video to photo library:", error!)
}
}
}
}
}
struct RenderSettings {
var width: CGFloat = 1500
var height: CGFloat = 844
var fps: Int32 = 2 // 2 frames per second
var avCodecKey = AVVideoCodecType.h264
var videoFilename = "renderExportVideo"
var videoFilenameExt = "mp4"
var size: CGSize {
return CGSize(width: width, height: height)
}
var outputURL: NSURL {
let fileManager = FileManager.default
if let tmpDirURL = try? fileManager.url(for: .cachesDirectory, in: .userDomainMask, appropriateFor: nil, create: true) {
return tmpDirURL.appendingPathComponent(videoFilename).appendingPathExtension(videoFilenameExt) as NSURL
}
fatalError("URLForDirectory() failed")
}
}
class VideoWriter {
let renderSettings: RenderSettings
var videoWriter: AVAssetWriter!
var videoWriterInput: AVAssetWriterInput!
var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor!
var isReadyForData: Bool {
return videoWriterInput?.isReadyForMoreMediaData ?? false
}
class func pixelBufferFromImage(image: UIImage, pixelBufferPool: CVPixelBufferPool, size: CGSize) -> CVPixelBuffer {
var pixelBufferOut: CVPixelBuffer?
let status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool, &pixelBufferOut)
if status != kCVReturnSuccess {
fatalError("CVPixelBufferPoolCreatePixelBuffer() failed")
}
let pixelBuffer = pixelBufferOut!
CVPixelBufferLockBaseAddress(pixelBuffer, [])
let data = CVPixelBufferGetBaseAddress(pixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: data, width: Int(size.width), height: Int(size.height),
bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
context!.clear(CGRect(x: 0, y: 0, width: size.width, height: size.height))
let horizontalRatio = size.width / image.size.width
let verticalRatio = size.height / image.size.height
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize = CGSize(width: image.size.width * aspectRatio, height: image.size.height * aspectRatio)
let x = newSize.width < size.width ? (size.width - newSize.width) / 2 : 0
let y = newSize.height < size.height ? (size.height - newSize.height) / 2 : 0
context!.concatenate(CGAffineTransform.identity)
context!.draw(image.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
CVPixelBufferUnlockBaseAddress(pixelBuffer, [])
return pixelBuffer
}
init(renderSettings: RenderSettings) {
self.renderSettings = renderSettings
}
func start() {
let avOutputSettings: [String: AnyObject] = [
AVVideoCodecKey: renderSettings.avCodecKey as AnyObject,
AVVideoWidthKey: NSNumber(value: Float(renderSettings.width)),
AVVideoHeightKey: NSNumber(value: Float(renderSettings.height))
]
func createPixelBufferAdaptor() {
let sourcePixelBufferAttributesDictionary = [
kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32ARGB),
kCVPixelBufferWidthKey as String: NSNumber(value: Float(renderSettings.width)),
kCVPixelBufferHeightKey as String: NSNumber(value: Float(renderSettings.height))
]
pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
}
func createAssetWriter(outputURL: NSURL) -> AVAssetWriter {
guard let assetWriter = try? AVAssetWriter(outputURL: outputURL as URL, fileType: AVFileType.mp4) else {
fatalError("AVAssetWriter() failed")
}
guard assetWriter.canApply(outputSettings: avOutputSettings, forMediaType: AVMediaType.video) else {
fatalError("canApplyOutputSettings() failed")
}
return assetWriter
}
videoWriter = createAssetWriter(outputURL: renderSettings.outputURL)
videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: avOutputSettings)
if videoWriter.canAdd(videoWriterInput) {
videoWriter.add(videoWriterInput)
}
else {
fatalError("canAddInput() returned false")
}
createPixelBufferAdaptor()
if videoWriter.startWriting() == false {
fatalError("startWriting() failed")
}
videoWriter.startSession(atSourceTime: CMTime.zero)
precondition(pixelBufferAdaptor.pixelBufferPool != nil, "nil pixelBufferPool")
}
func render(appendPixelBuffers: @escaping (VideoWriter)->Bool, completion: @escaping ()->Void) {
precondition(videoWriter != nil, "Call start() to initialze the writer")
let queue = DispatchQueue(label: "mediaInputQueue")
videoWriterInput.requestMediaDataWhenReady(on: queue) {
let isFinished = appendPixelBuffers(self)
if isFinished {
self.videoWriterInput.markAsFinished()
self.videoWriter.finishWriting() {
DispatchQueue.main.async {
completion()
}
}
}
else {
}
}
}
func addImage(image: UIImage, withPresentationTime presentationTime: CMTime) -> Bool {
precondition(pixelBufferAdaptor != nil, "Call start() to initialze the writer")
let pixelBuffer = VideoWriter.pixelBufferFromImage(image: image, pixelBufferPool: pixelBufferAdaptor.pixelBufferPool!, size: renderSettings.size)
return pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
}
}
class ImageAnimator{
static let kTimescale: Int32 = 600
let settings: RenderSettings
let videoWriter: VideoWriter
var images: [UIImage]!
var frameNum = 0
class func removeFileAtURL(fileURL: NSURL) {
do {
try FileManager.default.removeItem(atPath: fileURL.path!)
}
catch _ as NSError {
//
}
}
init(renderSettings: RenderSettings,imagearr: [UIImage]) {
settings = renderSettings
videoWriter = VideoWriter(renderSettings: settings)
images = imagearr
}
func render(completion: @escaping ()->Void) {
// The VideoWriter will fail if a file exists at the URL, so clear it out first.
ImageAnimator.removeFileAtURL(fileURL: settings.outputURL)
videoWriter.start()
videoWriter.render(appendPixelBuffers: appendPixelBuffers) {
let s:String=self.settings.outputURL.path!
tempurl=s
completion()
}
}
func appendPixelBuffers(writer: VideoWriter) -> Bool {
let frameDuration = CMTimeMake(value: Int64(ImageAnimator.kTimescale / settings.fps), timescale: ImageAnimator.kTimescale)
while !images.isEmpty {
if writer.isReadyForData == false {
return false
}
let image = images.removeFirst()
let presentationTime = CMTimeMultiply(frameDuration, multiplier: Int32(frameNum))
let success = videoWriter.addImage(image: image, withPresentationTime: presentationTime)
if success == false {
fatalError("addImage() failed")
}
frameNum=frameNum+1
}
return true
}
}
For Swift version 5:
I've made some minor changes to this very good answer. Simply copy and paste, and don't forget to pass the framesArray argument to the buildVideoFromImageArray function.
func saveVideoToLibrary(videoURL: URL) {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: videoURL)
}) { saved, error in
if let error = error {
print("Error saving video to librayr: \(error.localizedDescription)")
}
if saved {
print("Video save to library")
}
}
}
func buildVideoFromImageArray(framesArray:[UIImage]) {
var images = framesArray
let outputSize = CGSize(width:images[0].size.width, height: images[0].size.height)
let fileManager = FileManager.default
let urls = fileManager.urls(for: .cachesDirectory, in: .userDomainMask)
guard let documentDirectory = urls.first else {
fatalError("documentDir Error")
}
let videoOutputURL = documentDirectory.appendingPathComponent("OutputVideo.mp4")
if FileManager.default.fileExists(atPath: videoOutputURL.path) {
do {
try FileManager.default.removeItem(atPath: videoOutputURL.path)
} catch {
fatalError("Unable to delete file: \(error) : \(#function).")
}
}
guard let videoWriter = try? AVAssetWriter(outputURL: videoOutputURL, fileType: AVFileType.mp4) else {
fatalError("AVAssetWriter error")
}
let outputSettings = [AVVideoCodecKey : AVVideoCodecType.h264, AVVideoWidthKey : NSNumber(value: Float(outputSize.width)), AVVideoHeightKey : NSNumber(value: Float(outputSize.height))] as [String : Any]
guard videoWriter.canApply(outputSettings: outputSettings, forMediaType: AVMediaType.video) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: outputSettings)
let sourcePixelBufferAttributesDictionary = [
kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32ARGB),
kCVPixelBufferWidthKey as String: NSNumber(value: Float(outputSize.width)),
kCVPixelBufferHeightKey as String: NSNumber(value: Float(outputSize.height))
]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if videoWriter.canAdd(videoWriterInput) {
videoWriter.add(videoWriterInput)
}
if videoWriter.startWriting() {
videoWriter.startSession(atSourceTime: CMTime.zero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = DispatchQueue(label: "mediaInputQueue")
videoWriterInput.requestMediaDataWhenReady(on: media_queue, using: { () -> Void in
let fps: Int32 = 30
let frameDuration = CMTimeMake(value: 1, timescale: fps)
var frameCount: Int64 = 0
var appendSucceeded = true
while (!images.isEmpty) {
if (videoWriterInput.isReadyForMoreMediaData) {
let nextPhoto = images.remove(at: 0)
let lastFrameTime = CMTimeMake(value: frameCount, timescale: fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer, status == 0 {
let managedPixelBuffer = pixelBuffer
CVPixelBufferLockBaseAddress(managedPixelBuffer, [])
let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: data, width: Int(outputSize.width), height: Int(outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(managedPixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
context?.clear(CGRect(x: 0, y: 0, width: outputSize.width, height: outputSize.height))
let horizontalRatio = CGFloat(outputSize.width) / nextPhoto.size.width
let verticalRatio = CGFloat(outputSize.height) / nextPhoto.size.height
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize = CGSize(width: nextPhoto.size.width * aspectRatio, height: nextPhoto.size.height * aspectRatio)
let x = newSize.width < outputSize.width ? (outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < outputSize.height ? (outputSize.height - newSize.height) / 2 : 0
context?.draw(nextPhoto.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
CVPixelBufferUnlockBaseAddress(managedPixelBuffer, [])
appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
frameCount += 1
} else {
print("Failed to allocate pixel buffer")
appendSucceeded = false
}
}
if !appendSucceeded {
break
}
//frameCount += 1
}
videoWriterInput.markAsFinished()
videoWriter.finishWriting { () -> Void in
print("Done saving")
self.saveVideoToLibrary(videoURL: videoOutputURL)
}
})
}
}
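A short call-site sketch for the functions above (capturedFrames is a hypothetical [UIImage]; also remember that saving to the photo library requires the photo library usage description keys in Info.plist):
// Hypothetical example: turn two bundled images into a video.
let capturedFrames = [UIImage(named: "frame0")!, UIImage(named: "frame1")!]
buildVideoFromImageArray(framesArray: capturedFrames)
// buildVideoFromImageArray writes OutputVideo.mp4 to the caches directory and,
// once finishWriting completes, saveVideoToLibrary(videoURL:) saves it to Photos.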