I'm creating a video from an array of UIImage.
I've used multiple Stack Overflow posts and mixed and modified a lot of the provided code.
I will include the file that I'm currently using.
The images are currently coming out like this, and I'm thinking it's because they are turned into CGImage, which doesn't retain orientation?
Also, how do I handle the scaling when a user takes a horizontal picture?
I basically want to show the images as they were taken.
The first two images were taken vertically and come out rotated 90 degrees to the left; the last was taken horizontally.
Thank you
Class:
import AVFoundation
import UIKit
let kErrorDomain = "TimeLapseBuilder"
let kFailedToStartAssetWriterError = 0
let kFailedToAppendPixelBufferError = 1
public class TimeLapseBuilder: NSObject {
var photos: [UIImage]
var videoWriter: AVAssetWriter?
var outputSize = CGSizeMake(1920, 1080)
public init(photos: [UIImage]) {
self.photos = photos
super.init()
}
public func build(outputSize outputSize: CGSize, progress: (NSProgress -> Void), success: (NSURL -> Void), failure: (NSError -> Void)) {
self.outputSize = outputSize
var error: NSError?
let startTime = NSDate.timeIntervalSinceReferenceDate()
let fileManager = NSFileManager.defaultManager()
let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
guard let documentDirectory: NSURL = urls.first else {
fatalError("documentDir Error")
}
let videoOutputURL = documentDirectory.URLByAppendingPathComponent("AssembledVideo.mov")
if NSFileManager.defaultManager().fileExistsAtPath(videoOutputURL.path!) {
do {
try NSFileManager.defaultManager().removeItemAtPath(videoOutputURL.path!)
}catch{
fatalError("Unable to delete file: \(error) : \(__FUNCTION__).")
}
}
guard let videoWriter = try? AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeQuickTimeMovie) else{
fatalError("AVAssetWriter error")
}
let outputSettings = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : NSNumber(float: Float(outputSize.width)),
AVVideoHeightKey : NSNumber(float: Float(outputSize.height)),
]
guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
fatalError("Negative : Can't apply the Output settings...")
}
let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
let sourcePixelBufferAttributesDictionary = [
kCVPixelBufferPixelFormatTypeKey as String: NSNumber(unsignedInt: kCVPixelFormatType_32ARGB),
kCVPixelBufferWidthKey as String: NSNumber(float: Float(outputSize.width)),
kCVPixelBufferHeightKey as String: NSNumber(float: Float(outputSize.height)),
]
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(
assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary
)
assert(videoWriter.canAddInput(videoWriterInput))
videoWriter.addInput(videoWriterInput)
if videoWriter.startWriting() {
videoWriter.startSessionAtSourceTime(kCMTimeZero)
assert(pixelBufferAdaptor.pixelBufferPool != nil)
let media_queue = dispatch_queue_create("mediaInputQueue", nil)
videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
let fps: Int32 = 1
let frameDuration = CMTimeMake(1, fps)
let currentProgress = NSProgress(totalUnitCount: Int64(self.photos.count))
var frameCount: Int64 = 0
while (!self.photos.isEmpty) {
if (videoWriterInput.readyForMoreMediaData) {
let nextPhoto = self.photos.removeAtIndex(0)
let lastFrameTime = CMTimeMake(frameCount, fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
if !self.appendPixelBufferForImage(nextPhoto, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
error = NSError(domain: kErrorDomain, code: kFailedToAppendPixelBufferError,
userInfo: [
"description": "AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer",
"rawError": videoWriter.error ?? "(none)"
])
break
}
frameCount++
currentProgress.completedUnitCount = frameCount
progress(currentProgress)
}
}
let endTime = NSDate.timeIntervalSinceReferenceDate()
let elapsedTime: NSTimeInterval = endTime - startTime
print("rendering time \(self.stringFromTimeInterval(elapsedTime))")
videoWriterInput.markAsFinished()
videoWriter.finishWritingWithCompletionHandler { () -> Void in
if error == nil {
success(videoOutputURL)
} else {
// Report append failures too; the `if let error` check at the bottom of
// build() runs before this queue finishes, so it never sees this error.
failure(error!)
}
}
})
} else {
error = NSError(domain: kErrorDomain, code: kFailedToStartAssetWriterError,
userInfo: ["description": "AVAssetWriter failed to start writing"]
)
}
if let error = error {
failure(error)
}
}
public func appendPixelBufferForImage(image: UIImage, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
var appendSucceeded = true
autoreleasepool {
var pixelBuffer: CVPixelBuffer? = nil
let options: [NSObject : AnyObject] = [
kCVPixelBufferCGImageCompatibilityKey : Int(true),
kCVPixelBufferCGBitmapContextCompatibilityKey : Int(true)
]
let status: CVReturn = CVPixelBufferCreate(kCFAllocatorDefault, Int(image.size.width), Int(image.size.height), kCVPixelFormatType_32ARGB, options as CFDictionaryRef, &pixelBuffer)
if let pixelBuffer = pixelBuffer where status == 0 {
let managedPixelBuffer = pixelBuffer
print("Scaleeee \(image.scale)")
print("Widthhhh \(image.size.width)")
print("Heighttt \(image.size.height)")
pixelBufferFromImage(image.CGImage!, pxbuffer: managedPixelBuffer, andSize: CGSize(width: image.size.width, height: image.size.height))
appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
} else {
NSLog("error: Failed to allocate pixel buffer from pool")
}
}
return appendSucceeded
}
func pixelBufferFromImage(image: CGImageRef, pxbuffer: CVPixelBuffer, andSize size: CGSize){
CVPixelBufferLockBaseAddress(pxbuffer, 0)
let pxdata = CVPixelBufferGetBaseAddress(pxbuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGBitmapContextCreate(pxdata, Int(size.width), Int(size.height), 8, CVPixelBufferGetBytesPerRow(pxbuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)
CGContextConcatCTM(context, CGAffineTransformMakeRotation(0))
CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), image)
CVPixelBufferUnlockBaseAddress(pxbuffer, 0)
}
// //Old fillpixel method
// func fillPixelBufferFromImage(image: UIImage, pixelBuffer: CVPixelBuffer, contentMode:UIViewContentMode){
//
// CVPixelBufferLockBaseAddress(pixelBuffer, 0)
////
// let data = CVPixelBufferGetBaseAddress(pixelBuffer)
// let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
// let context = CGBitmapContextCreate(data, Int(self.outputSize.width), Int(self.outputSize.height), 8, CVPixelBufferGetBytesPerRow(pixelBuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)
//
// CGContextClearRect(context, CGRectMake(0, 0, CGFloat(self.outputSize.width), CGFloat(self.outputSize.height)))
//
// let horizontalRatio = CGFloat(self.outputSize.width) / image.size.width
// let verticalRatio = CGFloat(self.outputSize.height) / image.size.height
// var ratio: CGFloat = 1
//
//// print("horizontal ratio \(horizontalRatio)")
//// print("vertical ratio \(verticalRatio)")
//// print("ratio \(ratio)")
//// print("Image Width - \(image.size.width). Image Height - \(image.size.height)")
//
// switch(contentMode) {
// case .ScaleAspectFill:
// ratio = max(horizontalRatio, verticalRatio)
// case .ScaleAspectFit:
// ratio = min(horizontalRatio, verticalRatio)
// default:
// ratio = min(horizontalRatio, verticalRatio)
// }
//
// let newSize:CGSize = CGSizeMake(image.size.width * ratio, image.size.height * ratio)
//
// let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
// let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
//
// CGContextDrawImage(context, CGRectMake(x, y, newSize.width, newSize.height), image.CGImage)
//
// CVPixelBufferUnlockBaseAddress(pixelBuffer, 0)
// }
func stringFromTimeInterval(interval: NSTimeInterval) -> String {
let ti = NSInteger(interval)
let ms = Int((interval % 1) * 1000)
let seconds = ti % 60
let minutes = (ti / 60) % 60
let hours = (ti / 3600)
if hours > 0 {
return NSString(format: "%0.2d:%0.2d:%0.2d.%0.2d", hours, minutes, seconds, ms) as String
}else if minutes > 0 {
return NSString(format: "%0.2d:%0.2d.%0.2d", minutes, seconds, ms) as String
}else {
return NSString(format: "%0.2d.%0.2d", seconds, ms) as String
}
}
}
This is a common issue with UIImage: the orientation is stored as metadata (usually derived from the camera's EXIF data), so it is lost when you draw the underlying CGImage, which has no orientation of its own.
I have written an extension on UIImage that allows me to access an instance of the image in the correct orientation.
extension UIImage {
var fixedOrientation: UIImage {
if self.imageOrientation == .Up {
return self
}
var transform: CGAffineTransform = CGAffineTransformIdentity
switch (self.imageOrientation) {
case .Down, .DownMirrored:
transform = CGAffineTransformTranslate(transform, self.size.width, self.size.height)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI))
case .Left, .LeftMirrored:
transform = CGAffineTransformTranslate(transform, self.size.width, 0)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI_2))
case .Right, .RightMirrored:
transform = CGAffineTransformTranslate(transform, 0, self.size.height)
transform = CGAffineTransformRotate(transform, CGFloat(-M_PI_2))
case .Up, .UpMirrored:
break
}
switch (self.imageOrientation) {
case .UpMirrored, .DownMirrored:
transform = CGAffineTransformTranslate(transform, self.size.width, 0)
transform = CGAffineTransformScale(transform, -1, 1)
case .LeftMirrored, .RightMirrored:
transform = CGAffineTransformTranslate(transform, self.size.height, 0)
transform = CGAffineTransformScale(transform, -1, 1)
case .Up, .Down, .Left, .Right:
break
}
let context = CGBitmapContextCreate(nil, Int(self.size.width), Int(self.size.height), CGImageGetBitsPerComponent(self.CGImage), 0, CGImageGetColorSpace(self.CGImage), CGImageGetBitmapInfo(self.CGImage).rawValue)
CGContextConcatCTM(context, transform)
switch (self.imageOrientation) {
case .Left, .LeftMirrored, .Right, .RightMirrored:
// Width and height swap for the rotated orientations.
CGContextDrawImage(context, CGRectMake(0, 0, self.size.height, self.size.width), self.CGImage)
default:
CGContextDrawImage(context, CGRectMake(0, 0, self.size.width, self.size.height), self.CGImage)
}
let cgImage = CGBitmapContextCreateImage(context)
let uiImage = UIImage.init(CGImage: cgImage!)
return uiImage
}
}
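With this extension in place, you can normalize each photo before it is rendered into a pixel buffer. For example, in the frame loop of the builder above (a minimal sketch, not part of the original answer):

let nextPhoto = self.photos.removeAtIndex(0).fixedOrientation
// nextPhoto is now guaranteed to be .Up, so drawing its CGImage preserves how it was taken.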
Similarly, if you know the desired CGSize you want your image to be (you can usually get away with using the current view's bounds), then you can use this extension on UIImage as well.
extension UIImage {
func resize(newSize: CGSize) -> UIImage {
UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
self.drawInRect(CGRectMake(0, 0, newSize.width, newSize.height))
let resized = UIGraphicsGetImageFromCurrentImageContext()
// Balance UIGraphicsBeginImageContextWithOptions to avoid leaking the context.
UIGraphicsEndImageContext()
return resized
}
}
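To handle the horizontal-picture case from the question, the two extensions can be combined to aspect-fit the upright photo into the video's output size. A minimal sketch (the frameImage helper is hypothetical; centering the result on the 1920x1080 canvas would still be done when drawing, as in the commented-out fillPixelBufferFromImage):

func frameImage(photo: UIImage, outputSize: CGSize) -> UIImage {
    let upright = photo.fixedOrientation
    // .ScaleAspectFit: shrink until both dimensions fit inside outputSize.
    let ratio = min(outputSize.width / upright.size.width,
                    outputSize.height / upright.size.height)
    return upright.resize(CGSizeMake(upright.size.width * ratio,
                                     upright.size.height * ratio))
}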
I am also curious to see if this can be refactored, but this always does the trick for me - hope this helps!
Related
I have successfully created a video from an array of images with a frame duration of 1 second for all images, but I want to use a different duration for each image, e.g. 0.1 s and 0.2 s. It's not working correctly.
while (!self.selectedPhotosArray.isEmpty) {
if (videoWriterInput.isReadyForMoreMediaData) {
let nextDicData = self.selectedPhotosArray.remove(at: 0)
if let nextImage = nextDicData["img"] as? UIImage
{
var frameDuration = CMTimeMake(Int64(0 * 10000), fps)
if let timeVl = nextDicData["time"] as? Float{
framePerSecond = Int64(timeVl * 10000)
}
frameDuration = CMTimeMake(framePerSecond, fps)
let lastFrameTime = CMTimeMake(Int64(lastTimeVl), fps)
let presentationTime = frameCount == 0 ? lastFrameTime : CMTimeAdd(lastFrameTime, frameDuration)
var pixelBuffer: CVPixelBuffer? = nil
let status: CVReturn = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferAdaptor.pixelBufferPool!, &pixelBuffer)
if let pixelBuffer = pixelBuffer, status == 0 {
let managedPixelBuffer = pixelBuffer
CVPixelBufferLockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
let data = CVPixelBufferGetBaseAddress(managedPixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: data, width: Int(self.outputSize.width), height: Int(self.outputSize.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(managedPixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
context!.clear(CGRect(x: 0, y: 0, width: CGFloat(self.outputSize.width), height: CGFloat(self.outputSize.height)))
let horizontalRatio = CGFloat(self.outputSize.width) / nextImage.size.width
let verticalRatio = CGFloat(self.outputSize.height) / nextImage.size.height
//let aspectRatio = max(horizontalRatio, verticalRatio) // ScaleAspectFill
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize: CGSize = CGSize(width: nextImage.size.width, height: nextImage.size.height)
let x = newSize.width < self.outputSize.width ? (self.outputSize.width - newSize.width) / 2 : 0
let y = newSize.height < self.outputSize.height ? (self.outputSize.height - newSize.height) / 2 : 0
context?.draw(nextImage.cgImage!, in: CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height))
CVPixelBufferUnlockBaseAddress(managedPixelBuffer, CVPixelBufferLockFlags(rawValue: CVOptionFlags(0)))
appendSucceeded = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
} else {
print("Failed to allocate pixel buffer")
appendSucceeded = false
}
}
}
if !appendSucceeded {
break
}
frameCount += 1
lastTimeVl += framePerSecond
}
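For what it's worth, the usual way to make per-image durations come out right is to present each frame at the running sum of the durations before it, rather than adding the current frame's duration to a separate counter. A minimal sketch (Swift 3; the durations array of seconds per image is a hypothetical input):

import AVFoundation

// Frame i is presented at the sum of durations 0..<i.
func presentationTimes(for durations: [Double]) -> [CMTime] {
    let timescale: Int32 = 600 // a common video timescale
    var current = kCMTimeZero
    var times: [CMTime] = []
    for seconds in durations {
        times.append(current)
        current = CMTimeAdd(current, CMTimeMake(Int64(seconds * Double(timescale)), timescale))
    }
    return times
}

Each returned time is then passed to pixelBufferAdaptor.append(_:withPresentationTime:) for the corresponding image.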
I am trying to get Apple's sample Core ML Models that were demoed at the 2017 WWDC to function correctly. I am using the GoogLeNet to try and classify images (see the Apple Machine Learning Page). The model takes a CVPixelBuffer as an input. I have an image called imageSample.jpg that I'm using for this demo. My code is below:
var sample = UIImage(named: "imageSample")?.cgImage
let bufferThree = getCVPixelBuffer(sample!)
let model = GoogLeNetPlaces()
guard let output = try? model.prediction(input: GoogLeNetPlacesInput.init(sceneImage: bufferThree!)) else {
fatalError("Unexpected runtime error.")
}
print(output.sceneLabel)
I am always getting the unexpected runtime error in the output rather than an image classification. My code to convert the image is below:
func getCVPixelBuffer(_ image: CGImage) -> CVPixelBuffer? {
let imageWidth = Int(image.width)
let imageHeight = Int(image.height)
let attributes : [NSObject:AnyObject] = [
kCVPixelBufferCGImageCompatibilityKey : true as AnyObject,
kCVPixelBufferCGBitmapContextCompatibilityKey : true as AnyObject
]
var pxbuffer: CVPixelBuffer? = nil
CVPixelBufferCreate(kCFAllocatorDefault,
imageWidth,
imageHeight,
kCVPixelFormatType_32ARGB,
attributes as CFDictionary?,
&pxbuffer)
if let _pxbuffer = pxbuffer {
let flags = CVPixelBufferLockFlags(rawValue: 0)
CVPixelBufferLockBaseAddress(_pxbuffer, flags)
let pxdata = CVPixelBufferGetBaseAddress(_pxbuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB();
let context = CGContext(data: pxdata,
width: imageWidth,
height: imageHeight,
bitsPerComponent: 8,
bytesPerRow: CVPixelBufferGetBytesPerRow(_pxbuffer),
space: rgbColorSpace,
bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
if let _context = context {
_context.draw(image, in: CGRect.init(x: 0, y: 0, width: imageWidth, height: imageHeight))
}
else {
CVPixelBufferUnlockBaseAddress(_pxbuffer, flags);
return nil
}
CVPixelBufferUnlockBaseAddress(_pxbuffer, flags);
return _pxbuffer;
}
return nil
}
I got this code from a previous Stack Overflow post (last answer here). I recognize that the code may not be correct, but I have no idea how to do this myself. I believe that this is the section that contains the error. The model calls for the following type of input: Image<RGB,224,224>
You don't need to do a bunch of image mangling yourself to use a Core ML model with an image — the new Vision framework can do that for you.
import Vision
import CoreML
let model = try VNCoreMLModel(for: MyCoreMLGeneratedModelClass().model)
let request = VNCoreMLRequest(model: model, completionHandler: myResultsMethod)
let handler = VNImageRequestHandler(url: myImageURL)
try handler.perform([request])
func myResultsMethod(request: VNRequest, error: Error?) {
guard let results = request.results as? [VNClassificationObservation]
else { fatalError("huh") }
for classification in results {
print(classification.identifier, // the scene label
classification.confidence)
}
}
The WWDC17 session on Vision should have a bit more info — it's tomorrow afternoon.
You can use pure Core ML, but you should resize the image to (224, 224) first:
DispatchQueue.global(qos: .userInitiated).async {
// Resnet50 expects an image 224 x 224, so we should resize and crop the source image
let inputImageSize: CGFloat = 224.0
let minLen = min(image.size.width, image.size.height)
let resizedImage = image.resize(to: CGSize(width: inputImageSize * image.size.width / minLen, height: inputImageSize * image.size.height / minLen))
let croppedToSquareImage = resizedImage.cropToSquare()
guard let pixelBuffer = croppedToSquareImage?.pixelBuffer() else {
fatalError()
}
guard let classifierOutput = try? self.classifier.prediction(image: pixelBuffer) else {
fatalError()
}
DispatchQueue.main.async {
self.title = classifierOutput.classLabel
}
}
// ...
extension UIImage {
func resize(to newSize: CGSize) -> UIImage {
UIGraphicsBeginImageContextWithOptions(CGSize(width: newSize.width, height: newSize.height), true, 1.0)
self.draw(in: CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height))
let resizedImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return resizedImage
}
func cropToSquare() -> UIImage? {
guard let cgImage = self.cgImage else {
return nil
}
var imageHeight = self.size.height
var imageWidth = self.size.width
if imageHeight > imageWidth {
imageHeight = imageWidth
}
else {
imageWidth = imageHeight
}
let size = CGSize(width: imageWidth, height: imageHeight)
let x = ((CGFloat(cgImage.width) - size.width) / 2).rounded()
let y = ((CGFloat(cgImage.height) - size.height) / 2).rounded()
let cropRect = CGRect(x: x, y: y, width: size.width, height: size.height)
if let croppedCgImage = cgImage.cropping(to: cropRect) {
return UIImage(cgImage: croppedCgImage, scale: 0, orientation: self.imageOrientation)
}
return nil
}
func pixelBuffer() -> CVPixelBuffer? {
let width = self.size.width
let height = self.size.height
let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue,
kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
var pixelBuffer: CVPixelBuffer?
let status = CVPixelBufferCreate(kCFAllocatorDefault,
Int(width),
Int(height),
kCVPixelFormatType_32ARGB,
attrs,
&pixelBuffer)
guard let resultPixelBuffer = pixelBuffer, status == kCVReturnSuccess else {
return nil
}
CVPixelBufferLockBaseAddress(resultPixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
let pixelData = CVPixelBufferGetBaseAddress(resultPixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
guard let context = CGContext(data: pixelData,
width: Int(width),
height: Int(height),
bitsPerComponent: 8,
bytesPerRow: CVPixelBufferGetBytesPerRow(resultPixelBuffer),
space: rgbColorSpace,
bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue) else {
return nil
}
context.translateBy(x: 0, y: height)
context.scaleBy(x: 1.0, y: -1.0)
UIGraphicsPushContext(context)
self.draw(in: CGRect(x: 0, y: 0, width: width, height: height))
UIGraphicsPopContext()
CVPixelBufferUnlockBaseAddress(resultPixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
return resultPixelBuffer
}
}
You can find the expected image size for inputs in the .mlmodel file:
A demo project that uses both pure CoreML and Vision variants you can find here: https://github.com/handsomecode/iOS11-Demos/tree/coreml_vision/CoreML/CoreMLDemo
If the input is a UIImage rather than a URL, and you want to use VNImageRequestHandler, you can use CIImage.
func updateClassifications(for image: UIImage) {
let orientation = CGImagePropertyOrientation(image.imageOrientation)
guard let ciImage = CIImage(image: image) else { return }
let handler = VNImageRequestHandler(ciImage: ciImage, orientation: orientation)
}
From Classifying Images with Vision and Core ML
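Note that the CGImagePropertyOrientation(_:) initializer taking a UIImageOrientation is not part of the SDK; the sample defines it in an extension along these lines (reconstructed here):

import ImageIO
import UIKit

extension CGImagePropertyOrientation {
    // Map UIKit's orientation cases onto the EXIF-style CGImagePropertyOrientation values.
    init(_ orientation: UIImageOrientation) {
        switch orientation {
        case .up: self = .up
        case .upMirrored: self = .upMirrored
        case .down: self = .down
        case .downMirrored: self = .downMirrored
        case .left: self = .left
        case .leftMirrored: self = .leftMirrored
        case .right: self = .right
        case .rightMirrored: self = .rightMirrored
        }
    }
}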
I have a UIImage array with a lot of UIImage objects and use the methods mentioned in the link to export the image array to a video. Everything works, but the performance of converting the UIImage array to CVPixelBuffers is terrible:
private func newPixelBufferFrom(cgImage:CGImage) -> CVPixelBuffer?{
let options:[String: Any] = [kCVPixelBufferCGImageCompatibilityKey as String: true, kCVPixelBufferCGBitmapContextCompatibilityKey as String: true]
var pxbuffer:CVPixelBuffer?
let frameWidth = self.videoSettings[AVVideoWidthKey] as! Int
let frameHeight = self.videoSettings[AVVideoHeightKey] as! Int
let status = CVPixelBufferCreate(kCFAllocatorDefault, frameWidth, frameHeight, kCVPixelFormatType_32ARGB, options as CFDictionary?, &pxbuffer)
assert(status == kCVReturnSuccess && pxbuffer != nil, "newPixelBuffer failed")
CVPixelBufferLockBaseAddress(pxbuffer!, CVPixelBufferLockFlags(rawValue: 0))
let pxdata = CVPixelBufferGetBaseAddress(pxbuffer!)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: pxdata, width: frameWidth, height: frameHeight, bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pxbuffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)
assert(context != nil, "context is nil")
context!.concatenate(CGAffineTransform.identity)
context!.draw(cgImage, in: CGRect(x: 0, y: 0, width: frameWidth, height: frameHeight))
CVPixelBufferUnlockBaseAddress(pxbuffer!, CVPixelBufferLockFlags(rawValue: 0))
return pxbuffer
}
Could you give me some ideas?
Thanks!
I solved my problem.
In my video editing application (like iMovie), I need to convert one image to a video and make the image (now a video) movable.
The UIImage array is derived from a single UIImage, in essence. Therefore, I avoid calling newPixelBufferFrom repeatedly and call it only once. The following code is faster:
var sampleBuffer:CVPixelBuffer?
var pxDataBuffer:CVPixelBuffer?
let options:[String: Any] = [kCVPixelBufferCGImageCompatibilityKey as String: true, kCVPixelBufferCGBitmapContextCompatibilityKey as String: true]
let frameHeight = self.videoSettings[AVVideoHeightKey] as! Int
let frameWidth = self.videoSettings[AVVideoWidthKey] as! Int
let originHeight = frameWidth * img!.cgImage!.height / img!.cgImage!.width
let heightDifference = originHeight - frameHeight
let frameCounts = self.duration * Int(self.frameTime.timescale)
let spacingOfHeight = heightDifference / frameCounts
sampleBuffer = self.newPixelBufferFrom(cgImage: img!.cgImage!)
assert(sampleBuffer != nil)
var presentTime = CMTimeMake(1, self.frameTime.timescale)
var stepRows = 0
for i in 0..<frameCounts {
CVPixelBufferLockBaseAddress(sampleBuffer!, CVPixelBufferLockFlags(rawValue: 0))
let pointer = CVPixelBufferGetBaseAddress(sampleBuffer!)
var pxData = pointer?.assumingMemoryBound(to: UInt8.self)
let bytes = CVPixelBufferGetBytesPerRow(sampleBuffer!) * stepRows
pxData = pxData?.advanced(by: bytes)
let status = CVPixelBufferCreateWithBytes(kCFAllocatorDefault, frameWidth, frameHeight, kCVPixelFormatType_32ARGB, pxData!, CVPixelBufferGetBytesPerRow(sampleBuffer!), nil, nil, options as CFDictionary?, &pxDataBuffer)
assert(status == kCVReturnSuccess && pxDataBuffer != nil, "newPixelBuffer failed")
CVPixelBufferUnlockBaseAddress(sampleBuffer!, CVPixelBufferLockFlags(rawValue: 0))
while !self.writeInput.isReadyForMoreMediaData {
usleep(100)
}
if (self.writeInput.isReadyForMoreMediaData){
if i == 0{
self.bufferAdapter.append(pxDataBuffer!, withPresentationTime: zeroTime)
}else{
self.bufferAdapter.append(pxDataBuffer!, withPresentationTime: presentTime)
}
presentTime = CMTimeAdd(presentTime, self.frameTime)
}
stepRows += spacingOfHeight
}
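If the frames do not all come from a single source image, another common speedup is to recycle buffers from the adaptor's pixelBufferPool instead of calling CVPixelBufferCreate per frame. A sketch, assuming the same bufferAdapter as above, created with non-nil sourcePixelBufferAttributes (the pool is nil otherwise), and a writing session that has already started:

private func newPixelBufferFromPool(cgImage: CGImage) -> CVPixelBuffer? {
    guard let pool = self.bufferAdapter.pixelBufferPool else { return nil }
    var pxbuffer: CVPixelBuffer?
    let status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pool, &pxbuffer)
    guard status == kCVReturnSuccess, let buffer = pxbuffer else { return nil }
    CVPixelBufferLockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: 0))
    // Draw into the recycled buffer; its dimensions come from the adaptor's attributes.
    if let context = CGContext(data: CVPixelBufferGetBaseAddress(buffer),
                               width: CVPixelBufferGetWidth(buffer),
                               height: CVPixelBufferGetHeight(buffer),
                               bitsPerComponent: 8,
                               bytesPerRow: CVPixelBufferGetBytesPerRow(buffer),
                               space: CGColorSpaceCreateDeviceRGB(),
                               bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue) {
        context.draw(cgImage, in: CGRect(x: 0, y: 0,
                                         width: CVPixelBufferGetWidth(buffer),
                                         height: CVPixelBufferGetHeight(buffer)))
    }
    CVPixelBufferUnlockBaseAddress(buffer, CVPixelBufferLockFlags(rawValue: 0))
    return buffer
}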
I want to create a video from an image, so I used the link below as a reference.
Link: CVPixelBufferPool Error (kCVReturnInvalidArgument/-6661)
func writeAnimationToMovie(path: String, size: CGSize, animation: Animation) -> Bool {
var error: NSError?
let writer = AVAssetWriter(URL: NSURL(fileURLWithPath: path), fileType: AVFileTypeQuickTimeMovie, error: &error)
let videoSettings = [AVVideoCodecKey: AVVideoCodecH264, AVVideoWidthKey: size.width, AVVideoHeightKey: size.height]
let input = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: input, sourcePixelBufferAttributes: nil)
input.expectsMediaDataInRealTime = true
writer.addInput(input)
writer.startWriting()
writer.startSessionAtSourceTime(kCMTimeZero)
var buffer: CVPixelBufferRef
var frameCount = 0
for frame in animation.frames {
let rect = CGRectMake(0, 0, size.width, size.height)
let rectPtr = UnsafeMutablePointer<CGRect>.alloc(1)
rectPtr.memory = rect
buffer = pixelBufferFromCGImage(frame.image.CGImageForProposedRect(rectPtr, context: nil, hints: nil).takeUnretainedValue(), size)
var appendOk = false
var j = 0
while (!appendOk && j < 30) {
if pixelBufferAdaptor.assetWriterInput.readyForMoreMediaData {
let frameTime = CMTimeMake(Int64(frameCount), 10)
appendOk = pixelBufferAdaptor.appendPixelBuffer(buffer, withPresentationTime: frameTime)
// appendOk will always be false
NSThread.sleepForTimeInterval(0.05)
} else {
NSThread.sleepForTimeInterval(0.1)
}
j++
}
if (!appendOk) {
println("Doh, frame \(frame) at offset \(frameCount) failed to append")
}
}
input.markAsFinished()
writer.finishWritingWithCompletionHandler({
if writer.status == AVAssetWriterStatus.Failed {
println("oh noes, an error: \(writer.error.description)")
} else {
println("hrmmm, there should be a movie?")
}
})
return true;
}
func pixelBufferFromCGImage(image: CGImageRef, size: CGSize) -> CVPixelBufferRef {
let options = [
kCVPixelBufferCGImageCompatibilityKey: true,
kCVPixelBufferCGBitmapContextCompatibilityKey: true]
var pixBufferPointer = UnsafeMutablePointer<Unmanaged<CVPixelBuffer>?>.alloc(1)
let status = CVPixelBufferCreate(
nil,
UInt(size.width), UInt(size.height),
OSType(kCVPixelFormatType_32ARGB),
options,
pixBufferPointer)
CVPixelBufferLockBaseAddress(pixBufferPointer.memory?.takeUnretainedValue(), 0)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapinfo = CGBitmapInfo.fromRaw(CGImageAlphaInfo.NoneSkipFirst.toRaw())
var pixBufferData:UnsafeMutablePointer<(Void)> = CVPixelBufferGetBaseAddress(pixBufferPointer.memory?.takeUnretainedValue())
let context = CGBitmapContextCreate(
pixBufferData,
UInt(size.width), UInt(size.height),
8, UInt(4 * size.width),
rgbColorSpace, bitmapinfo!)
CGContextConcatCTM(context, CGAffineTransformMakeRotation(0))
CGContextDrawImage(
context,
CGRectMake(0, 0, CGFloat(CGImageGetWidth(image)), CGFloat(CGImageGetHeight(image))),
image)
CVPixelBufferUnlockBaseAddress(pixBufferPointer.memory?.takeUnretainedValue(), 0)
return pixBufferPointer.memory!.takeUnretainedValue()
}
Even after the movie is created, memory remains in use, as in the image below.
I believe what is left over is the pixel buffer.
In Objective-C I had CVPixelBufferRelease(buffer) to release the pixel buffer, but I can no longer use this in Swift. How do I release the pixel buffer?
If anyone could help, I'd really appreciate it.
When using CVPixelBufferCreate, the UnsafeMutablePointer has to be destroyed after you retrieve its memory.
When I create a CVPixelBuffer, I do it like this:
func allocPixelBuffer() -> CVPixelBuffer {
let pixelBufferAttributes : CFDictionary = [...]
let pixelBufferOut = UnsafeMutablePointer<CVPixelBuffer?>.alloc(1)
_ = CVPixelBufferCreate(kCFAllocatorDefault,
Int(Width),
Int(Height),
OSType(kCVPixelFormatType_32ARGB),
pixelBufferAttributes,
pixelBufferOut)
let pixelBuffer = pixelBufferOut.memory!
pixelBufferOut.destroy()   // release the CVPixelBuffer reference stored in the pointee
pixelBufferOut.dealloc(1)  // then free the pointer's own allocation
return pixelBuffer
}
I had the same problem, but I solved it. Use autoreleasepool:
var boolWhile = true
while (boolWhile) {
autoreleasepool({() -> () in
if(input.readyForMoreMediaData) {
presentTime = CMTimeMake(Int64(ii), fps)
if(ii >= arrayImages.count){
...
Try changing
return pixBufferPointer.memory!.takeUnretainedValue()
to
return pixBufferPointer.memory!.takeRetainedValue()
to avoid leaking CVPixelBuffers. CVPixelBufferCreate follows the Core Foundation create rule, so the buffer comes back with a +1 retain count; takeRetainedValue() transfers that reference to Swift's memory management, while takeUnretainedValue() leaves it outstanding.
I'm trying to take an image and convert it into a video file in Swift. I was able to get the code to work in the iOS simulator, but it won't work on an actual device. It errors out when I try to append the pixel buffer to the AVAssetWriterInput. I was wondering if anyone here has run into the same issue previously. Here's the code:
func createVideoFromImage(img: CGImageRef) -> Void {
var error: NSError?
let frameSize = CGSizeMake(CGFloat(CGImageGetWidth(img)), CGFloat(CGImageGetHeight(img)))
let fileName = "\(uniqueString()).m4v"
let assetWriter = AVAssetWriter(URL: fileUrl(fileName), fileType: AVFileTypeAppleM4V, error: &error)
let outputSettings = [
AVVideoCodecKey: AVVideoCodecH264,
AVVideoWidthKey: frameSize.width,
AVVideoHeightKey: frameSize.height
]
let assetWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: assetWriterInput, sourcePixelBufferAttributes: nil)
if assetWriter.canAddInput(assetWriterInput) {
assetWriter.addInput(assetWriterInput)
}
assetWriter.startWriting()
assetWriter.startSessionAtSourceTime(kCMTimeZero)
let buffer:CVPixelBufferRef = pixelBufferFromCGImage(img, frameSize: frameSize)
if pixelBufferAdaptor.assetWriterInput.readyForMoreMediaData {
let frameTime = CMTimeMakeWithSeconds(0, 30)
let pixelBufferAppend = pixelBufferAdaptor.appendPixelBuffer(buffer, withPresentationTime: frameTime)
if pixelBufferAppend {
assetWriterInput.markAsFinished()
assetWriter.finishWritingWithCompletionHandler { () -> Void in
switch assetWriter.status {
case AVAssetWriterStatus.Failed:
println("Error: \(assetWriter.error)")
default:
let path = self.fileUrl(fileName).path!
let content = NSFileManager.defaultManager().contentsAtPath(path)
println("Video: \(path) \(content?.length)")
}
}
} else {
println("failed to append pixel buffer")
}
}
}
func pixelBufferFromCGImage (img: CGImageRef, frameSize: CGSize) -> CVPixelBufferRef {
let options = [
"kCVPixelBufferCGImageCompatibilityKey": true,
"kCVPixelBufferCGBitmapContextCompatibilityKey": true
]
var pixelBufferPointer = UnsafeMutablePointer<Unmanaged<CVPixelBuffer>?>.alloc(1)
let buffered:CVReturn = CVPixelBufferCreate(kCFAllocatorDefault, UInt(frameSize.width), UInt(frameSize.height), OSType(kCVPixelFormatType_32ARGB), options, pixelBufferPointer)
let lockBaseAddress = CVPixelBufferLockBaseAddress(pixelBufferPointer.memory?.takeUnretainedValue(), 0)
var pixelData:UnsafeMutablePointer<(Void)> = CVPixelBufferGetBaseAddress(pixelBufferPointer.memory?.takeUnretainedValue())
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.NoneSkipFirst.rawValue)
let space:CGColorSpace = CGColorSpaceCreateDeviceRGB()
var context:CGContextRef = CGBitmapContextCreate(pixelData, UInt(frameSize.width), UInt(frameSize.height), 8, CVPixelBufferGetBytesPerRow(pixelBufferPointer.memory?.takeUnretainedValue()), space, bitmapInfo)
CGContextDrawImage(context, CGRectMake(0, 0, frameSize.width, frameSize.height), img)
CVPixelBufferUnlockBaseAddress(pixelBufferPointer.memory?.takeUnretainedValue(), 0)
return pixelBufferPointer.memory!.takeUnretainedValue()
}
pixelBufferAppend returns false! Any help would be greatly appreciated. I have been scratching my head on this all night!
I was finally able to figure out what was going on. The device could only support encoding frames up to 1920x1080 px; I reduced the image size and it worked. Change frameSize to a variable and use the following code:
if frameSize.width > maxImageSize.width || frameSize.height > maxImageSize.height {
let scale = min((maxImageSize.width/frameSize.width), (maxImageSize.height/frameSize.height))
frameSize = CGSizeMake((scale * frameSize.width), (scale * frameSize.height))
}
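Here maxImageSize is just the encoding ceiling observed above, e.g.:

let maxImageSize = CGSizeMake(1920, 1080) // largest frame the device's H.264 encoder accepted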