How to create a CIImage from AVCaptureStillImageOutput in Swift?

I am using some code that does this in Objective-C, and I have been translating it over to Swift, but I am struggling to create a CIImage from AVCaptureStillImageOutput. If someone could look at this code and tell me where I am going wrong, that would be great.
This is the Objective-C code:
- (void)captureImageWithCompletionHander:(void(^)(NSString *fullPath))completionHandler
{
dispatch_suspend(_captureQueue);
AVCaptureConnection *videoConnection = nil;
for (AVCaptureConnection *connection in self.stillImageOutput.connections)
{
for (AVCaptureInputPort *port in connection.inputPorts)
{
if ([port.mediaType isEqual:AVMediaTypeVideo] )
{
videoConnection = connection;
break;
}
}
if (videoConnection) break;
}
__weak typeof(self) weakSelf = self;
[self.stillImageOutput captureStillImageAsynchronouslyFromConnection:videoConnection completionHandler: ^(CMSampleBufferRef imageSampleBuffer, NSError *error)
{
if (error)
{
dispatch_resume(_captureQueue);
return;
}
__block NSArray *filePath = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES); //create an array and store result of our search for the documents directory in it
NSString *documentsDirectory = [filePath objectAtIndex:0]; //create NSString object, that holds our exact path to the documents directory
NSString *fullPath = [documentsDirectory stringByAppendingPathComponent:[NSString stringWithFormat:@"/iScan_img_%i.pdf",(int)[NSDate date].timeIntervalSince1970]];
@autoreleasepool
{
NSData *imageData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageSampleBuffer];
CIImage *enhancedImage = [[CIImage alloc] initWithData:imageData options:@{kCIImageColorSpace:[NSNull null]}];
imageData = nil;
if (weakSelf.cameraViewType == DocScannerCameraViewTypeBlackAndWhite)
{
enhancedImage = [self filteredImageUsingEnhanceFilterOnImage:enhancedImage];
}
else
{
enhancedImage = [self filteredImageUsingContrastFilterOnImage:enhancedImage];
}
if (weakSelf.isBorderDetectionEnabled && rectangleDetectionConfidenceHighEnough(_imageDedectionConfidence))
{
CIRectangleFeature *rectangleFeature = [self biggestRectangleInRectangles:[[self highAccuracyRectangleDetector] featuresInImage:enhancedImage]];
if (rectangleFeature)
{
enhancedImage = [self correctPerspectiveForImage:enhancedImage withFeatures:rectangleFeature];
}
}
CIFilter *transform = [CIFilter filterWithName:@"CIAffineTransform"];
[transform setValue:enhancedImage forKey:kCIInputImageKey];
NSValue *rotation = [NSValue valueWithCGAffineTransform:CGAffineTransformMakeRotation(-90 * (M_PI/180))];
[transform setValue:rotation forKey:@"inputTransform"];
enhancedImage = transform.outputImage;
if (!enhancedImage || CGRectIsEmpty(enhancedImage.extent)) return;
static CIContext *ctx = nil;
if (!ctx)
{
ctx = [CIContext contextWithOptions:@{kCIContextWorkingColorSpace:[NSNull null]}];
}
CGSize bounds = enhancedImage.extent.size;
bounds = CGSizeMake(floorf(bounds.width / 4) * 4,floorf(bounds.height / 4) * 4);
CGRect extent = CGRectMake(enhancedImage.extent.origin.x, enhancedImage.extent.origin.y, bounds.width, bounds.height);
static int bytesPerPixel = 8;
uint rowBytes = bytesPerPixel * bounds.width;
uint totalBytes = rowBytes * bounds.height;
uint8_t *byteBuffer = malloc(totalBytes);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
[ctx render:enhancedImage toBitmap:byteBuffer rowBytes:rowBytes bounds:extent format:kCIFormatRGBA8 colorSpace:colorSpace];
CGContextRef bitmapContext = CGBitmapContextCreate(byteBuffer,bounds.width,bounds.height,bytesPerPixel,rowBytes,colorSpace,kCGImageAlphaNoneSkipLast);
CGImageRef imgRef = CGBitmapContextCreateImage(bitmapContext);
CGColorSpaceRelease(colorSpace);
CGContextRelease(bitmapContext);
free(byteBuffer);
if (imgRef == NULL)
{
return; // nothing to release: CFRelease(NULL) would crash
}
saveCGImageAsJPEGToFilePath(imgRef, fullPath);
CFRelease(imgRef);
dispatch_async(dispatch_get_main_queue(), ^
{
completionHandler(fullPath);
dispatch_resume(_captureQueue);
});
_imageDedectionConfidence = 0.0f;
}
}];
}
Basically it captures the still image and, if border detection succeeds, crops to the detected CIRectangleFeature; it then converts the CIImage to a CGImage that gets passed to a save function.
I have translated it to Swift like this:
func captureImage(completionHandler: @escaping (_ imageFilePath: String) -> Void) {
self.captureQueue?.suspend()
var videoConnection: AVCaptureConnection!
for connection in self.stillImageOutput.connections{
for port in (connection as! AVCaptureConnection).inputPorts {
if (port as! AVCaptureInputPort).mediaType.isEqual(AVMediaTypeVideo) {
videoConnection = connection as! AVCaptureConnection
break
}
}
if videoConnection != nil {
break
}
}
weak var weakSelf = self
self.stillImageOutput.captureStillImageAsynchronously(from: videoConnection) { (sampleBuffer, error) -> Void in
if error != nil {
self.captureQueue?.resume()
return
}
let filePath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
let documentsDirectory: String = filePath[0]
let fullPath: String = URL(fileURLWithPath: documentsDirectory).appendingPathComponent("iScan_img_\(Int(Date().timeIntervalSince1970)).pdf").absoluteString
autoreleasepool {
let imageData = Data(AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer))
var enhancedImage = CIImage(data: imageData, options: [kCIImageColorSpace: NSNull()])
if weakSelf?.cameraViewType == DocScannerCameraViewType.blackAndWhite {
enhancedImage = self.filteredImageUsingEnhanceFilter(on: enhancedImage!)
}
else {
enhancedImage = self.filteredImageUsingContrastFilter(on: enhancedImage!)
}
if (weakSelf?.isEnableBorderDetection == true) && self.rectangleDetectionConfidenceHighEnough(confidence: self.imageDedectionConfidence) {
let rectangleFeature: CIRectangleFeature? = self.biggestRectangles(rectangles: self.highAccuracyRectangleDetector().features(in: enhancedImage!))
if rectangleFeature != nil {
enhancedImage = self.correctPerspective(for: enhancedImage!, withFeatures: rectangleFeature!)
}
}
let transform = CIFilter(name: "CIAffineTransform")
let rotation = NSValue(cgAffineTransform: CGAffineTransform(rotationAngle: -90 * (.pi / 180)))
transform?.setValue(rotation, forKey: "inputTransform")
enhancedImage = transform?.outputImage
if (enhancedImage == nil) || (enhancedImage?.extent.isEmpty)! {
return
}
var ctx: CIContext?
if (ctx != nil) {
ctx = CIContext(options: [kCIContextWorkingColorSpace: NSNull()])
}
var bounds: CGSize = (enhancedImage?.extent.size)!
bounds = CGSize(width: CGFloat((floorf(Float(bounds.width)) / 4) * 4), height: CGFloat((floorf(Float(bounds.height)) / 4) * 4))
let extent = CGRect(x: CGFloat((enhancedImage?.extent.origin.x)!), y: CGFloat((enhancedImage?.extent.origin.y)!), width: CGFloat(bounds.width), height: CGFloat(bounds.height))
let bytesPerPixel: CGFloat = 8
let rowBytes = bytesPerPixel * bounds.width
let totalBytes = rowBytes * bounds.height
let byteBuffer = malloc(Int(totalBytes))
let colorSpace = CGColorSpaceCreateDeviceRGB()
ctx!.render(enhancedImage!, toBitmap: byteBuffer!, rowBytes: Int(rowBytes), bounds: extent, format: kCIFormatRGBA8, colorSpace: colorSpace)
let bitmapContext = CGContext(data: byteBuffer, width: Int(bounds.width), height: Int(bounds.height), bitsPerComponent: Int(bytesPerPixel), bytesPerRow: Int(rowBytes), space: colorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue)
let imgRef = bitmapContext?.makeImage()
free(byteBuffer)
self.saveCGImageAsJPEGToFilePath(imgRef: imgRef!, filePath: fullPath)
DispatchQueue.main.async(execute: {() -> Void in
completionHandler(fullPath)
self.captureQueue?.resume()
})
self.imageDedectionConfidence = 0.0
}
}
}
So it takes the output of AVCaptureStillImageOutput, converts it to a CIImage for all the needed processing, then converts it to a CGImage for saving. What exactly am I doing wrong in the translation? Or is there a better way to do this?
I really didn't want to ask about this, but I can't seem to find any questions like this one, or at least any that cover capturing a CIImage from AVCaptureStillImageOutput.
Thanks for any help!

This is the correct translation in Swift. Thanks again to Prientus for helping me find my mistake:
func captureImage(completionHandler: @escaping (_ imageFilePath: String) -> Void) {
self.captureQueue?.suspend()
var videoConnection: AVCaptureConnection!
for connection in self.stillImageOutput.connections{
for port in (connection as! AVCaptureConnection).inputPorts {
if (port as! AVCaptureInputPort).mediaType.isEqual(AVMediaTypeVideo) {
videoConnection = connection as! AVCaptureConnection
break
}
}
if videoConnection != nil {
break
}
}
weak var weakSelf = self
self.stillImageOutput.captureStillImageAsynchronously(from: videoConnection) { (sampleBuffer: CMSampleBuffer?, error) -> Void in
if error != nil {
self.captureQueue?.resume()
return
}
let filePath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
let documentsDirectory: String = filePath[0]
let fullPath: String = documentsDirectory.appending("/iScan_img_\(Int(Date().timeIntervalSince1970)).pdf")
autoreleasepool {
let imageData = Data(AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer))
var enhancedImage = CIImage(data: imageData, options: [kCIImageColorSpace: NSNull()])
if weakSelf?.cameraViewType == DocScannerCameraViewType.blackAndWhite {
enhancedImage = self.filteredImageUsingEnhanceFilter(on: enhancedImage!)
}
else {
enhancedImage = self.filteredImageUsingContrastFilter(on: enhancedImage!)
}
if (weakSelf?.isEnableBorderDetection == true) && self.rectangleDetectionConfidenceHighEnough(confidence: self.imageDedectionConfidence) {
let rectangleFeature: CIRectangleFeature? = self.biggestRectangles(rectangles: self.highAccuracyRectangleDetector().features(in: enhancedImage!))
if rectangleFeature != nil {
enhancedImage = self.correctPerspective(for: enhancedImage!, withFeatures: rectangleFeature!)
}
}
let transform = CIFilter(name: "CIAffineTransform")
transform?.setValue(enhancedImage, forKey: kCIInputImageKey)
let rotation = NSValue(cgAffineTransform: CGAffineTransform(rotationAngle: -90 * (.pi / 180)))
transform?.setValue(rotation, forKey: "inputTransform")
enhancedImage = transform?.outputImage
if (enhancedImage == nil) || (enhancedImage?.extent.isEmpty)! {
return
}
var ctx: CIContext?
if (ctx == nil) {
ctx = CIContext(options: [kCIContextWorkingColorSpace: NSNull()])
}
var bounds: CGSize = (enhancedImage!.extent.size)
bounds = CGSize(width: CGFloat((floorf(Float(bounds.width)) / 4) * 4), height: CGFloat((floorf(Float(bounds.height)) / 4) * 4))
let extent = CGRect(x: CGFloat((enhancedImage?.extent.origin.x)!), y: CGFloat((enhancedImage?.extent.origin.y)!), width: CGFloat(bounds.width), height: CGFloat(bounds.height))
let bytesPerPixel: CGFloat = 8
let rowBytes = bytesPerPixel * bounds.width
let totalBytes = rowBytes * bounds.height
let byteBuffer = malloc(Int(totalBytes))
let colorSpace = CGColorSpaceCreateDeviceRGB()
ctx!.render(enhancedImage!, toBitmap: byteBuffer!, rowBytes: Int(rowBytes), bounds: extent, format: kCIFormatRGBA8, colorSpace: colorSpace)
let bitmapContext = CGContext(data: byteBuffer, width: Int(bounds.width), height: Int(bounds.height), bitsPerComponent: Int(bytesPerPixel), bytesPerRow: Int(rowBytes), space: colorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue)
let imgRef = bitmapContext?.makeImage()
free(byteBuffer)
if imgRef == nil {
return
}
self.saveCGImageAsJPEGToFilePath(imgRef: imgRef!, filePath: fullPath)
DispatchQueue.main.async(execute: {() -> Void in
completionHandler(fullPath)
self.captureQueue?.resume()
})
self.imageDedectionConfidence = 0.0
}
}
}

Try replacing your CIImage creation with the following lines:
guard let sampleBuffer = sampleBuffer,
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return
}
let enhancedImage = CIImage(cvPixelBuffer: pixelBuffer)
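For reference, here is roughly how that would sit inside the completion handler (a sketch only, reusing the captureQueue and filter helpers from the question's code; note that CIImage(cvPixelBuffer:) is non-failable, so the image itself needs no unwrapping):

self.stillImageOutput.captureStillImageAsynchronously(from: videoConnection) { (sampleBuffer: CMSampleBuffer?, error) -> Void in
    if error != nil {
        self.captureQueue?.resume()
        return
    }
    guard let sampleBuffer = sampleBuffer,
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        self.captureQueue?.resume()
        return
    }
    // Read the camera's pixel buffer directly; no JPEG encode/decode round trip.
    var enhancedImage = CIImage(cvPixelBuffer: pixelBuffer)
    // ... filtering, rectangle detection and rendering continue as in the answer above ...
}

As an aside, AVCaptureStillImageOutput was deprecated in iOS 10 in favor of AVCapturePhotoOutput, so this whole capture path is worth migrating eventually.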

Related

Scaling a UIImage to a large size crashes the app in Swift

I use vImage to scale a UIImage to a large size (1920 * 1080). Memory usage grows very high and the app crashes with a memory issue.
I need to use vImage because it keeps better resolution after scaling.
Thanks!
Here is my code:
func resizeAllImages() {
for i in 0..<arrNumImages.count {
let image = arrNumImages[i]
let imageXib = image.resizeImageUsingVImage(size: CGSize(width: 1920, height: 1080)) ?? UIImage()
}
}
// Resize extension of UIImage
func resizeImageUsingVImage(size:CGSize) -> UIImage? {
let cgImage = self.cgImage!
var format = vImage_CGImageFormat(bitsPerComponent: 8, bitsPerPixel: 32, colorSpace: nil, bitmapInfo: CGBitmapInfo(rawValue: CGImageAlphaInfo.first.rawValue), version: 0, decode: nil, renderingIntent: CGColorRenderingIntent.defaultIntent)
var sourceBuffer = vImage_Buffer()
defer {
free(sourceBuffer.data)
}
var error = vImageBuffer_InitWithCGImage(&sourceBuffer, &format, nil, cgImage, numericCast(kvImageNoFlags))
guard error == kvImageNoError else { return nil }
// create a destination buffer
let scale = self.scale
let destWidth = Int(size.width)
let destHeight = Int(size.height)
let bytesPerPixel = self.cgImage!.bitsPerPixel/8
let destBytesPerRow = destWidth * bytesPerPixel
let destData = UnsafeMutablePointer<UInt8>.allocate(capacity: destHeight * destBytesPerRow)
defer {
destData.deallocate()
}
var destBuffer = vImage_Buffer(data: destData, height: vImagePixelCount(destHeight), width: vImagePixelCount(destWidth), rowBytes: destBytesPerRow)
// scale the image
error = vImageScale_ARGB8888(&sourceBuffer, &destBuffer, nil, numericCast(kvImageHighQualityResampling))
guard error == kvImageNoError else { return nil }
// create a CGImage from vImage_Buffer
var destCGImage = vImageCreateCGImageFromBuffer(&destBuffer, &format, nil, nil, numericCast(kvImageNoFlags), &error)?.takeRetainedValue()
guard error == kvImageNoError else { return nil }
// create a UIImage
let resizedImage = destCGImage.flatMap { UIImage(cgImage: $0, scale: 0.0, orientation: self.imageOrientation) }
destCGImage = nil
return resizedImage
}
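Not shown in the question, but worth noting: a 1920x1080 RGBA image is roughly 8 MB, and vImageBuffer_InitWithCGImage allocates another full-size buffer per call, so resizing a whole array in a tight loop piles these up faster than they are reclaimed. One common mitigation is to drain temporaries every iteration; this is only a sketch of that idea, assuming the same arrNumImages and resize extension as above:

func resizeAllImages() {
    for i in 0..<arrNumImages.count {
        // Keep peak memory at one image's worth of buffers, not the whole array's.
        autoreleasepool {
            let image = arrNumImages[i]
            let resized = image.resizeImageUsingVImage(size: CGSize(width: 1920, height: 1080)) ?? UIImage()
            // ... store `resized` or write it to disk here ...
        }
    }
}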

Converting PDF to Image - Swift iOS

I am trying to convert a PDF file and all its pages to PNG images.
I have put together the code below following the example in this thread:
How to convert PDF to PNG efficiently?
When I run the code, it crashes on the PDF file source (sourceURL). There is definitely a file there, and when I print sourceURL it prints the URL to the file.
The crash says it found nil. My understanding is that means it could not find the file, even though I can physically see and open the file and also print its URL.
Can someone point out what I'm doing wrong?
Code:
func convertPDFtoPNG() {
let sourceURL = pptURL
print("pptURL:", pptURL!)
let destinationURL = pngURL
let urls = try? convertPDF(at: sourceURL!, to: destinationURL!, fileType: .png, dpi: 200)
}
func convertPDF(at sourceURL: URL, to destinationURL: URL, fileType: ImageFileType, dpi: CGFloat = 200) throws -> [URL] {
let pdfDocument: CGPDFDocument! = CGPDFDocument(sourceURL as CFURL)! //Thread 1: Fatal error: Unexpectedly found nil while unwrapping an Optional value
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGImageAlphaInfo.noneSkipLast.rawValue
var urls = [URL](repeating: URL(fileURLWithPath : "/"), count: pdfDocument.numberOfPages)
DispatchQueue.concurrentPerform(iterations: pdfDocument.numberOfPages) { i in
let pdfPage = pdfDocument.page(at: i + 1)!
let mediaBoxRect = pdfPage.getBoxRect(.mediaBox)
let scale = dpi / 72.0
let width = Int(mediaBoxRect.width * scale)
let height = Int(mediaBoxRect.height * scale)
let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: 8, bytesPerRow: 0, space: colorSpace, bitmapInfo: bitmapInfo)!
context.interpolationQuality = .high
context.fill(CGRect(x: 0, y: 0, width: width, height: height))
context.scaleBy(x: scale, y: scale)
context.drawPDFPage(pdfPage)
let image = context.makeImage()!
let imageName = sourceURL.deletingPathExtension().lastPathComponent
let imageURL = destinationURL.appendingPathComponent("\(imageName)-Page\(i+1).\(fileType.fileExtention)")
let imageDestination = CGImageDestinationCreateWithURL(imageURL as CFURL, fileType.uti, 1, nil)!
CGImageDestinationAddImage(imageDestination, image, nil)
CGImageDestinationFinalize(imageDestination)
urls[i] = imageURL
}
return urls
}
import Foundation
import Photos
// 1: Currently used mainly to convert a PDF into images
// 2: Saves images into a custom photo album
struct HBPhotosAlbumHelperUtil {
static let shared = HBPhotosAlbumHelperUtil()
// Converts the PDF at the given URL into an image
// pageNumber: the PDF page to render; defaults to the first page
func drawToImagePDFFromURL(pdfurl url: String?, pageNumber index: Int = 1, scaleX scalex: CGFloat = 1.0, scaleY scaley: CGFloat = -1.0) -> UIImage? {
guard let pdfUrl = url, pdfUrl.count > 0, let formatterUrl = pdfUrl.urlValue else {
return nil
}
guard let document = CGPDFDocument(formatterUrl as CFURL) else {
return nil
}
guard let page = document.page(at: index) else {
return nil
}
let pageRect = page.getBoxRect(.mediaBox)
if #available(iOS 10.0, *) {
let renderGraph = UIGraphicsImageRenderer(size: pageRect.size)
let drawImage = renderGraph.image { context in
UIColor.white.set()
context.fill(pageRect)
context.cgContext.translateBy(x: 0.0, y: pageRect.size.height)
context.cgContext.scaleBy(x: scalex, y: scaley)
context.cgContext.drawPDFPage(page)
}
return drawImage
} else {
UIGraphicsBeginImageContextWithOptions(pageRect.size, false, 1.0)
let context = UIGraphicsGetCurrentContext()
context?.setFillColor(UIColor.white.cgColor)
context?.fill(pageRect)
context?.translateBy(x: 0.0, y: pageRect.size.height)
context?.scaleBy(x: scalex, y: scaley)
context?.drawPDFPage(page)
let pdfImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return pdfImage
}
}
}
// Represents the result of saving an image to a custom album or the system album
enum HBPhotosAlbumUtilResult {
case success, error, denied
}
extension HBPhotosAlbumHelperUtil {
// Requests photo library authorization from the system
// Returning true means authorization has been granted
static var photoAlbumAuthorized: Bool {
return PHPhotoLibrary.authorizationStatus() == .authorized || PHPhotoLibrary.authorizationStatus() == .notDetermined
}
// Saves an image into a custom album
func saveImageToCustomAlbum(saveImage markImage: UIImage, customAlbumName albumName: String = "丰巢管家电子发票", completion: ((_ result: HBPhotosAlbumUtilResult) -> Void)?) {
guard HBPhotosAlbumHelperUtil.photoAlbumAuthorized else {
completion?(.denied)
return
}
var assetAlbum: PHAssetCollection?
// If the album name is empty, the image is saved to the system album by default
if albumName.isEmpty {
let assetCollection = PHAssetCollection.fetchAssetCollections(with: .smartAlbum, subtype: .smartAlbumUserLibrary,
options: nil)
assetAlbum = assetCollection.firstObject
} else {
// Check whether the specified album already exists
let assetList = PHAssetCollection.fetchAssetCollections(with: .album, subtype: .any, options: nil)
assetList.enumerateObjects { albumOption, _, stop in
let assetCollection = albumOption
if albumName == assetCollection.localizedTitle {
assetAlbum = assetCollection
stop.initialize(to: true)
}
}
// Create the custom album if it does not exist
if assetAlbum == nil {
PHPhotoLibrary.shared().performChanges({
PHAssetCollectionChangeRequest.creationRequestForAssetCollection(withTitle: albumName)
}) { _, _ in
self.saveImageToCustomAlbum(saveImage: markImage, customAlbumName: albumName, completion: completion)
}
}
}
// Save the image
PHPhotoLibrary.shared().performChanges({
let result = PHAssetChangeRequest.creationRequestForAsset(from: markImage)
if !albumName.isEmpty {
if let assetPlaceHolder = result.placeholderForCreatedAsset,
let lastAssetAlbum = assetAlbum,
let albumChangeRequset = PHAssetCollectionChangeRequest(for:
lastAssetAlbum) {
albumChangeRequset.addAssets([assetPlaceHolder] as NSArray)
}
}
}) { isSuccess, _ in
guard isSuccess else {
completion?(.error)
return
}
completion?(.success)
}
}
}
extension String {
/// URL legalization
public var urlValue: URL? {
if let url = URL(string: self) {
return url
}
var set = CharacterSet()
set.formUnion(.urlHostAllowed)
set.formUnion(.urlPathAllowed)
set.formUnion(.urlQueryAllowed)
set.formUnion(.urlFragmentAllowed)
return self.addingPercentEncoding(withAllowedCharacters: set).flatMap { URL(string: $0) }
}
}
You can use the API like this:
// Use this way to achieve pdf to image
HBPhotosAlbumHelperUtil.shared.drawToImagePDFFromURL(pdfurl: "link to pdf file")
// In this way, you can save pictures to the system custom album.
HBPhotosAlbumHelperUtil.shared.saveImageToCustomAlbum(saveImage: UIImage()) { (result) in
}
Make sure that your pptURL is a file URL.
URL(string: "path/to/pdf") and URL(fileURLWithPath: "path/to/pdf") are different things, and you must use the latter when creating your URL.
The output should start with the "file:///" prefix, e.g.
file:///Users/dev/Library/Developer/CoreSimulator/Devices/4FF18699-D82F-4308-88D6-44E3C11C955A/data/Containers/Bundle/Application/8F230041-AC15-45D9-863F-5778B565B12F/myApp.app/example.pdf
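A minimal sketch of the difference (the path here is hypothetical):

let path = "/Users/dev/Documents/example.pdf"

// URL(string:) parses the string as-is; without a scheme there is no "file://",
// and CGPDFDocument(...)! then fails with "Unexpectedly found nil".
let plainURL = URL(string: path)
print(plainURL?.absoluteString ?? "nil")   // "/Users/dev/Documents/example.pdf"

// URL(fileURLWithPath:) builds a proper file URL that CGPDFDocument can open.
let fileURL = URL(fileURLWithPath: path)
print(fileURL.absoluteString)              // "file:///Users/dev/Documents/example.pdf"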

Swift how to display textStorage on watch

I have a textView with images, different text styles, etc. I want to display it on the watch. Is there any way to do it?
This is not code for displaying an NSAttributedString on Apple Watch, but I use it to send an NSAttributedString with images in a form that is easier to display.
The code may not be high quality (I am almost sure it isn't, especially the image scaling).
func sendText(stext: NSMutableAttributedString) {
dispatch_async(dispatch_get_main_queue()) {
let range:NSRange = NSMakeRange(0, stext.length)
stext.enumerateAttributesInRange(range, options: NSAttributedStringEnumerationOptions.init(rawValue: 0), usingBlock: { (dic: [String:AnyObject]?, rang:NSRange!, stop: UnsafeMutablePointer<ObjCBool>) -> Void in
self.attachement = dic as NSDictionary!
self.attachement.enumerateKeysAndObjectsUsingBlock({ (key:AnyObject?, obj:AnyObject?, stop:UnsafeMutablePointer<ObjCBool>) -> Void in
let stri = key as! NSString!
if stri == "NSAttachment"{
let att:NSTextAttachment = obj as! NSTextAttachment
if att.image == nil{
print("no image")
}else{
var im:UIImage = att.image!
print("image exists")
if im.size.height > 340 && im.size.width > 272 {
let heightScale: CGFloat = self.textView.frame.size.height / im.size.height
let widthScale: CGFloat = self.textView.frame.size.width / im.size.width
if heightScale < widthScale {
let height = im.size.height * heightScale * 0.7
let width = im.size.width * heightScale * 0.7
UIGraphicsBeginImageContext(CGSizeMake(width, height))
im.drawInRect(CGRectMake(0, 0, width, height))
im = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
} else {
let height = im.size.height * widthScale * 0.7
let width = im.size.width * widthScale * 0.7
UIGraphicsBeginImageContext(CGSizeMake(width, height))
im.drawInRect(CGRectMake(0, 0, width, height))
im = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
}
}
self.lastRange = rang.location
self.finalForWatch.append(NSAttributedString(attributedString: stext.attributedSubstringFromRange(NSMakeRange(0, rang.location))))
self.finalForWatch.append(im)
}
}
if self.lastRange == 0 {
self.finalForWatch.append(stext)
} else {
self.finalForWatch.append(NSAttributedString(attributedString: stext.attributedSubstringFromRange(NSMakeRange(1, stext.length-1))))
}
})
})
let data: NSData = NSKeyedArchiver.archivedDataWithRootObject(self.finalForWatch)
let applicationDict = ["done" : data]
let documentDirectoryURL = try! NSFileManager.defaultManager().URLForDirectory(.DocumentDirectory, inDomain: .UserDomainMask, appropriateForURL: nil, create: true)
let fileDestinationUrl = documentDirectoryURL.URLByAppendingPathComponent("file.txt")
do{
try data.writeToURL(fileDestinationUrl, atomically: true)
} catch let error as NSError {
print("error writing to url \(fileDestinationUrl)")
print(error.localizedDescription)
}
self.session?.transferFile(fileDestinationUrl, metadata: nil)
}
}
I save everything into a file and use the transferFile method, because applicationContext, userInfo and messages always fail with "payload is too large" errors.
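For completeness, the receiving side on the watch would look roughly like this (a sketch only, written in current Swift rather than the Swift 2 of the code above; it assumes the same NSKeyedArchiver payload of attributed-string and image fragments, and uses WCSessionDelegate's file-transfer callback):

import WatchConnectivity

class SessionHandler: NSObject, WCSessionDelegate {
    func session(_ session: WCSession, didReceive file: WCSessionFile) {
        // The transferred file is only guaranteed to exist at file.fileURL until
        // this method returns, so read or move it immediately.
        guard let data = try? Data(contentsOf: file.fileURL),
            let fragments = NSKeyedUnarchiver.unarchiveObject(with: data) as? [Any] else {
            return
        }
        // `fragments` mixes NSAttributedString and UIImage values in document order;
        // map them onto WKInterfaceLabel / WKInterfaceImage rows for display.
        print("received \(fragments.count) fragments")
    }

    func session(_ session: WCSession, activationDidCompleteWith activationState: WCSessionActivationState, error: Error?) {}
}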

I want to release the CVPixelBufferRef in Swift

I want to create a video from images.
I used the code from the link below as a reference.
Link: CVPixelBufferPool Error (kCVReturnInvalidArgument/-6661)
func writeAnimationToMovie(path: String, size: CGSize, animation: Animation) -> Bool {
var error: NSError?
let writer = AVAssetWriter(URL: NSURL(fileURLWithPath: path), fileType: AVFileTypeQuickTimeMovie, error: &error)
let videoSettings = [AVVideoCodecKey: AVVideoCodecH264, AVVideoWidthKey: size.width, AVVideoHeightKey: size.height]
let input = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: input, sourcePixelBufferAttributes: nil)
input.expectsMediaDataInRealTime = true
writer.addInput(input)
writer.startWriting()
writer.startSessionAtSourceTime(kCMTimeZero)
var buffer: CVPixelBufferRef
var frameCount = 0
for frame in animation.frames {
let rect = CGRectMake(0, 0, size.width, size.height)
let rectPtr = UnsafeMutablePointer<CGRect>.alloc(1)
rectPtr.memory = rect
buffer = pixelBufferFromCGImage(frame.image.CGImageForProposedRect(rectPtr, context: nil, hints: nil).takeUnretainedValue(), size)
var appendOk = false
var j = 0
while (!appendOk && j < 30) {
if pixelBufferAdaptor.assetWriterInput.readyForMoreMediaData {
let frameTime = CMTimeMake(Int64(frameCount), 10)
appendOk = pixelBufferAdaptor.appendPixelBuffer(buffer, withPresentationTime: frameTime)
// appendOk will always be false
NSThread.sleepForTimeInterval(0.05)
} else {
NSThread.sleepForTimeInterval(0.1)
}
j++
}
if (!appendOk) {
println("Doh, frame \(frame) at offset \(frameCount) failed to append")
}
}
input.markAsFinished()
writer.finishWritingWithCompletionHandler({
if writer.status == AVAssetWriterStatus.Failed {
println("oh noes, an error: \(writer.error.description)")
} else {
println("hrmmm, there should be a movie?")
}
})
return true;
}
func pixelBufferFromCGImage(image: CGImageRef, size: CGSize) -> CVPixelBufferRef {
let options = [
kCVPixelBufferCGImageCompatibilityKey: true,
kCVPixelBufferCGBitmapContextCompatibilityKey: true]
var pixBufferPointer = UnsafeMutablePointer<Unmanaged<CVPixelBuffer>?>.alloc(1)
let status = CVPixelBufferCreate(
nil,
UInt(size.width), UInt(size.height),
OSType(kCVPixelFormatType_32ARGB),
options,
pixBufferPointer)
CVPixelBufferLockBaseAddress(pixBufferPointer.memory?.takeUnretainedValue(), 0)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapinfo = CGBitmapInfo.fromRaw(CGImageAlphaInfo.NoneSkipFirst.toRaw())
var pixBufferData:UnsafeMutablePointer<(Void)> = CVPixelBufferGetBaseAddress(pixBufferPointer.memory?.takeUnretainedValue())
let context = CGBitmapContextCreate(
pixBufferData,
UInt(size.width), UInt(size.height),
8, UInt(4 * size.width),
rgbColorSpace, bitmapinfo!)
CGContextConcatCTM(context, CGAffineTransformMakeRotation(0))
CGContextDrawImage(
context,
CGRectMake(0, 0, CGFloat(CGImageGetWidth(image)), CGFloat(CGImageGetHeight(image))),
image)
CVPixelBufferUnlockBaseAddress(pixBufferPointer.memory?.takeUnretainedValue(), 0)
return pixBufferPointer.memory!.takeUnretainedValue()
}
Even after the movie has been written, memory remains allocated (as shown in the screenshot that was attached here).
I believe what is left over is the pixel buffers.
In Objective-C I had CVPixelBufferRelease(buffer) to release a pixel buffer, but I can no longer use this in Swift. How do I release the pixel buffers?
If anyone could help, I'd really appreciate it.
When using CVPixelBufferCreate, the UnsafeMutablePointer has to be destroyed after you retrieve its memory.
When I create a CVPixelBuffer, I do it like this:
func allocPixelBuffer() -> CVPixelBuffer {
let pixelBufferAttributes : CFDictionary = [...]
let pixelBufferOut = UnsafeMutablePointer<CVPixelBuffer?>.alloc(1)
_ = CVPixelBufferCreate(kCFAllocatorDefault,
Int(Width),
Int(Height),
OSType(kCVPixelFormatType_32ARGB),
pixelBufferAttributes,
pixelBufferOut)
let pixelBuffer = pixelBufferOut.memory!
pixelBufferOut.destroy()
return pixelBuffer
}
I had the same problem, but I solved it.
Use autoreleasepool:
var boolWhile = true
while (boolWhile) {
autoreleasepool({() -> () in
if(input.readyForMoreMediaData) {
presentTime = CMTimeMake(Int64(ii), fps)
if(ii >= arrayImages.count){
...
Try changing
return pixBufferPointer.memory!.takeUnretainedValue()
to
return pixBufferPointer.memory!.takeRetainedValue()
to avoid leaking CVPixelBuffers
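In current Swift this bookkeeping disappears entirely: CVPixelBuffer participates in ARC like any other CF object, so there is no CVPixelBufferRelease and nothing to destroy by hand. A minimal sketch of the same allocation under that model (the dimensions and attributes are placeholders):

import CoreVideo

func makePixelBuffer(width: Int, height: Int) -> CVPixelBuffer? {
    let attributes: [CFString: Any] = [
        kCVPixelBufferCGImageCompatibilityKey: true,
        kCVPixelBufferCGBitmapContextCompatibilityKey: true
    ]
    var pixelBuffer: CVPixelBuffer?
    // CVPixelBufferCreate hands back a +1 object, but Swift takes ownership:
    // the buffer is released automatically when the last reference goes away.
    let status = CVPixelBufferCreate(kCFAllocatorDefault,
                                     width,
                                     height,
                                     kCVPixelFormatType_32ARGB,
                                     attributes as CFDictionary,
                                     &pixelBuffer)
    guard status == kCVReturnSuccess else { return nil }
    return pixelBuffer
}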

Pixel Buffer will not append to AVAssetWriterInput on device

I'm trying to take an image and convert it into a video file in Swift. I was able to get the code to work in the iOS simulator, but it won't work on an actual device. It errors out when I try to append the pixel buffer to the AVAssetWriterInput. I was wondering if anyone here has run into the same issue previously. Here's the code:
func createVideoFromImage(img: CGImageRef) -> Void {
var error: NSError?
let frameSize = CGSizeMake(CGFloat(CGImageGetWidth(img)), CGFloat(CGImageGetHeight(img)))
let fileName = "\(uniqueString()).m4v"
let assetWriter = AVAssetWriter(URL: fileUrl(fileName), fileType: AVFileTypeAppleM4V, error: &error)
let outputSettings = [
AVVideoCodecKey: AVVideoCodecH264,
AVVideoWidthKey: frameSize.width,
AVVideoHeightKey: frameSize.height
]
let assetWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: assetWriterInput, sourcePixelBufferAttributes: nil)
if assetWriter.canAddInput(assetWriterInput) {
assetWriter.addInput(assetWriterInput)
}
assetWriter.startWriting()
assetWriter.startSessionAtSourceTime(kCMTimeZero)
let buffer:CVPixelBufferRef = pixelBufferFromCGImage(img, frameSize: frameSize)
if pixelBufferAdaptor.assetWriterInput.readyForMoreMediaData {
let frameTime = CMTimeMakeWithSeconds(0, 30)
let pixelBufferAppend = pixelBufferAdaptor.appendPixelBuffer(buffer, withPresentationTime: frameTime)
if pixelBufferAppend {
assetWriterInput.markAsFinished()
assetWriter.finishWritingWithCompletionHandler { () -> Void in
switch assetWriter.status {
case AVAssetWriterStatus.Failed:
println("Error: \(assetWriter.error)")
default:
let path = self.fileUrl(fileName).path!
let content = NSFileManager.defaultManager().contentsAtPath(path)
println("Video: \(path) \(content?.length)")
}
}
} else {
println("failed to append pixel buffer")
}
}
}
func pixelBufferFromCGImage (img: CGImageRef, frameSize: CGSize) -> CVPixelBufferRef {
let options = [
"kCVPixelBufferCGImageCompatibilityKey": true,
"kCVPixelBufferCGBitmapContextCompatibilityKey": true
]
var pixelBufferPointer = UnsafeMutablePointer<Unmanaged<CVPixelBuffer>?>.alloc(1)
let buffered:CVReturn = CVPixelBufferCreate(kCFAllocatorDefault, UInt(frameSize.width), UInt(frameSize.height), OSType(kCVPixelFormatType_32ARGB), options, pixelBufferPointer)
let lockBaseAddress = CVPixelBufferLockBaseAddress(pixelBufferPointer.memory?.takeUnretainedValue(), 0)
var pixelData:UnsafeMutablePointer<(Void)> = CVPixelBufferGetBaseAddress(pixelBufferPointer.memory?.takeUnretainedValue())
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.NoneSkipFirst.rawValue)
let space:CGColorSpace = CGColorSpaceCreateDeviceRGB()
var context:CGContextRef = CGBitmapContextCreate(pixelData, UInt(frameSize.width), UInt(frameSize.height), 8, CVPixelBufferGetBytesPerRow(pixelBufferPointer.memory?.takeUnretainedValue()), space, bitmapInfo)
CGContextDrawImage(context, CGRectMake(0, 0, frameSize.width, frameSize.height), img)
CVPixelBufferUnlockBaseAddress(pixelBufferPointer.memory?.takeUnretainedValue(), 0)
return pixelBufferPointer.memory!.takeUnretainedValue()
}
pixelBufferAppend returns false! Any help would be greatly appreciated. I have been scratching my head on this all night!
I was finally able to figure out what was going on. The device could only support encoding frames up to 1920x1080px; I reduced the image size and it worked. Change frameSize to be a variable and use the following code:
if frameSize.width > maxImageSize.width || frameSize.height > maxImageSize.height {
let scale = min((maxImageSize.width/frameSize.width), (maxImageSize.height/frameSize.height))
frameSize = CGSizeMake((scale * frameSize.width), (scale * frameSize.height))
}
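Put together, the clamp might look like this (a sketch in the same Swift version as the question's code; the 1920x1080 maxImageSize is an assumption based on the limit found above, and other devices may differ):

// Assumed hard limit of the device's H.264 encoder.
let maxImageSize = CGSizeMake(1920, 1080)

func clampedFrameSize(img: CGImageRef) -> CGSize {
    var frameSize = CGSizeMake(CGFloat(CGImageGetWidth(img)), CGFloat(CGImageGetHeight(img)))
    if frameSize.width > maxImageSize.width || frameSize.height > maxImageSize.height {
        // Scale uniformly so the longer edge just fits within the encoder's limit.
        let scale = min(maxImageSize.width / frameSize.width, maxImageSize.height / frameSize.height)
        frameSize = CGSizeMake(scale * frameSize.width, scale * frameSize.height)
    }
    return frameSize
}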
