I have a temporary variable tmpPixelBuffer holding the pixel buffer data, and it is not nil. When metadata objects are detected, I want to create an image from that buffer so I can crop the metadata images out of it.
The image is always nil. What am I doing wrong?
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    tmpPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]!, fromConnection connection: AVCaptureConnection!) {
    let image = CIImage(CVPixelBuffer: tmpPixelBuffer)
    let context = CIContext()
    let cgiImage = context.createCGImage(image, fromRect: image.extent())
    let capturedImage = UIImage(CGImage: cgiImage)
    ...
}
I also tried it like this:
func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]!, fromConnection connection: AVCaptureConnection!) {
    let image = CIImage(CVPixelBuffer: tmpPixelBuffer)
    let context = CIContext(options: nil)
    let cgiImage = context.createCGImage(image, fromRect: CGRect(x: 0, y: 0, width: Int(CVPixelBufferGetWidth(tmpPixelBuffer)), height: Int(CVPixelBufferGetHeight(tmpPixelBuffer))))
    ...
}
But in that case the resulting UIImage is not readable.
I converted Andrea's answer into Swift 3.1:
static func DegreesToRadians(_ degrees: CGFloat) -> CGFloat { return CGFloat((degrees * .pi) / 180) }

static func CreateCGImageFromCVPixelBuffer(pixelBuffer: CVPixelBuffer) -> CGImage? {
    let bitmapInfo: CGBitmapInfo
    let sourcePixelFormat = CVPixelBufferGetPixelFormatType(pixelBuffer)
    if kCVPixelFormatType_32ARGB == sourcePixelFormat {
        bitmapInfo = [.byteOrder32Big, CGBitmapInfo(rawValue: CGImageAlphaInfo.noneSkipFirst.rawValue)]
    } else if kCVPixelFormatType_32BGRA == sourcePixelFormat {
        bitmapInfo = [.byteOrder32Little, CGBitmapInfo(rawValue: CGImageAlphaInfo.noneSkipFirst.rawValue)]
    } else {
        return nil // only uncompressed pixel formats
    }

    let sourceRowBytes = CVPixelBufferGetBytesPerRow(pixelBuffer)
    let width = CVPixelBufferGetWidth(pixelBuffer)
    let height = CVPixelBufferGetHeight(pixelBuffer)
    print("Buffer image size \(width) height \(height)")

    let val: CVReturn = CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
    if val == kCVReturnSuccess,
       let sourceBaseAddr = CVPixelBufferGetBaseAddress(pixelBuffer),
       let provider = CGDataProvider(dataInfo: nil, data: sourceBaseAddr, size: sourceRowBytes * height, releaseData: { _, _, _ in })
    {
        let colorspace = CGColorSpaceCreateDeviceRGB()
        let image = CGImage(width: width, height: height, bitsPerComponent: 8, bitsPerPixel: 32, bytesPerRow: sourceRowBytes,
                            space: colorspace, bitmapInfo: bitmapInfo, provider: provider, decode: nil,
                            shouldInterpolate: true, intent: CGColorRenderingIntent.defaultIntent)
        CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))
        return image
    } else {
        return nil
    }
}
// utility used by newSquareOverlayedImageForFeatures
static func CreateCGBitmapContextForSize(_ size: CGSize) -> CGContext? {
    let bitmapBytesPerRow = Int(size.width * 4)
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    guard let context = CGContext(data: nil, width: Int(size.width), height: Int(size.height), bitsPerComponent: 8,
                                  bytesPerRow: bitmapBytesPerRow, space: colorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)
    else { return nil }
    context.setAllowsAntialiasing(false)
    return context
}
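For reference, a minimal call site for the converted function might look like this sketch (Converter stands in for whatever type hosts the static function; it is not part of the original answer):

// Hypothetical usage: tmpPixelBuffer holds the latest captured frame.
if let buffer = tmpPixelBuffer,
   let cgImage = Converter.CreateCGImageFromCVPixelBuffer(pixelBuffer: buffer) {
    let capturedImage = UIImage(cgImage: cgImage)
    // crop the detected metadata rects out of capturedImage here
}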
I don't know Swift well, but I think you can easily convert this C function, which was taken from Apple and works perfectly. The problem with using CIImage is that creating a context is quite an expensive task, so if you want to go that way it is better to build the context before everything else and keep a strong reference to it.
Furthermore, I don't remember whether the default context is built for the GPU or the CPU, and there are other subtle differences between the two. For instance, if you create the image on a background thread with a GPU context, it won't work.
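In Swift, that advice might look something like this sketch (the FrameProcessor type and its property names are mine, just to illustrate holding one context):

import CoreImage
import CoreVideo

final class FrameProcessor {
    // Built once and reused; creating a CIContext per frame is expensive.
    // kCIContextUseSoftwareRenderer forces a CPU-based context, which is
    // the safer choice when rendering off the main thread.
    private let ciContext = CIContext(options: [kCIContextUseSoftwareRenderer: true])

    func cgImage(from pixelBuffer: CVPixelBuffer) -> CGImage? {
        let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
        return ciContext.createCGImage(ciImage, from: ciImage.extent)
    }
}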
static CGFloat DegreesToRadians(CGFloat degrees) { return degrees * M_PI / 180; }

static void ReleaseCVPixelBuffer(void *pixel, const void *data, size_t size)
{
    CVPixelBufferRef pixelBuffer = (CVPixelBufferRef)pixel;
    CVPixelBufferUnlockBaseAddress( pixelBuffer, 0 );
    CVPixelBufferRelease( pixelBuffer );
}

// create a CGImage with provided pixel buffer, pixel buffer must be uncompressed kCVPixelFormatType_32ARGB or kCVPixelFormatType_32BGRA
static OSStatus CreateCGImageFromCVPixelBuffer(CVPixelBufferRef pixelBuffer, CGImageRef *imageOut)
{
    OSStatus err = noErr;
    OSType sourcePixelFormat;
    size_t width, height, sourceRowBytes;
    void *sourceBaseAddr = NULL;
    CGBitmapInfo bitmapInfo;
    CGColorSpaceRef colorspace = NULL;
    CGDataProviderRef provider = NULL;
    CGImageRef image = NULL;

    sourcePixelFormat = CVPixelBufferGetPixelFormatType( pixelBuffer );
    if ( kCVPixelFormatType_32ARGB == sourcePixelFormat )
        bitmapInfo = kCGBitmapByteOrder32Big | kCGImageAlphaNoneSkipFirst;
    else if ( kCVPixelFormatType_32BGRA == sourcePixelFormat )
        bitmapInfo = kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipFirst;
    else
        return -95014; // only uncompressed pixel formats

    sourceRowBytes = CVPixelBufferGetBytesPerRow( pixelBuffer );
    width = CVPixelBufferGetWidth( pixelBuffer );
    height = CVPixelBufferGetHeight( pixelBuffer );
    DLog(@"Buffer image size %zu x %zu", width, height);

    CVReturn val = CVPixelBufferLockBaseAddress( pixelBuffer, 0 );
    if (val == kCVReturnSuccess) {
        sourceBaseAddr = CVPixelBufferGetBaseAddress( pixelBuffer );
        colorspace = CGColorSpaceCreateDeviceRGB();
        // the data provider retains the buffer; ReleaseCVPixelBuffer unlocks and releases it
        CVPixelBufferRetain( pixelBuffer );
        provider = CGDataProviderCreateWithData( (void *)pixelBuffer, sourceBaseAddr, sourceRowBytes * height, ReleaseCVPixelBuffer );
        image = CGImageCreate(width, height, 8, 32, sourceRowBytes, colorspace, bitmapInfo, provider, NULL, true, kCGRenderingIntentDefault);
    }

bail:
    if ( err && image ) {
        CGImageRelease( image );
        image = NULL;
    }
    if ( provider ) CGDataProviderRelease( provider );
    if ( colorspace ) CGColorSpaceRelease( colorspace );
    *imageOut = image;
    return err;
}

// utility used by newSquareOverlayedImageForFeatures
static CGContextRef CreateCGBitmapContextForSize(CGSize size)
{
    CGContextRef context = NULL;
    CGColorSpaceRef colorSpace;
    int bitmapBytesPerRow;

    bitmapBytesPerRow = (size.width * 4);
    colorSpace = CGColorSpaceCreateDeviceRGB();
    context = CGBitmapContextCreate(NULL,
                                    size.width,
                                    size.height,
                                    8, // bits per component
                                    bitmapBytesPerRow,
                                    colorSpace,
                                    kCGImageAlphaPremultipliedLast);
    CGContextSetAllowsAntialiasing(context, NO);
    CGColorSpaceRelease( colorSpace );
    return context;
}
guard let cvImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
let attachments = CMCopyDictionaryOfAttachments(kCFAllocatorDefault, sampleBuffer, kCMAttachmentMode_ShouldPropagate)
let ciImage = CIImage(cvImageBuffer: cvImageBuffer, options: attachments as? [String: Any])
let image = UIImage(ciImage: ciImage)
I'm handling ReplayKit2 in iOS. For various reasons I need to rotate the CMSampleBuffer from portrait to landscape, but the result is not correct. What am I missing?
This is the original sample buffer:
This is the actual output buffer:
width and height are the dimensions of the sampleBuffer.
func rotation(sampleBuffer: CMSampleBuffer, width: Int, height: Int) -> CMSampleBuffer {
    // create pixel buffer from the delegate method sample buffer
    let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
    CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))

    // create CI image from the buffer
    let image = CIImage(cvImageBuffer: pixelBuffer)

    let extent = CGRect(x: 0, y: 0, width: width, height: height)
    var tx = CGAffineTransform(translationX: extent.midX, y: extent.midY)
    tx = tx.rotated(by: CGFloat(Double.pi / 2))
    tx = tx.translatedBy(x: -extent.midX, y: -extent.midY)
    var transformImage = CIFilter(
        name: "CIAffineTransform",
        withInputParameters: [
            kCIInputImageKey: image,
            kCIInputTransformKey: NSValue.init(cgAffineTransform: tx)])!.outputImage!

    // create empty pixel buffer
    var newPixelBuffer: CVPixelBuffer? = nil
    CVPixelBufferCreate(kCFAllocatorDefault,
                        width,
                        height,
                        kCVPixelFormatType_32BGRA,
                        nil,
                        &newPixelBuffer)

    // render to the new pixel buffer; context is a global CIContext
    // variable, since creating a new one each frame is too CPU intensive
    self.ciContext.render(transformImage, to: newPixelBuffer!)

    // finally, write this to the pixel buffer adaptor
    CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags(rawValue: 0))

    var videoInfo: CMVideoFormatDescription?
    CMVideoFormatDescriptionCreateForImageBuffer(kCFAllocatorDefault, newPixelBuffer!, &videoInfo)

    var sampleTimingInfo = CMSampleTimingInfo(duration: CMSampleBufferGetDuration(sampleBuffer),
                                              presentationTimeStamp: CMSampleBufferGetPresentationTimeStamp(sampleBuffer),
                                              decodeTimeStamp: CMSampleBufferGetDecodeTimeStamp(sampleBuffer))

    var newSampleBuffer: CMSampleBuffer?
    CMSampleBufferCreateForImageBuffer(kCFAllocatorDefault, newPixelBuffer!, true, nil, nil, videoInfo!, &sampleTimingInfo, &newSampleBuffer)
    return newSampleBuffer!
}
Just found a very sweet method in iOS 11!

/* Returns a new image representing the original image transformed for the given CGImagePropertyOrientation */
@available(iOS 11.0, *)
open func oriented(_ orientation: CGImagePropertyOrientation) -> CIImage
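As a sketch of how it might slot into the pipeline above (assuming the same pixelBuffer, a reusable ciContext, and a destination newPixelBuffer allocated with width and height swapped), it could replace the CIFilter transform entirely:

if #available(iOS 11.0, *) {
    // .right rotates the image 90 degrees clockwise
    let rotated = CIImage(cvPixelBuffer: pixelBuffer).oriented(.right)
    ciContext.render(rotated, to: newPixelBuffer!)
}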
Maybe it will be useful:
func rotate(_ sampleBuffer: CMSampleBuffer) -> CVPixelBuffer? {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return nil
    }

    // note the swapped dimensions: the destination buffer is height x width
    // because the image is rotated 90 degrees
    var newPixelBuffer: CVPixelBuffer?
    let error = CVPixelBufferCreate(kCFAllocatorDefault,
                                    CVPixelBufferGetHeight(pixelBuffer),
                                    CVPixelBufferGetWidth(pixelBuffer),
                                    kCVPixelFormatType_420YpCbCr8BiPlanarFullRange,
                                    nil,
                                    &newPixelBuffer)
    guard error == kCVReturnSuccess else {
        return nil
    }

    let ciImage = CIImage(cvPixelBuffer: pixelBuffer).oriented(.right)
    let context = CIContext(options: nil)
    context.render(ciImage, to: newPixelBuffer!)
    return newPixelBuffer
}
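Since this variant returns a CVPixelBuffer rather than a CMSampleBuffer, the caller still has to rewrap it. A minimal sketch using the same CoreMedia calls as the earlier answer (error handling elided; rotatedBuffer is the value returned by rotate):

// rewrap the rotated pixel buffer in a CMSampleBuffer with the original timing
var videoInfo: CMVideoFormatDescription?
CMVideoFormatDescriptionCreateForImageBuffer(kCFAllocatorDefault, rotatedBuffer, &videoInfo)
var timing = CMSampleTimingInfo(duration: CMSampleBufferGetDuration(sampleBuffer),
                                presentationTimeStamp: CMSampleBufferGetPresentationTimeStamp(sampleBuffer),
                                decodeTimeStamp: CMSampleBufferGetDecodeTimeStamp(sampleBuffer))
var newSampleBuffer: CMSampleBuffer?
CMSampleBufferCreateForImageBuffer(kCFAllocatorDefault, rotatedBuffer, true, nil, nil, videoInfo!, &timing, &newSampleBuffer)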
I'm trying to convert a CMSampleBuffer to a UIImage with Swift 3.0. A popular solution is to write an extension for the CMSampleBuffer class and add a getter that converts the buffer to an image. This is what it looks like:
import Foundation
import AVFoundation
extension CMSampleBuffer {
    @available(iOS 9.0, *)
    var uiImage: UIImage? {
        guard let imageBuffer = CMSampleBufferGetImageBuffer(self) else { return nil }
        let ciImage = CIImage(cvImageBuffer: imageBuffer)
        let image = UIImage(ciImage: ciImage)
        return image
    }
}
It works fine, but it takes up a lot of memory: around 40% of the total app memory. Is there a more memory-efficient solution?
EDIT:
I have changed my code and it looks like this:
var uiImage: UIImage? {
    guard let imageBuffer = CMSampleBufferGetImageBuffer(self) else { return nil }
    CVPixelBufferLockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
    let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer)
    let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
    let width = CVPixelBufferGetWidth(imageBuffer)
    let height = CVPixelBufferGetHeight(imageBuffer)
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.noneSkipFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)

    var image: UIImage?
    autoreleasepool(invoking: { () -> () in
        guard let context = CGContext(data: baseAddress,
                                      width: width,
                                      height: height,
                                      bitsPerComponent: 8,
                                      bytesPerRow: bytesPerRow,
                                      space: colorSpace,
                                      bitmapInfo: bitmapInfo.rawValue) else { return }
        guard let cgImage = context.makeImage() else { return }
        image = UIImage(cgImage: cgImage)
    })
    CVPixelBufferUnlockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
    return image
}
The memory leak has something to do with the CGContext. Is there any other way I can free/release/deallocate it besides using an autoreleasepool?
I created two applications: one for Mac and one for iPhone. The iPhone sends the video frames it captures to the Mac using the MultipeerConnectivity framework. I have managed to find code for converting a UIImage to grayscale:
func convertToGrayScale(image: UIImage) -> UIImage {
    let imageRect: CGRect = CGRectMake(0, 0, image.size.width, image.size.height)
    let colorSpace = CGColorSpaceCreateDeviceGray()
    let width = image.size.width
    let height = image.size.height
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.None.rawValue)
    let context = CGBitmapContextCreate(nil, Int(width), Int(height), 8, 0, colorSpace, bitmapInfo.rawValue)
    CGContextDrawImage(context, imageRect, image.CGImage)
    let imageRef = CGBitmapContextCreateImage(context)
    let newImage = UIImage(CGImage: imageRef!)
    return newImage
}
The code below sends the video frame to the Mac:
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
    CVPixelBufferLockBaseAddress(imageBuffer!, kCVPixelBufferLock_ReadOnly)
    let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer!)
    let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer!)
    let width = CVPixelBufferGetWidth(imageBuffer!)
    let height = CVPixelBufferGetHeight(imageBuffer!)
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedLast.rawValue)
    let context = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo.rawValue)
    let quarzImage = CGBitmapContextCreateImage(context)
    // unlock only after the context has copied the pixels out of the buffer,
    // and unlock with the same flags used to lock
    CVPixelBufferUnlockBaseAddress(imageBuffer!, kCVPixelBufferLock_ReadOnly)
    let image = UIImage(CGImage: quarzImage!)
    let grayImage = convertToGrayScale(image)
    let data: NSData = UIImagePNGRepresentation(grayImage)!
    delegate?.recievedOutput(data)
}
The delegate method just sends the data using session.sendData().
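For completeness, the delegate method might be as simple as this sketch (the method name mirrors the question; the MCSession call is real, but the surrounding types are assumed):

func recievedOutput(data: NSData) {
    // Hypothetical forwarding; session comes from the MultipeerConnectivity setup.
    do {
        // .Reliable guarantees ordered delivery at the cost of some latency
        try session.sendData(data, toPeers: session.connectedPeers, withMode: .Reliable)
    } catch {
        print("sendData failed: \(error)")
    }
}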
Now for the Mac side. When the Mac receives the NSData, I create an NSImage from the data and write a .png image file using this code:
func session(session: MCSession, didReceiveData data: NSData, fromPeer peerID: MCPeerID) {
    let image: NSImage = NSImage(data: data)!.imageRotatedByDegreess(270)
    let cgRef = image.CGImageForProposedRect(nil, context: nil, hints: nil)
    let representation = NSBitmapImageRep(CGImage: cgRef!)
    let pngData = representation.representationUsingType(NSBitmapImageFileType.NSPNGFileType, properties: [NSImageCompressionFactor: 1.0])
    pngData?.writeToFile("/Users/JunhongXu/Desktop/image/\(result.description).png", atomically: true)
    result[4]++
    self.delegate?.presentRecievedImage(image)
}
Although the image looks like the picture below, when I check the image file's properties it is in RGB format. How can I change the color space of my NSImage to grayscale instead of RGB?
I have found a simple solution to my problem. Since the image is already grayscale when it is transmitted to my Mac, I can use the code below to convert the image representation's color space to grayscale and save it as a .png file:
let newRep = representation.bitmapImageRepByConvertingToColorSpace(NSColorSpace.genericGrayColorSpace(), renderingIntent: NSColorRenderingIntent.Default)
let pngData = newRep!.representationUsingType(NSBitmapImageFileType.NSPNGFileType, properties: [NSImageCompressionFactor: 1.0])
pngData?.writeToFile("/Users/JunhongXu/Desktop/image/\(result.description).png", atomically: true)
I am displaying a video feed of CMSampleBuffers converted to UIImages inside a UIImageView. In the photo below, the background layer is an AVCaptureVideoPreviewLayer and the center is the buffer feed. My goal is to remove the blue tint.
Here is the CMSampleBuffer-to-UIImage code:
extension CMSampleBuffer {
    func imageRepresentation() -> UIImage? {
        let imageBuffer: CVImageBufferRef = CMSampleBufferGetImageBuffer(self)!
        CVPixelBufferLockBaseAddress(imageBuffer, 0)
        let address = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0)
        let bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer)
        let width = CVPixelBufferGetWidth(imageBuffer)
        let height = CVPixelBufferGetHeight(imageBuffer)
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGBitmapContextCreate(address, width, height, 8, bytesPerRow, colorSpace, CGImageAlphaInfo.NoneSkipFirst.rawValue)
        let imageRef = CGBitmapContextCreateImage(context)
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0)
        let resultImage: UIImage = UIImage(CGImage: imageRef!)
        return resultImage
    }
}
AVCaptureVideoDataOutput setup:
class MovieRecorder: NSObject {
    // vars
    private let captureVideoDataOutput = AVCaptureVideoDataOutput()

    // capture session boilerplate setup...
    captureVideoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey: Int(kCVPixelFormatType_32BGRA)]
    captureVideoDataOutput.alwaysDiscardsLateVideoFrames = true
    captureVideoDataOutput.setSampleBufferDelegate(self, queue: captureDataOutputQueue)
}
The problem was with the bitmapInfo. A 32BGRA buffer is ARGB read in 32-bit little-endian order, so the context needs .ByteOrder32Little combined with .NoneSkipFirst. This bitmap info fixed it:
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.NoneSkipFirst.rawValue | CGBitmapInfo.ByteOrder32Little.rawValue)
let context = CGBitmapContextCreate(address, width, height, 8, bytesPerRow, colorSpace, bitmapInfo.rawValue)
I'm trying to implement uncompressed photos in a little practice app of mine, but I can't get it to work for the life of me.
This is how I capture a photo:
print("Shutter!")
if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo) {
stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection) {
(imageDataSampleBuffer, error) -> Void in
if let cameraFrame = CMSampleBufferGetImageBuffer(imageDataSampleBuffer) {
CVPixelBufferLockBaseAddress(cameraFrame, 0)
let rawImageBytes = CVPixelBufferGetBaseAddress(cameraFrame)
let bytesPerRow = CVPixelBufferGetBytesPerRow(cameraFrame)
let width = CVPixelBufferGetWidth(cameraFrame)
let height = CVPixelBufferGetHeight(cameraFrame)
CVPixelBufferUnlockBaseAddress(cameraFrame, 0)
let provider = CGDataProviderCreateWithData(nil, rawImageBytes, width * height * 4, nil)
let bitsPerComponent = 8
let bitsPerPixel = 32
let colorSpaceRef = CGColorSpaceCreateDeviceRGB()
let bitmapInfo: CGBitmapInfo = .ByteOrderDefault
let renderingIntent: CGColorRenderingIntent = .RenderingIntentDefault
if let imageRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow * 4, colorSpaceRef, bitmapInfo, provider, nil, false, renderingIntent) {
print("Attempting to save image")
let tiffImage = UIImage(CGImage: imageRef)
UIImageWriteToSavedPhotosAlbum(tiffImage, nil, "image:didFinishSavingWithError:contextInfo:", nil)
} else {
print("CGImageCreate failed")
}
} else {
print("CMSampleBufferGetImageBuffer failed")
}
}
}
Executing that chunk of code prints "Shutter!" and "Attempting to save image" in Xcode's output panel, then crashes the app, and Xcode shows me a bunch of what I think is assembly code. Needless to say, that doesn't help me at all.
I have also tried displaying the UIImage in a UIImageView, which likewise crashed the app. What's going on here?