Look at the code snippet below. I don't understand the code between #ifdef TARGET_OS_IPHONE and #endif. Should I redraw a new image from the imageSource? Why?
- (void)connection:(NSURLConnection *)connection didReceiveData:(NSData *)data {
[self.imageData appendData:data];
if ((self.options & SDWebImageDownloaderProgressiveDownload) && self.expectedSize > 0 && self.completedBlock) {
// Get the total bytes downloaded
const NSInteger totalSize = self.imageData.length;
// Update the data source, we must pass ALL the data, not just the new bytes
CGImageSourceRef imageSource = CGImageSourceCreateWithData((__bridge CFDataRef)self.imageData, NULL);
// ...
if (width + height > 0 && totalSize < self.expectedSize) {
// Create the image
CGImageRef partialImageRef = CGImageSourceCreateImageAtIndex(imageSource, 0, NULL);
// here is my question.
#ifdef TARGET_OS_IPHONE
// Workaround for iOS anamorphic image
if (partialImageRef) {
const size_t partialHeight = CGImageGetHeight(partialImageRef);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef bmContext = CGBitmapContextCreate(NULL, width, height, 8, width * 4, colorSpace, kCGBitmapByteOrderDefault | kCGImageAlphaPremultipliedFirst);
CGColorSpaceRelease(colorSpace);
if (bmContext) {
CGContextDrawImage(bmContext, (CGRect){.origin.x = 0.0f, .origin.y = 0.0f, .size.width = width, .size.height = partialHeight}, partialImageRef);
CGImageRelease(partialImageRef);
partialImageRef = CGBitmapContextCreateImage(bmContext);
CGContextRelease(bmContext);
}
else {
CGImageRelease(partialImageRef);
partialImageRef = nil;
}
}
#endif
// ...
}
(The method is in SDWebImageDownloaderOperation.m; the version is SDWebImage 3.7.0.)
Can anyone explain it? Thanks!
Related
I am trying to get a screenshot from a GLKView and save it to the Photo Library. Unfortunately the quality of the saved photo is very low and I cannot figure out why.
This is the link to the UIImage displayed on the screen using a UIImageView: https://www.dropbox.com/s/d2cyb099n0ndqag/IMG_0148.PNG?dl=0
This is the link to the photo saved in Photo Albums:
https://www.dropbox.com/s/8pdaytonwxxd6wk/IMG_0147.JPG?dl=0
The code is as follows:
- (UIImage*)snapshot
{
GLint backingWidth, backingHeight;
GLint framebuff;
glGetIntegerv( GL_FRAMEBUFFER_BINDING, &framebuff);
glBindFramebuffer(GL_FRAMEBUFFER, framebuff);
// Get the size of the backing CAEAGLLayer
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &backingWidth);
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &backingHeight);
NSInteger x = 0, y = 0, width = backingWidth, height = backingHeight;
NSInteger dataLength = width * height * 4;
GLubyte *data = (GLubyte*)malloc(dataLength * sizeof(GLubyte));
// Read pixel data from the framebuffer
glPixelStorei(GL_PACK_ALIGNMENT, 4);
glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);
GLenum errCode;
if ((errCode = glGetError()) != GL_NO_ERROR) {
//errStr = gluErrorString(errCode);
printf("%u: OpenGL ERROR ",errCode);
}
// gl renders "upside down" so swap top to bottom into new array.
// there's gotta be a better way, but this works.
GLubyte *buffer2 = (GLubyte *) malloc(dataLength);
for(int y = 0; y < height; y++)
{
for(int x = 0; x < width * 4; x++)
{
buffer2[((height - 1) - y) * width * 4 + x] = data[y * 4 * width + x];
}
}
// Create a CGImage with the pixel data
// If your OpenGL ES content is opaque, use kCGImageAlphaNoneSkipLast to ignore the alpha channel
// otherwise, use kCGImageAlphaPremultipliedLast
CGDataProviderRef ref = CGDataProviderCreateWithData(NULL, buffer2, dataLength, NULL);
CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
CGImageRef iref = CGImageCreate(width, height, 8, 32, width * 4, colorspace, kCGBitmapByteOrderDefault, ref, NULL, YES, kCGRenderingIntentDefault);
// Retrieve the UIImage from the current context
UIImage *image = [UIImage imageWithCGImage:iref];
UIImageView *myImageView = [[UIImageView alloc] init];
myImageView.frame = CGRectMake(0, 0, fluid.gpu.Drawing.Pong->width,fluid.gpu.Drawing.Pong->height);
myImageView.image = image;
[self.view addSubview:myImageView];
CFRelease(colorspace);
CGImageRelease(iref);
free(data);
return image;
}
-(void)SavePhoto{
UIImage *temp = [self snapshot];
UIImageWriteToSavedPhotosAlbum(temp, self, @selector(imageSavedToPhotosAlbum:didFinishSavingWithError:contextInfo:), (__bridge void*)self.context);
}
- (void)imageSavedToPhotosAlbum:(UIImage *)image didFinishSavingWithError:(NSError *)error contextInfo:(void *)contextInfo {
NSString *message;
NSString *title;
if (!error) {
title = NSLocalizedString(@"Save picture", @"");
message = NSLocalizedString(@"Your screenshot was saved to Photos Album.", @"");
} else {
title = NSLocalizedString(@"There was an error saving screenshot.", @"");
message = [error description];
}
UIAlertView *alert = [[UIAlertView alloc] initWithTitle:title
message:message
delegate:nil
cancelButtonTitle:NSLocalizedString(@"OK", @"")
otherButtonTitles:nil];
[alert show];
}
I also tried the convenience method [GLKView snapshot], but I get the same result. The problem is reproducible on iPhone 4, iPhone 3GS, and iPad 2.
I managed to get better quality by converting my image to PNG format and saving that instead:
NSData* imdata = UIImagePNGRepresentation ( image ); // get PNG representation
UIImage* imgPng = [UIImage imageWithData:imdata]; // wrap UIImage
return imgPng;
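As an aside, the byte-by-byte flip loop above (the part commented "there's gotta be a better way, but this works") can be avoided by letting Core Graphics flip the image while drawing it into a bitmap context, along the lines of Apple's QA1704 snapshot code. A rough sketch, assuming width, height, and iref are the same variables as in the snapshot method above and glView is the GLKView being captured; with this approach iref can be created directly from the data read by glReadPixels, so the buffer2 copy is unnecessary:
// Render the bottom-up CGImage into a UIKit image context. CGContextDrawImage
// draws into Quartz's flipped coordinate system, so the OpenGL pixel data
// comes out right side up. Using the view's scale keeps the full resolution.
CGFloat scale = glView.contentScaleFactor;
CGSize sizeInPoints = CGSizeMake(width / scale, height / scale);
UIGraphicsBeginImageContextWithOptions(sizeInPoints, NO, scale);
CGContextRef cgcontext = UIGraphicsGetCurrentContext();
CGContextSetBlendMode(cgcontext, kCGBlendModeCopy);
CGContextDrawImage(cgcontext, CGRectMake(0.0, 0.0, sizeInPoints.width, sizeInPoints.height), iref);
UIImage *flipped = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();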
I use OpenCV 2.4.9 for iOS and need help.
I would like to capture high-resolution photos with CvVideoCamera while filming. I need the video camera so that the processImage method can do document capture via edge detection. That part works well, but once a document has been detected I need a high-resolution photo of it.
I found this: http://code.opencv.org/svn/gsoc2012/ios/trunk/HelloWorld_iOS/HelloWorld_iOS/VideoCameraController.m
It handles photo and video at the same time, but its process-image method is different from the CvVideoCamera delegate's, and my algorithm doesn't work with that class.
What I am looking for is a combined photo/video camera solution like CvVideoCamera in OpenCV.
I hope someone can help, and sorry for my English.
I solved this problem by adding some methods from the CvVideoCamera class to the CvPhotoCamera class. It works for me, but it may need some adjustments to apply to your code.
CvPhotoCameraMod.h:
#import <UIKit/UIKit.h>
#import <opencv2/highgui/cap_ios.h>
#import <opencv2/highgui/ios.h>
#define DEGREES_RADIANS(angle) ((angle) / 180.0 * M_PI)
@class CvPhotoCameraMod;
@protocol CvPhotoCameraDelegateMod <CvPhotoCameraDelegate>
- (void)processImage:(cv::Mat&)image;
@end
@interface CvPhotoCameraMod : CvPhotoCamera <AVCaptureVideoDataOutputSampleBufferDelegate>
@property (nonatomic, retain) CALayer *customPreviewLayer;
@property (nonatomic, retain) AVCaptureVideoDataOutput *videoDataOutput;
@property (nonatomic, weak) id <CvPhotoCameraDelegateMod> delegate;
- (void)createCustomVideoPreview;
@end
CvPhotoCameraMod.mm:
#import "CvPhotoCameraMod.h"
#import <CoreGraphics/CoreGraphics.h>
#define DEGREES_RADIANS(angle) ((angle) / 180.0 * M_PI)
@implementation CvPhotoCameraMod
-(void)createCaptureOutput;
{
[super createCaptureOutput];
[self createVideoDataOutput];
}
- (void)createCustomVideoPreview;
{
[self.parentView.layer addSublayer:self.customPreviewLayer];
}
//Method mostly taken from this source: https://github.com/Itseez/opencv/blob/b46719b0931b256ab68d5f833b8fadd83737ddd1/modules/videoio/src/cap_ios_video_camera.mm
-(void)createVideoDataOutput{
// Make a video data output
self.videoDataOutput = [AVCaptureVideoDataOutput new];
//Drop grayscale support here
self.videoDataOutput.videoSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey];
// discard if the data output queue is blocked (as we process the still image)
[self.videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];
if ( [self.captureSession canAddOutput:self.videoDataOutput] ) {
[self.captureSession addOutput:self.videoDataOutput];
}
[[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo] setEnabled:YES];
// set video mirroring for front camera (more intuitive)
if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoMirroring) {
if (self.defaultAVCaptureDevicePosition == AVCaptureDevicePositionFront) {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = YES;
} else {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = NO;
}
}
// set default video orientation
if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoOrientation) {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoOrientation = self.defaultAVCaptureVideoOrientation;
}
// create a custom preview layer
self.customPreviewLayer = [CALayer layer];
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
self.customPreviewLayer.position = CGPointMake(self.parentView.frame.size.width/2., self.parentView.frame.size.height/2.);
// create a serial dispatch queue used for the sample buffer delegate as well as when a still image is captured
// a serial dispatch queue must be used to guarantee that video frames will be delivered in order
// see the header doc for setSampleBufferDelegate:queue: for more information
dispatch_queue_t videoDataOutputQueue = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL);
[self.videoDataOutput setSampleBufferDelegate:self queue:videoDataOutputQueue];
}
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
(void)captureOutput;
(void)connection;
if (self.delegate) {
// convert from Core Media to Core Video
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer, 0);
void* bufferAddress;
size_t width;
size_t height;
size_t bytesPerRow;
CGColorSpaceRef colorSpace;
CGContextRef context;
int format_opencv;
OSType format = CVPixelBufferGetPixelFormatType(imageBuffer);
if (format == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) {
format_opencv = CV_8UC1;
bufferAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
width = CVPixelBufferGetWidthOfPlane(imageBuffer, 0);
height = CVPixelBufferGetHeightOfPlane(imageBuffer, 0);
bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0);
} else { // expect kCVPixelFormatType_32BGRA
format_opencv = CV_8UC4;
bufferAddress = CVPixelBufferGetBaseAddress(imageBuffer);
width = CVPixelBufferGetWidth(imageBuffer);
height = CVPixelBufferGetHeight(imageBuffer);
bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
}
// delegate image processing to the delegate
cv::Mat image((int)height, (int)width, format_opencv, bufferAddress, bytesPerRow);
CGImage* dstImage;
if ([self.delegate respondsToSelector:@selector(processImage:)]) {
[self.delegate processImage:image];
}
// check if matrix data pointer or dimensions were changed by the delegate
bool iOSimage = false;
if (height == (size_t)image.rows && width == (size_t)image.cols && format_opencv == image.type() && bufferAddress == image.data && bytesPerRow == image.step) {
iOSimage = true;
}
// (create color space, create graphics context, render buffer)
CGBitmapInfo bitmapInfo;
// basically we decide if it's a grayscale, rgb or rgba image
if (image.channels() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
bitmapInfo = kCGImageAlphaNone;
} else if (image.channels() == 3) {
colorSpace = CGColorSpaceCreateDeviceRGB();
bitmapInfo = kCGImageAlphaNone;
if (iOSimage) {
bitmapInfo |= kCGBitmapByteOrder32Little;
} else {
bitmapInfo |= kCGBitmapByteOrder32Big;
}
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
bitmapInfo = kCGImageAlphaPremultipliedFirst;
if (iOSimage) {
bitmapInfo |= kCGBitmapByteOrder32Little;
} else {
bitmapInfo |= kCGBitmapByteOrder32Big;
}
}
if (iOSimage) {
context = CGBitmapContextCreate(bufferAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo);
dstImage = CGBitmapContextCreateImage(context);
CGContextRelease(context);
} else {
NSData *data = [NSData dataWithBytes:image.data length:image.elemSize()*image.total()];
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
dstImage = CGImageCreate(image.cols, // width
image.rows, // height
8, // bits per component
8 * image.elemSize(), // bits per pixel
image.step, // bytesPerRow
colorSpace, // colorspace
bitmapInfo, // bitmap info
provider, // CGDataProviderRef
NULL, // decode
false, // should interpolate
kCGRenderingIntentDefault // intent
);
CGDataProviderRelease(provider);
}
// render buffer
dispatch_sync(dispatch_get_main_queue(), ^{
self.customPreviewLayer.contents = (__bridge id)dstImage;
});
// cleanup
CGImageRelease(dstImage);
CGColorSpaceRelease(colorSpace);
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
}
}
@end
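A possible usage sketch, assuming a view controller that adopts CvPhotoCameraDelegateMod; previewView and photoCamera are illustrative property names, and the other properties and methods come from OpenCV's CvAbstractCamera/CvPhotoCamera API in cap_ios.h:
self.photoCamera = [[CvPhotoCameraMod alloc] initWithParentView:self.previewView];
self.photoCamera.delegate = self;
self.photoCamera.defaultAVCaptureDevicePosition = AVCaptureDevicePositionBack;
self.photoCamera.defaultAVCaptureSessionPreset = AVCaptureSessionPresetPhoto;
self.photoCamera.defaultAVCaptureVideoOrientation = AVCaptureVideoOrientationPortrait;
[self.photoCamera start];
// Call this after start so the custom preview layer set up in
// createVideoDataOutput is ready to be added to the parent view.
[self.photoCamera createCustomVideoPreview];
// Live frames arrive in -processImage:, where the document edges can be detected.
// Once a document has been found, trigger the high-resolution capture:
[self.photoCamera takePicture];
// The full-resolution still is delivered via the inherited CvPhotoCameraDelegate
// method -photoCamera:capturedImage:.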
The code below saves each frame of a movie to the Photo Album on an iPad. After about 60 frames have been saved as images, I get a "Received memory warning" message and my app eventually crashes. What goes wrong here? Am I missing something? I use iOS 7.
- (UIImage *)getImageFromGLBuffer:(GLuint)frameBuffer :(CGSize)screenSize
{
glBindFramebuffer( GL_FRAMEBUFFER, frameBuffer);
int width = screenSize.width;
int height = screenSize.height;
NSInteger iDataLength = width * height * 4;
// allocate array and read pixels into it.
GLubyte *buffer = (GLubyte *) malloc( iDataLength );
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
glBindFramebuffer( GL_FRAMEBUFFER, 0);
// gl renders "upside down" so swap top to bottom into new array.
// there's gotta be a better way, but this works.
GLubyte *buffer2 = (GLubyte *) malloc(iDataLength);
for(int y = 0; y <height; y++)
{
for(int x = 0; x < width * 4; x++)
{
buffer2[(int)((height - 1 - y) * width * 4 + x)] = buffer[(int)(y * 4 * width + x)];
}
}
// Release the first buffer
free((void*)buffer);
// make data provider with data.
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, iDataLength, releaseBufferData);
// prep the ingredients
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
// make the cgimage
CGImageRef imageRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpaceRef);
// then make the UIImage from that
UIImage *image = [UIImage imageWithCGImage:imageRef]; //[[UIImage alloc] initWithCGImage:imageRef];
NSData* imageData = UIImagePNGRepresentation(image); // get png representation
UIImage* pngImage = [UIImage imageWithData:imageData];
CGImageRelease(imageRef);
return pngImage;
}
// callback for CGDataProviderCreateWithData
void releaseBufferData(void *info, const void *data, size_t dataSize)
{
NSLog(@"releaseBufferData\n");
// free the buffer
free((void*)data);
}
- (void)saveImageToPhotoAlbum:(GLuint)frameBuffer :(CGSize)screenSize
{
if( iqFrameBuffer != nil )
{
UIImage* image = [self getImageFromGLBuffer:frameBuffer : screenSize];
if( image != nil)
{
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
UIImageWriteToSavedPhotosAlbum(image, self, @selector(image:didFinishSavingWithError:contextInfo:), nil);
});
}
else
{
NSLog(@"Couldn't save to Photo Album due to invalid image..");
}
}
else
{
NSLog(@"Frame buffer is invalid. Couldn't save to image..");
}
}
// callback for UIImageWriteToSavedPhotosAlbum
- (void)image:(UIImage *)image didFinishSavingWithError:(NSError *)error contextInfo:(void *)contextInfo
{
NSLog(@"Image has been saved to Photo Album successfully..\n");
// [image release]; // release image
image = nil;
}
I have followed the tutorial on how to use the CvVideoCamera object to play a video and apply some processing.
I am now interested in using the CvPhotoCamera object to capture a high-definition picture with the takePicture method. However, the tutorial uses CvVideoCamera and does not make any reference to CvPhotoCamera.
Both CvPhotoCamera and CvVideoCamera inherit from CvAbstractCamera. They are sister classes, so I have no idea how I can use the takePicture function.
My solution was to create a new class named CvVideoPhotoCamera, which inherits from CvAbstractCamera. Its contents are a merge of CvVideoCamera and CvPhotoCamera.
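A minimal sketch of what such a merged interface could look like (hypothetical; the class and delegate names are illustrative, and the implementation would have to copy the relevant capture-output code from OpenCV's cap_ios_video_camera.mm and cap_ios_photo_camera.mm):
#import <opencv2/highgui/cap_ios.h>
@class CvVideoPhotoCamera;
// Delegate that receives both live video frames and the high-resolution still.
@protocol CvVideoPhotoCameraDelegate <NSObject>
- (void)processImage:(cv::Mat&)image;                                           // per-frame processing
- (void)photoCamera:(CvVideoPhotoCamera*)camera capturedImage:(UIImage*)image;  // still capture
@end
@interface CvVideoPhotoCamera : CvAbstractCamera <AVCaptureVideoDataOutputSampleBufferDelegate>
@property (nonatomic, weak) id<CvVideoPhotoCameraDelegate> delegate;
@property (nonatomic, retain) AVCaptureVideoDataOutput *videoDataOutput;    // from CvVideoCamera
@property (nonatomic, retain) AVCaptureStillImageOutput *stillImageOutput;  // from CvPhotoCamera
- (void)takePicture;
@end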
Do the same as @poiuytrez: add the modification to CvPhotoCamera.
CvPhotoCameraMod.h:
#import <UIKit/UIKit.h>
#import <opencv2/highgui/cap_ios.h>
#import <opencv2/highgui/ios.h>
#define DEGREES_RADIANS(angle) ((angle) / 180.0 * M_PI)
@class CvPhotoCameraMod;
@protocol CvPhotoCameraDelegateMod <CvPhotoCameraDelegate>
- (void)processImage:(cv::Mat&)image;
@end
@interface CvPhotoCameraMod : CvPhotoCamera <AVCaptureVideoDataOutputSampleBufferDelegate>
@property (nonatomic, retain) CALayer *customPreviewLayer;
@property (nonatomic, retain) AVCaptureVideoDataOutput *videoDataOutput;
@property (nonatomic, weak) id <CvPhotoCameraDelegateMod> delegate;
- (void)createCustomVideoPreview;
@end
CvPhotoCameraMod.mm:
#import "CvPhotoCameraMod.h"
#import <CoreGraphics/CoreGraphics.h>
#define DEGREES_RADIANS(angle) ((angle) / 180.0 * M_PI)
@implementation CvPhotoCameraMod
-(void)createCaptureOutput;
{
[super createCaptureOutput];
[self createVideoDataOutput];
}
- (void)createCustomVideoPreview;
{
[self.parentView.layer addSublayer:self.customPreviewLayer];
}
//Method mostly taken from this source: https://github.com/Itseez/opencv/blob/b46719b0931b256ab68d5f833b8fadd83737ddd1/modules/videoio/src/cap_ios_video_camera.mm
-(void)createVideoDataOutput{
// Make a video data output
self.videoDataOutput = [AVCaptureVideoDataOutput new];
//Drop grayscale support here
self.videoDataOutput.videoSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey];
// discard if the data output queue is blocked (as we process the still image)
[self.videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];
if ( [self.captureSession canAddOutput:self.videoDataOutput] ) {
[self.captureSession addOutput:self.videoDataOutput];
}
[[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo] setEnabled:YES];
// set video mirroring for front camera (more intuitive)
if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoMirroring) {
if (self.defaultAVCaptureDevicePosition == AVCaptureDevicePositionFront) {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = YES;
} else {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoMirrored = NO;
}
}
// set default video orientation
if ([self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].supportsVideoOrientation) {
[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo].videoOrientation = self.defaultAVCaptureVideoOrientation;
}
// create a custom preview layer
self.customPreviewLayer = [CALayer layer];
self.customPreviewLayer.bounds = CGRectMake(0, 0, self.parentView.frame.size.width, self.parentView.frame.size.height);
self.customPreviewLayer.position = CGPointMake(self.parentView.frame.size.width/2., self.parentView.frame.size.height/2.);
// create a serial dispatch queue used for the sample buffer delegate as well as when a still image is captured
// a serial dispatch queue must be used to guarantee that video frames will be delivered in order
// see the header doc for setSampleBufferDelegate:queue: for more information
dispatch_queue_t videoDataOutputQueue = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL);
[self.videoDataOutput setSampleBufferDelegate:self queue:videoDataOutputQueue];
}
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
(void)captureOutput;
(void)connection;
if (self.delegate) {
// convert from Core Media to Core Video
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer, 0);
void* bufferAddress;
size_t width;
size_t height;
size_t bytesPerRow;
CGColorSpaceRef colorSpace;
CGContextRef context;
int format_opencv;
OSType format = CVPixelBufferGetPixelFormatType(imageBuffer);
if (format == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) {
format_opencv = CV_8UC1;
bufferAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
width = CVPixelBufferGetWidthOfPlane(imageBuffer, 0);
height = CVPixelBufferGetHeightOfPlane(imageBuffer, 0);
bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0);
} else { // expect kCVPixelFormatType_32BGRA
format_opencv = CV_8UC4;
bufferAddress = CVPixelBufferGetBaseAddress(imageBuffer);
width = CVPixelBufferGetWidth(imageBuffer);
height = CVPixelBufferGetHeight(imageBuffer);
bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
}
// delegate image processing to the delegate
cv::Mat image((int)height, (int)width, format_opencv, bufferAddress, bytesPerRow);
CGImage* dstImage;
if ([self.delegate respondsToSelector:@selector(processImage:)]) {
[self.delegate processImage:image];
}
// check if matrix data pointer or dimensions were changed by the delegate
bool iOSimage = false;
if (height == (size_t)image.rows && width == (size_t)image.cols && format_opencv == image.type() && bufferAddress == image.data && bytesPerRow == image.step) {
iOSimage = true;
}
// (create color space, create graphics context, render buffer)
CGBitmapInfo bitmapInfo;
// basically we decide if it's a grayscale, rgb or rgba image
if (image.channels() == 1) {
colorSpace = CGColorSpaceCreateDeviceGray();
bitmapInfo = kCGImageAlphaNone;
} else if (image.channels() == 3) {
colorSpace = CGColorSpaceCreateDeviceRGB();
bitmapInfo = kCGImageAlphaNone;
if (iOSimage) {
bitmapInfo |= kCGBitmapByteOrder32Little;
} else {
bitmapInfo |= kCGBitmapByteOrder32Big;
}
} else {
colorSpace = CGColorSpaceCreateDeviceRGB();
bitmapInfo = kCGImageAlphaPremultipliedFirst;
if (iOSimage) {
bitmapInfo |= kCGBitmapByteOrder32Little;
} else {
bitmapInfo |= kCGBitmapByteOrder32Big;
}
}
if (iOSimage) {
context = CGBitmapContextCreate(bufferAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo);
dstImage = CGBitmapContextCreateImage(context);
CGContextRelease(context);
} else {
NSData *data = [NSData dataWithBytes:image.data length:image.elemSize()*image.total()];
CGDataProviderRef provider = CGDataProviderCreateWithCFData((__bridge CFDataRef)data);
// Creating CGImage from cv::Mat
dstImage = CGImageCreate(image.cols, // width
image.rows, // height
8, // bits per component
8 * image.elemSize(), // bits per pixel
image.step, // bytesPerRow
colorSpace, // colorspace
bitmapInfo, // bitmap info
provider, // CGDataProviderRef
NULL, // decode
false, // should interpolate
kCGRenderingIntentDefault // intent
);
CGDataProviderRelease(provider);
}
// render buffer
dispatch_sync(dispatch_get_main_queue(), ^{
self.customPreviewLayer.contents = (__bridge id)dstImage;
});
// cleanup
CGImageRelease(dstImage);
CGColorSpaceRelease(colorSpace);
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
}
}
@end
I'm building an iOS app that does some basic detection.
I get the raw frames from AVCaptureVideoDataOutput, convert the CMSampleBufferRef to a UIImage, resize the UIImage, and then convert it to a CVPixelBufferRef.
As far as I can tell from Instruments, the leak is in the last part, where I convert the CGImage to a CVPixelBufferRef.
Here's the code I use:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
videof = [[ASMotionDetect alloc] initWithSampleImage:[self resizeSampleBuffer:sampleBuffer]];
// ASMotionDetect is my class for detection and I use videof to calculate the movement
}
-(UIImage*)resizeSampleBuffer:(CMSampleBufferRef) sampleBuffer {
UIImage *img;
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer,0); // Lock the image buffer
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0); // Get information of the image
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
CGImageRef newImage = CGBitmapContextCreateImage(newContext);
CGContextRelease(newContext);
CGColorSpaceRelease(colorSpace);
CVPixelBufferUnlockBaseAddress(imageBuffer,0);
/* CVBufferRelease(imageBuffer); */ // do not call this!
img = [UIImage imageWithCGImage:newImage];
CGImageRelease(newImage);
newContext = nil;
img = [self resizeImageToSquare:img];
return img;
}
-(UIImage*)resizeImageToSquare:(UIImage*)_temp {
UIImage *img;
int w = _temp.size.width;
int h = _temp.size.height;
CGRect rect;
if (w>h) {
rect = CGRectMake((w-h)/2,0,h,h);
} else {
rect = CGRectMake(0, (h-w)/2, w, w);
}
//
img = [self crop:_temp inRect:rect];
return img;
}
-(UIImage*) crop:(UIImage*)image inRect:(CGRect)rect{
UIImage *sourceImage = image;
CGRect selectionRect = rect;
CGRect transformedRect = TransformCGRectForUIImageOrientation(selectionRect, sourceImage.imageOrientation, sourceImage.size);
CGImageRef resultImageRef = CGImageCreateWithImageInRect(sourceImage.CGImage, transformedRect);
UIImage *resultImage = [[UIImage alloc] initWithCGImage:resultImageRef scale:1.0 orientation:image.imageOrientation];
CGImageRelease(resultImageRef);
return resultImage;
}
And in my detection class I have:
- (id)initWithSampleImage:(UIImage*)sampleImage {
if ((self = [super init])) {
_frame = new CVMatOpaque();
_histograms = new CVMatNDOpaque[kGridSize *
kGridSize];
[self extractFrameFromImage:sampleImage];
}
return self;
}
- (void)extractFrameFromImage:(UIImage*)sampleImage {
CGImageRef imageRef = [sampleImage CGImage];
CVImageBufferRef imageBuffer = [self pixelBufferFromCGImage:imageRef];
CVPixelBufferLockBaseAddress(imageBuffer, 0);
// Collect some information required to extract the frame.
void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
// Extract the frame, convert it to grayscale, and shove it in _frame.
cv::Mat frame(height, width, CV_8UC4, baseAddress, bytesPerRow);
cv::cvtColor(frame, frame, CV_BGR2GRAY);
_frame->matrix = frame;
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
CGImageRelease(imageRef);
}
- (CVPixelBufferRef) pixelBufferFromCGImage: (CGImageRef) image
{
CVPixelBufferRef pxbuffer = NULL;
int width = CGImageGetWidth(image)*2;
int height = CGImageGetHeight(image)*2;
NSMutableDictionary *attributes = [NSMutableDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithInt:kCVPixelFormatType_32ARGB], kCVPixelBufferPixelFormatTypeKey, [NSNumber numberWithInt:width], kCVPixelBufferWidthKey, [NSNumber numberWithInt:height], kCVPixelBufferHeightKey, nil];
CVPixelBufferPoolRef pixelBufferPool;
CVReturn theError = CVPixelBufferPoolCreate(kCFAllocatorDefault, NULL, (__bridge CFDictionaryRef) attributes, &pixelBufferPool);
NSParameterAssert(theError == kCVReturnSuccess);
CVReturn status = CVPixelBufferPoolCreatePixelBuffer(NULL, pixelBufferPool, &pxbuffer);
NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
CVPixelBufferLockBaseAddress(pxbuffer, 0);
void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
NSParameterAssert(pxdata != NULL);
CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(pxdata, width,
height, 8, width*4, rgbColorSpace,
kCGImageAlphaNoneSkipFirst);
NSParameterAssert(context);
/* here is the problem: */
CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
CGColorSpaceRelease(rgbColorSpace);
CGContextRelease(context);
CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
return pxbuffer;
}
With Instruments I found out that the problem is with the CVPixelBufferRef allocations, but I don't understand why. Can someone see the problem?
Thank you
In -pixelBufferFromCGImage:, neither pxbuffer nor pixelBufferPool is released. That makes sense for pxbuffer, since it is the return value, but not for pixelBufferPool: you create and leak one pool per call of the method.
A quick fix would be to:
Release pixelBufferPool in -pixelBufferFromCGImage:
Release pxbuffer (the return value of -pixelBufferFromCGImage:) in -extractFrameFromImage:
You should also rename -pixelBufferFromCGImage: to -createPixelBufferFromCGImage: to make it clear that it returns a retained object.
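A minimal sketch of those two changes (assuming the pool does not need to be reused across calls; if it does, store it in an instance variable instead of releasing it every time):
// In -pixelBufferFromCGImage: (ideally renamed -createPixelBufferFromCGImage:),
// release the pool once the buffer has been created from it:
CVReturn status = CVPixelBufferPoolCreatePixelBuffer(NULL, pixelBufferPool, &pxbuffer);
NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
CVPixelBufferPoolRelease(pixelBufferPool);
// In -extractFrameFromImage:, release the returned buffer once the frame has been
// converted and copied into _frame:
CVImageBufferRef imageBuffer = [self pixelBufferFromCGImage:imageRef];
// ... lock the base address, wrap it in a cv::Mat, convert to grayscale, unlock
//     (exactly as in the code above) ...
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
CVPixelBufferRelease(imageBuffer);  // balances the create from the pool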