I am trying to get a screenshot from a GLKView and save it to the Photo Library. Unfortunately, the quality of the saved photo is very low and I cannot figure out why.
This is the link to the UIImage displayed on the screen using a UIImageView: https://www.dropbox.com/s/d2cyb099n0ndqag/IMG_0148.PNG?dl=0
This is the link to the photo saved in Photo Albums:
https://www.dropbox.com/s/8pdaytonwxxd6wk/IMG_0147.JPG?dl=0
The code is as follows:
- (UIImage*)snapshot
{
GLint backingWidth, backingHeight;
GLint framebuff;
glGetIntegerv( GL_FRAMEBUFFER_BINDING, &framebuff);
glBindFramebuffer(GL_FRAMEBUFFER, framebuff);
// Get the size of the backing CAEAGLLayer
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &backingWidth);
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &backingHeight);
NSInteger x = 0, y = 0, width = backingWidth, height = backingHeight;
NSInteger dataLength = width * height * 4;
GLubyte *data = (GLubyte*)malloc(dataLength * sizeof(GLubyte));
// Read pixel data from the framebuffer
glPixelStorei(GL_PACK_ALIGNMENT, 4);
glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);
GLenum errCode;
if ((errCode = glGetError()) != GL_NO_ERROR) {
//errStr = gluErrorString(errCode);
printf("%u: OpenGL ERROR ",errCode);
}
// gl renders "upside down" so swap top to bottom into new array.
// there's gotta be a better way, but this works.
GLubyte *buffer2 = (GLubyte *) malloc(dataLength);
for(int y = 0; y < height; y++)
{
for(int x = 0; x < width * 4; x++)
{
buffer2[((height - 1) - y) * width * 4 + x] = data[y * 4 * width + x];
}
}
// Create a CGImage with the pixel data
// If your OpenGL ES content is opaque, use kCGImageAlphaNoneSkipLast to ignore the alpha channel
// otherwise, use kCGImageAlphaPremultipliedLast
CGDataProviderRef ref = CGDataProviderCreateWithData(NULL, buffer2, dataLength, NULL);
CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
CGImageRef iref = CGImageCreate(width, height, 8, 32, width * 4, colorspace, kCGBitmapByteOrderDefault, ref, NULL, YES, kCGRenderingIntentDefault);
// Retrieve the UIImage from the current context
UIImage *image = [UIImage imageWithCGImage:iref];
UIImageView *myImageView = [[UIImageView alloc] init];
myImageView.frame = CGRectMake(0, 0, fluid.gpu.Drawing.Pong->width,fluid.gpu.Drawing.Pong->height);
myImageView.image = image;
[self.view addSubview:myImageView];
CFRelease(colorspace);
CGImageRelease(iref);
free(data);
return image;
}
-(void)SavePhoto{
UIImage *temp = [self snapshot];
UIImageWriteToSavedPhotosAlbum(temp, self, @selector(imageSavedToPhotosAlbum:didFinishSavingWithError:contextInfo:), (__bridge void*)self.context);
}
- (void)imageSavedToPhotosAlbum:(UIImage *)image didFinishSavingWithError:(NSError *)error contextInfo:(void *)contextInfo {
NSString *message;
NSString *title;
if (!error) {
title = NSLocalizedString(#"Save picture", #"");
message = NSLocalizedString(#"Your screenshot was saved to Photos Album.", #"");
} else {
title = NSLocalizedString(#"There was an error saving screenshot.", #"");
message = [error description];
}
UIAlertView *alert = [[UIAlertView alloc] initWithTitle:title
message:message
delegate:nil
cancelButtonTitle:NSLocalizedString(#"OK", #"")
otherButtonTitles:nil];
[alert show];
}
I also tried the convenience method [GLKView snapshot], but I get the same result. The problem is reproducible on iPhone 4, iPhone 3GS, and iPad 2.
I managed to get better quality by converting my image to PNG format and saving that instead:
NSData* imdata = UIImagePNGRepresentation ( image ); // get PNG representation
UIImage* imgPng = [UIImage imageWithData:imdata]; // wrap UIImage
return imgPng;
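As a related sketch, here is one way to save those PNG bytes directly to the Photo Library with ALAssetsLibrary, so the data is written as-is rather than re-encoded by UIImageWriteToSavedPhotosAlbum. This is a minimal sketch, not the poster's code; the method name savePNGSnapshotOfView: and the glkView parameter are assumptions.
#import <GLKit/GLKit.h>
#import <AssetsLibrary/AssetsLibrary.h>
// Hypothetical helper: take the GLKView's built-in snapshot and write the
// PNG bytes straight to the Photo Library.
- (void)savePNGSnapshotOfView:(GLKView *)glkView
{
    UIImage *snapshot = glkView.snapshot;                  // GLKView convenience snapshot
    NSData *pngData = UIImagePNGRepresentation(snapshot);  // lossless PNG bytes
    ALAssetsLibrary *library = [[ALAssetsLibrary alloc] init];
    [library writeImageDataToSavedPhotosAlbum:pngData
                                     metadata:nil
                              completionBlock:^(NSURL *assetURL, NSError *error) {
        if (error) {
            NSLog(@"Saving PNG failed: %@", error);
        } else {
            NSLog(@"Saved PNG snapshot at %@", assetURL);
        }
    }];
}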
Related
The code below saves each frame of a movie to the Photo Album on an iPad device. After about 60 frames have been saved as images, I get a "Received memory warning." message and my app eventually crashes. What goes wrong here? Am I missing something? I am using iOS 7.
-(UIImage *) getImageFromGLBuffer:(GLuint)frameBuffer :(CGSize)screenSize
{
glBindFramebuffer( GL_FRAMEBUFFER, frameBuffer);
int width = screenSize.width;
int height = screenSize.height;
NSInteger iDataLength = width * height * 4;
// allocate array and read pixels into it.
GLubyte *buffer = (GLubyte *) malloc( iDataLength );
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
glBindFramebuffer( GL_FRAMEBUFFER, 0);
// gl renders "upside down" so swap top to bottom into new array.
// there's gotta be a better way, but this works.
GLubyte *buffer2 = (GLubyte *) malloc(iDataLength);
for(int y = 0; y < height; y++)
{
for(int x = 0; x < width * 4; x++)
{
buffer2[(int)((height - 1 - y) * width * 4 + x)] = buffer[(int)(y * 4 * width + x)];
}
}
// Release the first buffer
free((void*)buffer);
// make data provider with data.
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, iDataLength, releaseBufferData);
// prep the ingredients
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
// make the cgimage
CGImageRef imageRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpaceRef);
// then make the UIImage from that
UIImage *image = [UIImage imageWithCGImage:imageRef]; //[[UIImage alloc] initWithCGImage:imageRef];
NSData* imageData = UIImagePNGRepresentation(image); // get png representation
UIImage* pngImage = [UIImage imageWithData:imageData];
CGImageRelease(imageRef);
return pngImage;
}
// callback for CGDataProviderCreateWithData
void releaseBufferData(void *info, const void *data, size_t dataSize)
{
NSLog(#"releaseBufferData\n");
// free the buffer
free((void*)data);
}
- (void)saveImageToPhotoAlbum:(GLuint)frameBuffer :(CGSize)screenSize
{
if( iqFrameBuffer != nil )
{
UIImage* image = [self getImageFromGLBuffer:frameBuffer : screenSize];
if( image != nil)
{
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
UIImageWriteToSavedPhotosAlbum(image, self, @selector(image:didFinishSavingWithError:contextInfo:), nil);
});
}
else
{
NSLog(#"Couldn't save to Photo Album due to invalid image..");
}
}
else
{
NSLog(#"Frame buffer is invalid. Couldn't save to image..");
}
}
// callback for UIImageWriteToSavedPhotosAlbum
- (void)image:(UIImage *)image didFinishSavingWithError:(NSError *)error contextInfo:(void *)contextInfo
{
NSLog(#"Image has been saved to Photo Album successfully..\n");
// [image release]; // release image
image = nil;
}
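Not a definitive answer to the question above, but one common pattern for this kind of per-frame work is to wrap each frame in its own autorelease pool so the temporary UIImage and NSData objects are drained immediately instead of accumulating over 60+ frames. A minimal sketch; the wrapper method saveFrame:screenSize: is an assumption, not part of the original code.
// Hypothetical per-frame wrapper: the pool drains once per frame, releasing the
// autoreleased UIImage / NSData temporaries created inside getImageFromGLBuffer.
- (void)saveFrame:(GLuint)frameBuffer screenSize:(CGSize)screenSize
{
    @autoreleasepool {
        UIImage *image = [self getImageFromGLBuffer:frameBuffer :screenSize];
        if (image != nil) {
            UIImageWriteToSavedPhotosAlbum(image, self,
                @selector(image:didFinishSavingWithError:contextInfo:), nil);
        }
    }
}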
Capturing a screenshot using OpenGL works fine on iOS versions before 7, but when the app runs on iOS 7 it returns a black screen.
The following is the piece of code I am working with:
-(UIImage *) glToUIImage {
CGSize size = self.view.frame.size;
int image_height = (int)size.width;
int image_width = (int)size.height;
NSInteger myDataLength = image_width * image_height * 4;
// allocate array and read pixels into it.
GLubyte *buffer = (GLubyte *) malloc(myDataLength);
glReadPixels(0, 0, image_width, image_height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
GLubyte *buffer2 = (GLubyte *) malloc(myDataLength);
for(int y = 0; y < image_height; y++)
{
for(int x = 0; x < image_width * 4; x++)
{
buffer2[(image_height - 1 - y) * image_width * 4 + x] = buffer[y * 4 * image_width + x];
}
}
// make data provider with data.
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, myDataLength, NULL);
// prep the ingredients
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * image_width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
// make the cgimage
CGImageRef imageRef = CGImageCreate(image_width, image_height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
// then make the uiimage from that
UIImage *myImage = [UIImage imageWithCGImage:imageRef];
return myImage;
}
// screenshot function, combined my opengl image with background image and
// saved into Photos.
-(UIImage*)screenshot
{
UIImage *image = [self glToUIImage];
CGRect pos = CGRectMake(0, 0, image.size.width, image.size.height);
UIGraphicsBeginImageContextWithOptions(image.size, NO, [[UIScreen mainScreen]scale]);
[image drawInRect:pos];
UIImage* final = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
final = [[UIImage alloc] initWithCGImage: final.CGImage
scale: 1.0
orientation: UIImageOrientationRight];
// final picture I saved into Photos.
UIImageWriteToSavedPhotosAlbum(final, self, @selector(image:didFinishSavingWithError:contextInfo:), nil);
return final;
}
Has anybody captured a screenshot using OpenGL on iOS 7?
I have resolved the issue by updating the drawableProperties of the EAGLView class:
eaglLayer.drawableProperties = [NSDictionary dictionaryWithObjectsAndKeys:
                                [NSNumber numberWithBool:FALSE], kEAGLDrawablePropertyRetainedBacking,
                                kEAGLColorFormatRGBA8, kEAGLDrawablePropertyColorFormat,
                                nil];
:) :)
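For context, that property is normally set where the CAEAGLLayer-backed view is initialized; below is a minimal sketch of the standard EAGLView-style template (not the poster's exact code), showing where the drawableProperties from the answer above would go.
// Standard EAGLView-style setup: configure the backing CAEAGLLayer before the
// EAGLContext is created, applying the drawableProperties shown above.
+ (Class)layerClass
{
    return [CAEAGLLayer class];
}
- (id)initWithCoder:(NSCoder *)coder
{
    if ((self = [super initWithCoder:coder])) {
        CAEAGLLayer *eaglLayer = (CAEAGLLayer *)self.layer;
        eaglLayer.opaque = YES;
        eaglLayer.drawableProperties = [NSDictionary dictionaryWithObjectsAndKeys:
                                        [NSNumber numberWithBool:FALSE], kEAGLDrawablePropertyRetainedBacking,
                                        kEAGLColorFormatRGBA8, kEAGLDrawablePropertyColorFormat,
                                        nil];
    }
    return self;
}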
I am using a screen recorder to capture the screen. It works perfectly when a view fills the iPhone screen. However, when the AVCaptureVideoPreviewLayer is displayed with overlay buttons, the saved screen-capture video shows the overlay buttons but not the AVCaptureVideoPreviewLayer. I used this tutorial for adding the overlays. How can I fix this?
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
@autoreleasepool {
if ([connection isVideoOrientationSupported])
[connection setVideoOrientation:[self cameraOrientation]];
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
/*Lock the image buffer*/
CVPixelBufferLockBaseAddress(imageBuffer,0);
/*Get information about the image*/
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
/*Create a CGImageRef from the CVImageBufferRef*/
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
CGImageRef newImage = CGBitmapContextCreateImage(newContext);
/*We release some components*/
CVPixelBufferUnlockBaseAddress(imageBuffer,0);
CGContextRelease(newContext);
CGColorSpaceRelease(colorSpace);
UIImage *image= [UIImage imageWithCGImage:newImage scale:1.0 orientation:UIImageOrientationRight];
image1= [UIImage imageWithCGImage:newImage];
/*We relase the CGImageRef*/
CGImageRelease(newImage);
dispatch_sync(dispatch_get_main_queue(), ^{
[self.imageView setImage:image1];
});
}
}
Run writeSample: using an NSTimer.
-(void) writeSample: (NSTimer*) _timer {
if (assetWriterInput.readyForMoreMediaData) {
// CMSampleBufferRef sample = nil;
@autoreleasepool {
CVReturn cvErr = kCVReturnSuccess;
// get screenshot image!
UIGraphicsBeginImageContext(baseViewOne.frame.size);
[[baseViewOne layer] renderInContext:UIGraphicsGetCurrentContext()];
screenshota = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
//CGImageRef image = (CGImageRef) [[self screenshot] CGImage];
CGImageRef image = (CGImageRef) [screenshota CGImage];
//NSLog (#"made screenshot");
// prepare the pixel buffer
CVPixelBufferRef pixelBuffer = NULL;
CFDataRef imageData= CGDataProviderCopyData(CGImageGetDataProvider(image));
//NSLog (#"copied image data");
cvErr = CVPixelBufferCreateWithBytes(kCFAllocatorDefault,
baseViewOne.frame.size.width,baseViewOne.frame.size.height,
kCVPixelFormatType_32BGRA,
(void*)CFDataGetBytePtr(imageData),
CGImageGetBytesPerRow(image),
NULL,
NULL,
NULL,
&pixelBuffer);
//NSLog (#"CVPixelBufferCreateWithBytes returned %d", cvErr);
// calculate the time
CMTime presentationTime;
CFAbsoluteTime thisFrameWallClockTime = CFAbsoluteTimeGetCurrent();
elapsedTime = thisFrameWallClockTime - (firstFrameWallClockTime+pausedFrameTime);
// NSLog (#"elapsedTime: %f", elapsedTime);
presentationTime = CMTimeMake (elapsedTime * TIME_SCALE, TIME_SCALE);
BOOL appended = [assetWriterPixelBufferAdaptor appendPixelBuffer:pixelBuffer withPresentationTime:presentationTime];
if (appended) {
CVPixelBufferRelease( pixelBuffer );
CFRelease(imageData);
pixelBuffer = nil;
//NSLog (#"appended sample at time %lf", CMTimeGetSeconds(presentationTime));
} else {
[self stopRecording];
}
}
}
}
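For completeness, a minimal sketch of scheduling writeSample: with an NSTimer; the 30 fps interval and the frameTimer property are assumptions, and this line would typically live in whatever method starts the recording.
// Hypothetical scheduling code: fire writeSample: at roughly 30 fps and keep a
// reference so the timer can be invalidated in stopRecording.
self.frameTimer = [NSTimer scheduledTimerWithTimeInterval:1.0 / 30.0
                                                   target:self
                                                 selector:@selector(writeSample:)
                                                 userInfo:nil
                                                  repeats:YES];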
I need help converting a 24/32-bit raw RGB image to a UIImage.
I've tried the examples here from Paul Solt and others, but nothing works. Could anybody please show an example or point to a tutorial?
The image data is held in an NSData, and I would like to end up with a JPG or PNG image.
Thx
Thorsten
I'm using the code by Paul Solt. It does something, but the image looks as if it contains four copies of the image information in one image. I can't post an image here:
EDIT: I added the lines at the beginning of the method, between the comments, and now it works :-)
+ (UIImage *) convertBitmapRGBA8ToUIImage:(unsigned char *) buffer
withWidth:(int) width
withHeight:(int) height {
// added code
char* rgba = (char*)malloc(width*height*4);
for(int i=0; i < width*height; ++i) {
rgba[4*i] = buffer[3*i];
rgba[4*i+1] = buffer[3*i+1];
rgba[4*i+2] = buffer[3*i+2];
rgba[4*i+3] = 255;
}
//
size_t bufferLength = width * height * 4;
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, rgba, bufferLength, NULL);
size_t bitsPerComponent = 8;
size_t bitsPerPixel = 32;
size_t bytesPerRow = 4 * width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
if(colorSpaceRef == NULL) {
NSLog(#"Error allocating color space");
CGDataProviderRelease(provider);
return nil;
}
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault | kCGImageAlphaPremultipliedLast;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
CGImageRef iref = CGImageCreate(width,
height,
bitsPerComponent,
bitsPerPixel,
bytesPerRow,
colorSpaceRef,
bitmapInfo,
provider, // data provider
NULL, // decode
YES, // should interpolate
renderingIntent);
uint32_t* pixels = (uint32_t*)malloc(bufferLength);
if(pixels == NULL) {
NSLog(#"Error: Memory not allocated for bitmap");
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpaceRef);
CGImageRelease(iref);
return nil;
}
CGContextRef context = CGBitmapContextCreate(pixels,
width,
height,
bitsPerComponent,
bytesPerRow,
colorSpaceRef,
bitmapInfo);
if(context == NULL) {
NSLog(#"Error context not created");
free(pixels);
}
UIImage *image = nil;
if(context) {
CGContextDrawImage(context, CGRectMake(0.0f, 0.0f, width, height), iref);
CGImageRef imageRef = CGBitmapContextCreateImage(context);
// Support both iPad 3.2 and iPhone 4 Retina displays with the correct scale
if([UIImage respondsToSelector:@selector(imageWithCGImage:scale:orientation:)]) {
float scale = [[UIScreen mainScreen] scale];
image = [UIImage imageWithCGImage:imageRef scale:scale orientation:UIImageOrientationUp];
} else {
image = [UIImage imageWithCGImage:imageRef];
}
CGImageRelease(imageRef);
CGContextRelease(context);
}
CGColorSpaceRelease(colorSpaceRef);
CGImageRelease(iref);
CGDataProviderRelease(provider);
if(pixels) {
free(pixels);
}
return image;
}
Solution:
My bitmap image data has only 3 bytes per pixel, but iOS wants 4 bytes, the fourth being alpha. So inserting the following code, which adds the 4th byte, fixed the problem.
char* rgba = (char*)malloc(width*height*4);
for(int i=0; i < width*height; ++i) {
rgba[4*i] = buffer[3*i];
rgba[4*i+1] = buffer[3*i+1];
rgba[4*i+2] = buffer[3*i+2];
rgba[4*i+3] = 255;
}
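Since the raw bytes live in an NSData, a minimal usage sketch for the patched method might look like the following. The class name MyImageConverter and the helper method are assumptions, and the buffer passed in is expected to hold width * height * 3 bytes of packed RGB.
// Hypothetical call site: hand the NSData's bytes to the converter, then keep
// the result as PNG data (or JPEG) if a file representation is needed.
- (NSData *)pngDataFromRawRGB:(NSData *)rawData width:(int)width height:(int)height
{
    unsigned char *bytes = (unsigned char *)rawData.bytes;
    UIImage *image = [MyImageConverter convertBitmapRGBA8ToUIImage:bytes
                                                          withWidth:width
                                                         withHeight:height];
    return UIImagePNGRepresentation(image);  // or UIImageJPEGRepresentation(image, 0.9)
}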
Here is the conversion of NSData to UIImage:
NSData *imageData = UIImagePNGRepresentation(image);
UIImage *newImage = [UIImage imageWithData:imageData];
I have created a PNG image using this code; hopefully it will work for you as well.
I'm building an iOS app that does some basic detection.
I get the raw frames from AVCaptureVideoDataOutput, convert the CMSampleBufferRef to a UIImage, resize the UIImage, then convert it to a CVPixelBufferRef.
As far as I can tell from Instruments, the leak is in the last part, where I convert the CGImage to a CVPixelBufferRef.
Here's the code I use:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
videof = [[ASMotionDetect alloc] initWithSampleImage:[self resizeSampleBuffer:sampleBuffer]];
// ASMotionDetect is my class for detection and I use videof to calculate the movement
}
-(UIImage*)resizeSampleBuffer:(CMSampleBufferRef) sampleBuffer {
UIImage *img;
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(imageBuffer,0); // Lock the image buffer
uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0); // Get information of the image
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
CGImageRef newImage = CGBitmapContextCreateImage(newContext);
CGContextRelease(newContext);
CGColorSpaceRelease(colorSpace);
CVPixelBufferUnlockBaseAddress(imageBuffer,0);
/* CVBufferRelease(imageBuffer); */ // do not call this!
img = [UIImage imageWithCGImage:newImage];
CGImageRelease(newImage);
newContext = nil;
img = [self resizeImageToSquare:img];
return img;
}
-(UIImage*)resizeImageToSquare:(UIImage*)_temp {
UIImage *img;
int w = _temp.size.width;
int h = _temp.size.height;
CGRect rect;
if (w>h) {
rect = CGRectMake((w-h)/2,0,h,h);
} else {
rect = CGRectMake(0, (h-w)/2, w, w);
}
//
img = [self crop:_temp inRect:rect];
return img;
}
-(UIImage*) crop:(UIImage*)image inRect:(CGRect)rect{
UIImage *sourceImage = image;
CGRect selectionRect = rect;
CGRect transformedRect = TransformCGRectForUIImageOrientation(selectionRect, sourceImage.imageOrientation, sourceImage.size);
CGImageRef resultImageRef = CGImageCreateWithImageInRect(sourceImage.CGImage, transformedRect);
UIImage *resultImage = [[UIImage alloc] initWithCGImage:resultImageRef scale:1.0 orientation:image.imageOrientation];
CGImageRelease(resultImageRef);
return resultImage;
}
And in my detection class I have:
- (id)initWithSampleImage:(UIImage*)sampleImage {
if ((self = [super init])) {
_frame = new CVMatOpaque();
_histograms = new CVMatNDOpaque[kGridSize *
kGridSize];
[self extractFrameFromImage:sampleImage];
}
return self;
}
- (void)extractFrameFromImage:(UIImage*)sampleImage {
CGImageRef imageRef = [sampleImage CGImage];
CVImageBufferRef imageBuffer = [self pixelBufferFromCGImage:imageRef];
CVPixelBufferLockBaseAddress(imageBuffer, 0);
// Collect some information required to extract the frame.
void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
size_t height = CVPixelBufferGetHeight(imageBuffer);
size_t width = CVPixelBufferGetWidth(imageBuffer);
// Extract the frame, convert it to grayscale, and shove it in _frame.
cv::Mat frame(height, width, CV_8UC4, baseAddress, bytesPerRow);
cv::cvtColor(frame, frame, CV_BGR2GRAY);
_frame->matrix = frame;
CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
CGImageRelease(imageRef);
}
- (CVPixelBufferRef) pixelBufferFromCGImage: (CGImageRef) image
{
CVPixelBufferRef pxbuffer = NULL;
int width = CGImageGetWidth(image)*2;
int height = CGImageGetHeight(image)*2;
NSMutableDictionary *attributes = [NSMutableDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithInt:kCVPixelFormatType_32ARGB], kCVPixelBufferPixelFormatTypeKey, [NSNumber numberWithInt:width], kCVPixelBufferWidthKey, [NSNumber numberWithInt:height], kCVPixelBufferHeightKey, nil];
CVPixelBufferPoolRef pixelBufferPool;
CVReturn theError = CVPixelBufferPoolCreate(kCFAllocatorDefault, NULL, (__bridge CFDictionaryRef) attributes, &pixelBufferPool);
NSParameterAssert(theError == kCVReturnSuccess);
CVReturn status = CVPixelBufferPoolCreatePixelBuffer(NULL, pixelBufferPool, &pxbuffer);
NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);
CVPixelBufferLockBaseAddress(pxbuffer, 0);
void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
NSParameterAssert(pxdata != NULL);
CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(pxdata, width,
height, 8, width*4, rgbColorSpace,
kCGImageAlphaNoneSkipFirst);
NSParameterAssert(context);
/* here is the problem: */
CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
CGColorSpaceRelease(rgbColorSpace);
CGContextRelease(context);
CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
return pxbuffer;
}
With Instruments I found out that the problem is with the CVPixelBufferRef allocations, but I don't understand why. Can someone see the problem?
Thank you
In -pixelBufferFromCGImage:, neither pxbuffer nor pixelBufferPool is released. That makes sense for pxbuffer, as it is a return value, but not for pixelBufferPool: you create and leak one per call of the method.
A quick fix should be to:
1. Release pixelBufferPool in -pixelBufferFromCGImage:.
2. Release pxbuffer (the return value of -pixelBufferFromCGImage:) in -extractFrameFromImage:.
You should also rename -pixelBufferFromCGImage: to -createPixelBufferFromCGImage: to make it clear that it returns a retained object.
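A minimal sketch of those two changes, with everything else left as the poster wrote it (the suggested rename to -createPixelBufferFromCGImage: is applied):
// 1) End of -createPixelBufferFromCGImage: (formerly -pixelBufferFromCGImage:):
//    release the pool before returning; the caller now owns the returned buffer.
    CVPixelBufferUnlockBaseAddress(pxbuffer, 0);
    CVPixelBufferPoolRelease(pixelBufferPool);  // was leaked once per call
    return pxbuffer;
// 2) End of -extractFrameFromImage:: release the buffer once the grayscale copy is made.
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    CVPixelBufferRelease(imageBuffer);          // balances the create call above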