The following is code I use for reading an image from an OpenGL ES scene:
-(UIImage *)getImage{
GLint width;
GLint height;
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_WIDTH, &width);
glGetRenderbufferParameteriv(GL_RENDERBUFFER, GL_RENDERBUFFER_HEIGHT, &height);
NSLog(#"%d %d",width,height);
NSInteger myDataLength = width * height * 4;
// allocate array and read pixels into it.
GLubyte *buffer = (GLubyte *) malloc(myDataLength);
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
// gl renders "upside down" so swap top to bottom into new array.
// there's gotta be a better way, but this works.
GLubyte *buffer2 = (GLubyte *) malloc(myDataLength);
for(int y = 0; y < height; y++)
{
for(int x = 0; x < width * 4; x++)
{
buffer2[((height - 1) - y) * width * 4 + x] = buffer[y * 4 * width + x];
}
}
// make data provider with data.
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, myDataLength, NULL);
// prep the ingredients
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
// make the cgimage
CGImageRef imageRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
// then make the uiimage from that
UIImage *myImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpaceRef);
free(buffer);
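// NOTE: buffer2 is freed below while the returned image's data provider still
// points at it; the safer pattern is to hand CGDataProviderCreateWithData a
// release callback and let it free buffer2 when the image is done with it.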
free(buffer2);
return myImage;
}
This works in iOS 5.x and earlier, but on iOS 6.0 it now returns a black image. Why is glReadPixels() failing on iOS 6.0?
Set kEAGLDrawablePropertyRetainedBacking to YES in the layer's drawable properties:
CAEAGLLayer *eaglLayer = (CAEAGLLayer *) self.layer;
eaglLayer.drawableProperties = @{
    kEAGLDrawablePropertyRetainedBacking: [NSNumber numberWithBool:YES],
    kEAGLDrawablePropertyColorFormat: kEAGLColorFormatRGBA8
};
(I don't know why this tip works so well, but it does.)
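For context, here's a minimal sketch of where that snippet usually lives, assuming a UIView subclass backed by a CAEAGLLayer (the class name and method placement here are illustrative, not the original poster's code):

@interface MyGLView : UIView
@end

@implementation MyGLView

// Back this view with a CAEAGLLayer instead of a plain CALayer.
+ (Class)layerClass {
    return [CAEAGLLayer class];
}

- (instancetype)initWithFrame:(CGRect)frame {
    if ((self = [super initWithFrame:frame])) {
        CAEAGLLayer *eaglLayer = (CAEAGLLayer *)self.layer;
        eaglLayer.opaque = YES;
        // Retained backing must be set before renderbufferStorage:fromDrawable:
        // is called, or the change has no effect.
        eaglLayer.drawableProperties = @{
            kEAGLDrawablePropertyRetainedBacking: @YES,
            kEAGLDrawablePropertyColorFormat: kEAGLColorFormatRGBA8
        };
    }
    return self;
}

@end

Keep in mind that retained backing asks the system to preserve the buffer contents between frames, which can cost some rendering performance.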
Bro, try this method to get your screenshot image. The output image is Mailimage:
- (UIImage*)screenshot
{
// Create a graphics context with the target size
// On iOS 4 and later, use UIGraphicsBeginImageContextWithOptions to take the scale into consideration
// On iOS prior to 4, fall back to use UIGraphicsBeginImageContext
CGSize imageSize = [[UIScreen mainScreen] bounds].size;
if (NULL != UIGraphicsBeginImageContextWithOptions)
UIGraphicsBeginImageContextWithOptions(imageSize, NO, 0);
else
UIGraphicsBeginImageContext(imageSize);
CGContextRef context = UIGraphicsGetCurrentContext();
// Iterate over every window from back to front
for (UIWindow *window in [[UIApplication sharedApplication] windows])
{
if (![window respondsToSelector:@selector(screen)] || [window screen] == [UIScreen mainScreen])
{
// -renderInContext: renders in the coordinate space of the layer,
// so we must first apply the layer's geometry to the graphics context
CGContextSaveGState(context);
// Center the context around the window's anchor point
CGContextTranslateCTM(context, [window center].x, [window center].y);
// Apply the window's transform about the anchor point
CGContextConcatCTM(context, [window transform]);
// Offset by the portion of the bounds left of and above the anchor point
CGContextTranslateCTM(context,
-[window bounds].size.width * [[window layer] anchorPoint].x,
-[window bounds].size.height * [[window layer] anchorPoint].y);
// Render the layer hierarchy to the current context
[[window layer] renderInContext:context];
// Restore the context
CGContextRestoreGState(context);
}
}
// Retrieve the screenshot image
Mailimage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return Mailimage;
}
Related
I have a camera preview layer where the camera preset is 1280x720.
Above the preview layer I added a square UIView with a border.
My goal is to get a cropped image from the camera.
Method to extract data from the camera:
-(CGImageRef)createImageFromBuffer:(CVImageBufferRef)buffer
left:(size_t)left
top:(size_t)top
width:(size_t)width
height:(size_t)height CF_RETURNS_RETAINED {
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(buffer);
size_t dataWidth = CVPixelBufferGetWidth(buffer);
size_t dataHeight = CVPixelBufferGetHeight(buffer);
if (left + width > dataWidth ||
top + height > dataHeight) {
[NSException raise:NSInvalidArgumentException format:@"Crop rectangle does not fit within image data."];
}
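// Round the output row stride up to the next multiple of 16 bytes.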
size_t newBytesPerRow = ((width*4+0xf)>>4)<<4;
CVPixelBufferLockBaseAddress(buffer,0);
int8_t *baseAddress = (int8_t *)CVPixelBufferGetBaseAddress(buffer);
size_t size = newBytesPerRow*height;
int8_t *bytes = (int8_t *)malloc(size * sizeof(int8_t));
if (newBytesPerRow == bytesPerRow) {
memcpy(bytes, baseAddress+top*bytesPerRow, size * sizeof(int8_t));
} else {
for (int y=0; y<height; y++) {
memcpy(bytes+y*newBytesPerRow,
baseAddress+left*4+(top+y)*bytesPerRow,
newBytesPerRow * sizeof(int8_t));
}
}
CVPixelBufferUnlockBaseAddress(buffer, 0);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef newContext = CGBitmapContextCreate(bytes,
width,
height,
8,
newBytesPerRow,
colorSpace,
kCGBitmapByteOrder32Little|
kCGImageAlphaNoneSkipFirst);
CGColorSpaceRelease(colorSpace);
CGImageRef result = CGBitmapContextCreateImage(newContext);
CGContextRelease(newContext);
free(bytes);
return result;
}
Code to rotate the image:
- (CGImageRef)createRotatedImage:(CGImageRef)original degrees:(float)degrees CF_RETURNS_RETAINED {
if (degrees == 0.0f) {
CGImageRetain(original);
return original;
} else {
double radians = degrees * M_PI / 180;
#if TARGET_OS_EMBEDDED || TARGET_IPHONE_SIMULATOR
radians = -1 * radians;
#endif
size_t _width = CGImageGetWidth(original);
size_t _height = CGImageGetHeight(original);
CGRect imgRect = CGRectMake(0, 0, _width, _height);
CGAffineTransform __transform = CGAffineTransformMakeRotation(radians);
CGRect rotatedRect = CGRectApplyAffineTransform(imgRect, __transform);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(NULL,
rotatedRect.size.width,
rotatedRect.size.height,
CGImageGetBitsPerComponent(original),
0,
colorSpace,
kCGBitmapAlphaInfoMask & kCGImageAlphaPremultipliedFirst);
CGContextSetAllowsAntialiasing(context, FALSE);
CGContextSetInterpolationQuality(context, kCGInterpolationNone);
CGColorSpaceRelease(colorSpace);
CGContextTranslateCTM(context,
+(rotatedRect.size.width/2),
+(rotatedRect.size.height/2));
CGContextRotateCTM(context, radians);
CGContextDrawImage(context, CGRectMake(-imgRect.size.width/2,
-imgRect.size.height/2,
imgRect.size.width,
imgRect.size.height),
original);
CGImageRef rotatedImage = CGBitmapContextCreateImage(context);
CFRelease(context);
return rotatedImage;
}
}
Extracting the data:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
if(self.lastDecodeTime && [self.lastDecodeTime timeIntervalSinceNow]>-DECODE_LIMIT_TIME){
return;
}
if ( self.scannerDisabled)
return;
self.lastDecodeTime=[NSDate date];
CVImageBufferRef videoFrame = CMSampleBufferGetImageBuffer(sampleBuffer);
CGFloat cameraFrameWidth = CVPixelBufferGetWidth(videoFrame);
CGFloat cameraFrameHeight = CVPixelBufferGetHeight(videoFrame);
CGPoint rectPoint = self.rectangleView.frame.origin;
rectPoint = [self.previewLayer convertPoint:rectPoint fromLayer:self.view.layer];
CGPoint cameraPoint = [self.previewLayer captureDevicePointOfInterestForPoint:rectPoint];
CGPoint matrixPoint = CGPointMake(cameraPoint.x*cameraFrameWidth, cameraPoint.y*cameraFrameHeight);
CGFloat D = self.rectangleView.frame.size.width*2.0;
CGRect matrixRect = CGRectMake(matrixPoint.x, matrixPoint.y, D, D);
CGImageRef videoFrameImage = [self createImageFromBuffer:videoFrame left:matrixRect.origin.x top:matrixRect.origin.y width:matrixRect.size.width height:matrixRect.size.height];
CGImageRef rotatedImage = [self createRotatedImage:videoFrameImage degrees:self.rotationDeg];
CGImageRelease(videoFrameImage);
...
...
...
}
For debugging I added a small image view at the top left to see the cropped result.
You can see I'm in the right direction, BUT there is some kind of offset.
I'm assuming that since the camera buffer is 1280x720 and the iPhone screen has a different aspect ratio, there is some kind of crop, which may be the offset I'm dealing with.
Attached is a screenshot; you can see the cropped image isn't centred.
P.S. Here are the output settings:
AVCaptureVideoDataOutput *output = [AVCaptureVideoDataOutput new];
NSDictionary *rgbOutputSettings = [NSDictionary dictionaryWithObject:
[NSNumber numberWithInt:kCMPixelFormat_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey];
[output setVideoSettings:rgbOutputSettings];
Any ideas?
Just try this to get a cropped image from the whole image:
[self.view resizableSnapshotViewFromRect:requiredRectToCrop afterScreenUpdates:YES withCapInsets:UIEdgeInsetsZero];
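Note that -resizableSnapshotViewFromRect:afterScreenUpdates:withCapInsets: returns a UIView (useful for display), not a UIImage. If you need an actual image, one sketch (iOS 7+, reusing the same requiredRectToCrop) is to render the view hierarchy into an image context and crop:

// Render the full view into an image (iOS 7+), then crop to the target rect.
UIGraphicsBeginImageContextWithOptions(self.view.bounds.size, NO, 0.0);
[self.view drawViewHierarchyInRect:self.view.bounds afterScreenUpdates:YES];
UIImage *fullImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();

// Crop in pixel coordinates (CGImage space), so multiply by the scale.
CGFloat scale = fullImage.scale;
CGRect cropRect = CGRectMake(requiredRectToCrop.origin.x * scale,
                             requiredRectToCrop.origin.y * scale,
                             requiredRectToCrop.size.width * scale,
                             requiredRectToCrop.size.height * scale);
CGImageRef croppedRef = CGImageCreateWithImageInRect(fullImage.CGImage, cropRect);
UIImage *croppedImage = [UIImage imageWithCGImage:croppedRef
                                            scale:scale
                                      orientation:fullImage.imageOrientation];
CGImageRelease(croppedRef);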
I'm using VLCKit to play video in my app, and I need to be able to take a screenshot of the video at certain points. This is the code I'm using:
-(NSData*)generateThumbnail{
int s = 1;
UIScreen* screen = [UIScreen mainScreen];
if ([screen respondsToSelector:@selector(scale)]) {
s = (int) [screen scale];
}
GLint viewport[4];
glGetIntegerv(GL_VIEWPORT, viewport);
int width = viewport[2];
int height = [_profile.resolution integerValue];//viewport[3];
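// NOTE: if this height differs from viewport[3], the buffer math below no
// longer matches what glReadPixels actually writes, which can leave parts
// of the image missing.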
int myDataLength = width * height * 4;
GLubyte *buffer = (GLubyte *) malloc(myDataLength);
GLubyte *buffer2 = (GLubyte *) malloc(myDataLength);
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
for(int y1 = 0; y1 < height; y1++) {
for(int x1 = 0; x1 <width * 4; x1++) {
buffer2[(height - 1 - y1) * width * 4 + x1] = buffer[y1 * 4 * width + x1];
}
}
free(buffer);
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, myDataLength, NULL);
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
CGImageRef imageRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
CGColorSpaceRelease(colorSpaceRef);
CGDataProviderRelease(provider);
UIImage *image = [UIImage imageWithCGImage:imageRef scale:s orientation:UIImageOrientationUp];
CGImageRelease(imageRef);
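// NOTE: UIImageJPEGRepresentation expects a compressionQuality in 0.0-1.0;
// a value of 5 is clamped to 1.0 (maximum quality, largest file).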
NSData *thumbAsData = UIImageJPEGRepresentation(image, 5);
return thumbAsData;
}
To be honest, I have no idea how most of this works. I copied it from somewhere a while ago (I don't remember the source). It mostly works, but frequently parts of the image seem to be missing.
Can someone point me in the right direction? Most of the other posts I see about OpenGL screenshots are fairly old and don't seem to apply.
Thanks.
I wrote a class to work around this problem.
Basically, you take a screenshot directly from the screen, and then, if you want, you can take just a part of the image and also scale it.
A screenshot taken from the screen captures everything: UIKit, OpenGL, AVFoundation, etc.
Here's the class: https://github.com/matteogobbi/MGScreenshotHelper/
Below are the useful functions, but I suggest you download (and star :D) the helper class directly ;)
/* Get a screenshot of the screen (useful when you have UIKit elements and OpenGL or AVFoundation stuff) */
+ (UIImage *)screenshotFromScreen
{
CGImageRef UIGetScreenImage(void);
CGImageRef screen = UIGetScreenImage();
UIImage* screenImage = [UIImage imageWithCGImage:screen];
CGImageRelease(screen);
return screenImage;
}
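Be aware that UIGetScreenImage() is a private API; apps that call it have historically been rejected from the App Store, so treat this as a development and debugging aid.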
/* Get a screenshot of a given rect of the screen, and scale it to the size you want. */
+ (UIImage *)getScreenshotFromScreenWithRect:(CGRect)captureRect andScaleToSize:(CGSize)newSize
{
UIImage *image = [[self class] screenshotFromScreen];
image = [[self class] cropImage:image withRect:captureRect];
image = [[self class] scaleImage:image toSize:newSize];
return image;
}
#pragma mark - Other methods
/* Get a portion of an image */
+ (UIImage *)cropImage:(UIImage *)image withRect:(CGRect)rect
{
CGImageRef imageRef = CGImageCreateWithImageInRect([image CGImage], rect);
UIImage *croppedImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef); // CGImageCreateWithImageInRect returns a +1 reference
return croppedImage;
}
/* Scale an image */
+ (UIImage *)scaleImage:(UIImage *)image toSize:(CGSize)newSize
{
UIGraphicsBeginImageContextWithOptions(newSize, YES, 0.0);
[image drawInRect:CGRectMake(0, 0, newSize.width, newSize.height)];
UIImage *scaledImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return scaledImage;
}
OK, turns out this is a problem only in the Simulator. On a device it seems to work 98% of the time.
I've tried to create a screenshot using UIActivityViewController and save it into Photos on an iPhone/iPad device. In the simulator everything shows correctly, but when I switch to a device it only shows part of the content. Here are the screenshots:
In the simulator (you can see there is a background, a green line, and a star image)
On a real device (you can see there is only the star image, and everything else is gone)
I merged those three UIImages into one image so that I could take a screenshot.
I first merge the background image (the bridge UIImage) with the star image.
-(UIImage*)mergeUIImageView:(UIImage*)bkgound
FrontPic:(UIImage*)fnt
FrontPicX:(CGFloat)xPos
FrontPicY:(CGFloat)yPos
FrontPicWidth:(CGFloat)picWidth
FrontPicHeight:(CGFloat)picHeight
FinalSize:(CGSize)finalSize
{
UIGraphicsBeginImageContext(CGSizeMake([UIScreen mainScreen].bounds.size.height, [UIScreen mainScreen].bounds.size.width));
// bkgound - is the bridge image
[bkgound drawInRect:CGRectMake(0, 0, [UIScreen mainScreen].bounds.size.height, [UIScreen mainScreen].bounds.size.width)];
// fnt - is the star image
[fnt drawInRect:CGRectMake(xPos, yPos, picWidth, picHeight)];
// merged image
UIImage* newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return newImage;
}
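As an aside, two things worth double-checking here on Retina devices: the finalSize parameter is never actually used (the context is always sized to the screen bounds), and UIGraphicsBeginImageContext always creates a 1x context; UIGraphicsBeginImageContextWithOptions(size, NO, 0.0) would preserve the screen scale.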
Then I merged this picture with the OpenGL-rendered picture, which is the green line.
a) I first convert the OpenGL image to a UIImage using this function:
-(UIImage *) glToUIImage {
float scaleFactor = [[UIScreen mainScreen] scale];
CGRect screen = [[UIScreen mainScreen] bounds];
CGFloat image_height = screen.size.width * scaleFactor;
CGFloat image_width = screen.size.height * scaleFactor;
NSInteger myDataLength = image_width * image_height * 4;
// allocate array and read pixels into it.
GLubyte *buffer = (GLubyte *) malloc(myDataLength);
glReadPixels(0, 0, image_width, image_height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
// gl renders "upside down" so swap top to bottom into new array.
// there's gotta be a better way, but this works.
GLubyte *buffer2 = (GLubyte *) malloc(myDataLength);
for(int y = 0; y < image_height; y++)
{
for(int x = 0; x < image_width * 4; x++)
{
buffer2[(int)((image_height - 1 - y) * image_width * 4 + x)] = buffer[(int)(y * 4 * image_width + x)];
}
}
// make data provider with data.
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, myDataLength, NULL);
// prep the ingredients
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * image_width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
// make the cgimage
CGImageRef imageRef = CGImageCreate(image_width, image_height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
// then make the uiimage from that
UIImage *myImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpaceRef);
free(buffer); // the unflipped copy is no longer needed
// buffer2 is still backing the provider's data; give CGDataProviderCreateWithData
// a release callback if you want it freed when the image is destroyed.
return myImage;
}
b) Then I merge this OpenGL image with the image above (bridge + star) using the same function as before:
-(UIImage*)screenshot
{
// get opengl image from above function
UIImage *image = [self glToUIImage];
CGRect pos = CGRectMake(0, 0, image.size.width, image.size.height);
UIGraphicsBeginImageContext(image.size);
[image drawInRect:pos];
[self.background.image drawInRect:pos];
UIImage* final = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return final;
}
And it works great in the simulator (version 6.0) for iPhone, iPad, iPhone with Retina, and iPad with Retina. However, when I switch to a real device (iPhone 4/4S/5, iPad 2/mini/Retina) it only shows the star image. The Xcode version is 4.6.3, the base SDK is the latest iOS (6.1), and the iOS deployment target is 5.0. Could you tell me how to fix it? Thanks.
The problem is that iOS 6.0 does not keep the buffer contents around; it erases them after presenting. A screenshot, however, reads its data from that buffer, which is why I kept getting a black background. Adding a category that makes the layer retain the buffered image solves the problem:
@interface CAEAGLLayer (Retained)
@end
@implementation CAEAGLLayer (Retained)
- (NSDictionary*) drawableProperties
{
    return @{kEAGLDrawablePropertyRetainedBacking : @(YES)};
}
@end
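Note that overriding -drawableProperties in a category replaces that method for every CAEAGLLayer in the process. If you only need retained backing for one view, setting drawableProperties directly on that view's layer (as in the snippet earlier) is the more targeted fix.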
I have a UIScrollView, with multiple drawable views as subviews. I am using Apple's GLPaint code. My problem is when I capture the UIScrollView's content to form an UIImage, the state of my drawing view is not captured. Below is the code I use for capturing.
//this code captures the UIScrollView including the invisible part
-(UIImage*)captureImage {
UIImage* image = nil;
UIGraphicsBeginImageContext(_scrollView.contentSize);
{
CGPoint savedContentOffset = _scrollView.contentOffset;
CGRect savedFrame = _scrollView.frame;
_scrollView.contentOffset = CGPointZero;
_scrollView.frame = CGRectMake(0, 0, _scrollView.contentSize.width, _scrollView.contentSize.height);
[_scrollView.layer renderInContext: UIGraphicsGetCurrentContext()];
image = UIGraphicsGetImageFromCurrentImageContext();
_scrollView.contentOffset = savedContentOffset;
_scrollView.frame = savedFrame;
}
UIGraphicsEndImageContext();
return image;
}
Can anyone help me with this problem please?
Thanks!!
You can add this function to your canvas class:
- (UIImage*)snapshot
{
GLint backingWidth1, backingHeight1;
// Bind the color renderbuffer used to render the OpenGL ES view
// If your application only creates a single color renderbuffer which is already bound at this point,
// this call is redundant, but it is needed if you're dealing with multiple renderbuffers.
// Note, replace "_colorRenderbuffer" with the actual name of the renderbuffer object defined in your class.
glBindRenderbufferOES(GL_RENDERBUFFER_OES, viewRenderbuffer);
// Get the size of the backing CAEAGLLayer
glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_WIDTH_OES, &backingWidth1);
glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_HEIGHT_OES, &backingHeight1);
NSInteger x = 0, y = 0, width = backingWidth1, height = backingHeight1;
NSInteger dataLength = width * height * 4;
GLubyte *data = (GLubyte*)malloc(dataLength * sizeof(GLubyte));
// Read pixel data from the framebuffer
glPixelStorei(GL_PACK_ALIGNMENT, 4);
glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);
// Create a CGImage with the pixel data
// If your OpenGL ES content is opaque, use kCGImageAlphaNoneSkipLast to ignore the alpha channel
// otherwise, use kCGImageAlphaPremultipliedLast
CGDataProviderRef ref = CGDataProviderCreateWithData(NULL, data, dataLength, NULL);
CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
CGImageRef iref = CGImageCreate(width, height, 8, 32, width * 4, colorspace, kCGBitmapByteOrder32Big | kCGImageAlphaNoneSkipLast,
ref, NULL, true, kCGRenderingIntentDefault);
// OpenGL ES measures data in PIXELS
// Create a graphics context with the target size measured in POINTS
NSInteger widthInPoints, heightInPoints;
if (NULL != UIGraphicsBeginImageContextWithOptions) {
// On iOS 4 and later, use UIGraphicsBeginImageContextWithOptions to take the scale into consideration
// Set the scale parameter to your OpenGL ES view's contentScaleFactor
// so that you get a high-resolution snapshot when its value is greater than 1.0
CGFloat scale = self.contentScaleFactor;
widthInPoints = width / scale;
heightInPoints = height / scale;
UIGraphicsBeginImageContextWithOptions(CGSizeMake(widthInPoints, heightInPoints), NO, scale);
}
else {
// On iOS prior to 4, fall back to use UIGraphicsBeginImageContext
widthInPoints = width;
heightInPoints = height;
UIGraphicsBeginImageContext(CGSizeMake(widthInPoints, heightInPoints));
}
CGContextRef cgcontext = UIGraphicsGetCurrentContext();
// UIKit coordinate system is upside down to GL/Quartz coordinate system
// Flip the CGImage by rendering it to the flipped bitmap context
// The size of the destination area is measured in POINTS
CGContextSetBlendMode(cgcontext, kCGBlendModeCopy);
CGContextDrawImage(cgcontext, CGRectMake(0.0, 0.0, widthInPoints, heightInPoints), iref);
// Retrieve the UIImage from the current context
UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
// Clean up
free(data);
CFRelease(ref);
CFRelease(colorspace);
CGImageRelease(iref);
return image;
}
and invoke it from the scroll view.
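For example, here's a sketch of how the captureImage method above could composite each canvas's GL snapshot over the scroll view render, since -renderInContext: does not capture OpenGL content (CanvasView is a hypothetical name for your GLPaint-based view class):

// Inside captureImage, after [_scrollView.layer renderInContext:...]:
for (UIView *subview in _scrollView.subviews) {
    if ([subview isKindOfClass:[CanvasView class]]) {   // hypothetical class name
        UIImage *glImage = [(CanvasView *)subview snapshot];
        [glImage drawInRect:subview.frame];             // frame is in content coordinates
    }
}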
I am having this strange problem...
I have to capture the screen data and convert it into an image using the following code. This code works fine on the iPhone/iPad simulator and on the iPhone device, but not on the iPad.
The iPhone is running iOS 3.1.1 and the iPad iOS 4.2.
- (UIImage *)screenshotImage {
CGRect screenBounds = [[UIScreen mainScreen] bounds];
int backingWidth = screenBounds.size.width;
int backingHeight =screenBounds.size.height;
NSInteger myDataLength = backingWidth * backingHeight * 4;
GLuint *buffer = (GLuint *) malloc(myDataLength);
glReadPixels(0, 0, backingWidth, backingHeight, GL_RGBA4, GL_UNSIGNED_BYTE, buffer);
for(int y = 0; y < backingHeight / 2; y++) {
for(int xt = 0; xt < backingWidth; xt++) {
GLuint top = buffer[y * backingWidth + xt];
GLuint bottom = buffer[(backingHeight - 1 - y) * backingWidth + xt];
buffer[(backingHeight - 1 - y) * backingWidth + xt] = top;
buffer[y * backingWidth + xt] = bottom;
}
}
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer, myDataLength, releaseScreenshotData);
const int bitsPerComponent = 8;
const int bitsPerPixel = 4 * bitsPerComponent;
const int bytesPerRow = 4 * backingWidth;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
CGImageRef imageRef = CGImageCreate(backingWidth,backingHeight, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
CGColorSpaceRelease(colorSpaceRef);
CGDataProviderRelease(provider);
UIImage *myImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
// myImage = [self addIconToImage:myImage];
return myImage;
}
Any idea what's going wrong?
These two lines don't match:
NSInteger myDataLength = backingWidth * backingHeight * 4;
glReadPixels(0, 0, backingWidth, backingHeight, GL_RGBA4, GL_UNSIGNED_BYTE, buffer);
GL_RGBA4 means 4 bits per channel, but you're allocating for 8 bits per channel. The format parameter of glReadPixels takes an unsized format; in OpenGL ES the format/type combination that is guaranteed to be readable is GL_RGBA with GL_UNSIGNED_BYTE, which matches your 4-bytes-per-pixel allocation.
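In other words, the corrected pair, matching the 4-bytes-per-pixel allocation, would be:

NSInteger myDataLength = backingWidth * backingHeight * 4; // 4 bytes per RGBA8 pixel
glReadPixels(0, 0, backingWidth, backingHeight, GL_RGBA, GL_UNSIGNED_BYTE, buffer);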
Also make sure you're reading from the correct buffer (front vs. left vs. any accidentally bound FBO). I recommend reading from the back buffer before doing the buffer swap.
For iOS 4 or later I am using the multisampling technique for anti-aliasing. glReadPixels() cannot read directly from a multisampled FBO; you need to resolve it into a single-sampled buffer and then read from that. Please refer to the following post:
Reading data using glReadPixel() with multisampling
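For reference, a minimal sketch of that resolve step using Apple's extension (the framebuffer names here are placeholders for your own objects):

// Resolve the multisampled FBO into a single-sampled one, then read from it.
glBindFramebufferOES(GL_READ_FRAMEBUFFER_APPLE, _msaaFramebuffer);    // MSAA source
glBindFramebufferOES(GL_DRAW_FRAMEBUFFER_APPLE, _resolveFramebuffer); // destination
glResolveMultisampleFramebufferAPPLE();
glBindFramebufferOES(GL_FRAMEBUFFER_OES, _resolveFramebuffer);
// ...now call glReadPixels() as usual...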
Screenshot code from Apple's OpenGL ES documentation:
- (UIImage*)snapshot:(UIView*)eaglview
{
GLint backingWidth, backingHeight;
// Bind the color renderbuffer used to render the OpenGL ES view
// If your application only creates a single color renderbuffer which is already bound at this point,
// this call is redundant, but it is needed if you're dealing with multiple renderbuffers.
// Note, replace "_colorRenderbuffer" with the actual name of the renderbuffer object defined in your class.
glBindRenderbufferOES(GL_RENDERBUFFER_OES, _colorRenderbuffer);
// Get the size of the backing CAEAGLLayer
glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_WIDTH_OES, &backingWidth);
glGetRenderbufferParameterivOES(GL_RENDERBUFFER_OES, GL_RENDERBUFFER_HEIGHT_OES, &backingHeight);
NSInteger x = 0, y = 0, width = backingWidth, height = backingHeight;
NSInteger dataLength = width * height * 4;
GLubyte *data = (GLubyte*)malloc(dataLength * sizeof(GLubyte));
// Read pixel data from the framebuffer
glPixelStorei(GL_PACK_ALIGNMENT, 4);
glReadPixels(x, y, width, height, GL_RGBA, GL_UNSIGNED_BYTE, data);
// Create a CGImage with the pixel data
// If your OpenGL ES content is opaque, use kCGImageAlphaNoneSkipLast to ignore the alpha channel
// otherwise, use kCGImageAlphaPremultipliedLast
CGDataProviderRef ref = CGDataProviderCreateWithData(NULL, data, dataLength, NULL);
CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceRGB();
CGImageRef iref = CGImageCreate(width, height, 8, 32, width * 4, colorspace, kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast,
ref, NULL, true, kCGRenderingIntentDefault);
// OpenGL ES measures data in PIXELS
// Create a graphics context with the target size measured in POINTS
NSInteger widthInPoints, heightInPoints;
if (NULL != UIGraphicsBeginImageContextWithOptions) {
// On iOS 4 and later, use UIGraphicsBeginImageContextWithOptions to take the scale into consideration
// Set the scale parameter to your OpenGL ES view's contentScaleFactor
// so that you get a high-resolution snapshot when its value is greater than 1.0
CGFloat scale = eaglview.contentScaleFactor;
widthInPoints = width / scale;
heightInPoints = height / scale;
UIGraphicsBeginImageContextWithOptions(CGSizeMake(widthInPoints, heightInPoints), NO, scale);
}
else {
// On iOS prior to 4, fall back to use UIGraphicsBeginImageContext
widthInPoints = width;
heightInPoints = height;
UIGraphicsBeginImageContext(CGSizeMake(widthInPoints, heightInPoints));
}
CGContextRef cgcontext = UIGraphicsGetCurrentContext();
// UIKit coordinate system is upside down to GL/Quartz coordinate system
// Flip the CGImage by rendering it to the flipped bitmap context
// The size of the destination area is measured in POINTS
CGContextSetBlendMode(cgcontext, kCGBlendModeCopy);
CGContextDrawImage(cgcontext, CGRectMake(0.0, 0.0, widthInPoints, heightInPoints), iref);
// Retrieve the UIImage from the current context
UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
// Clean up
free(data);
CFRelease(ref);
CFRelease(colorspace);
CGImageRelease(iref);
return image;
}