I am trying to create a UIImage using Core Graphics.
I want to draw an image divided into 4 different grayscale areas/pixels, laid out like this:
White  Gray
Gray   Black
So, using Core Graphics, I would like to define an array of 4 int8_t values that correspond to the desired image:
int8_t data[] = {
255, 122,
122, 0,
};
where 255 is white, 122 is gray, and 0 is black.
The best reference for similar code that I could find is here.
That reference deals with an RGB image, so I came up with this code using my own common sense (pardon my Objective-C; it is not my first language :)):
- (UIImage *)getImageFromGrayScaleArray {
int width = 2;
int height = 2;
int8_t data[] = {
255, 122,
122, 0,
};
CGDataProviderRef provider = CGDataProviderCreateWithData (NULL,
&data[0],
width * height,
NULL);
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceGray();
CGImageRef imageRef = CGImageCreate (width,
height,
[self bitsPerComponent],
[self bitsPerPixel],
width * [self bytesPerPixel],
colorSpaceRef,
kCGBitmapByteOrderDefault,
provider,
NULL,
NO,
kCGRenderingIntentDefault);
UIImage *image = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpaceRef);
return image;
}
- (int)bitsPerPixel {
return 8 * [self bytesPerPixel];
}
- (int)bytesPerPixel {
return [self bytesPerComponent] * [self componentsPerPixel];
}
- (int)componentsPerPixel {
return 1;
}
- (int)bytesPerComponent {
return 1;
}
- (int)bitsPerComponent {
return 8 * [self bytesPerComponent];
}
But... this code gives me an entirely black UIImage:
Can someone please point me to somewhere I can read up and understand how to do such a task? Documentation about Core Graphics seems quite scarce for this kind of task, and the time spent on all these guesses... forever :)
You're close...
Gray scale images need TWO components per pixel: brightness and alpha.
So, with just a couple changes (see the comments):
- (UIImage *)getImageFromGrayScaleArray {
int width = 2;
int height = 2;
// 1 byte for brightness, 1 byte for alpha
int8_t data[] = {
255, 255,
122, 255,
122, 255,
0, 255,
};
CGDataProviderRef provider = CGDataProviderCreateWithData (NULL,
&data[0],
// size is width * height * bytesPerPixel
width * height * [self bytesPerPixel],
NULL);
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceGray();
CGImageRef imageRef = CGImageCreate (width,
height,
[self bitsPerComponent],
[self bitsPerPixel],
width * [self bytesPerPixel],
colorSpaceRef,
// use this
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big,
// instead of this
//kCGBitmapByteOrderDefault,
provider,
NULL,
NO,
kCGRenderingIntentDefault);
UIImage *image = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpaceRef);
return image;
}
- (int)bitsPerPixel {
return 8 * [self bytesPerPixel];
}
- (int)bytesPerPixel {
return [self bytesPerComponent] * [self componentsPerPixel];
}
- (int)componentsPerPixel {
return 2; // 1 byte for brightness, 1 byte for alpha
}
- (int)bytesPerComponent {
return 1;
}
- (int)bitsPerComponent {
return 8 * [self bytesPerComponent];
}
Edit -- I think there's an issue with the memory buffer addressing using the above code. After some testing, I'm getting inconsistent results.
Give it a try with this modified code:
@interface TestingViewController : UIViewController
@end
@interface TestingViewController ()
@end
@implementation TestingViewController
// CGDataProviderCreateWithData callback to free the pixel data buffer
void freePixelData(void *info, const void *data, size_t size) {
free((void *)data);
}
- (UIImage*) getImageFromGrayScaleArray:(BOOL)allBlack {
int8_t grayArray[] = {
255, 122,
122, 0,
};
int8_t blackArray[] = {
0, 0,
0, 0,
};
int width = 2;
int height = 2;
int imageSizeInPixels = width * height;
int bytesPerPixel = 2; // 1 byte for brightness, 1 byte for alpha
unsigned char *pixels = (unsigned char *)malloc(imageSizeInPixels * bytesPerPixel);
memset(pixels, 255, imageSizeInPixels * bytesPerPixel); // setting alpha values to 255
if (allBlack) {
for (int i = 0; i < imageSizeInPixels; i++) {
pixels[i * 2] = blackArray[i]; // writing array of bytes as image brightnesses
}
} else {
for (int i = 0; i < imageSizeInPixels; i++) {
pixels[i * 2] = grayArray[i]; // writing array of bytes as image brightnesses
}
}
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceGray();
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL,
pixels,
imageSizeInPixels * bytesPerPixel,
freePixelData);
CGImageRef imageRef = CGImageCreate(width,
height,
8,
8 * bytesPerPixel,
width * bytesPerPixel,
colorSpaceRef,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big,
provider,
NULL,
false,
kCGRenderingIntentDefault);
UIImage *image = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef);
CGDataProviderRelease(provider);
CGColorSpaceRelease(colorSpaceRef);
return image;
}
- (void)viewDidLoad {
[super viewDidLoad];
self.view.backgroundColor = [UIColor systemTealColor];
UIImage *img1 = [self getImageFromGrayScaleArray:NO];
UIImage *img2 = [self getImageFromGrayScaleArray:YES];
UIImageView *v1 = [UIImageView new];
UIImageView *v2 = [UIImageView new];
v1.image = img1;
v1.backgroundColor = [UIColor systemYellowColor];
v2.image = img2;
v2.backgroundColor = [UIColor systemYellowColor];
v1.contentMode = UIViewContentModeScaleToFill;
v2.contentMode = UIViewContentModeScaleToFill;
v1.translatesAutoresizingMaskIntoConstraints = NO;
[self.view addSubview:v1];
v2.translatesAutoresizingMaskIntoConstraints = NO;
[self.view addSubview:v2];
UILayoutGuide *g = [self.view safeAreaLayoutGuide];
[NSLayoutConstraint activateConstraints:@[
[v1.topAnchor constraintEqualToAnchor:g.topAnchor constant:40.0],
[v1.centerXAnchor constraintEqualToAnchor:g.centerXAnchor],
[v1.widthAnchor constraintEqualToConstant:200.0],
[v1.heightAnchor constraintEqualToAnchor:v1.widthAnchor],
[v2.topAnchor constraintEqualToAnchor:v1.bottomAnchor constant:40.0],
[v2.centerXAnchor constraintEqualToAnchor:self.view.centerXAnchor],
[v2.widthAnchor constraintEqualToAnchor:v1.widthAnchor],
[v2.heightAnchor constraintEqualToAnchor:v2.widthAnchor],
]];
}
@end
We add two 200x200 image views, and set the top one's .image to the UIImage returned using:
int8_t grayArray[] = {
255, 122,
122, 0,
};
and the bottom image using:
int8_t blackArray[] = {
0, 0,
0, 0,
};
Output:
I have a camera preview layer where the camera preset is 1280x720.
Above the preview layer I added a square UIView with a border.
My goal is to get a cropped image from the camera.
Method to extract data from the camera:
-(CGImageRef)createImageFromBuffer:(CVImageBufferRef)buffer
left:(size_t)left
top:(size_t)top
width:(size_t)width
height:(size_t)height CF_RETURNS_RETAINED {
size_t bytesPerRow = CVPixelBufferGetBytesPerRow(buffer);
size_t dataWidth = CVPixelBufferGetWidth(buffer);
size_t dataHeight = CVPixelBufferGetHeight(buffer);
if (left + width > dataWidth ||
top + height > dataHeight) {
[NSException raise:NSInvalidArgumentException format:@"Crop rectangle does not fit within image data."];
}
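// round the row length up to the next multiple of 16 bytes (add 0xf, then clear the low 4 bits)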
size_t newBytesPerRow = ((width*4+0xf)>>4)<<4;
CVPixelBufferLockBaseAddress(buffer,0);
int8_t *baseAddress = (int8_t *)CVPixelBufferGetBaseAddress(buffer);
size_t size = newBytesPerRow*height;
int8_t *bytes = (int8_t *)malloc(size * sizeof(int8_t));
if (newBytesPerRow == bytesPerRow) {
memcpy(bytes, baseAddress+top*bytesPerRow, size * sizeof(int8_t));
} else {
for (int y=0; y<height; y++) {
memcpy(bytes+y*newBytesPerRow,
baseAddress+left*4+(top+y)*bytesPerRow,
newBytesPerRow * sizeof(int8_t));
}
}
CVPixelBufferUnlockBaseAddress(buffer, 0);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef newContext = CGBitmapContextCreate(bytes,
width,
height,
8,
newBytesPerRow,
colorSpace,
kCGBitmapByteOrder32Little|
kCGImageAlphaNoneSkipFirst);
CGColorSpaceRelease(colorSpace);
CGImageRef result = CGBitmapContextCreateImage(newContext);
CGContextRelease(newContext);
free(bytes);
return result;
}
Code to rotate the image:
- (CGImageRef)createRotatedImage:(CGImageRef)original degrees:(float)degrees CF_RETURNS_RETAINED {
if (degrees == 0.0f) {
CGImageRetain(original);
return original;
} else {
double radians = degrees * M_PI / 180;
#if TARGET_OS_EMBEDDED || TARGET_IPHONE_SIMULATOR
radians = -1 * radians;
#endif
size_t _width = CGImageGetWidth(original);
size_t _height = CGImageGetHeight(original);
CGRect imgRect = CGRectMake(0, 0, _width, _height);
CGAffineTransform __transform = CGAffineTransformMakeRotation(radians);
CGRect rotatedRect = CGRectApplyAffineTransform(imgRect, __transform);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(NULL,
rotatedRect.size.width,
rotatedRect.size.height,
CGImageGetBitsPerComponent(original),
0,
colorSpace,
kCGBitmapAlphaInfoMask & kCGImageAlphaPremultipliedFirst);
CGContextSetAllowsAntialiasing(context, FALSE);
CGContextSetInterpolationQuality(context, kCGInterpolationNone);
CGColorSpaceRelease(colorSpace);
CGContextTranslateCTM(context,
+(rotatedRect.size.width/2),
+(rotatedRect.size.height/2));
CGContextRotateCTM(context, radians);
CGContextDrawImage(context, CGRectMake(-imgRect.size.width/2,
-imgRect.size.height/2,
imgRect.size.width,
imgRect.size.height),
original);
CGImageRef rotatedImage = CGBitmapContextCreateImage(context);
CFRelease(context);
return rotatedImage;
}
}
Extracting the data:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
if(self.lastDecodeTime && [self.lastDecodeTime timeIntervalSinceNow]>-DECODE_LIMIT_TIME){
return;
}
if ( self.scannerDisabled)
return;
self.lastDecodeTime=[NSDate date];
CVImageBufferRef videoFrame = CMSampleBufferGetImageBuffer(sampleBuffer);
CGFloat cameraFrameWidth = CVPixelBufferGetWidth(videoFrame);
CGFloat cameraFrameHeight = CVPixelBufferGetHeight(videoFrame);
CGPoint rectPoint = self.rectangleView.frame.origin;
rectPoint = [self.previewLayer convertPoint:rectPoint fromLayer:self.view.layer];
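// captureDevicePointOfInterestForPoint: returns a normalized point in [0,1] in the capture device's coordinate space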
CGPoint cameraPoint = [self.previewLayer captureDevicePointOfInterestForPoint:rectPoint];
CGPoint matrixPoint = CGPointMake(cameraPoint.x*cameraFrameWidth,cameraPoint.x*cameraFrameHeight);
CGFloat D = self.rectangleView.frame.size.width*2.0;
CGRect matrixRect = CGRectMake(matrixPoint.x, matrixPoint.y, D, D);
CGImageRef videoFrameImage = [self createImageFromBuffer:videoFrame left:matrixRect.origin.x top:matrixRect.origin.y width:matrixRect.size.width height:matrixRect.size.height];
CGImageRef rotatedImage = [self createRotatedImage:videoFrameImage degrees:self.rotationDeg];
CGImageRelease(videoFrameImage);
...
...
...
}
For debugging, I added a small image view at the top left to see the cropped result.
You can see I'm heading in the right direction, BUT there is some kind of offset.
I'm assuming that since the camera buffer is 1280x720 and the iPhone screen has a different aspect ratio, there is some kind of crop, which may be the offset I'm dealing with.
In the attached screenshot you can see the cropped image isn't centred.
P.S. Here are the output settings:
AVCaptureVideoDataOutput *output = [AVCaptureVideoDataOutput new];
NSDictionary *rgbOutputSettings = [NSDictionary dictionaryWithObject:
[NSNumber numberWithInt:kCMPixelFormat_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey];
[output setVideoSettings:rgbOutputSettings];
Any ideas?
Just try this to get a cropped snapshot from the whole view:
[self.view resizableSnapshotViewFromRect:requiredRectToCrop afterScreenUpdates:YES withCapInsets:UIEdgeInsetsZero];
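Note that resizableSnapshotViewFromRect:afterScreenUpdates:withCapInsets: returns a UIView, not a UIImage. If you actually need a UIImage of that region, a minimal sketch (assuming requiredRectToCrop is in self.view's coordinate space; whether the live preview content shows up in a snapshot depends on how it is rendered) could be:
UIView *snapshotView = [self.view resizableSnapshotViewFromRect:requiredRectToCrop
                                              afterScreenUpdates:YES
                                                   withCapInsets:UIEdgeInsetsZero];
// Render the snapshot view into an image context to get a UIImage out of it
UIGraphicsBeginImageContextWithOptions(requiredRectToCrop.size, NO, 0.0);
[snapshotView drawViewHierarchyInRect:CGRectMake(0, 0, requiredRectToCrop.size.width, requiredRectToCrop.size.height)
                   afterScreenUpdates:YES];
UIImage *croppedImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();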
I'm using VLCKit to play video in my app, and I need to be able to take a screenshot of the video at certain points. This is the code I'm using:
-(NSData*)generateThumbnail{
int s = 1;
UIScreen* screen = [UIScreen mainScreen];
if ([screen respondsToSelector:@selector(scale)]) {
s = (int) [screen scale];
}
GLint viewport[4];
glGetIntegerv(GL_VIEWPORT, viewport);
int width = viewport[2];
int height = [_profile.resolution integerValue];//viewport[3];
int myDataLength = width * height * 4;
GLubyte *buffer = (GLubyte *) malloc(myDataLength);
GLubyte *buffer2 = (GLubyte *) malloc(myDataLength);
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
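// glReadPixels returns rows bottom-to-top, so flip the buffer vertically into buffer2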
for(int y1 = 0; y1 < height; y1++) {
for(int x1 = 0; x1 <width * 4; x1++) {
buffer2[(height - 1 - y1) * width * 4 + x1] = buffer[y1 * 4 * width + x1];
}
}
free(buffer);
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, myDataLength, NULL);
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
CGImageRef imageRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
CGColorSpaceRelease(colorSpaceRef);
CGDataProviderRelease(provider);
UIImage *image = [ UIImage imageWithCGImage:imageRef scale:s orientation:UIImageOrientationUp ];
NSData *thumbAsData = UIImageJPEGRepresentation(image, 5);
return thumbAsData;
}
To be honest, I have no idea how most of this works. I copied it from somewhere a while ago (I don't remember the source). It mostly works; however, parts of the image frequently seem to be missing.
Can someone point me in the right direction? Most of the other posts I see regarding OpenGL screenshots are fairly old, and don't seem to apply.
Thanks.
I wrote a class to work around this problem.
Basically, you take a screenshot directly from the screen, and then, if you want, you can take just a part of the image and also scale it.
When you take a screenshot from the screen, you capture everything: UIKit, OpenGL, AVFoundation, etc.
Here is the class: https://github.com/matteogobbi/MGScreenshotHelper/
Below are the useful functions, but I suggest you download (and star :D) my helper class directly ;)
/* Get a screenshot of the screen (useful when you have UIKit elements and OpenGL or AVFoundation stuff) */
+ (UIImage *)screenshotFromScreen
{
CGImageRef UIGetScreenImage(void);
CGImageRef screen = UIGetScreenImage();
UIImage* screenImage = [UIImage imageWithCGImage:screen];
CGImageRelease(screen);
return screenImage;
}
/* Get a screenshot of a given rect of the screen, and scale it to the size you want. */
+ (UIImage *)getScreenshotFromScreenWithRect:(CGRect)captureRect andScaleToSize:(CGSize)newSize
{
UIImage *image = [[self class] screenshotFromScreen];
image = [[self class] cropImage:image withRect:captureRect];
image = [[self class] scaleImage:image toSize:newSize];
return image;
}
#pragma mark - Other methods
/* Get a portion of an image */
+ (UIImage *)cropImage:(UIImage *)image withRect:(CGRect)rect
{
CGImageRef imageRef = CGImageCreateWithImageInRect([image CGImage], rect);
UIImage *croppedImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef); // release the copy created by CGImageCreateWithImageInRect
return croppedImage;
}
/* Scale an image */
+ (UIImage *)scaleImage:(UIImage *)image toSize:(CGSize)newSize
{
UIGraphicsBeginImageContextWithOptions(newSize, YES, 0.0);
[image drawInRect:CGRectMake(0, 0, newSize.width, newSize.height)];
UIImage *scaledImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return scaledImage;
}
Ok turns out this is a problem only in the Simulator. On a device it seems to work 98% of the time.
I'm using Vuforia for an augmented reality application. When I detect an image, I can display or render a 3D object and a UIImageView, and then I can take a screenshot of the 3D object, but I can't save the normal image. I'm just displaying a normal UIImageView holding images. Do I need to render a 2D image instead of a normal UIImageView?
Render 3D:
- (void)renderFrameQCAR
{
[self setFramebuffer];
// Clear colour and depth buffers
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Render video background and retrieve tracking state
QCAR::State state = QCAR::Renderer::getInstance().begin();
QCAR::Renderer::getInstance().drawVideoBackground();
glEnable(GL_DEPTH_TEST);
// We must detect if background reflection is active and adjust the culling direction.
// If the reflection is active, this means the pose matrix has been reflected as well,
// therefore standard counter clockwise face culling will result in "inside out" models.
if (offTargetTrackingEnabled) {
glDisable(GL_CULL_FACE);
} else {
glEnable(GL_CULL_FACE);
}
glCullFace(GL_BACK);
if(QCAR::Renderer::getInstance().getVideoBackgroundConfig().mReflection == QCAR::VIDEO_BACKGROUND_REFLECTION_ON)
glFrontFace(GL_CW); //Front camera
else
glFrontFace(GL_CCW); //Back camera
for (int i = 0; i < state.getNumTrackableResults(); ++i) {
// Get the trackable
// _numResults = state.getNumTrackableResults();
[self performSelectorOnMainThread:@selector(DisplayPhotoButton) withObject:nil waitUntilDone:YES];
const QCAR::TrackableResult* result = state.getTrackableResult(i);
const QCAR::Trackable& trackable = result->getTrackable();
//const QCAR::Trackable& trackable = result->getTrackable();
QCAR::Matrix44F modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(result->getPose());
// OpenGL 2
QCAR::Matrix44F modelViewProjection;
if (offTargetTrackingEnabled) {
SampleApplicationUtils::rotatePoseMatrix(90, 1, 0, 0,&modelViewMatrix.data[0]);
SampleApplicationUtils::scalePoseMatrix(kObjectScaleOffTargetTracking, kObjectScaleOffTargetTracking, kObjectScaleOffTargetTracking, &modelViewMatrix.data[0]);
} else {
SampleApplicationUtils::translatePoseMatrix(0.0f, 0.0f, kObjectScaleNormal, &modelViewMatrix.data[0]);
SampleApplicationUtils::scalePoseMatrix(kObjectScaleNormal, kObjectScaleNormal, kObjectScaleNormal, &modelViewMatrix.data[0]);
}
SampleApplicationUtils::multiplyMatrix(&vapp.projectionMatrix.data[0], &modelViewMatrix.data[0], &modelViewProjection.data[0]);
glUseProgram(shaderProgramID);
if (offTargetTrackingEnabled) {
glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)buildingModel.vertices);
glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)buildingModel.normals);
glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)buildingModel.texCoords);
} else {
glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)teapotVertices);
glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)teapotNormals);
glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)teapotTexCoords);
}
glEnableVertexAttribArray(vertexHandle);
glEnableVertexAttribArray(normalHandle);
glEnableVertexAttribArray(textureCoordHandle);
// Choose the texture based on the target name
int targetIndex = 0; // "stones"
if (!strcmp(trackable.getName(), "chips"))
targetIndex = 1;
else if (!strcmp(trackable.getName(), "tarmac"))
targetIndex = 2;
glActiveTexture(GL_TEXTURE0);
if (offTargetTrackingEnabled) {
glBindTexture(GL_TEXTURE_2D, augmentationTexture[3].textureID);
} else {
glBindTexture(GL_TEXTURE_2D, augmentationTexture[targetIndex].textureID);
}
glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE, (const GLfloat*)&modelViewProjection.data[0]);
glUniform1i(texSampler2DHandle, 0 /*GL_TEXTURE0*/);
if (offTargetTrackingEnabled) {
glDrawArrays(GL_TRIANGLES, 0, buildingModel.numVertices);
} else {
glDrawElements(GL_TRIANGLES, NUM_TEAPOT_OBJECT_INDEX, GL_UNSIGNED_SHORT, (const GLvoid*)teapotIndices);
}
SampleApplicationUtils::checkGlError("EAGLView renderFrameQCAR");
}
glDisable(GL_DEPTH_TEST);
glDisable(GL_CULL_FACE);
glDisableVertexAttribArray(vertexHandle);
glDisableVertexAttribArray(normalHandle);
glDisableVertexAttribArray(textureCoordHandle);
QCAR::Renderer::getInstance().end();
[self presentFramebuffer];
}
Displaying the UIImageView when the camera view opens:
- (id)initWithFrame:(CGRect)frame appSession:(SampleApplicationSession *) app
{
self = [super initWithFrame:frame];
if (self) {
vapp = app;
// takePhotoFlag = NO;
// [self DisplayPhotoButton];
// Enable retina mode if available on this device
if (YES == [vapp isRetinaDisplay]) {
[self setContentScaleFactor:2.0f];
}
// Load the augmentation textures
for (int i = 0; i < NUM_AUGMENTATION_TEXTURES; ++i) {
augmentationTexture[i] = [[Texture alloc] initWithImageFile:[NSString stringWithCString:textureFilenames[i] encoding:NSASCIIStringEncoding]];
}
// Create the OpenGL ES context
context = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
// The EAGLContext must be set for each thread that wishes to use it.
// Set it the first time this method is called (on the main thread)
if (context != [EAGLContext currentContext]) {
[EAGLContext setCurrentContext:context];
}
// Generate the OpenGL ES texture and upload the texture data for use
// when rendering the augmentation
for (int i = 0; i < NUM_AUGMENTATION_TEXTURES; ++i) {
GLuint textureID;
glGenTextures(1, &textureID);
[augmentationTexture[i] setTextureID:textureID];
glBindTexture(GL_TEXTURE_2D, textureID);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, [augmentationTexture[i] width], [augmentationTexture[i] height], 0, GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid*)[augmentationTexture[i] pngData]);
}
offTargetTrackingEnabled = NO;
[self loadBuildingsModel];
[self initShaders];
_takePhotoFlag = NO;
[self DisplayPhotoButton];
}
return self;
}
- (void)DisplayPhotoButton
{
UIImage *closeButtonImage = [UIImage imageNamed:@"back.png"];
// UIImage *closeButtonTappedImage = [UIImage imageNamed:@"button_close_pressed.png"];
CGRect aRect = CGRectMake(20,20,
closeButtonImage.size.width,
closeButtonImage.size.height);
photo = [UIButton buttonWithType:UIButtonTypeCustom];
photo.frame = aRect;
photo.userInteractionEnabled=YES;
[photo setImage:closeButtonImage forState:UIControlStateNormal];
// photo.autoresizingMask = UIViewAutoresizingFlexibleLeftMargin;
[photo addTarget:self action:@selector(takePhoto) forControlEvents:UIControlEventTouchUpInside];
[self addSubview:photo];
UIButton *arrowButton = [UIButton buttonWithType:UIButtonTypeCustom];
[arrowButton setImage:[UIImage imageNamed:@"back_btn.png"] forState:UIControlStateNormal];
// [overlayButton setFrame:CGRectMake(80, 420, 60, 30)];
[arrowButton setFrame:CGRectMake(100, 10, 40, 40)];
[arrowButton addTarget:self action:@selector(showActionSheet:forEvent:) forControlEvents:UIControlEventTouchUpInside];
[self addSubview:arrowButton];
}
OpenGL screenshot:
- (UIImage*) glToUIImage
{
UIImage *outputImage = nil;
CGFloat scale = [[UIScreen mainScreen] scale];
CGRect s = CGRectMake(0, 0, 320.0f * scale, 480.0f * scale);
uint8_t *buffer = (uint8_t *) malloc(s.size.width * s.size.height * 4);
glReadPixels(0, 0, s.size.width, s.size.height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
CGDataProviderRef ref = CGDataProviderCreateWithData(NULL, buffer, s.size.width * s.size.height * 4, NULL);
CGImageRef iref = CGImageCreate(s.size.width, s.size.height, 8, 32, s.size.width * 4, CGColorSpaceCreateDeviceRGB(), kCGBitmapByteOrderDefault, ref, NULL, true, kCGRenderingIntentDefault);
size_t width = CGImageGetWidth(iref);
size_t height = CGImageGetHeight(iref);
size_t length = width * height * 4;
uint32_t *pixels = (uint32_t *)malloc(length);
CGContextRef context1 = CGBitmapContextCreate(pixels, width, height, 8, width * 4,
CGImageGetColorSpace(iref), kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Big);
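// flip vertically while redrawing: glReadPixels output is upside down relative to UIKit coordinates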
CGAffineTransform transform = CGAffineTransformIdentity;
transform = CGAffineTransformMakeTranslation(0.0f, height);
transform = CGAffineTransformScale(transform, 1.0, -1.0);
CGContextConcatCTM(context1, transform);
CGContextDrawImage(context1, CGRectMake(0.0f, 0.0f, width, height), iref);
CGImageRef outputRef = CGBitmapContextCreateImage(context1);
outputImage = [UIImage imageWithCGImage: outputRef];
CGDataProviderRelease(ref);
CGImageRelease(iref);
CGContextRelease(context1);
CGImageRelease(outputRef);
free(pixels);
free(buffer);
UIImageWriteToSavedPhotosAlbum(outputImage, nil, nil, nil);
return outputImage;
}
Saving from presentFramebuffer:
- (BOOL)presentFramebuffer
{
if (_takePhotoFlag)
{
[self glToUIImage];
_takePhotoFlag = NO;
}
// setFramebuffer must have been called before presentFramebuffer, therefore
// we know the context is valid and has been set for this (render) thread
// Bind the colour render buffer and present it
glBindRenderbuffer(GL_RENDERBUFFER, colorRenderbuffer);
return [context presentRenderbuffer:GL_RENDERBUFFER];
}
You can either have an OpenGL screenshot or a UI screenshot. To combine the two, I suggest you do both. This will sound silly, but it is probably the fastest and most powerful way to do it (a combined sketch follows after the imageFromView: helper below):
Take a screenshot from OpenGL (as you already do)
Create an image view from that screenshot
Insert that image view on your main view
Take a UI screenshot* of the main view
Remove the image view
*By UI screenshot I mean something like this:
+ (UIImage *)imageFromView:(UIView *)view {
UIGraphicsBeginImageContextWithOptions(view.bounds.size, view.opaque, .0);
[view.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage * img = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return img;
}
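Putting the steps above together, a rough sketch (reusing glToUIImage from the question and the imageFromView: helper above; both are assumed to be available on the same class) might look like:
// 1. OpenGL screenshot of the current framebuffer
UIImage *glImage = [self glToUIImage];
// 2. + 3. wrap it in an image view and insert it on the main view
UIImageView *glImageView = [[UIImageView alloc] initWithImage:glImage];
glImageView.frame = self.view.bounds;
[self.view addSubview:glImageView];
// 4. UI screenshot of the main view (now containing the GL content as well)
UIImage *combined = [[self class] imageFromView:self.view];
// 5. remove the temporary image view
[glImageView removeFromSuperview];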
If you have an issue such as seeing a black background instead of the background image, there is probably a problem generating the CGImage somewhere in the pipeline, where the alpha channel is being skipped or premultiplied (a very common mistake).
EDIT: Getting image from read pixels RGBA data:
This is what I used for getting a UIImage from raw RGBA data. Note that this procedure will not handle any orientation, but you can modify it a bit to take the orientation as an argument as well and then use imageWithCGImage:scale:orientation:.
UIImage *imageFromRawData(uint8_t *data, int width, int height) {
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL,data,width*height*4,NULL);
CGImageRef imageRef = CGImageCreate(width, height, 8, 32, width*4, CGColorSpaceCreateDeviceRGB(), kCGImageAlphaLast, provider, NULL, NO, kCGRenderingIntentDefault);
UIImage *newImage = [UIImage imageWithCGImage:imageRef];
return newImage;
}
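As a variant of the function above, here is a sketch that spells out the alpha handling and releases the Core Graphics objects it creates; kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big is an assumption about how your GL output is laid out, so swap in kCGImageAlphaLast if your data uses straight alpha:
UIImage *imageFromRawDataExplicit(uint8_t *data, int width, int height) {
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, data, (size_t)width * height * 4, NULL);
    // Treat the buffer as premultiplied RGBA, 8 bits per component, 32-bit big-endian order
    CGImageRef imageRef = CGImageCreate(width, height, 8, 32, width * 4, colorSpace,
                                        kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big,
                                        provider, NULL, NO, kCGRenderingIntentDefault);
    UIImage *newImage = [UIImage imageWithCGImage:imageRef];
    // UIImage retains the CGImage, so the temporary Core Graphics objects can be released here
    CGImageRelease(imageRef);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpace);
    return newImage;
}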
I'm using AVCaptureSession for a live camera view feed, and then I render some images on the camera overlay view. I didn't use any EAGLView, just some images overlaid using AVCaptureSession with a preview layer. I want to take a screenshot of the live camera feed image together with the overlay image. So I searched some links and finally ended up with glReadPixels(), but when I implement this code it returns a black image. I just added the OpenGLES.framework library and imported it.
- (void)viewDidLoad
{
[super viewDidLoad];
[self setCaptureSession:[[AVCaptureSession alloc] init]];
[self addVideoInputFrontCamera:NO]; // set to YES for Front Camera, No for Back camera
[self addStillImageOutput];
[self setPreviewLayer:[[AVCaptureVideoPreviewLayer alloc] initWithSession:[self captureSession]] ];
[[self previewLayer] setVideoGravity:AVLayerVideoGravityResizeAspectFill];
CGRect layerRect = [[[self view] layer] bounds];
[[self previewLayer]setBounds:layerRect];
[[self previewLayer] setPosition:CGPointMake(CGRectGetMidX(layerRect),CGRectGetMidY(layerRect))];
[[[self view] layer] addSublayer:[self previewLayer]];
[[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(saveImageToPhotoAlbum) name:kImageCapturedSuccessfully object:nil];
[[self captureSession] startRunning];
UIImageView *dot =[[UIImageView alloc] initWithFrame:CGRectMake(50,50,200,200)];
dot.image = [UIImage imageNamed:@"draw.png"];
[self.view addSubview:dot];
}
Capturing the live camera feed with the overlay content using glReadPixels():
- (UIImage*) glToUIImage
{
CGFloat scale = [[UIScreen mainScreen] scale];
// CGRect s = CGRectMake(0, 0, 320.0f * scale, 480.0f * scale);
CGRect s = CGRectMake(0, 0, 768.0f * scale, 1024.0f * scale);
uint8_t *buffer = (uint8_t *) malloc(s.size.width * s.size.height * 4);
glReadPixels(0, 0, s.size.width, s.size.height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
CGDataProviderRef ref = CGDataProviderCreateWithData(NULL, buffer, s.size.width * s.size.height * 4, NULL);
CGImageRef iref = CGImageCreate(s.size.width, s.size.height, 8, 32, s.size.width * 4, CGColorSpaceCreateDeviceRGB(), kCGBitmapByteOrderDefault, ref, NULL, true, kCGRenderingIntentDefault);
size_t width = CGImageGetWidth(iref);
size_t height = CGImageGetHeight(iref);
size_t length = width * height * 4;
uint32_t *pixels = (uint32_t *)malloc(length);
CGContextRef context1 = CGBitmapContextCreate(pixels, width, height, 8, width * 4,
CGImageGetColorSpace(iref), kCGImageAlphaNoneSkipFirst | kCGBitmapByteOrder32Big);
CGAffineTransform transform = CGAffineTransformIdentity;
transform = CGAffineTransformMakeTranslation(0.0f, height);
transform = CGAffineTransformScale(transform, 1.0, -1.0);
CGContextConcatCTM(context1, transform);
CGContextDrawImage(context1, CGRectMake(0.0f, 0.0f, width, height), iref);
CGImageRef outputRef = CGBitmapContextCreateImage(context1);
UIImage *outputImage = [UIImage imageWithCGImage:outputRef]; // declared locally here; use an ivar instead if you need it elsewhere
CGDataProviderRelease(ref);
CGImageRelease(iref);
CGContextRelease(context1);
CGImageRelease(outputRef);
free(pixels);
free(buffer);
UIImageWriteToSavedPhotosAlbum(outputImage, nil, nil, nil);
NSLog(@"Screenshot size: %d, %d", (int)[outputImage size].width, (int)[outputImage size].height);
return outputImage;
}
-(void)screenshot:(id)sender{
[self glToUIImage];
}
But it returns a black image.
glReadPixels() won't work with an AV Foundation preview layer. There's no OpenGL ES context to capture pixels from, and even if there was, you'd need to capture from it before the scene was presented to the display.
If what you're trying to do is to capture an image overlaid on live video from the camera, my GPUImage framework could handle that for you. All you'd need to do would be to set up a GPUImageVideoCamera, a GPUImagePicture instance for what you needed to overlay, and a blend filter of some sort. You would then feed the output to a GPUImageView for display, and be able to capture still images from the blend filter at any point. The framework handles the heavy lifting for you with all of this.
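A rough sketch of that GPUImage setup might look like the following (class and method names are from the GPUImage framework as I recall them, so double-check them against the version you are using; the session preset, overlay image name, and gpuImageView property are assumptions for illustration):
// Camera input
GPUImageVideoCamera *videoCamera =
    [[GPUImageVideoCamera alloc] initWithSessionPreset:AVCaptureSessionPreset640x480
                                        cameraPosition:AVCaptureDevicePositionBack];
videoCamera.outputImageOrientation = UIInterfaceOrientationPortrait;
// Overlay image and blend filter
GPUImagePicture *overlay = [[GPUImagePicture alloc] initWithImage:[UIImage imageNamed:@"draw.png"]];
GPUImageAlphaBlendFilter *blendFilter = [[GPUImageAlphaBlendFilter alloc] init];
blendFilter.mix = 1.0;
// Wire the camera and the overlay into the blend, and the blend into an on-screen GPUImageView
[videoCamera addTarget:blendFilter];
[overlay addTarget:blendFilter];
[blendFilter addTarget:self.gpuImageView];
[overlay processImage];
[videoCamera startCameraCapture];
// Later, to grab a still of the blended output:
[blendFilter useNextFrameForImageCapture];
UIImage *capturedImage = [blendFilter imageFromCurrentFramebuffer];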