Extract subimage of CVImageBufferRef - iOS

I simply want to extract a small area of a YUV 420 image. That is, create a CVImageBufferRef from a CVImageBufferRef which only contains a rectangular part of the original image.
Here is what I tried so far:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBufferRef fromConnection:(AVCaptureConnection *)connection
{
    // callback from AVCaptureOutput
    CVImageBufferRef imageBufferRef = CMSampleBufferGetImageBuffer(sampleBufferRef);
    if (imageBufferRef)
    {
        // Take a subset of buffer to create a smaller image
        CVPixelBufferLockBaseAddress(imageBufferRef, 0);
        size_t widthY = CVPixelBufferGetWidthOfPlane(imageBufferRef, 0);
        size_t widthUV = CVPixelBufferGetWidthOfPlane(imageBufferRef, 1);
        size_t heightY = CVPixelBufferGetHeightOfPlane(imageBufferRef, 0);
        size_t heightUV = CVPixelBufferGetHeightOfPlane(imageBufferRef, 1);

        size_t cropHeightY = 320;
        size_t cropWidthY = 320;
        size_t cropHeightUV = cropHeightY / 2;
        size_t cropWidthUV = cropWidthY;
        size_t cropY_X0 = widthY / 2 - (cropWidthY / 2);
        size_t cropY_Y0 = heightY / 2 - (cropHeightY / 2);
        size_t cropUV_X0 = widthUV / 2 - (cropWidthUV / 2);
        size_t cropUV_Y0 = heightUV / 2 - (cropHeightUV / 2);

        void *baseAddressY = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 0);
        void *baseAddressUV = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 1);
        size_t bytesPerRowY = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 0);
        size_t bytesPerRowUV = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 1);
        size_t pixelBytesY = bytesPerRowY / widthY;
        size_t pixelBytesUV = bytesPerRowUV / widthUV;

        void *startPosY = baseAddressY + (cropY_Y0 * bytesPerRowY + cropY_X0 * pixelBytesY);
        void *startPosUV = baseAddressUV + (cropUV_Y0 * bytesPerRowUV + cropUV_X0 * pixelBytesUV);
        size_t bytesPerRowOut = cropWidthY * pixelBytesY;
        size_t sizeY = bytesPerRowOut * cropHeightY;
        size_t sizeUV = bytesPerRowOut * cropHeightUV;

        unsigned char *pixelY = (unsigned char *)malloc(sizeY);
        unsigned char *pixelUV = (unsigned char *)malloc(sizeUV);
        for (int i = 0; i < cropHeightY; ++i) {
            memcpy(pixelY + i * bytesPerRowOut, startPosY + i * bytesPerRowY, bytesPerRowOut);
        }
        for (int i = 0; i < cropHeightUV; ++i) {
            memcpy(pixelUV + i * bytesPerRowOut, startPosUV + i * bytesPerRowUV, bytesPerRowOut);
        }
        void *baseAddresses[2] = {pixelY, pixelUV};
        size_t planeWidths[2] = {cropWidthY, cropWidthUV};
        size_t planeHeights[2] = {cropHeightY, cropHeightUV};
        size_t planeBytesPerRow[2] = {bytesPerRowOut, bytesPerRowOut};

        // create a new CVImageBufferRef from pixelY and pixelUV
        CVPixelBufferRef outBuff;
        CVPixelBufferCreateWithPlanarBytes(NULL, cropWidthY, cropHeightY, '420v', NULL, 0, 2, baseAddresses, planeWidths, planeHeights, planeBytesPerRow, NULL, NULL, NULL, &outBuff);

        if (logCameraSettings) {
            NSLog(@"Original Image Size:\n width:%zu\n height:%zu\n", widthY, heightY);
            size_t outWidthY = CVPixelBufferGetWidthOfPlane(outBuff, 0);
            size_t outHeightY = CVPixelBufferGetHeightOfPlane(outBuff, 0);
            NSLog(@"Modified Image Size:\n width:%zu\n height:%zu\n", outWidthY, outHeightY);
        }

        // Here would be the place where I actually want to do something with the image
        // TEST: show image (in debugger in following method)
        [self convertToUIImage:imageBufferRef]; // --> works
        [self convertToUIImage:outBuff];        // --> only gray, does not work

        // Release the allocated memory
        CVPixelBufferUnlockBaseAddress(imageBufferRef, 0);
        free(pixelY);
        free(pixelUV);
    }
}
- (void)convertToUIImage:(CVImageBufferRef)imageBuffer
{
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer];
    CIContext *temporaryContext = [CIContext contextWithOptions:nil];
    CGImageRef videoImage = [temporaryContext
                             createCGImage:ciImage
                             fromRect:CGRectMake(0, 0,
                                                 CVPixelBufferGetWidth(imageBuffer),
                                                 CVPixelBufferGetHeight(imageBuffer))];
    // Inspect the following UIImage in debugger.
    UIImage *image = [[UIImage alloc] initWithCGImage:videoImage];
    CGImageRelease(videoImage);
}
In the code above I added a small helper, convertToUIImage, whose only purpose is to let me inspect the CVImageBufferRef I created as a UIImage in the debugger.
Inspecting imageBufferRef shows me the correct camera feed.
Inspecting outBuff, however, does not show a small area of that camera feed but an all-gray patch of the correct size.
So my questions are:
What am I doing wrong here?
Is this even the correct way to achieve my goal?
Any help is really appreciated. Thank you in advance.

Here is how I solved it:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBufferRef fromConnection:(AVCaptureConnection *)connection
{
    // callback from AVCaptureOutput
    CVImageBufferRef imageBufferRef = CMSampleBufferGetImageBuffer(sampleBufferRef);
    if (imageBufferRef)
    {
        // Take a subset of buffer to create a smaller image
        CVPixelBufferLockBaseAddress(imageBufferRef, 0);
        size_t widthY = CVPixelBufferGetWidthOfPlane(imageBufferRef, 0);
        size_t widthUV = CVPixelBufferGetWidthOfPlane(imageBufferRef, 1);
        size_t heightY = CVPixelBufferGetHeightOfPlane(imageBufferRef, 0);
        size_t heightUV = CVPixelBufferGetHeightOfPlane(imageBufferRef, 1);

        size_t cropHeightY = 500;
        size_t cropWidthY = 500;
        size_t cropHeightUV = cropHeightY / 2;
        size_t cropWidthUV = cropWidthY;
        size_t cropY_X0 = widthY / 2 - (cropWidthY / 2);
        size_t cropY_Y0 = heightY / 2 - (cropHeightY / 2);
        size_t cropUV_X0 = widthUV / 2 - (cropWidthUV / 2);
        size_t cropUV_Y0 = heightUV / 2 - (cropHeightUV / 2);

        void *baseAddressY = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 0);
        void *baseAddressUV = CVPixelBufferGetBaseAddressOfPlane(imageBufferRef, 1);
        size_t bytesPerRowY = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 0);
        size_t bytesPerRowUV = CVPixelBufferGetBytesPerRowOfPlane(imageBufferRef, 1);
        size_t pixelBytesY = bytesPerRowY / widthY;
        size_t pixelBytesUV = bytesPerRowUV / widthUV;

        void *startPosY = baseAddressY + (cropY_Y0 * bytesPerRowY + cropY_X0 * pixelBytesY);
        void *startPosUV = baseAddressUV + (cropUV_Y0 * bytesPerRowUV + cropUV_X0 * pixelBytesUV);
        size_t bytesPerRowOut = cropWidthY * pixelBytesY;
        size_t sizeY = bytesPerRowOut * cropHeightY;
        size_t sizeUV = bytesPerRowOut * cropHeightUV;

        unsigned char *pixelY = (unsigned char *)malloc(sizeY);
        unsigned char *pixelUV = (unsigned char *)malloc(sizeUV);
        for (int i = 0; i < cropHeightY; ++i) {
            memcpy(pixelY + i * bytesPerRowOut, startPosY + i * bytesPerRowY, bytesPerRowOut);
        }
        for (int i = 0; i < cropHeightUV; ++i) {
            memcpy(pixelUV + i * bytesPerRowOut, startPosUV + i * bytesPerRowUV, bytesPerRowOut);
        }
        void *baseAddresses[2] = {pixelY, pixelUV};
        size_t planeWidths[2] = {cropWidthY, cropWidthUV};
        size_t planeHeights[2] = {cropHeightY, cropHeightUV};
        size_t planeBytesPerRow[2] = {bytesPerRowOut, bytesPerRowOut};

        // Transform input to UIImage
        UIImage *inputAsUIImage = [self convertToUIImage:imageBufferRef];
        // Extract subimage of UIImage
        CGRect fromRect = CGRectMake(cropY_X0, cropY_Y0, cropWidthY, cropHeightY); // or whatever rectangle
        CGImageRef drawImage = CGImageCreateWithImageInRect(inputAsUIImage.CGImage, fromRect);
        UIImage *newImage = [UIImage imageWithCGImage:drawImage];
        CGImageRelease(drawImage);

        // Convert UIImage back to CVImageBufferRef
        // 1. Create a CIImage with the underlying CGImage encapsulated by the UIImage:
        CIImage *inputImage = [CIImage imageWithCGImage:newImage.CGImage];
        // 2. Create a CIContext:
        CIContext *ciContext = [CIContext contextWithCGContext:UIGraphicsGetCurrentContext() options:nil];
        // 3. Render the CIImage to a CVPixelBuffer (referred to as 'outputBuffer'):
        CVPixelBufferRef outputBuffer;
        CVPixelBufferCreateWithPlanarBytes(NULL, cropWidthY, cropHeightY, '420v', NULL, 0, 2, baseAddresses, planeWidths, planeHeights, planeBytesPerRow, NULL, NULL, NULL, &outputBuffer);
        [ciContext render:inputImage toCVPixelBuffer:outputBuffer];

        if (logCameraSettings) {
            NSLog(@"Original Image Size:\n width:%zu\n height:%zu\n", widthY, heightY);
            size_t outWidthY = CVPixelBufferGetWidthOfPlane(outputBuffer, 0);
            size_t outHeightY = CVPixelBufferGetHeightOfPlane(outputBuffer, 0);
            NSLog(@"Modified Image Size:\n width:%zu\n height:%zu\n", outWidthY, outHeightY);
        }

        // Do something with it here

        // Release the allocated memory
        CVPixelBufferUnlockBaseAddress(imageBufferRef, 0);
        free(pixelY);
        free(pixelUV);
    }
}
- (UIImage *)convertToUIImage:(CVImageBufferRef)imageBuffer
{
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer];
    CIContext *temporaryContext = [CIContext contextWithOptions:nil];
    CGImageRef videoImage = [temporaryContext
                             createCGImage:ciImage
                             fromRect:CGRectMake(0, 0,
                                                 CVPixelBufferGetWidth(imageBuffer),
                                                 CVPixelBufferGetHeight(imageBuffer))];
    UIImage *image = [[UIImage alloc] initWithCGImage:videoImage];
    CGImageRelease(videoImage);
    return image;
}
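
As a side note: the round trip through UIImage and CGImageCreateWithImageInRect can probably be avoided, because Core Image can crop and render back into a pixel buffer directly. Here is a minimal sketch of that idea, reusing the names from the code above (untested against this exact pipeline; note that Core Image uses a bottom-left origin, so the crop rectangle may need to be flipped vertically):

CIImage *full = [CIImage imageWithCVPixelBuffer:imageBufferRef];
CGRect cropRect = CGRectMake(cropY_X0, cropY_Y0, cropWidthY, cropHeightY);
// Crop, then translate so the cropped region starts at the origin.
CIImage *cropped = [[full imageByCroppingToRect:cropRect]
                    imageByApplyingTransform:CGAffineTransformMakeTranslation(-cropRect.origin.x,
                                                                              -cropRect.origin.y)];
CIContext *context = [CIContext contextWithOptions:nil];
[context render:cropped toCVPixelBuffer:outputBuffer];

This avoids allocating an intermediate CGImage and UIImage for every frame.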

Related

How to convert CT (Computed Tomography) grayscale binary data to an RGB UIImage

I have binary CT (Computed Tomography) grayscale data in a file, data_byte1.
The aim is to change the window width and window level in order to change how the CT image is displayed.
Every two bytes in the data represent one pixel, and the width and height are 512 pixels each.
How do I transform the pixel array into an RGB array and then into a bitmap?
- (void)viewDidLoad {
    [super viewDidLoad];
    NSData *data = [[NSData alloc] initWithContentsOfFile:[[NSBundle mainBundle] pathForResource:@"data_byte1" ofType:@""]];
    Byte *testByte = (Byte *)[data bytes];
    self.imageView.image = [self imageFromGRAYBytes:testByte imageSize:CGSizeMake(512, 512)];
}

- (UIImage *)imageFromGRAYBytes:(unsigned char *)imageBytes imageSize:(CGSize)imageSize {
    CGImageRef imageRef = [self imageRefFromGRAYBytes:imageBytes imageSize:imageSize];
    UIImage *image = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    return image;
}

- (CGImageRef)imageRefFromGRAYBytes:(unsigned char *)imageBytes imageSize:(CGSize)imageSize {
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
    CGContextRef context = CGBitmapContextCreate(imageBytes,
                                                 imageSize.width,
                                                 imageSize.height,
                                                 16,                  // bits per component (two bytes per pixel)
                                                 imageSize.width * 2, // bytes per row
                                                 colorSpace,
                                                 kCGImageAlphaNone);
    CGImageRef imageRef = CGBitmapContextCreateImage(context);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    return imageRef;
}
Going through the methods above, I get the picture; the original image displays correctly.
But I want to change the display of the picture through this method:
- (int)getPresentValueV:(int)v AndWidth:(int)w AndHeight:(int)h {
    int minv = h - roundf(w / 2);
    if (v < minv) {
        return 0;
    }
    int maxv = h + roundf(w / 2);
    if (v > maxv) {
        return 255;
    }
    int pv = roundf(255 * (v - minv) / w);
    if (pv < 0) {
        return 0;
    }
    if (pv > 255) {
        return 255;
    }
    return pv;
}
This method maps a raw value v to a presentation value pv of at most 255, using a window width w and a window level h. The default value of w is 1600 and the default value of h is -400. Next, pv is used as an RGB triple (pv, pv, pv) to create a UIImage:
for (unsigned i = 0; i < data.length / 2; i++) {
    NSData *intData = [data subdataWithRange:NSMakeRange(i * 2, 2)];
    [arrray addObject:@([self getPresentValueV:*(int *)([intData bytes]) AndWidth:1600 AndHeight:-400])];
}
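
To make the windowing concrete with those defaults (w = 1600, h = -400): minv = -400 - 800 = -1200 and maxv = -400 + 800 = 400, so a raw value of -1200 or below maps to 0, a raw value of 400 or above maps to 255, and a raw value of 0 maps to roundf(255 * (0 - (-1200)) / 1600) = 191.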
So I want to choose different values of w and h, run them through the getPresentValueV method above to get pv, and generate the RGB (pv, pv, pv) array to create a new picture.
- (UIImage *)imageWithGrayArray:(NSArray *)array {
    const int width = 512;
    const int height = 512;
    NSUInteger bytesPerPixel = 4;
    NSUInteger bytesPerRow = bytesPerPixel * width;
    NSUInteger bitsPerComponent = 8;
    UInt32 *pixels = (UInt32 *)malloc(height * width * sizeof(UInt32));
    memset(pixels, 0, height * width * sizeof(UInt32));
    for (int i = 0; i < array.count; i++) {
        NSArray *subArray = array[i];
        for (int j = 0; j < subArray.count; j++) {
            *(pixels + i * width + j) = makeRGBAColor([subArray[j] integerValue], 0, 0, 255);
        }
    }
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pixels,
                                                 width, height,
                                                 bitsPerComponent, bytesPerRow, colorSpace,
                                                 kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    UIImage *image = [UIImage imageWithCGImage:CGBitmapContextCreateImage(context)];
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    free(pixels);
    return image;
}
I generated a two-dimensional array and used it to create a new picture, but the result was not correct.
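
For illustration only, here is a hedged sketch of the pipeline described above: it windows the 16-bit values into 8-bit gray and builds a grayscale bitmap directly, reusing getPresentValueV. It assumes the raw file holds little-endian signed 16-bit values for a 512 x 512 image; the helper name windowedImageFromData:width:level: is illustrative, not from the question.

// Illustrative helper (assumptions: little-endian signed 16-bit raw values,
// 512 x 512 pixels; getPresentValueV:AndWidth:AndHeight: is the method above).
- (UIImage *)windowedImageFromData:(NSData *)data width:(int)w level:(int)h {
    const int kWidth = 512, kHeight = 512;
    const int16_t *raw = (const int16_t *)data.bytes;
    unsigned char *gray = (unsigned char *)malloc(kWidth * kHeight);
    for (int i = 0; i < kWidth * kHeight; i++) {
        // Window each raw value into 0..255.
        gray[i] = (unsigned char)[self getPresentValueV:raw[i] AndWidth:w AndHeight:h];
    }
    // Build an 8-bit grayscale image (one byte per pixel, no alpha).
    CGColorSpaceRef cs = CGColorSpaceCreateDeviceGray();
    CGContextRef ctx = CGBitmapContextCreate(gray, kWidth, kHeight, 8, kWidth, cs, kCGImageAlphaNone);
    CGImageRef cgImage = CGBitmapContextCreateImage(ctx);
    UIImage *image = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);
    CGContextRelease(ctx);
    CGColorSpaceRelease(cs);
    free(gray);
    return image;
}

An 8-bit grayscale bitmap, rather than an RGB one filled with (pv, pv, pv), is enough here, because all three channels would carry the same value anyway.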

For iOS, how to use vImage to convert an ARGB image to a gray image

I am new to iOS app development, and I have met a problem: how to use vImage to convert an ARGB image to a gray image. I have already implemented this conversion by other means; however, according to the Accelerate Framework Reference, vImage should be able to do this work too.
I tried the method below, but there is no conversion in the result. I attach the code here; can you help me?
- (UIImage *)rgbTograyByvImage
{
    const size_t width = self.size.width * self.scale;
    const size_t height = self.size.height * self.scale;
    const size_t bytesPerRow = width * 4;
    CGColorSpaceRef space = CGColorSpaceCreateDeviceRGB();
    CGContextRef bmContext = CGBitmapContextCreate(NULL, width, height, 8, bytesPerRow, space, kCGBitmapByteOrderDefault | kCGImageAlphaPremultipliedFirst);
    CGColorSpaceRelease(space);
    if (!bmContext) {
        return nil;
    }
    CGContextDrawImage(bmContext, (CGRect){.origin.x = 0.0f, .origin.y = 0.0f, .size.width = width, .size.height = height}, self.CGImage);
    UInt32 *data = (UInt32 *)CGBitmapContextGetData(bmContext);
    if (!data) {
        CGContextRelease(bmContext);
        return nil;
    }
    int Aphal = 0;
    int Red = 1;
    int Green = 2;
    int Blue = 3;
    UInt8 *aphalSpace = malloc(sizeof(UInt8) * width * height);
    UInt8 *redSpace = malloc(sizeof(UInt8) * width * height);
    UInt8 *greenSpace = malloc(sizeof(UInt8) * width * height);
    UInt8 *blueSpace = malloc(sizeof(UInt8) * width * height);
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            UInt8 *argbPixel = (UInt8 *)(&data[y * width + x]);
            aphalSpace[y * width + x] = argbPixel[Aphal];
            redSpace[y * width + x] = argbPixel[Red];
            greenSpace[y * width + x] = argbPixel[Green];
            blueSpace[y * width + x] = argbPixel[Blue];
        }
    }
    vImage_Buffer argbImageBuffer = {(UInt8 *)data, height, width, bytesPerRow};
    vImage_Buffer aImageBuffer = {aphalSpace, height, width, width};
    vImage_Buffer rImageBuffer = {redSpace, height, width, width};
    vImage_Buffer gImageBuffer = {greenSpace, height, width, width};
    vImage_Buffer bImageBuffer = {blueSpace, height, width, width};
    vImage_Error error;
    error = vImageConvert_ARGB8888toPlanar8(&argbImageBuffer, &aImageBuffer, &rImageBuffer, &gImageBuffer, &bImageBuffer, kvImageNoFlags);
    if (error != kvImageNoError) {
        NSLog(@"%s, vImage error %zd", __PRETTY_FUNCTION__, error);
    }
    CGImageRef grayImage = CGBitmapContextCreateImage(bmContext);
    UIImage *gray = [UIImage imageWithCGImage:grayImage];
    CGContextRelease(bmContext);
    CGImageRelease(grayImage);
    free(aphalSpace);
    free(redSpace);
    free(greenSpace);
    free(blueSpace);
    return gray;
}
You want vImageMatrixMultiply_ARGB8888ToPlanar8, new for iOS 9.
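
For illustration, here is a minimal, hedged sketch of that call, assuming the ARGB8888 source already sits in a vImage_Buffer named argbImageBuffer (as built in the question); the Rec. 601 luma weights used here are one common choice:

#import <Accelerate/Accelerate.h>

vImage_Buffer grayBuffer;
grayBuffer.width    = argbImageBuffer.width;
grayBuffer.height   = argbImageBuffer.height;
grayBuffer.rowBytes = argbImageBuffer.width; // one byte per pixel
grayBuffer.data     = malloc(grayBuffer.rowBytes * grayBuffer.height);

// gray = (0*A + 77*R + 150*G + 29*B) / 256 (Rec. 601 weights, summing to the divisor)
const int16_t matrix[4] = { 0, 77, 150, 29 };
vImage_Error err = vImageMatrixMultiply_ARGB8888ToPlanar8(&argbImageBuffer,
                                                          &grayBuffer,
                                                          matrix,
                                                          256,   // divisor
                                                          NULL,  // no pre-bias
                                                          0,     // no post-bias
                                                          kvImageNoFlags);
if (err != kvImageNoError) {
    NSLog(@"vImage error %zd", err);
}
// grayBuffer.data now holds one luminance byte per pixel; wrap it in a
// CGBitmapContext with CGColorSpaceCreateDeviceGray() to get a UIImage,
// and free(grayBuffer.data) when done.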

"Received memory warning" while saving frames to images in iOS

The code below saves each frame of a movie to the Photo Album on an iPad device. After about 60 frames have been saved as images, it gives me a "Received memory warning" and my app eventually crashes. What goes wrong here? Am I missing something? I am using iOS 7.
- (UIImage *)getImageFromGLBuffer:(GLuint)frameBuffer :(CGSize)screenSize
{
    glBindFramebuffer(GL_FRAMEBUFFER, frameBuffer);
    int width = screenSize.width;
    int height = screenSize.height;
    NSInteger iDataLength = width * height * 4;
    // allocate array and read pixels into it.
    GLubyte *buffer = (GLubyte *)malloc(iDataLength);
    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
    // gl renders "upside down" so swap top to bottom into new array.
    // there's gotta be a better way, but this works.
    GLubyte *buffer2 = (GLubyte *)malloc(iDataLength);
    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width * 4; x++)
        {
            buffer2[(int)((height - 1 - y) * width * 4 + x)] = buffer[(int)(y * 4 * width + x)];
        }
    }
    // Release the first buffer
    free((void *)buffer);
    // make data provider with data.
    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, iDataLength, releaseBufferData);
    // prep the ingredients
    int bitsPerComponent = 8;
    int bitsPerPixel = 32;
    int bytesPerRow = 4 * width;
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
    CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
    CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
    // make the cgimage
    CGImageRef imageRef = CGImageCreate(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
    CGDataProviderRelease(provider);
    CGColorSpaceRelease(colorSpaceRef);
    // then make the UIImage from that
    UIImage *image = [UIImage imageWithCGImage:imageRef]; //[[UIImage alloc] initWithCGImage:imageRef];
    NSData *imageData = UIImagePNGRepresentation(image); // get png representation
    UIImage *pngImage = [UIImage imageWithData:imageData];
    CGImageRelease(imageRef);
    return pngImage;
}
// callback for CGDataProviderCreateWithData
void releaseBufferData(void *info, const void *data, size_t dataSize)
{
    NSLog(@"releaseBufferData\n");
    // free the buffer
    free((void *)data);
}
- (void)saveImageToPhotoAlbum:(GLuint)frameBuffer :(CGSize)screenSize
{
    if (iqFrameBuffer != nil)
    {
        UIImage *image = [self getImageFromGLBuffer:frameBuffer :screenSize];
        if (image != nil)
        {
            dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
                UIImageWriteToSavedPhotosAlbum(image, self, @selector(image:didFinishSavingWithError:contextInfo:), nil);
            });
        }
        else
        {
            NSLog(@"Couldn't save to Photo Album due to invalid image..");
        }
    }
    else
    {
        NSLog(@"Frame buffer is invalid. Couldn't save to image..");
    }
}
// callback for UIImageWriteToSavedPhotosAlbum
- (void)image:(UIImage *)image didFinishSavingWithError:(NSError *)error contextInfo:(void *)contextInfo
{
    NSLog(@"Image has been saved to Photo Album successfully..\n");
    // [image release]; // release image
    image = nil;
}
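
A hedged suggestion that fits the symptom: UIImagePNGRepresentation, imageWithData: and imageWithCGImage: all return autoreleased objects, and when many frames are processed before the enclosing autorelease pool drains, peak memory keeps growing until the app receives memory warnings. Wrapping the per-frame work in an explicit @autoreleasepool usually bounds it. A minimal sketch, where frameCount is a hypothetical loop bound:

// Hypothetical driver loop; frameBuffer and screenSize as in the question.
for (NSUInteger frame = 0; frame < frameCount; frame++) {
    @autoreleasepool {
        // Autoreleased objects created in here (UIImage, NSData, ...)
        // are released once per iteration instead of piling up.
        UIImage *image = [self getImageFromGLBuffer:frameBuffer :screenSize];
        if (image) {
            UIImageWriteToSavedPhotosAlbum(image, self,
                @selector(image:didFinishSavingWithError:contextInfo:), nil);
        }
    }
}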

iOS 7: capturing a screenshot using OpenGL returns a black screen

Capturing a screenshot using OpenGL works fine for iOS versions before 7, but when the app runs on iOS 7 it returns a black screen.
Here is the piece of code I am working with:
- (UIImage *)glToUIImage {
    CGSize size = self.view.frame.size;
    int image_height = (int)size.width;
    int image_width = (int)size.height;
    NSInteger myDataLength = image_width * image_height * 4;
    // allocate array and read pixels into it.
    GLubyte *buffer = (GLubyte *)malloc(myDataLength);
    glReadPixels(0, 0, image_width, image_height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
    GLubyte *buffer2 = (GLubyte *)malloc(myDataLength);
    for (int y = 0; y < image_height; y++)
    {
        for (int x = 0; x < image_width * 4; x++)
        {
            buffer2[(image_height - 1 - y) * image_width * 4 + x] = buffer[y * 4 * image_width + x];
        }
    }
    // make data provider with data.
    CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, myDataLength, NULL);
    // prep the ingredients
    int bitsPerComponent = 8;
    int bitsPerPixel = 32;
    int bytesPerRow = 4 * image_width;
    CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
    CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
    CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
    // make the cgimage
    CGImageRef imageRef = CGImageCreate(image_width, image_height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
    // then make the uiimage from that
    UIImage *myImage = [UIImage imageWithCGImage:imageRef];
    return myImage;
}
// screenshot function: combines my opengl image with a background image and
// saves it into Photos.
- (UIImage *)screenshot
{
    UIImage *image = [self glToUIImage];
    CGRect pos = CGRectMake(0, 0, image.size.width, image.size.height);
    UIGraphicsBeginImageContextWithOptions(image.size, NO, [[UIScreen mainScreen] scale]);
    [image drawInRect:pos];
    UIImage *final = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    final = [[UIImage alloc] initWithCGImage:final.CGImage
                                       scale:1.0
                                 orientation:UIImageOrientationRight];
    // final picture I saved into Photos.
    UIImageWriteToSavedPhotosAlbum(final, self, @selector(image:didFinishSavingWithError:contextInfo:), nil);
    return final;
}
Has anybody captured a screenshot using OpenGL on iOS 7?
I have resolved the issue by updating the drawableProperties of the EAGLView class:
eaglLayer.drawableProperties = [NSDictionary dictionaryWithObjectsAndKeys:
                                [NSNumber numberWithBool:FALSE], kEAGLDrawablePropertyRetainedBacking,
                                kEAGLColorFormatRGBA8, kEAGLDrawablePropertyColorFormat,
                                nil];
:) :)
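
Worth noting as a hedged aside: when kEAGLDrawablePropertyRetainedBacking is NO, the renderbuffer contents are undefined after -presentRenderbuffer:, so glReadPixels generally has to be called before the frame is presented; that timing is another common cause of black OpenGL screenshots.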

Get pixel values from a PNG file: which is right and why?

I need pixel values from a PNG file, so I searched SO and found the following two methods:
Method 1
- (void)GeneratePixelArray {
    UIImage *cImage = [UIImage imageNamed:@"ball.png"];
    int width = (int)cImage.size.width;
    int height = (int)cImage.size.height;
    unsigned char *cMap = (unsigned char *)malloc(width * height);
    memset(cMap, 0, width * height);
    CFDataRef imageData = CGDataProviderCopyData(CGImageGetDataProvider(cImage.CGImage));
    const UInt32 *pixels = (const UInt32 *)CFDataGetBytePtr(imageData);
    // 0xff 0x00 is guard for this demo
    for (int j = 0; j < (width * height); j++)
    {
        printf("0x%x\n", (unsigned int)pixels[j]);
    }
    CFRelease(imageData);
}
Method 2
- (void *)GeneratePixelArray //:(UIImage *)image
{
    UIImage *cImage = [[UIImage alloc] imageNamed:@"ball.png"];
    //UIImage *cImage = [UIImage imageNamed:@"ball.png"];
    int pixelsWidth = (int)cImage.size.width;
    int pixelsHeight = (int)cImage.size.height;
    CGRect rect = {{0, 0}, {pixelsWidth, pixelsHeight}};
    // Use RGB color space without Alpha, just RGB
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    if (colorSpace == NULL)
    {
        NSLog(@"Error allocating color space! \n");
        return NULL;
    }
    unsigned int bitmapBytesPerRow = pixelsWidth * 4;
    unsigned int bitmapByteCount = bitmapBytesPerRow * pixelsHeight;
    void *bitmapData = malloc(bitmapByteCount);
    if (bitmapData == NULL) {
        NSLog(@"Memory not allocated!\n");
        CGColorSpaceRelease(colorSpace);
        return NULL;
    }
    // create bitmap context, 8 bits per component
    CGContextRef context = CGBitmapContextCreate(bitmapData,
                                                 pixelsWidth,
                                                 pixelsHeight,
                                                 8, // bits per component
                                                 bitmapBytesPerRow,
                                                 colorSpace,
                                                 kCGImageAlphaPremultipliedLast);
    // Make sure to release the colorspace before returning
    CGColorSpaceRelease(colorSpace);
    if (context == NULL)
    {
        free(bitmapData);
        NSLog(@"Context could not be created!");
        return NULL;
    }
    // Draw the image to the bitmap context
    CGContextDrawImage(context, rect, cImage.CGImage);
    // Now we can get a pointer to the image data associated with the context
    unsigned char *bitsData;
    bitsData = CGBitmapContextGetData(context);
    if (!bitsData)
    {
        NSLog(@"Failed");
        return NULL;
    }
    void *data = CGBitmapContextGetData(context);
    unsigned char *bitmapData2 = malloc(bitmapByteCount);
    if (bitmapData2 == NULL)
    {
        NSLog(@"Memory could not be allocated!\n");
        return NULL;
    }
    unsigned char *rcdata = (unsigned char *)data;
    unsigned char *wcdata = bitmapData2;
    // remove ARGB's fourth value, it is Alpha
    for (int i = 0; i < bitmapByteCount / 4; ++i, rcdata += 4)
    {
        printf("%x\n", (unsigned int)*(unsigned int *)rcdata);
        *(wcdata + 0) = *(rcdata + 0);
        *(wcdata + 1) = *(rcdata + 1);
        *(wcdata + 2) = *(rcdata + 2);
        *(wcdata + 3) = *(rcdata + 3);
        if (*(wcdata + 3) < 20) {
            printf("alpha...\n");
        }
        // if ((*(wcdata + 0) == 255) && (*(wcdata + 1) == 0) && (*(wcdata + 2) == 0)) {
        //     printf("red\n");
        // }
        wcdata += 4; // skip alpha
    }
    CGContextRelease(context);
    return bitsData;
}
I logged the output using:
printf("%x\n", (unsigned int)*(unsigned int *)rcdata);
The logs from Method 1 and Method 2 differ! Why? I am confused.
Thanks a lot!
