I am trying to convert an image into grayscale in the following way:
#define bytesPerPixel 4
#define bitsPerComponent 8
-(unsigned char*) getBytesForImage: (UIImage*)pImage
{
CGImageRef image = [pImage CGImage];
NSUInteger width = CGImageGetWidth(image);
NSUInteger height = CGImageGetHeight(image);
NSUInteger bytesPerRow = bytesPerPixel * width;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
unsigned char *rawData = malloc(height * width * 4);
CGContextRef context = CGBitmapContextCreate(rawData, width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
CGContextRelease(context);
return rawData;
}
-(UIImage*) processImage: (UIImage*)pImage
{
DebugLog(#"processing image");
unsigned char *rawData = [self getBytesForImage: pImage];
NSUInteger width = pImage.size.width;
NSUInteger height = pImage.size.height;
DebugLog(#"width: %d", width);
DebugLog(#"height: %d", height);
NSUInteger bytesPerRow = bytesPerPixel * width;
for (int xCoordinate = 0; xCoordinate < width; xCoordinate++)
{
for (int yCoordinate = 0; yCoordinate < height; yCoordinate++)
{
int byteIndex = (bytesPerRow * yCoordinate) + xCoordinate * bytesPerPixel;
//Getting original colors
float red = ( rawData[byteIndex] / 255.f );
float green = ( rawData[byteIndex + 1] / 255.f );
float blue = ( rawData[byteIndex + 2] / 255.f );
//Processing pixel data
float averageColor = (red + green + blue) / 3.0f;
red = averageColor;
green = averageColor;
blue = averageColor;
//Assigning new color components
rawData[byteIndex] = (unsigned char) red * 255;
rawData[byteIndex + 1] = (unsigned char) green * 255;
rawData[byteIndex + 2] = (unsigned char) blue * 255;
}
}
NSData* newPixelData = [NSData dataWithBytes: rawData length: height * width * 4];
UIImage* newImage = [UIImage imageWithData: newPixelData];
free(rawData);
DebugLog(#"image processed");
return newImage;
}
So when I want to convert an image I just call processImage:
imageToDisplay.image = [self processImage: image];
But imageToDisplay doesn't display. What may be the problem?
Thanks.
I needed a version that preserved the alpha channel, so I modified the code posted by Dutchie432:
@implementation UIImage (grayscale)
typedef enum {
ALPHA = 0,
BLUE = 1,
GREEN = 2,
RED = 3
} PIXELS;
- (UIImage *)convertToGrayscale {
CGSize size = [self size];
int width = size.width;
int height = size.height;
// the pixels will be painted to this array
uint32_t *pixels = (uint32_t *) malloc(width * height * sizeof(uint32_t));
// clear the pixels so any transparency is preserved
memset(pixels, 0, width * height * sizeof(uint32_t));
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
// create a context with RGBA pixels
CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, width * sizeof(uint32_t), colorSpace,
kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);
// paint the bitmap to our context which will fill in the pixels array
CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]);
for(int y = 0; y < height; y++) {
for(int x = 0; x < width; x++) {
uint8_t *rgbaPixel = (uint8_t *) &pixels[y * width + x];
// convert to grayscale using recommended method: http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
uint32_t gray = 0.3 * rgbaPixel[RED] + 0.59 * rgbaPixel[GREEN] + 0.11 * rgbaPixel[BLUE];
// set the pixels to gray
rgbaPixel[RED] = gray;
rgbaPixel[GREEN] = gray;
rgbaPixel[BLUE] = gray;
}
}
// create a new CGImageRef from our context with the modified pixels
CGImageRef image = CGBitmapContextCreateImage(context);
// we're done with the context, color space, and pixels
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
free(pixels);
// make a new UIImage to return
UIImage *resultUIImage = [UIImage imageWithCGImage:image];
// we're done with image now too
CGImageRelease(image);
return resultUIImage;
}
@end
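Since it's a category on UIImage, usage is a one-liner (a minimal sketch; the image name is hypothetical):
UIImage *gray = [[UIImage imageNamed:@"photo.png"] convertToGrayscale];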
Here is some code using only UIKit and the luminosity blend mode. A bit of a hack, but it works well.
// Transform the image in grayscale.
- (UIImage*) grayishImage: (UIImage*) inputImage {
// Create a graphic context.
UIGraphicsBeginImageContextWithOptions(inputImage.size, YES, 1.0);
CGRect imageRect = CGRectMake(0, 0, inputImage.size.width, inputImage.size.height);
// Draw the image with the luminosity blend mode.
// On top of a white background, this will give a black and white image.
[inputImage drawInRect:imageRect blendMode:kCGBlendModeLuminosity alpha:1.0];
// Get the resulting image.
UIImage *filteredImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return filteredImage;
}
To keep the transparency, maybe you can just set the opaque parameter of UIGraphicsBeginImageContextWithOptions to NO. Needs to be checked.
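If you want to try it, the only change is the context call (untested, as noted above):
// Untested: passing NO instead of YES asks for a context with an alpha channel.
UIGraphicsBeginImageContextWithOptions(inputImage.size, NO, 1.0);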
Based on Cam's code with the ability to deal with the scale for Retina displays.
- (UIImage *) toGrayscale
{
// byte offsets within each little-endian pixel: 0 = alpha, then blue, green, red
// (matching the PIXELS enum in Cam's version above)
const int RED = 3;
const int GREEN = 2;
const int BLUE = 1;
// Create image rectangle with current image width/height
CGRect imageRect = CGRectMake(0, 0, self.size.width * self.scale, self.size.height * self.scale);
int width = imageRect.size.width;
int height = imageRect.size.height;
// the pixels will be painted to this array
uint32_t *pixels = (uint32_t *) malloc(width * height * sizeof(uint32_t));
// clear the pixels so any transparency is preserved
memset(pixels, 0, width * height * sizeof(uint32_t));
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
// create a context with RGBA pixels
CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, width * sizeof(uint32_t), colorSpace,
kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);
// paint the bitmap to our context which will fill in the pixels array
CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]);
for(int y = 0; y < height; y++) {
for(int x = 0; x < width; x++) {
uint8_t *rgbaPixel = (uint8_t *) &pixels[y * width + x];
// convert to grayscale using recommended method: http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
uint8_t gray = (uint8_t) ((30 * rgbaPixel[RED] + 59 * rgbaPixel[GREEN] + 11 * rgbaPixel[BLUE]) / 100);
// set the pixels to gray
rgbaPixel[RED] = gray;
rgbaPixel[GREEN] = gray;
rgbaPixel[BLUE] = gray;
}
}
// create a new CGImageRef from our context with the modified pixels
CGImageRef image = CGBitmapContextCreateImage(context);
// we're done with the context, color space, and pixels
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
free(pixels);
// make a new UIImage to return
UIImage *resultUIImage = [UIImage imageWithCGImage:image
scale:self.scale
orientation:UIImageOrientationUp];
// we're done with image now too
CGImageRelease(image);
return resultUIImage;
}
I liked Mathieu Godart's answer, but it didn't seem to work properly for retina or alpha images. Here's an updated version that seems to work for both of those for me:
- (UIImage*)convertToGrayscale
{
UIGraphicsBeginImageContextWithOptions(self.size, NO, self.scale);
CGRect imageRect = CGRectMake(0.0f, 0.0f, self.size.width, self.size.height);
CGContextRef ctx = UIGraphicsGetCurrentContext();
// Draw a white background
CGContextSetRGBFillColor(ctx, 1.0f, 1.0f, 1.0f, 1.0f);
CGContextFillRect(ctx, imageRect);
// Draw the luminosity on top of the white background to get grayscale
[self drawInRect:imageRect blendMode:kCGBlendModeLuminosity alpha:1.0f];
// Apply the source image's alpha
[self drawInRect:imageRect blendMode:kCGBlendModeDestinationIn alpha:1.0f];
UIImage* grayscaleImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return grayscaleImage;
}
What exactly takes place when you use this function? Is the function returning an invalid image, or is the display not showing it correctly?
This is the method I use to convert to greyscale.
- (UIImage *) convertToGreyscale:(UIImage *)i {
int kRed = 1;
int kGreen = 2;
int kBlue = 4;
int colors = kGreen | kBlue | kRed;
int m_width = i.size.width;
int m_height = i.size.height;
uint32_t *rgbImage = (uint32_t *) malloc(m_width * m_height * sizeof(uint32_t));
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(rgbImage, m_width, m_height, 8, m_width * 4, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipLast);
CGContextSetInterpolationQuality(context, kCGInterpolationHigh);
CGContextSetShouldAntialias(context, NO);
CGContextDrawImage(context, CGRectMake(0, 0, m_width, m_height), [i CGImage]);
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
// now convert to grayscale
uint8_t *m_imageData = (uint8_t *) malloc(m_width * m_height);
for(int y = 0; y < m_height; y++) {
for(int x = 0; x < m_width; x++) {
uint32_t rgbPixel=rgbImage[y*m_width+x];
uint32_t sum=0,count=0;
if (colors & kRed) {sum += (rgbPixel>>24)&255; count++;}
if (colors & kGreen) {sum += (rgbPixel>>16)&255; count++;}
if (colors & kBlue) {sum += (rgbPixel>>8)&255; count++;}
m_imageData[y*m_width+x]=sum/count;
}
}
free(rgbImage);
// convert from a gray scale image back into a UIImage
uint8_t *result = (uint8_t *) calloc(m_width * m_height *sizeof(uint32_t), 1);
// process the image back to rgb
for(int i = 0; i < m_height * m_width; i++) {
result[i*4]=0;
int val=m_imageData[i];
result[i*4+1]=val;
result[i*4+2]=val;
result[i*4+3]=val;
}
// create a UIImage
colorSpace = CGColorSpaceCreateDeviceRGB();
context = CGBitmapContextCreate(result, m_width, m_height, 8, m_width * sizeof(uint32_t), colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipLast);
CGImageRef image = CGBitmapContextCreateImage(context);
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
UIImage *resultUIImage = [UIImage imageWithCGImage:image];
CGImageRelease(image);
free(m_imageData);
// make sure the data will be released by giving it to an autoreleased NSData
[NSData dataWithBytesNoCopy:result length:m_width * m_height * sizeof(uint32_t)];
return resultUIImage;
}
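Because the channels form a bitmask, the same method can build the grey values from a subset of channels by changing one line; a hypothetical variant:
int colors = kGreen | kBlue; // average only green and blue, ignoring red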
Different approach with CIFilter. Preserves alpha channel and works with transparent background:
+ (UIImage *)convertImageToGrayScale:(UIImage *)image
{
CIImage *inputImage = [CIImage imageWithCGImage:image.CGImage];
CIContext *context = [CIContext contextWithOptions:nil];
CIFilter *filter = [CIFilter filterWithName:@"CIColorControls"];
[filter setValue:inputImage forKey:kCIInputImageKey];
[filter setValue:@(0.0) forKey:kCIInputSaturationKey];
CIImage *outputImage = filter.outputImage;
CGImageRef cgImageRef = [context createCGImage:outputImage fromRect:outputImage.extent];
UIImage *result = [UIImage imageWithCGImage:cgImageRef];
CGImageRelease(cgImageRef);
return result;
}
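One caveat: imageWithCGImage: alone ignores the input's scale and orientation, so Retina images may come back at the wrong size. If that matters, a variant (my assumption, not part of the original snippet) is:
UIImage *result = [UIImage imageWithCGImage:cgImageRef
                                      scale:image.scale
                                orientation:image.imageOrientation];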
A Swift extension on UIImage, preserving alpha:
extension UIImage {
private func convertToGrayScaleNoAlpha() -> CGImageRef {
let colorSpace = CGColorSpaceCreateDeviceGray();
let bitmapInfo = CGBitmapInfo(CGImageAlphaInfo.None.rawValue)
let context = CGBitmapContextCreate(nil, UInt(size.width), UInt(size.height), 8, 0, colorSpace, bitmapInfo)
CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), self.CGImage)
return CGBitmapContextCreateImage(context)
}
/**
Return a new image in shades of gray + alpha
*/
func convertToGrayScale() -> UIImage {
let bitmapInfo = CGBitmapInfo(CGImageAlphaInfo.Only.rawValue)
let context = CGBitmapContextCreate(nil, UInt(size.width), UInt(size.height), 8, 0, nil, bitmapInfo)
CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), self.CGImage);
let mask = CGBitmapContextCreateImage(context)
return UIImage(CGImage: CGImageCreateWithMask(convertToGrayScaleNoAlpha(), mask), scale: scale, orientation:imageOrientation)!
}
}
Here is another good solution as a category method on UIImage. It's based on this blog post and its comments. But I fixed a memory issue here:
- (UIImage *)grayScaleImage {
// Create image rectangle with current image width/height
CGRect imageRect = CGRectMake(0, 0, self.size.width * self.scale, self.size.height * self.scale);
// Grayscale color space
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
// Create bitmap content with current image size and grayscale colorspace
CGContextRef context = CGBitmapContextCreate(nil, self.size.width * self.scale, self.size.height * self.scale, 8, 0, colorSpace, kCGImageAlphaNone);
// Draw image into current context, with specified rectangle
// using previously defined context (with grayscale colorspace)
CGContextDrawImage(context, imageRect, [self CGImage]);
// Create bitmap image info from pixel data in current context
CGImageRef grayImage = CGBitmapContextCreateImage(context);
// release the colorspace and graphics context
CGColorSpaceRelease(colorSpace);
CGContextRelease(context);
// make a new alpha-only graphics context
context = CGBitmapContextCreate(nil, self.size.width * self.scale, self.size.height * self.scale, 8, 0, nil, kCGImageAlphaOnly);
// draw image into context with no colorspace
CGContextDrawImage(context, imageRect, [self CGImage]);
// create alpha bitmap mask from current context
CGImageRef mask = CGBitmapContextCreateImage(context);
// release graphics context
CGContextRelease(context);
// make UIImage from grayscale image with alpha mask
CGImageRef cgImage = CGImageCreateWithMask(grayImage, mask);
UIImage *grayScaleImage = [UIImage imageWithCGImage:cgImage scale:self.scale orientation:self.imageOrientation];
// release the CG images
CGImageRelease(cgImage);
CGImageRelease(grayImage);
CGImageRelease(mask);
// return the new grayscale image
return grayScaleImage;
}
A fast and efficient Swift 3 implementation for iOS 9/10. I feel this is efficient having now tried every image-filtering method I could find for processing hundreds of images at a time (when downloading using AlamofireImage's ImageFilter option). I settled on this method as far better than any other I tried (for my use case) in terms of memory and speed.
func convertToGrayscale() -> UIImage? {
UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
let imageRect = CGRect(x: 0.0, y: 0.0, width: self.size.width, height: self.size.height)
let context = UIGraphicsGetCurrentContext()
// Draw a white background
context!.setFillColor(red: 1.0, green: 1.0, blue: 1.0, alpha: 1.0)
context!.fill(imageRect)
// optional: increase contrast with colorDodge before applying luminosity
// (my images were too dark when using just luminosity - you may not need this)
self.draw(in: imageRect, blendMode: CGBlendMode.colorDodge, alpha: 0.7)
// Draw the luminosity on top of the white background to get grayscale of original image
self.draw(in: imageRect, blendMode: CGBlendMode.luminosity, alpha: 0.90)
// optional: re-apply alpha if your image has transparency - based on user1978534's answer (I haven't tested this as I didn't have transparency - I just know this would be the syntax)
// self.draw(in: imageRect, blendMode: CGBlendMode.destinationIn, alpha: 1.0)
let grayscaleImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return grayscaleImage
}
Re the use of colorDodge: I initially had issues getting my images light enough to match the grayscale coloring produced by CIFilter("CIPhotoEffectTonal") - my results turned out too dark. I was able to get a decent match by applying CGBlendMode.colorDodge at ~0.7 alpha, which seems to increase the overall contrast.
Other color blend effects might work too - but I think you would want to apply them before luminosity, which is the greyscale filtering effect. I found this page a very helpful reference on the different blend modes.
Re the efficiency gains I found: I need to process hundreds of thumbnail images as they are loaded from a server (using AlamofireImage for async loading, caching, and applying a filter). I started to experience crashes when the total size of my images exceeded the cache size, so I experimented with other methods.
The CPU-based Core Image CIFilter approach was the first I tried, but it wasn't memory-efficient enough for the number of images I'm handling.
I also tried applying a CIFilter via the GPU using EAGLContext(api: .openGLES3), which was actually even more memory-intensive: I got memory warnings for 450+ MB of usage while loading 200+ images.
I tried bitmap processing (i.e. CGContext(data: nil, width: width, height: height, bitsPerComponent: 8, bytesPerRow: 0, space: colorSpace, bitmapInfo: CGImageAlphaInfo.none.rawValue)...), which worked well except that I couldn't get a high enough resolution for a modern Retina device. Images were very grainy even after I added context.scaleBy(x: scaleFactor, y: scaleFactor).
So out of everything I tried, this method (drawing in a UIGraphics image context) turned out to be vastly more efficient in speed and memory when applied as an AlamofireImage filter: less than 70 MB of RAM when processing my 200+ images, and they load basically instantly rather than in the roughly 35 seconds the OpenGL ES methods took. I know these are not very scientific benchmarks; I will instrument it if anyone is very curious though :)
And lastly, if you need to pass this or another greyscale filter into AlamofireImage, this is how to do it (note that you must import AlamofireImage into your class to use ImageFilter):
public struct GrayScaleFilter: ImageFilter {
public init() {
}
public var filter: (UIImage) -> UIImage {
return { image in
return image.convertToGrayscale() ?? image
}
}
}
To use it, create the filter like this and pass into af_setImage like so:
let filter = GrayScaleFilter()
imageView.af_setImage(withURL: url, filter: filter)
@interface UIImageView (Settings)
- (void)convertImageToGrayScale;
@end
@implementation UIImageView (Settings)
- (void)convertImageToGrayScale
{
// Create image rectangle with current image width/height
CGRect imageRect = CGRectMake(0, 0, self.image.size.width, self.image.size.height);
// Grayscale color space
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
// Create bitmap content with current image size and grayscale colorspace
CGContextRef context = CGBitmapContextCreate(nil, self.image.size.width, self.image.size.height, 8, 0, colorSpace, kCGImageAlphaNone);
// Draw image into current context, with specified rectangle
// using previously defined context (with grayscale colorspace)
CGContextDrawImage(context, imageRect, [self.image CGImage]);
// Create bitmap image info from pixel data in current context
CGImageRef imageRef = CGBitmapContextCreateImage(context);
// Create a new UIImage object
UIImage *newImage = [UIImage imageWithCGImage:imageRef];
// Release colorspace, context and bitmap information
CGColorSpaceRelease(colorSpace);
CGContextRelease(context);
CFRelease(imageRef);
// Return the new grayscale image
self.image = newImage;
}
@end
I have yet another answer. This one is extremely performant and handles retina graphics as well as transparency. It expands on Sargis Gevorgyan's approach:
+ (UIImage*) grayScaleFromImage:(UIImage*)image opaque:(BOOL)opaque
{
// NSTimeInterval start = [NSDate timeIntervalSinceReferenceDate];
CGSize size = image.size;
CGRect bounds = CGRectMake(0, 0, size.width, size.height);
// Create bitmap content with current image size and grayscale colorspace
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
size_t bitsPerComponent = 8;
size_t bytesPerPixel = opaque ? 1 : 2;
size_t bytesPerRow = bytesPerPixel * size.width * image.scale;
CGContextRef context = CGBitmapContextCreate(nil, size.width, size.height, bitsPerComponent, bytesPerRow, colorSpace, opaque ? kCGImageAlphaNone : kCGImageAlphaPremultipliedLast);
// create image from bitmap
CGContextDrawImage(context, bounds, image.CGImage);
CGImageRef cgImage = CGBitmapContextCreateImage(context);
UIImage* result = [[UIImage alloc] initWithCGImage:cgImage scale:image.scale orientation:UIImageOrientationUp];
CGImageRelease(cgImage);
CGContextRelease(context);
// performance results on iPhone 6S+ in Release mode.
// Results are in photo pixels, not device pixels:
// ~ 5ms for 500px x 600px
// ~ 15ms for 2200px x 600px
// NSLog(#"generating %d x %d # %dx grayscale took %f seconds", (int)size.width, (int)size.height, (int)image.scale, [NSDate timeIntervalSinceReferenceDate] - start);
return result;
}
Using blend modes instead is elegant, but copying to a grayscale bitmap is more performant, because it uses only one or two color channels instead of four. The opaque flag is meant to take your UIView's opaque flag, so you can opt out of an alpha channel if you know you won't need one.
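A hypothetical call site, assuming the method is declared in a UIImage category:
// Pass the view's opaque flag so fully opaque views skip the alpha channel.
UIImage *gray = [UIImage grayScaleFromImage:self.imageView.image
                                     opaque:self.imageView.opaque];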
I haven't tried the Core Image based solutions in this answer thread, but I would be very cautious about using Core Image if performance is important.
That's my attempt at a fast conversion, drawing directly into a grayscale colorspace instead of enumerating every pixel. It works 10x faster than the CIFilter solutions.
@implementation UIImage (Grayscale)
static UIImage *grayscaleImageFromCIImage(CIImage *image, CGFloat scale)
{
CIImage *blackAndWhite = [CIFilter filterWithName:@"CIColorControls" keysAndValues:kCIInputImageKey, image, kCIInputBrightnessKey, @0.0, kCIInputContrastKey, @1.1, kCIInputSaturationKey, @0.0, nil].outputImage;
CIImage *output = [CIFilter filterWithName:@"CIExposureAdjust" keysAndValues:kCIInputImageKey, blackAndWhite, kCIInputEVKey, @0.7, nil].outputImage;
CGImageRef ref = [[CIContext contextWithOptions:nil] createCGImage:output fromRect:output.extent];
UIImage *result = [UIImage imageWithCGImage:ref scale:scale orientation:UIImageOrientationUp];
CGImageRelease(ref);
return result;
}
static UIImage *grayscaleImageFromCGImage(CGImageRef imageRef, CGFloat scale)
{
NSInteger width = CGImageGetWidth(imageRef) * scale;
NSInteger height = CGImageGetHeight(imageRef) * scale;
NSMutableData *pixels = [NSMutableData dataWithLength:width*height];
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
CGContextRef context = CGBitmapContextCreate(pixels.mutableBytes, width, height, 8, width, colorSpace, 0);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
CGImageRef ref = CGBitmapContextCreateImage(context);
UIImage *result = [UIImage imageWithCGImage:ref scale:scale orientation:UIImageOrientationUp];
CGContextRelease(context);
CGColorSpaceRelease(colorSpace);
CGImageRelease(ref);
return result;
}
- (UIImage *)grayscaleImage
{
if (self.CIImage) {
return grayscaleImageFromCIImage(self.CIImage, self.scale);
} else if (self.CGImage) {
return grayscaleImageFromCGImage(self.CGImage, self.scale);
}
return nil;
}
@end
I tried to create a screenshot using UIActivityViewController and save it into Photos on an iPhone/iPad device. In the simulator everything shows correctly, but when I switched to a device, only part of it shows. Here are the screenshots:
In simulator (You can see there is one background, one green line and one star image)
In real device (You can see there is only one star image, and everything else is gone)
I merged all of those three different UIImages into one image so that I can take a screenshot.
I first merge the background image (bridge UIImage) with star image.
-(UIImage*)mergeUIImageView:(UIImage*)bkgound
FrontPic:(UIImage*)fnt
FrontPicX:(CGFloat)xPos
FrontPicY:(CGFloat)yPos
FrontPicWidth:(CGFloat)picWidth
FrontPicHeight:(CGFloat)picHeight
FinalSize:(CGSize)finalSize
{
UIGraphicsBeginImageContext(CGSizeMake([UIScreen mainScreen].bounds.size.height, [UIScreen mainScreen].bounds.size.width));
// bkgound - is the bridge image
[bkgound drawInRect:CGRectMake(0, 0, [UIScreen mainScreen].bounds.size.height, [UIScreen mainScreen].bounds.size.width)];
// fnt - is the star image
[fnt drawInRect:CGRectMake(xPos, yPos, picWidth, picHeight)];
// merged image
UIImage* newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return newImage;
}
Then I merged this picture with opengl rendered picture which is the green line.
a) I first convert the OpenGL image to a UIImage using this function:
-(UIImage *) glToUIImage {
float scaleFactor = [[UIScreen mainScreen] scale];
CGRect screen = [[UIScreen mainScreen] bounds];
CGFloat image_height = screen.size.width * scaleFactor;
CGFloat image_width = screen.size.height * scaleFactor;
NSInteger myDataLength = image_width * image_height * 4;
// allocate array and read pixels into it.
GLubyte *buffer = (GLubyte *) malloc(myDataLength);
glReadPixels(0, 0, image_width, image_height, GL_RGBA, GL_UNSIGNED_BYTE, buffer);
// gl renders "upside down" so swap top to bottom into new array.
// there's gotta be a better way, but this works.
GLubyte *buffer2 = (GLubyte *) malloc(myDataLength);
for(int y = 0; y < image_height; y++)
{
for(int x = 0; x < image_width * 4; x++)
{
buffer2[(int)((image_height - 1 - y) * image_width * 4 + x)] = buffer[(int)(y * 4 * image_width + x)];
}
}
// make data provider with data.
CGDataProviderRef provider = CGDataProviderCreateWithData(NULL, buffer2, myDataLength, NULL);
// prep the ingredients
int bitsPerComponent = 8;
int bitsPerPixel = 32;
int bytesPerRow = 4 * image_width;
CGColorSpaceRef colorSpaceRef = CGColorSpaceCreateDeviceRGB();
CGBitmapInfo bitmapInfo = kCGBitmapByteOrderDefault;
CGColorRenderingIntent renderingIntent = kCGRenderingIntentDefault;
// make the cgimage
CGImageRef imageRef = CGImageCreate(image_width, image_height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpaceRef, bitmapInfo, provider, NULL, NO, renderingIntent);
// then make the uiimage from that
UIImage *myImage = [UIImage imageWithCGImage:imageRef];
return myImage;
}
b) Then I merge this OpenGL image with the image above (bridge + star), using the same drawing approach:
-(UIImage*)screenshot
{
// get opengl image from above function
UIImage *image = [self glToUIImage];
CGRect pos = CGRectMake(0, 0, image.size.width, image.size.height);
UIGraphicsBeginImageContext(image.size);
[image drawInRect:pos];
[self.background.image drawInRect:pos];
UIImage* final = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return final;
}
And it works great in the simulator (version 6.0) for iPhone, iPad, iPhone with Retina, and iPad with Retina. However, when I switched to a real device (iPhone 4/4S/5, iPad 2/mini/Retina), it only shows the star image. The Xcode version is 4.6.3, the base SDK is the latest iOS (6.1), and the iOS deployment target is 5.0. Could you tell me how to fix it? Thanks.
The problem is that iOS 6.0 does not keep the render buffer contents; it erases them after presenting. But a screenshot reads its data from that buffer, which is why I kept getting a black background. Adding a category that makes the device retain the buffer contents solves the problem:
@interface CAEAGLLayer (Retained)
@end
@implementation CAEAGLLayer (Retained)
- (NSDictionary*) drawableProperties
{
return @{kEAGLDrawablePropertyRetainedBacking : @(YES)};
}
@end
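Equivalently, instead of the category override, the same property can be set directly wherever the EAGL-backed view is configured; a sketch, assuming self.layer is the view's CAEAGLLayer:
CAEAGLLayer *eaglLayer = (CAEAGLLayer *)self.layer;
eaglLayer.drawableProperties = @{kEAGLDrawablePropertyRetainedBacking : @(YES),
                                 kEAGLDrawablePropertyColorFormat : kEAGLColorFormatRGBA8};
Note that retained backing can cost some performance, since the GPU has to preserve the buffer between frames.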
update 1
I have found a promising Apple doc here describing CGDataProviderCopyData. I think this does what I originally asked about: taking a drawing from a context and extracting the pixel values.
The example code uses CGImageGetDataProvider and some other features that I do not understand, so I cannot figure out how to implement their functions. How do I take information from the variable con, or from its context, and get access to the pixels?
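For reference, pulling the pixels out of con would look roughly like this (a minimal sketch, assuming a 32-bit RGBA context):
CGImageRef cgImage = CGBitmapContextCreateImage(con); // snapshot the context's pixels
CFDataRef pixelData = CGDataProviderCopyData(CGImageGetDataProvider(cgImage));
const UInt8 *bytes = CFDataGetBytePtr(pixelData);
size_t bytesPerRow = CGImageGetBytesPerRow(cgImage);
// The pixel at (x, y) starts at bytes[y * bytesPerRow + x * 4];
// the component order depends on the context's CGBitmapInfo.
CFRelease(pixelData);
CGImageRelease(cgImage);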
update 1
update 0
Maybe I am asking the wrong question here. CGContextDrawImage scales the image from 104 by 104 down to 13 by 13 in my case, but it also draws the image. Maybe I need to find the part of CGContextDrawImage that just does the scaling.
I have found initWithData:scale: in the "UIImage Class Reference", but I don't know how to supply the data for that method. The scale I want is 0.25.
- (id)initWithData:(NSData *)data scale:(CGFloat)scale
Can someone tell me how to supply the (NSData *)data for my app?
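For what it's worth, initWithData: expects encoded image-file bytes (PNG or JPEG), not raw pixels, so the data would come from something like:
NSData *pngData = UIImagePNGRepresentation(testCard); // encoded PNG bytes, not raw pixels
UIImage *scaled = [[UIImage alloc] initWithData:pngData scale:4.0];
Note that scale is the point-to-pixel factor used for display (a scale of 4 shows the image at a quarter of its pixel size); it does not resample the bitmap.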
update 0
//
// BSViewController.m
#import "BSViewController.h"
@interface BSViewController ()
@end
@implementation BSViewController
- (IBAction) chooseImage:(id) sender{
[self.view addSubview:self.imageView];
UIImage* testCard = [UIImage imageNamed:@"ipad 7D.JPG"];
CGSize sz = [testCard size];
CGImageRef num = CGImageCreateWithImageInRect([testCard CGImage],CGRectMake(532, 0, 104, 104));
UIGraphicsBeginImageContext(CGSizeMake( 250,650));
CGContextRef con = UIGraphicsGetCurrentContext();
CGContextDrawImage(con, CGRectMake(0, 0, 13, 13) ,num);
UIGraphicsEndImageContext();
self.workingImage = CFBridgingRelease(num);
CGImageRelease(num);
I am working on the transition from above to below.
More specifically I need to feed imageRef the correct input. I want to give the imageRef a 13 by 13 image, but when I give imageRef num it gets a 104 by 104 image, and when I give imageRef con it gets a 0 by 0 image. (Another tentative approach is mentioned at the bottom.)
The code below is Brandon Trebitowski's
CGImageRef imageRef = num;
NSUInteger width = CGImageGetWidth(imageRef);
NSUInteger height = CGImageGetHeight(imageRef);
NSLog(#"the width: %u", width);
NSLog(#"the height: %u", height);
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
unsigned char *rawData = (unsigned char*) calloc(height * width * 4, sizeof(unsigned char));
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = 8;
CGContextRef context = CGBitmapContextCreate(rawData, width, height,
bitsPerComponent, bytesPerRow, colorSpace,
kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGColorSpaceRelease(colorSpace);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
CGContextRelease(context);
NSLog(#"Stop 3");
// Now your rawData contains the image data in the RGBA8888 pixel format.
int byteIndex = (bytesPerRow * 0) + 0 * bytesPerPixel;
for (int ii = 0 ; ii < width * height ; ++ii)
{
int outputColor = (rawData[byteIndex] + rawData[byteIndex+1] + rawData[byteIndex+2]) / 3;
rawData[byteIndex] = (char) (outputColor);
rawData[byteIndex+1] = (char) (outputColor);
rawData[byteIndex+2] = (char) (outputColor);
byteIndex += 4;
}
}
- (void)viewDidLoad
{
[super viewDidLoad];
// Do any additional setup after loading the view, typically from a nib.
}
- (void)didReceiveMemoryWarning
{
[super didReceiveMemoryWarning];
// Dispose of any resources that can be recreated.
}
@end
I have also experimented with defining self.workingImage one of the following two ways and supplying that to imageRef.
self.workingImage = num;
self.workingImage = (__bridge UIImage *)(num);
CGImageRef imageRef = [self.workingImage CGImage];
I changed 2 lines and added 3 lines and got the results I wanted.
The main change was to use UIGraphicsBeginImageContextWithOptions instead of UIGraphicsBeginImageContext so that the rescaling could be done before the image was drawn.
@implementation BSViewController
- (IBAction) chooseImage:(id) sender{
[self.view addSubview:self.imageView];
UIImage* testCard = [UIImage imageNamed:@"ipad 7D.JPG"];
CGSize sz = [testCard size];
CGImageRef num = CGImageCreateWithImageInRect([testCard CGImage],CGRectMake(532, 0, 104, 104));
UIGraphicsBeginImageContextWithOptions(CGSizeMake( 104,104), NO, 0.125); // Changed
CGContextRef con = UIGraphicsGetCurrentContext();
CGContextDrawImage(con, CGRectMake(0, 0, 104, 104) ,num); // Changed
UIImage* im = UIGraphicsGetImageFromCurrentImageContext(); // Added
UIGraphicsEndImageContext();
self.workingImage = CFBridgingRelease(num);
CGImageRelease(num);
UIImageView* iv = [[UIImageView alloc] initWithImage:im]; // Added
[self.imageView addSubview: iv]; // Added
CGImageRef imageRef = [im CGImage];
NSUInteger width = CGImageGetWidth(imageRef);
NSUInteger height = CGImageGetHeight(imageRef);
etc.
I'm trying to apply multiple effects to images. I've created a separate file to handle the effects processing; I can send it a UIImageView and receive a modified copy. To save processing time, I first load the image into the separate processing file and store it in memory, rather than loading the image every time I want to modify it. The flow is getImageData -> modifyRGB -> displayImage. Everything works until the last step. The returned modified image is displayed on screen for a split second, then the app crashes with an EXC_BAD_ACCESS (code 1) error. I've been over the code repeatedly and can't find the problem. Any help is greatly appreciated. Thank you!
UPDATE WITH MORE INFO
I'm using Xcode 4.3.1 with Automatic Reference Counting
Using breakpoints, I can verify that the crash happens when the line self.imageView.image = [self.imageManipulation displayImage]; is executed. The image IS updated, but then the program immediately crashes.
Using NSZombies, I get the error -[Not A Type retain]: message sent to deallocated instance 0x2cceaf80
From my viewController I use:
[self.imageManipulation getImageData:self.imageView.image];
[self.imageManipulation modifyRGB];
self.imageView.image = [self.imageManipulation displayImage];
My ImageManipulation file consists of:
@implementation ImageManipulation
static unsigned char *rgbaDataOld;
static unsigned char *rgbaDataNew;
static int width;
static int height;
- (void)getImageData:(UIImage *)image
{
CGImageRef imageRef = [image CGImage];
width = CGImageGetWidth(imageRef);
height = CGImageGetHeight(imageRef);
rgbaDataOld = malloc(height * width * 4);
rgbaDataNew = malloc(height * width * 4);
NSUInteger bytesPerPixel = 4;
NSUInteger bytesPerRow = bytesPerPixel * width;
NSUInteger bitsPerComponent = 8;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(rgbaDataOld, width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
CGColorSpaceRelease(colorSpace);
CGContextRelease(context);
CGImageRelease(imageRef);
}
//modify rgb values
- (void)modifyRGB
{
for (int byteIndex = 0 ; byteIndex < width * height * 4; byteIndex += 4)
{
rgbaDataNew[byteIndex] = (char) (int) (rgbaDataOld[byteIndex] / 3) + 1;
rgbaDataNew[byteIndex+1] = (char) (int) (rgbaDataOld[byteIndex+1] / 3 + 1);
rgbaDataNew[byteIndex+2] = (char) (int) (rgbaDataOld[byteIndex+2] / 3) + 1;
rgbaDataNew[byteIndex+3] = (char) (int) 255;
}
}
//set image
- (UIImage *)displayImage
{
CGContextRef context;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
context = CGBitmapContextCreate(rgbaDataNew, width, height, 8, 4 * width, colorSpace, kCGImageAlphaPremultipliedLast );
CGImageRef imageRef = CGBitmapContextCreateImage (context);
UIImage *outputImage = [UIImage imageWithCGImage:imageRef];
CGColorSpaceRelease(colorSpace);
CGContextRelease(context);
CGImageRelease(imageRef);
return outputImage;
}
@end
I changed my displayImage method to receive a UIImageView and return void. This way, all work is done on the passed UIImageView instead of a local instance, and nothing is returned.
My guess is that the returned UIImage approach I was using before was crashing because the returned reference was deallocated the second the method completed. This also allows me to use CGImageRelease without any ill effects.
Here's the new approach:
- (void)displayImage:(UIImageView *)image
{
CGContextRef context;
CGImageRef imageRef = [image.image CGImage];
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
context = CGBitmapContextCreate(rgbaDataNew, width, height, 8, 4 * width, colorSpace, kCGImageAlphaPremultipliedLast );
imageRef = CGBitmapContextCreateImage (context);
image.image = [UIImage imageWithCGImage:imageRef];
CGColorSpaceRelease(colorSpace);
CGContextRelease(context);
CGImageRelease(imageRef);
image = nil;
}
Thank you to everyone that offered help! I learned a tremendous amount just from your suggestions. This was my first time using bt and NSZombie and now I'm using them religiously! Thanks again!
You're releasing the imageRef, which causes the UIImage to be autoreleased.
Make sure that you retain the UIImage to prevent that:
- (UIImage *)displayImage
{
CGContextRef context;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
context = CGBitmapContextCreate(rgbaDataNew, width, height, 8, 4 * width, colorSpace, kCGImageAlphaPremultipliedLast );
CGImageRef imageRef = CGBitmapContextCreateImage (context);
UIImage *outputImage = [[UIImage imageWithCGImage:imageRef] retain];
CGColorSpaceRelease(colorSpace);
CGContextRelease(context);
CGImageRelease(imageRef); //this line would cause outputImage to be released
return outputImage;
}
In displayImage, tell the compiler you want to retain the image with __strong (the ARC-compatible way, since explicit retain calls aren't allowed under ARC):
UIImage __strong *outputImage = [UIImage imageWithCGImage:imageRef];
or if you prefer:
__strong UIImage *outputImage = [UIImage imageWithCGImage:imageRef];