CGImageCreateWithMask image encodes with alpha channel inverted - ios

NOTE: I've fixed the code; look for the Edit note below.
For iOS 5.0+, running on the iPad, I've created a function that lets the user mask an input image, generating two new images: a foreground image and a background image. When I add these to a UIImageView and display them on the device or simulator, I get what I expect.
However, when I save them by encoding the data as session data, the resulting images come back with the matte reversed. Two of us have gone over the code; there aren't any places where these are swapped, and no copy/paste errors. I thought it might be something to do with kCGImageAlphaPremultipliedFirst vs. kCGImageAlphaPremultipliedLast: when I encode the matted images they start out as kCGImageAlphaPremultipliedFirst, and when they are loaded back they are kCGImageAlphaPremultipliedLast.
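For debugging, it may help to log the CGImage's alpha info right before encoding and right after loading, to confirm where the flip happens. A minimal sketch (LogAlphaInfo is a hypothetical helper; wire it around whatever your session-data save/load path is):
static void LogAlphaInfo(NSString *label, UIImage *img) {
    // CGImageAlphaInfo: kCGImageAlphaPremultipliedLast == 1, kCGImageAlphaPremultipliedFirst == 2
    NSLog(@"%@ alpha info: %d", label, (int)CGImageGetAlphaInfo(img.CGImage));
}
// e.g. LogAlphaInfo(@"before encode", foregroundImg);
//      LogAlphaInfo(@"after load", loadedForegroundImg);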
Any help or ideas would be greatly appreciated.
Amy@InsatiableGenius
The functions below are called with:
[self createMask];
[self addImageAndBackground:foregroundImg backgroundImg:backgroundImg];
- (UIImage *)maskImage:(UIImage *)image withMask:(UIImage *)maskImage {
    CGImageRef maskRef = maskImage.CGImage;
    CGImageRef mask = CGImageMaskCreate(CGImageGetWidth(maskRef),
                                        CGImageGetHeight(maskRef),
                                        CGImageGetBitsPerComponent(maskRef),
                                        CGImageGetBitsPerPixel(maskRef),
                                        CGImageGetBytesPerRow(maskRef),
                                        CGImageGetDataProvider(maskRef), NULL, false);
    CGImageRef sourceImage = [image CGImage];
    CGImageRef imageWithAlpha = sourceImage;
    // Add an alpha channel if the source image lacks one, since masking needs it
    if ((CGImageGetAlphaInfo(sourceImage) == kCGImageAlphaNone)
        || (CGImageGetAlphaInfo(sourceImage) == kCGImageAlphaNoneSkipFirst)
        || (CGImageGetAlphaInfo(sourceImage) == kCGImageAlphaNoneSkipLast)) {
        imageWithAlpha = CopyImageAndAddAlphaChannel(sourceImage);
    }
    CGImageRef masked = CGImageCreateWithMask(imageWithAlpha, mask);
    CGImageRelease(mask);
    if (sourceImage != imageWithAlpha) {
        CGImageRelease(imageWithAlpha);
    }
    /* EDIT STARTS HERE -- previously this just did:
       UIImage *retImage = [UIImage imageWithCGImage:masked];
       CGImageRelease(masked);
       return retImage; */
    // Added an extra render step to force the image to bake the mask into
    // real alpha values, so the saved image no longer carries the raw mask
    UIImage *retImage = [UIImage imageWithCGImage:masked];
    CGImageRelease(masked);
    UIGraphicsBeginImageContext(retImage.size);
    [retImage drawAtPoint:CGPointZero];
    UIImage *newImg = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    retImage = nil;
    return newImg;
}
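One caveat with the extra render step above: UIGraphicsBeginImageContext always creates a 1x context, so on a retina device the flattened result is rendered at reduced resolution. A minimal variant, assuming the same retImage, that preserves the source scale:
UIGraphicsBeginImageContextWithOptions(retImage.size, NO, retImage.scale); // honor retina scale
[retImage drawAtPoint:CGPointZero];
UIImage *newImg = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();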
- (void)createMask {
    // Take a whole-screen UIImage from paintView.
    // The user painted black for the mask; set the rest of the window to white.
    [paintView setWhiteBackground:YES];
    // Get the user-painted mask
    UIImage *maskFromPaint = [paintView allocNormalResImageWithBlur:NO /* blur? */];
    [self dumpTestImg:maskFromPaint name:@"maskFromPaint"];
    UIImage *maskNoAlpha = [maskFromPaint resetImageAlpha:1.0];
    [self dumpTestImg:maskNoAlpha name:@"maskFromPaintNoAlpha"];
    // The mask has to be grayscale
    UIImage *maskFromPaintGray = [self convertImageToGrayScale:maskNoAlpha];
    [self dumpTestImg:maskFromPaintGray name:@"maskFromPaintGray"];
    // Had to call this normalize function because some PNGs are not compatible (8 bit)
    UIImage *disp_original = [[UIImage alloc] initWithCGImage:[[original normalize] CGImage]];
    // Resize the original to screen size (alternatively we could upscale the paint... not sure which for now)
    disp_original = [disp_original resizedImageWithContentMode:UIViewContentModeScaleAspectFit
                                                        bounds:inputImageView.frame.size
                                          interpolationQuality:kCGInterpolationHigh];
    CGSize imageInViewSize = disp_original.size;
    // Use the size of the displayed original to crop the paint view
    CGRect overlayRect = CGRectMake((int)(inputImageView.frame.size.width - imageInViewSize.width) / 2,
                                    (int)(inputImageView.frame.size.height - imageInViewSize.height) / 2,
                                    (int)imageInViewSize.width,
                                    (int)imageInViewSize.height);
    // Here is the actual crop:
    // get a rectangle from the paint that is the same size as the displayed original
    CGImageRef maskFromPaintimageRef = CGImageCreateWithImageInRect([maskFromPaintGray CGImage], overlayRect);
    UIImage *invertedMaskFromPaint = [UIImage imageWithCGImage:maskFromPaintimageRef];
    self.maskImg = [self invertImage:invertedMaskFromPaint];
    [self dumpTestImg:self.maskImg name:@"maskFromPaintCropped"];
    self.backgroundImg = [self maskImage:disp_original withMask:self.maskImg];
    self.foregroundImg = [self maskImage:disp_original withMask:invertedMaskFromPaint];
    foregroundImgView.image = foregroundImg;
    backgroundImgView.image = backgroundImg;
    foregroundImgView.hidden = NO;
    backgroundImgView.hidden = NO;
    [container bringSubviewToFront:foregroundImgView];
    [container bringSubviewToFront:backgroundImgView];
    [self dumpTestImg:foregroundImg name:@"foregroundImg"];
    [self dumpTestImg:backgroundImg name:@"backgroundImg"];
    // Cleanup
    CGImageRelease(maskFromPaintimageRef);
    maskFromPaint = nil;
    maskFromPaintGray = nil;
    maskNoAlpha = nil;
    disp_original = nil;
    // Put things back
    [paintView setWhiteBackground:NO];
}
CGImageRef CopyImageAndAddAlphaChannel(CGImageRef sourceImage) {
    CGImageRef retVal = NULL;
    size_t width = CGImageGetWidth(sourceImage);
    size_t height = CGImageGetHeight(sourceImage);
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef offscreenContext = CGBitmapContextCreate(NULL, width, height,
                                                          8, 0, colorSpace, kCGImageAlphaPremultipliedLast);
    if (offscreenContext != NULL) {
        CGContextDrawImage(offscreenContext, CGRectMake(0, 0, width, height), sourceImage);
        retVal = CGBitmapContextCreateImage(offscreenContext);
        CGContextRelease(offscreenContext);
    }
    CGColorSpaceRelease(colorSpace);
    return retVal;
}
- (UIImage *)invertImage:(UIImage *)sourceImage {
    CIContext *context = [CIContext contextWithOptions:nil];
    CIFilter *filter = [CIFilter filterWithName:@"CIColorInvert"];
    CIImage *inputImage = [[CIImage alloc] initWithImage:sourceImage];
    [filter setValue:inputImage forKey:@"inputImage"];
    // createCGImage returns a +1 CGImageRef that must be released, even under ARC
    CGImageRef cgImage = [context createCGImage:filter.outputImage fromRect:filter.outputImage.extent];
    UIImage *result = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);
    return result;
}
- (void)addImageAndBackground:(UIImage *)foregroundImgIn backgroundImg:(UIImage *)backgroundImgIn {
    UIImageView *imgVF = [[UIImageView alloc] initWithImage:foregroundImgIn];
    imgVF.userInteractionEnabled = YES;
    [self dumpTestImg:foregroundImgIn name:@"foregroundIn"];
    UIImageView *imgVB = [[UIImageView alloc] initWithImage:backgroundImgIn];
    imgVB.userInteractionEnabled = YES;
    [self dumpTestImg:backgroundImgIn name:@"backgroundIn"];
}

Related

Black and White Photoshop effect on UIImage

Hello, I'd like to create the following black-and-white Photoshop effect on a UIImage:
https://drive.google.com/file/d/0B5dHxpdDwpPec3dPTWdLVnNhZFk/view?usp=sharing
in which you can change the brightness of each of the six colors (reds, yellows, greens, cyans, blues, magentas).
I used the following to make the image black and white, but it doesn't let me adjust the individual colors:
self.imageView.image = chosenImage;
CIImage *beginImage = [CIImage imageWithCGImage:chosenImage.CGImage];
CIImage *blackAndWhite = [CIFilter filterWithName:@"CIColorControls" keysAndValues:
                          kCIInputImageKey, beginImage,
                          @"inputBrightness", [NSNumber numberWithFloat:0.0],
                          @"inputContrast", [NSNumber numberWithFloat:1.1],
                          @"inputSaturation", [NSNumber numberWithFloat:0.0], nil].outputImage;
CIImage *output = [CIFilter filterWithName:@"CIExposureAdjust" keysAndValues:
                   kCIInputImageKey, blackAndWhite,
                   @"inputEV", [NSNumber numberWithFloat:0.7], nil].outputImage;
CIContext *context = [CIContext contextWithOptions:nil];
CGImageRef cgiimage = [context createCGImage:output fromRect:output.extent];
UIImage *newImage = [UIImage imageWithCGImage:cgiimage];
CGImageRelease(cgiimage); // createCGImage returns a +1 reference that must be released
self.imageView.image = newImage;
Thank you for your time.
I think you can accomplish that effect with the following function:
- (UIImage *)grayScaleImageWith:(UIImage *)image blackPoint:(const CGFloat *)blackPoint whitePoint:(const CGFloat *)whitePoint andGamma:(CGFloat)gamma {
    // Create image rectangle with current image width/height
    CGRect imageRect = CGRectMake(0, 0, image.size.width, image.size.height);
    // Grayscale color space (white/black points are 3-element CIE XYZ tristimulus arrays)
    CGColorSpaceRef colorSpace = CGColorSpaceCreateCalibratedGray(whitePoint, blackPoint, gamma);
    // Create bitmap context with current image size and grayscale colorspace
    CGContextRef context = CGBitmapContextCreate(nil, image.size.width, image.size.height, 8, 0, colorSpace, kCGImageAlphaNone);
    // Draw image into current context, with specified rectangle,
    // using previously defined context (with grayscale colorspace)
    CGContextDrawImage(context, imageRect, [image CGImage]);
    // Create a bitmap image from the pixel data in the current context
    CGImageRef imageRef = CGBitmapContextCreateImage(context);
    // Create a new UIImage object
    UIImage *newImage = [UIImage imageWithCGImage:imageRef];
    // Release colorspace, context, and bitmap information
    CGColorSpaceRelease(colorSpace);
    CGContextRelease(context);
    CFRelease(imageRef);
    // Return the new grayscale image
    return newImage;
}
Then call it, filling the black and white points with the values selected in the UI:
CGFloat black[3] = { 0, 0, 0 };       // replace with values from the interface
CGFloat white[3] = { 100, 100, 100 }; // replace with values from the interface
UIImage *result = [self grayScaleImageWith:image blackPoint:black whitePoint:white andGamma:1.8f];
I have not tested this code yet, but I hope it at least points you in the right direction.

Cropped iOS Image comes back too large

Been trying to fix this problem all day, to no avail.
Basically, I'm taking a screenshot of the view, then trying to crop out the first 50 px and a footer. The problem is that the result comes back a little blown up and quality is lost. Here's what I wrote, which I think accounts for retina:
- (UIImage *)takeSnapShotAndReturn {
    // Take a screenshot of the whole view
    if ([[UIScreen mainScreen] respondsToSelector:@selector(scale)]) {
        UIGraphicsBeginImageContextWithOptions(self.view.bounds.size, NO, [UIScreen mainScreen].scale);
    } else {
        UIGraphicsBeginImageContext(self.view.window.bounds.size);
    }
    [self.view.layer renderInContext:UIGraphicsGetCurrentContext()];
    UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
    combinedImage = [self cropOutArea:image withRectangle:CGRectMake(0, 50, 320, 467)];
    UIImageWriteToSavedPhotosAlbum(combinedImage, nil, nil, nil);
    UIGraphicsEndImageContext();
    return image;
}
- (UIImage *)cropOutArea:(UIImage *)image withRectangle:(CGRect)rectangle {
    if (image.scale > 1) {
        rectangle = CGRectMake(rectangle.origin.x * image.scale,
                               rectangle.origin.y * image.scale,
                               rectangle.size.width * image.scale,
                               rectangle.size.height * image.scale);
    }
    CGImageRef imageRef = CGImageCreateWithImageInRect(image.CGImage, rectangle);
    UIImage *result = [UIImage imageWithCGImage:imageRef scale:image.scale orientation:image.imageOrientation];
    CGImageRelease(imageRef);
    return result;
}
I find cropping extremely confusing!
I'm not sure EXACTLY what you're trying to do, but this may be it...
- (UIImage *)simplishTopCropAndTo640:(UIImage *)fromImage
// moderately optimised!
{
    float shortDimension = fminf(fromImage.size.width, fromImage.size.height);
    // 1. use CGImageCreateWithImageInRect to take only the top square...
    // 2. use drawInRect (or CGContextDrawImage, same) to scale...
    CGRect topSquareOfOriginalRect = CGRectMake(0, 0, shortDimension, shortDimension);
    // NOT fromImage.size.width, fromImage.size.width);
    CGImageRef topSquareIR = CGImageCreateWithImageInRect(fromImage.CGImage, topSquareOfOriginalRect);
    CGSize size = CGSizeMake(640, 640);
    CGRect sized = CGRectMake(0.0f, 0.0f, size.width, size.height);
    UIGraphicsBeginImageContextWithOptions(size, NO, 0.0f);
    CGContextRef cc = UIGraphicsGetCurrentContext();
    CGContextSetInterpolationQuality(cc, kCGInterpolationLow);
    // flip the context, since Core Graphics draws upside-down relative to UIKit
    CGContextTranslateCTM(cc, 0, size.height);
    CGContextScaleCTM(cc, 1.0, -1.0);
    CGContextDrawImage(cc, sized, topSquareIR);
    // arguably, those three lines more simply...
    //[[UIImage imageWithCGImage:topSquareIR] drawInRect:sized];
    CGImageRelease(topSquareIR);
    UIImage *result = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    result = [UIImage imageWithCGImage:result.CGImage
                                 scale:result.scale
                           orientation:fromImage.imageOrientation];
    // consider... something like...
    //[UIImage imageWithCGImage:cgimg
    //                    scale:3 orientation:fromImage.imageOrientation];
    return result;
}
Consider also this valuable category .....
- (UIImage *)ordinaryCrop:(CGRect)toRect
{
    // crops any image, to any rect. you can't beat that
    CGImageRef imageRef = CGImageCreateWithImageInRect([self CGImage], toRect);
    UIImage *cropped = [UIImage imageWithCGImage:imageRef];
    CGImageRelease(imageRef);
    return cropped;
}
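Note that CGImageCreateWithImageInRect works in pixel coordinates, so on a retina screenshot the rect passed to this category must be scaled up first, just as cropOutArea does above. A hedged usage sketch, reusing the question's 320x467 crop (screenshot is a placeholder name):
CGFloat s = screenshot.scale;                               // 2.0 on retina
CGRect cropRect = CGRectMake(0, 50 * s, 320 * s, 467 * s);  // points converted to pixels
UIImage *cropped = [screenshot ordinaryCrop:cropRect];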
Finally, don't forget this if you're using the camera, "the most useful code in the universe!": iOS UIImagePickerController result image orientation after upload.
Hope it helps somehow.
Try setting this BOOL property before releasing result in cropOutArea:
result.layer.masksToBounds = YES;

imageWithCGImage makes the app crash

My project uses automatic reference counting, but something odd happens when I use CGBitmapContextCreateImage together with UIImage's imageWithCGImage: method. I wrote some test code, and while it runs, memory keeps growing until the app crashes.
The device:
[16G, iPad2, retina]
The code:
- (UIImage *)createImageByImage:(UIImage *)img
{
    CGSize imgSize = img.size;
    imgSize.width *= [UIScreen mainScreen].scale;
    imgSize.height *= [UIScreen mainScreen].scale;
    CGColorSpaceRef space = CGColorSpaceCreateDeviceRGB();
    CGContextRef ctx = CGBitmapContextCreate(nil, imgSize.width, imgSize.height, 8,
                                             imgSize.width * (CGColorSpaceGetNumberOfComponents(space) + 1),
                                             space, kCGImageAlphaPremultipliedLast);
    CGColorSpaceRelease(space);
    CGRect rect;
    rect.origin = CGPointMake(0, 0);
    rect.size = imgSize;
    // here we bypass some transformation of the CGContext
    CGContextDrawImage(ctx, rect, img.CGImage);
    CGImageRef cgImage = CGBitmapContextCreateImage(ctx);
    CGContextRelease(ctx);
    UIImage *image = [UIImage imageWithCGImage:cgImage scale:[UIScreen mainScreen].scale orientation:UIImageOrientationDown];
    CGImageRelease(cgImage);
    return image;
}
the caller:
UIImage *baseImg = [UIImage imageNamed:@"corkboard.jpg"]; // any big image, e.g. 1024x768
for (int i = 0; i < 100; i++) {
    UIImage *tempImg = [self createImageByImage:baseImg];
    tempImg = nil;
}
baseImg = nil;
Is there someone who can explain? Waiting for your help!!!
Another thing: in the code, if I only replace
[UIImage imageWithCGImage:cgImage ....];
with
[UIImage imageWithCGImage:img.CGImage ....];
the error disappears! But then the function no longer does what the original required.
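A plausible explanation, offered as an educated guess: imageWithCGImage: returns an autoreleased UIImage, so the large bitmaps created inside the loop cannot be freed until the enclosing autorelease pool drains, long after the loop ends. Wrapping each iteration in its own pool lets each temporary image (and its multi-megabyte backing store) die immediately:
for (int i = 0; i < 100; i++) {
    @autoreleasepool {
        UIImage *tempImg = [self createImageByImage:baseImg];
        // use tempImg here; it is freed when this pool drains
        tempImg = nil;
    }
}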

UIImage from CGImageRef while adding sticker

UIImage *sticky = [UIImage imageNamed:@"Radio.png"];
[_imgViewSticky setImage:sticky];
CIImage *outputImage = [self.originalImage CIImage];
CIContext *context = [CIContext contextWithOptions:nil];
CGImageRef cgImg = [context createCGImage:outputImage fromRect:[outputImage extent]];
float widthRatio = [outputImage extent].size.width / 320;
float heightRatio = [outputImage extent].size.height / 480;
CGPoint cgStickyPoint = CGPointMake(_imgViewSticky.frame.origin.x * widthRatio,
                                    _imgViewSticky.frame.origin.y * heightRatio);
cgImg = [self setStickyForCGImage:cgImg withPosition:cgStickyPoint];
The last line returns a CGImageRef object.
And I'm assigning the value to the final image like this:
UIImage *finalImage = [UIImage imageWithCGImage:cgImg];
Yet I'm not getting the image. Any ideas why? Any help is much appreciated.
I notice that your CIContext isn't receiving any drawing, which could be why you're not getting an image. I don't have a clear picture of what you want, but this code will superimpose one UIImage on top of another UIImage:
UIGraphicsBeginImageContextWithOptions(backgroundImage.size, NO, 0.0); // Create an image context
[backgroundImage drawInRect:CGRectMake(0, 0, backgroundImage.size.width, backgroundImage.size.height)]; // Draw the first UIImage
[stickerImage drawInRect:stickerRect]; // Draw the second UIImage wherever you want on top of the first
UIImage *finalImage = UIGraphicsGetImageFromCurrentImageContext(); // Get the final UIImage result
UIGraphicsEndImageContext(); // Balance the Begin call and pop the context
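A hedged aside on the question's code: UIImage's CIImage property is non-nil only when the image was built with imageWithCIImage:, so [self.originalImage CIImage] is typically nil for images loaded from files, and everything derived from it comes out empty. A safer way to obtain a CIImage from any UIImage:
CIImage *outputImage = [self.originalImage CIImage];
if (outputImage == nil && self.originalImage.CGImage != NULL) {
    // Fall back to wrapping the bitmap-backed image
    outputImage = [CIImage imageWithCGImage:self.originalImage.CGImage];
}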

iOS, Generated images, and masking

I'm trying to generate an image that is lozenge-shaped and shows some percentage finished versus unfinished. The way I implemented this was as follows:
1. Generate 2 rectangles: one the size of the filled region, the other the size of the empty rectangle
2. Invoke UIGraphicsBeginImageContext() with the size of the rectangle I am interested in
3. Draw the 2 rectangles in the context side by side
4. Grab the image from the context and end the context
5. Create a new masked image by using CGImageMaskCreate() followed by CGImageCreateWithMask() and extracting the masked image
I generate the filled and empty bitmaps using category extensions to UIImage, and then apply a static mask image to them.
The Problem: This works fine in the simulator, but the masking doesn't work on a real device.
Instead of including the code here, I'm including a link to a project that has the code. The relevant files are:
UIImage.h/UIImage.m: The category extension to UIImage that adds both the "create an image with a specified color" and "create a masked image using the supplied mask".
TLRangeDisplay.h/TLRangeDisplay.m: the code for my lozenge-shaped status display. The routine of interest there is fillWithRect().
Here is the code I added to UIImage (via a category):
+ (UIImage *)imageWithColor:(UIColor *)color {
    CGRect rect = CGRectMake(0.0f, 0.0f, 1.0f, 1.0f);
    UIGraphicsBeginImageContext(rect.size);
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGContextSetFillColorWithColor(context, [color CGColor]);
    CGContextFillRect(context, rect);
    UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return image;
}
+ (UIImage *)imageWithColor:(UIColor *)color andSize:(CGSize)size {
    CGRect rect = CGRectMake(0.0f, 0.0f, size.width, size.height);
    UIGraphicsBeginImageContext(rect.size);
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGContextSetFillColorWithColor(context, [color CGColor]);
    CGContextFillRect(context, rect);
    UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return image;
}
- (UIImage *)maskWith:(UIImage *)maskImage {
    CGImageRef maskRef = maskImage.CGImage;
    CGImageRef mask = CGImageMaskCreate(CGImageGetWidth(maskRef), CGImageGetHeight(maskRef),
                                        CGImageGetBitsPerComponent(maskRef),
                                        CGImageGetBitsPerPixel(maskRef),
                                        CGImageGetBytesPerRow(maskRef),
                                        CGImageGetDataProvider(maskRef), NULL, false);
    CGImageRef masked = CGImageCreateWithMask([self CGImage], mask);
    UIImage *image = [UIImage imageWithCGImage:masked];
    CFRelease(mask);
    CFRelease(masked);
    return image;
}
And here is the routine that does the masking:
- (void)fillWithRect {
    CGRect f = self.frame;
    CGFloat width = f.size.width;
    CGFloat fullRange = maxValue_ - minValue_;
    CGFloat filledRange = currentValue_ - minValue_;
    CGRect fillRect = CGRectMake(0, 0, (filledRange * width) / fullRange, f.size.height);
    CGRect emptyRect = CGRectMake(fillRect.size.width, 0, width - fillRect.size.width, f.size.height);
    UIImage *fillImage = nil;
    UIImage *emptyImage = nil;
    if (fillRect.size.width > 0) {
        fillImage = [UIImage imageWithColor:fillColor_ andSize:fillRect.size];
    }
    if (emptyRect.size.width > 0) {
        emptyImage = [UIImage imageWithColor:emptyColor_ andSize:emptyRect.size];
    }
    // Build the 2-color image
    UIGraphicsBeginImageContext(f.size);
    [fillImage drawInRect:fillRect];
    [emptyImage drawInRect:emptyRect];
    UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    // Mask it
    if (nil != maskImage_)
        image = [image maskWith:maskImage_];
    CGRect fullRect = CGRectMake(0, 0, f.size.width, f.size.height);
    // Merge it with the shape
    UIGraphicsBeginImageContext(f.size);
    [image drawInRect:fullRect];
    [shapeImage_ drawInRect:fullRect];
    image = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    [shownView_ removeFromSuperview];
    shownView_ = [[UIImageView alloc] initWithImage:image];
    [self addSubview:shownView_];
    if (nil != shownView_)
        [self bringSubviewToFront:shownView_];
}
The project can be downloaded from http://dl.dropbox.com/u/5375467/ColorPlayOS4.zip
Thanks for any insights on this problem!
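One thing worth checking, offered as an educated guess rather than a verified fix: CGImageCreateWithMask treats a "mask" that still carries an alpha channel differently from a true image mask, and images coming out of UIGraphicsGetImageFromCurrentImageContext always have alpha. Redrawing the mask into a plain 8-bit grayscale bitmap with no alpha before handing it to CGImageMaskCreate is one way to make the simulator and device behave the same (a sketch, assuming the mask fits in memory):
CGImageRef CreateGrayscaleMaskImage(CGImageRef source) {
    size_t width = CGImageGetWidth(source);
    size_t height = CGImageGetHeight(source);
    CGColorSpaceRef gray = CGColorSpaceCreateDeviceGray();
    // 8 bits per component, no alpha: the layout CGImageMaskCreate expects
    CGContextRef ctx = CGBitmapContextCreate(NULL, width, height, 8, 0, gray, kCGImageAlphaNone);
    CGColorSpaceRelease(gray);
    if (ctx == NULL) return NULL;
    CGContextDrawImage(ctx, CGRectMake(0, 0, width, height), source);
    CGImageRef result = CGBitmapContextCreateImage(ctx);
    CGContextRelease(ctx);
    return result; // caller is responsible for releasing
}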
