I created a RotatedView subclass of UIView, added a UIImageView as a subview of RotatedView, and also drew the same image as the image view's onto RotatedView in the drawRect: method. I applied pinch and rotation gestures to the imageView. When I pinch the imageView, the drawn image changes too, but when I rotate the imageView, the drawn image does not change. I used the following code:
- (id)initWithFrame:(CGRect)frame
{
    self = [super initWithFrame:frame];
    if (self) {
        // Initialization code
        self.backgroundColor = [UIColor redColor];
        imageView = [[UIImageView alloc] initWithFrame:CGRectMake(80, 150, 100, 150)];
        imageView.image = [UIImage imageNamed:@"Images_6.jpg"];
        imageView.userInteractionEnabled = YES;
        [self addSubview:imageView];

        UIRotationGestureRecognizer *rotationGesture = [[UIRotationGestureRecognizer alloc] initWithTarget:self action:@selector(rotationMethod:)];
        rotationGesture.delegate = self;
        [imageView addGestureRecognizer:rotationGesture];
    }
    return self;
}
- (void)drawRect:(CGRect)rect
{
    // Drawing code
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGRect rectFrame = CGRectMake(0, 0, imageView.frame.size.width, imageView.frame.size.height);
    CGContextSetStrokeColorWithColor(context, [UIColor blueColor].CGColor);
    CGContextSetLineWidth(context, 10.0);
    CGContextBeginPath(context);
    CGContextMoveToPoint(context, 0, 0);
    CGContextAddLineToPoint(context, 90, 0);
    CGContextAddLineToPoint(context, 90, 90);
    CGContextAddLineToPoint(context, 0, 90);
    CGContextAddLineToPoint(context, 0, 0);
    CGContextClip(context);
    CGContextStrokePath(context);
    CGContextDrawImage(context, rectFrame, imageView.image.CGImage);
}
-(void)rotationMethod:(UIRotationGestureRecognizer *)gestureRecognizer {
    NSLog(@"rotationMethod");
    if ([gestureRecognizer state] == UIGestureRecognizerStateBegan || [gestureRecognizer state] == UIGestureRecognizerStateChanged) {
        CGAffineTransform transform = CGAffineTransformRotate(gestureRecognizer.view.transform, gestureRecognizer.rotation);
        gestureRecognizer.view.transform = transform;
        [gestureRecognizer setRotation:0];
    }
    [self setNeedsDisplay];
}
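(The pinch recognizer mentioned above is not shown in the post; presumably it is added in initWithFrame: just like the rotation recognizer and handled roughly like the following sketch, which matches the described behaviour of the drawing scaling along with the pinch. The method name is hypothetical.)
-(void)pinchMethod:(UIPinchGestureRecognizer *)gestureRecognizer {
    // Hypothetical pinch handler, mirroring rotationMethod: above.
    if ([gestureRecognizer state] == UIGestureRecognizerStateBegan || [gestureRecognizer state] == UIGestureRecognizerStateChanged) {
        gestureRecognizer.view.transform = CGAffineTransformScale(gestureRecognizer.view.transform,
                                                                  gestureRecognizer.scale,
                                                                  gestureRecognizer.scale);
        [gestureRecognizer setScale:1];
    }
    [self setNeedsDisplay];
}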
How do I get the image drawn in the UIView to rotate when the imageView is rotated?
**Edited:**

I got a solution using the first method. Now I am using a second method; I think it is simpler, but I am not sure which one is best. In the second method the image is rotated, but not around its center point. I am struggling to solve this problem. Please help me.
The modified methods are:
- (void)drawRect:(CGRect)rect
{
    NSLog(@"drawRect");
    // Drawing code
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGContextSetStrokeColorWithColor(context, [UIColor blueColor].CGColor);
    CGContextSetLineWidth(context, 10.0);
    CGContextBeginPath(context);
    CGContextMoveToPoint(context, imageRect.origin.x, imageRect.origin.y);
    CGContextAddLineToPoint(context, imageRect.size.width, imageRect.origin.y);
    CGContextAddLineToPoint(context, imageRect.size.width, imageRect.size.height);
    CGContextAddLineToPoint(context, imageRect.origin.x, imageRect.size.height);
    CGContextAddLineToPoint(context, imageRect.origin.x, imageRect.origin.y);
    CGContextClip(context);
    CGContextStrokePath(context);
    CGContextDrawImage(context, imageRect, [self rotateImage:imageView.image].CGImage);
}
-(void)rotationMethod:(UIRotationGestureRecognizer *)gestureRecognizer {
    NSLog(@"rotationMethod");
    if ([gestureRecognizer state] == UIGestureRecognizerStateBegan || [gestureRecognizer state] == UIGestureRecognizerStateChanged) {
        CGAffineTransform transform = CGAffineTransformRotate(gestureRecognizer.view.transform, gestureRecognizer.rotation);
        gestureRecognizer.view.transform = transform;
        lastRotation = gestureRecognizer.rotation;
        [gestureRecognizer setRotation:0];
    }
    [self setNeedsDisplay];
}
- (UIImage *)rotateImage:(UIImage *)img
{
    NSLog(@"rotateImage");
    CGAffineTransform transform = imageView.transform;
    transform = CGAffineTransformTranslate(transform, img.size.width, img.size.height);
    transform = CGAffineTransformRotate(transform, lastRotation);
    transform = CGAffineTransformScale(transform, -1, -1);

    CGContextRef ctx = CGBitmapContextCreate(NULL, img.size.width, img.size.height,
                                             CGImageGetBitsPerComponent(img.CGImage), 0,
                                             CGImageGetColorSpace(img.CGImage),
                                             CGImageGetBitmapInfo(img.CGImage));
    CGContextConcatCTM(ctx, transform);
    CGContextDrawImage(ctx, CGRectMake(20, 20, img.size.width, img.size.height), img.CGImage);
    CGImageRef cgimg = CGBitmapContextCreateImage(ctx);
    UIImage *newImg = [UIImage imageWithCGImage:cgimg];
    CGContextRelease(ctx);
    CGImageRelease(cgimg);
    return newImg;
}
You need to use CGContextScaleCTM and CGContextRotateCTM (and possibly more) to appropriately transform your CGContextRef so that it matches your UIImageView's transform.
Take a look at the Quartz 2D Programming Guide.
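A minimal sketch of that idea, reusing the imageView ivar from the question (the pivot point and rect here are assumptions; adjust them to taste):
- (void)drawRect:(CGRect)rect
{
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGRect imageRect = CGRectMake(0, 0, imageView.bounds.size.width, imageView.bounds.size.height);

    CGContextSaveGState(context);

    // Apply the same transform the gesture recognizers gave the image view,
    // pivoting around the centre of the target rect.
    CGContextTranslateCTM(context, CGRectGetMidX(imageRect), CGRectGetMidY(imageRect));
    CGContextConcatCTM(context, imageView.transform);
    CGContextTranslateCTM(context, -CGRectGetMidX(imageRect), -CGRectGetMidY(imageRect));

    // Flip the coordinate system so CGContextDrawImage does not draw the image upside down.
    CGContextTranslateCTM(context, 0, imageRect.size.height);
    CGContextScaleCTM(context, 1.0, -1.0);

    CGContextDrawImage(context, imageRect, imageView.image.CGImage);
    CGContextRestoreGState(context);
}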
OK, finally I got it using ANImageBitmapRep. I declared lastRotation, angle, and rotateMe in the RotatedView.h class.
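For reference, the declarations in RotatedView.h would look roughly like this (ANImageBitmapRep comes from the third-party ANImageBitmapRep library; displayImageView is the image view referenced in the handler below, which the earlier snippets call imageView):
#import <UIKit/UIKit.h>
#import "ANImageBitmapRep.h"

@interface RotatedView : UIView {
    UIImageView *displayImageView;   // the image view the gestures are attached to
    ANImageBitmapRep *rotateMe;      // bitmap wrapper used to rotate the image
    CGFloat lastRotation;
    CGFloat angle;                   // rotation in degrees, fed to imageByRotating:
}
@end
With those in place, the gesture handler and drawRect: are: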
-(void)rotationMethod:(UIRotationGestureRecognizer *)gestureRecognizer {
    if ([gestureRecognizer state] == UIGestureRecognizerStateBegan) {
        if (!rotateMe) {
            rotateMe = [[ANImageBitmapRep alloc] initWithImage:displayImageView.image];
        } else {
            rotateMe = [ANImageBitmapRep imageBitmapRepWithImage:displayImageView.image];
        }
    }
    CGFloat rotation = 0.0 - (lastRotation - [gestureRecognizer rotation]);
    CGAffineTransform currentTransform = [gestureRecognizer view].transform;
    CGAffineTransform newTransform = CGAffineTransformRotate(currentTransform, rotation);
    [[gestureRecognizer view] setTransform:newTransform];
    lastRotation = [gestureRecognizer rotation];
    angle = lastRotation * (180 / M_PI);
    [self setNeedsDisplay];
}
- (void)drawRect:(CGRect)rect
{
    CGRect imageRect = CGRectMake(0, 0, 90, 90);
    // Drawing code
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGContextSetStrokeColorWithColor(context, [UIColor blueColor].CGColor);
    CGContextSetLineWidth(context, 10.0);
    CGContextBeginPath(context);
    CGContextMoveToPoint(context, imageRect.origin.x, imageRect.origin.y);
    CGContextAddLineToPoint(context, imageRect.size.width, imageRect.origin.y);
    CGContextAddLineToPoint(context, imageRect.size.width, imageRect.size.height);
    CGContextAddLineToPoint(context, imageRect.origin.x, imageRect.size.height);
    CGContextAddLineToPoint(context, imageRect.origin.x, imageRect.origin.y);
    CGContextClip(context);
    CGContextStrokePath(context);
    UIImage *rotatedImage1 = [UIImage imageWithCGImage:[rotateMe imageByRotating:angle]];
    CGContextDrawImage(context, imageRect, rotatedImage1.CGImage);
}
I found this solution. If anyone finds a better solution, please give me suggestions for this problem.
Related
I am trying to imitate the new iOS 7 contacts behavior.
I have a UIScrollView with a UIImageView inside.
With this code I can draw the circular shape in front of the image:
-(void)viewWillAppear:(BOOL)animated
{
    CGFloat screenHeight = [[UIScreen mainScreen] bounds].size.height;
    int position = 0;
    if (screenHeight == 568) {
        position = 124;
    } else {
        position = 80;
    }

    CAShapeLayer *circleLayer = [CAShapeLayer layer];
    UIBezierPath *path2 = [UIBezierPath bezierPathWithOvalInRect:
                           CGRectMake(0.0f, position, 320.0f, 320.0f)];
    [path2 setUsesEvenOddFillRule:YES];
    [circleLayer setPath:[path2 CGPath]];
    [circleLayer setFillColor:[[UIColor clearColor] CGColor]];

    // path and fillLayer are ivars declared elsewhere
    path = [UIBezierPath bezierPathWithRoundedRect:CGRectMake(0, 0, 320, screenHeight - 44) cornerRadius:0];
    [path appendPath:path2];
    [path setUsesEvenOddFillRule:YES];

    fillLayer = [CAShapeLayer layer];
    fillLayer.path = path.CGPath;
    fillLayer.fillRule = kCAFillRuleEvenOdd;
    fillLayer.fillColor = [UIColor blackColor].CGColor;
    fillLayer.opacity = 0.8;
    [self.view.layer addSublayer:fillLayer];

    UILabel *moveLabel = [[UILabel alloc] initWithFrame:CGRectMake(0, 10, 320, 50)];
    [moveLabel setText:@"Mueva y escale"]; // "Move and scale"
    [moveLabel setTextAlignment:NSTextAlignmentCenter];
    [moveLabel setTextColor:[UIColor whiteColor]];
    [self.view addSubview:moveLabel];
}
I need to get the portion of the image that is inside the circle when the user chooses the "Use Photo" option.
I wrote this category:
@implementation UIImage (PathCropping)

- (UIImage *)imageCroppedWithPath:(UIBezierPath *)path
{
    return [self imageCroppedWithPath:path invertPath:NO];
}

- (UIImage *)imageCroppedWithPath:(UIBezierPath *)path
                       invertPath:(BOOL)invertPath
{
    float scaleFactor = [self scale];
    CGRect imageRect = CGRectMake(0, 0, self.size.width * scaleFactor, self.size.height * scaleFactor);
    CGColorSpaceRef colorSpace = CGImageGetColorSpace(self.CGImage);
    CGContextRef context = CGBitmapContextCreate(NULL,
                                                 imageRect.size.width,
                                                 imageRect.size.height,
                                                 CGImageGetBitsPerComponent(self.CGImage),
                                                 0,
                                                 colorSpace,
                                                 (CGBitmapInfo)CGImageGetAlphaInfo(self.CGImage));
    CGContextSaveGState(context);

    // Flip the context so the UIKit-oriented path lines up with the CG image.
    CGContextTranslateCTM(context, 0.0, self.size.height * scaleFactor);
    CGContextScaleCTM(context, 1.0, -1.0);

    // Note: this mutates the caller's path.
    CGAffineTransform transform = CGAffineTransformMakeScale(scaleFactor, scaleFactor);
    [path applyTransform:transform];

    if (invertPath) {
        UIBezierPath *rectPath = [UIBezierPath bezierPathWithRect:imageRect];
        CGContextAddPath(context, rectPath.CGPath);
        CGContextAddPath(context, path.CGPath);
        CGContextEOClip(context);
    } else {
        CGContextAddPath(context, path.CGPath);
        CGContextClip(context);
    }

    CGContextScaleCTM(context, 1.0, -1.0);
    CGContextTranslateCTM(context, 0.0, -self.size.height * scaleFactor);
    CGContextDrawImage(context, imageRect, [self CGImage]);

    CGImageRef imageRef = CGBitmapContextCreateImage(context);
    CGContextRelease(context); // release the bitmap context so it is not leaked
    UIImage *croppedImage = [UIImage imageWithCGImage:imageRef scale:scaleFactor orientation:UIImageOrientationUp];
    CGImageRelease(imageRef);
    return croppedImage;
}

@end
The following image was cropped twice with the same path, once inverted and once not inverted. Then I merged them again:
NSString *imageName = @"IMG_2345.jpg";
UIImage *image = [UIImage imageNamed:imageName];
UIImage *image2 = [UIImage imageNamed:imageName];
CGSize size = CGSizeMake(320, 320 * image.size.height / image.size.width);
// resizedImage:interpolationQuality: and grayscaledImage come from separate UIImage categories
image = [image resizedImage:size interpolationQuality:kCGInterpolationHigh];
image2 = [image2 resizedImage:size interpolationQuality:kCGInterpolationHigh];
image2 = [image2 grayscaledImage];
image = [image imageCroppedWithPath:[self circlePathWithSize:circleSize] invertPath:NO];
image2 = [image2 imageCroppedWithPath:[self circlePathWithSize:circleSize] invertPath:YES];
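circlePathWithSize: and circleSize are not shown in the post; presumably the helper just rebuilds the circular crop path in the image's coordinate space, roughly like this sketch:
- (UIBezierPath *)circlePathWithSize:(CGSize)size
{
    // Hypothetical helper: a circle inscribed in a rect of the given size,
    // matching the oval overlay drawn in viewWillAppear.
    return [UIBezierPath bezierPathWithOvalInRect:CGRectMake(0.0f, 0.0f, size.width, size.height)];
}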
It is on GitHub.
In the code below I'm adding a small image to a large image. I need to be able to apply a transform to the small image. Right now something weird is going on; for example, if I pass in a transform of CGAffineTransformMakeRotation(M_PI_2), the newImage is not rotated in place but rather drawn somewhere off screen. How can I fix this?
+ (UIImage *)addToImage:(UIImage *)baseImage newImage:(UIImage *)newImage atPoint:(CGPoint)point transform:(CGAffineTransform)transform {
    UIGraphicsBeginImageContextWithOptions(baseImage.size, NO, 1);
    CGContextRef context = UIGraphicsGetCurrentContext();
    [baseImage drawInRect:CGRectMake(0, 0, baseImage.size.width, baseImage.size.height)];
    CGRect newRect = CGRectMake(point.x,
                                point.y,
                                newImage.size.width,
                                newImage.size.height);
    CGContextConcatCTM(context, transform);
    [newImage drawInRect:newRect];
    UIImage *result = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return result;
}
Note that I only want to apply the transform to newImage, not baseImage.
Seems like you just forgot to translate the origin of the context before and after the rotation. Here, try this:
- (UIImage *)addToImage:(UIImage *)baseImage newImage:(UIImage *)newImage atPoint:(CGPoint)point transform:(CGAffineTransform)transform {
    UIGraphicsBeginImageContextWithOptions(baseImage.size, NO, [[UIScreen mainScreen] scale]);
    CGContextRef context = UIGraphicsGetCurrentContext();

    CGRect baseRect = CGRectMake(0, 0, baseImage.size.width, baseImage.size.height);
    [baseImage drawInRect:baseRect];

    CGRect newRect = CGRectMake(point.x, point.y, newImage.size.width, newImage.size.height);
    CGContextTranslateCTM(context, point.x, point.y);
    CGContextConcatCTM(context, transform);
    CGContextTranslateCTM(context, -point.x, -point.y);
    [newImage drawInRect:newRect];

    UIImage *result = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return result;
}
EDIT:
If you want to rotate the image around its center, add the halves of the image's sides to the corresponding translation values:
float xTranslation = point.x+newRect.size.width/2;
float yTranslation = point.y+newRect.size.height/2;
CGContextTranslateCTM(context, xTranslation, yTranslation);
CGContextConcatCTM(context, transform);
CGContextTranslateCTM(context, -xTranslation, -yTranslation);
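For example, with the centre-pivot version above, compositing a smaller image rotated 90° in place might look like this (the image names are placeholders):
UIImage *base = [UIImage imageNamed:@"photo"];      // placeholder asset
UIImage *stamp = [UIImage imageNamed:@"watermark"]; // placeholder asset
UIImage *combined = [self addToImage:base
                            newImage:stamp
                             atPoint:CGPointMake(50, 50)
                           transform:CGAffineTransformMakeRotation(M_PI_2)];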
I am programming with Core Graphics for the first time, so I don't have much idea of how exactly I can solve this problem.
I am drawing a rounded-rect Bezier path, along with a gradient and a stroke, as the background view for UITableView cells. Everything has gone fine except for the extra black corners shown in the figure.
I have no idea why they appear or exactly what they are. Can anyone please help me? Thanks.
Code for creating the cell:
#import "CustomCellBackground.h"
.
.
.
-(UITableViewCell *) tableView:(UITableView *)tableView cellForRowAtIndexPath:(NSIndexPath *)indexPath
{
static NSString *CellIdentifier = #"Cell";
UITableViewCell *cell = [tableView
dequeueReusableCellWithIdentifier:CellIdentifier];
if (cell == nil) {
cell = [[UITableViewCell alloc] initWithStyle:UITableViewCellStyleValue1 reuseIdentifier:CellIdentifier];
cell.backgroundView = [[CustomCellBackground alloc] init];
cell.selectedBackgroundView = [[CustomCellBackground alloc]init];
}
// Configure the cell.
cell.selectionStyle = UITableViewCellSelectionStyleNone;
cell.textLabel.textColor = [UIColor whiteColor];
cell.textLabel.backgroundColor = [UIColor clearColor];
return cell;
}
In CustomCellBackground.m
- (void)drawRect:(CGRect)rect
{
    // Drawing code
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGPathRef path = [[UIBezierPath bezierPathWithRoundedRect:rect cornerRadius:10.0] CGPath];
    CGContextAddPath(context, path);
    CGContextClip(context);
    //CGContextSetLineJoin(context, kCGLineJoinRound);
    drawLinearGradientWithFourColors(context, self.bounds);

    CGContextSaveGState(context);
    CGContextSetStrokeColorWithColor(context, [UIColor whiteColor].CGColor);
    CGContextSetLineWidth(context, 1.0);
    CGContextAddPath(context, path);
    CGContextStrokePath(context);
    CGContextRestoreGState(context);
}

void drawLinearGradientWithFourColors(CGContextRef context, CGRect rect)
{
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGFloat locations[] = {0.0, 0.2, 0.5, 1.0};
    CGFloat colors[16] = {
        85/255.0, 85/255.0, 85/255.0, 1.0,
        45/255.0, 45/255.0, 45/255.0, 1.0,
        22/255.0, 22/255.0, 22/255.0, 1.0,
        0, 0, 0, 1.0
    };
    CGGradientRef gradient = CGGradientCreateWithColorComponents(colorSpace, colors, locations, 4);
    CGPoint startPoint = CGPointMake(CGRectGetMidX(rect), CGRectGetMinY(rect));
    CGPoint endPoint = CGPointMake(CGRectGetMidX(rect), CGRectGetMaxY(rect));
    CGContextSaveGState(context);
    CGContextDrawLinearGradient(context, gradient, startPoint, endPoint, 0);
    CGContextRestoreGState(context);
    CGGradientRelease(gradient);
    CGColorSpaceRelease(colorSpace);
}
When you init your view, set the opaque parameter to NO for both the view and its layer:
- (id)initWithFrame:(CGRect)frame
{
    self = [super initWithFrame:frame];
    if (self) {
        [self setOpaque:NO];
        [self.layer setOpaque:NO];
    }
    return self;
}
I googled it and pasted the code below; for more info, refer to the original link.
Use the code below:
typedef enum {
    CustomCellBackgroundViewPositionTop,
    CustomCellBackgroundViewPositionMiddle,
    CustomCellBackgroundViewPositionBottom,
    CustomCellBackgroundViewPositionSingle
} CustomCellBackgroundViewPosition;

@interface CustomCellBackgroundView : UIView {
    UIColor *borderColor;
    UIColor *fillColor;
    CustomCellBackgroundViewPosition position;
}

@property(nonatomic, retain) UIColor *borderColor, *fillColor;
@property(nonatomic) CustomCellBackgroundViewPosition position;

@end
- (void)drawRect:(CGRect)rect {
    // Drawing code
    CGContextRef c = UIGraphicsGetCurrentContext();
    CGContextSetFillColorWithColor(c, [fillColor CGColor]);
    CGContextSetStrokeColorWithColor(c, [borderColor CGColor]);

    if (position == CustomCellBackgroundViewPositionTop) {
        CGContextFillRect(c, CGRectMake(0.0f, rect.size.height - 10.0f, rect.size.width, 10.0f));
        CGContextBeginPath(c);
        CGContextMoveToPoint(c, 0.0f, rect.size.height - 10.0f);
        CGContextAddLineToPoint(c, 0.0f, rect.size.height);
        CGContextAddLineToPoint(c, rect.size.width, rect.size.height);
        CGContextAddLineToPoint(c, rect.size.width, rect.size.height - 10.0f);
        CGContextStrokePath(c);
        CGContextClipToRect(c, CGRectMake(0.0f, 0.0f, rect.size.width, rect.size.height - 10.0f));
    } else if (position == CustomCellBackgroundViewPositionBottom) {
        CGContextFillRect(c, CGRectMake(0.0f, 0.0f, rect.size.width, 10.0f));
        CGContextBeginPath(c);
        CGContextMoveToPoint(c, 0.0f, 10.0f);
        CGContextAddLineToPoint(c, 0.0f, 0.0f);
        CGContextStrokePath(c);
        CGContextBeginPath(c);
        CGContextMoveToPoint(c, rect.size.width, 0.0f);
        CGContextAddLineToPoint(c, rect.size.width, 10.0f);
        CGContextStrokePath(c);
        CGContextClipToRect(c, CGRectMake(0.0f, 10.0f, rect.size.width, rect.size.height));
    } else if (position == CustomCellBackgroundViewPositionMiddle) {
        CGContextFillRect(c, rect);
        CGContextBeginPath(c);
        CGContextMoveToPoint(c, 0.0f, 0.0f);
        CGContextAddLineToPoint(c, 0.0f, rect.size.height);
        CGContextAddLineToPoint(c, rect.size.width, rect.size.height);
        CGContextAddLineToPoint(c, rect.size.width, 0.0f);
        CGContextStrokePath(c);
        return; // no need to bother drawing rounded corners, so we return
    }

    // At this point the clip rect is set to only draw the appropriate
    // corners, so we fill and stroke a rounded rect taking the entire rect
    CGContextBeginPath(c);
    addRoundedRectToPath(c, rect, 10.0f, 10.0f);
    CGContextFillPath(c);

    CGContextSetLineWidth(c, 1);
    CGContextBeginPath(c);
    addRoundedRectToPath(c, rect, 10.0f, 10.0f);
    CGContextStrokePath(c);
}
static void addRoundedRectToPath(CGContextRef context, CGRect rect,
                                 float ovalWidth, float ovalHeight)
{
    float fw, fh;
    if (ovalWidth == 0 || ovalHeight == 0) {                  // 1
        CGContextAddRect(context, rect);
        return;
    }
    CGContextSaveGState(context);                             // 2
    CGContextTranslateCTM(context, CGRectGetMinX(rect),       // 3
                          CGRectGetMinY(rect));
    CGContextScaleCTM(context, ovalWidth, ovalHeight);        // 4
    fw = CGRectGetWidth(rect) / ovalWidth;                    // 5
    fh = CGRectGetHeight(rect) / ovalHeight;                  // 6
    CGContextMoveToPoint(context, fw, fh/2);                  // 7
    CGContextAddArcToPoint(context, fw, fh, fw/2, fh, 1);     // 8
    CGContextAddArcToPoint(context, 0, fh, 0, fh/2, 1);       // 9
    CGContextAddArcToPoint(context, 0, 0, fw/2, 0, 1);        // 10
    CGContextAddArcToPoint(context, fw, 0, fw, fh/2, 1);      // 11
    CGContextClosePath(context);                              // 12
    CGContextRestoreGState(context);                          // 13
}
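Hooking this view up in cellForRowAtIndexPath: would then look something like the following sketch (the colors and position are just examples):
CustomCellBackgroundView *backgroundView = [[CustomCellBackgroundView alloc] initWithFrame:CGRectZero];
backgroundView.fillColor = [UIColor darkGrayColor];
backgroundView.borderColor = [UIColor whiteColor];
backgroundView.position = CustomCellBackgroundViewPositionMiddle; // choose per row
cell.backgroundView = backgroundView;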
I have a UIImageView displaying a picture.
I registered a pinch gesture to zoom in/out and a pan gesture to draw on it (I have a good reason to use the pan gesture for that instead of touchesMoved).
Here is the pinch target:
- (void)pinchGestureActionOnSubject:(UIPinchGestureRecognizer *)pinch
{
    if ([(UIPinchGestureRecognizer *)pinch state] == UIGestureRecognizerStateBegan)
    {
        _lastScale = 1.0;
    }
    CGFloat scale = 1.0 - (_lastScale - [(UIPinchGestureRecognizer *)pinch scale]);
    CGAffineTransform currentTransform = self.imageViewSubject.transform;
    CGAffineTransform newTransform = CGAffineTransformScale(currentTransform, scale, scale);
    [self.imageViewSubject setTransform:newTransform];
    _lastScale = [(UIPinchGestureRecognizer *)pinch scale];
}
and here is the pan target:
-(void)panOnSubjectForDrawing:(id)sender
{
    if ([sender numberOfTouches] > 0)
    {
        CGPoint currentPoint = [sender locationOfTouch:0 inView:self.imageViewSubject];
        if (lastPoint.x == 0 || lastPoint.y == 0)
        {
            lastPoint.x = currentPoint.x;
            lastPoint.y = currentPoint.y;
        }
        UIGraphicsBeginImageContext(self.imageViewSubject.bounds.size);
        CGContextRef context = UIGraphicsGetCurrentContext();
        [self.imageViewSubject.image drawInRect:CGRectMake(0, 0, self.imageViewSubject.bounds.size.width, self.imageViewSubject.bounds.size.height) blendMode:kCGBlendModeNormal alpha:1.0];
        CGContextSetLineCap(context, kCGLineCapRound);
        CGContextSetLineWidth(context, 10); // Line width
        CGContextSetBlendMode(UIGraphicsGetCurrentContext(), kCGBlendModeClear); // Transparent
        CGContextMoveToPoint(context, lastPoint.x, lastPoint.y);
        CGContextAddLineToPoint(context, currentPoint.x, currentPoint.y);
        CGContextStrokePath(context);
        self.imageViewSubject.image = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
        lastPoint = currentPoint;
    }
}
The problem is that if I draw after zooming in, the picture gets blurry, but if I draw without zooming, the image stays intact.
I heard that this can happen because my frame has to be integral, so I added:
self.imageViewSubject.frame = CGRectIntegral(self.imageViewSubject.frame);
before drawing, but it is worse: much, much blurrier.
Any idea?
I want to recreate a tab bar but I stumbled on this problem. As you can see in the images below, my current selected tab bar item (right image) is a lot less crisp and sharp than the one from the UITabBar. Notice the small 1-point border around the icon on the left (which I don't know how to do), as well as the gradient inside the icon, which is much more noticeable in mine. I already thought of Core Graphics and Core Image filters as possible approaches but can't seem to get that effect. I found an older thread which covers part of what I want, but the answer doesn't seem to work for me and requires a manual loop through the pixels of the image (which I don't know is desirable). Can someone help me?
This is the code I'm currently using, which, by the way, you're welcome to correct if you see any mistakes, because I'm starting out with Core Graphics:
- (void)drawRect:(CGRect)rect
{
    CGContextRef context = UIGraphicsGetCurrentContext();
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextSaveGState(context);
    {
        /* Adjust for different coordinate systems from UIKit and Core Graphics and center the image */
        CGContextTranslateCTM(context, self.bounds.size.width/2.0 - self.image.size.width/2.0, self.bounds.size.height - self.bounds.size.height/2.0 + self.image.size.height/2.0);
        CGContextScaleCTM(context, 1.0f, -1.0f);
        CGRect rect = CGRectMake(0, 0, self.image.size.width, self.image.size.height);

        /* Add a drop shadow */
        UIColor *dropShadowColor = [UIColor colorWithRed:0.0f green:0.0f blue:0.0f alpha:0.8f];
        CGContextSetShadowWithColor(context, CGSizeMake(0, 1), 5, dropShadowColor.CGColor);

        /* Draw the original image */
        CGContextDrawImage(context, rect, self.image.CGImage);

        /* Clip to the original image, so that we only draw the shadows on the
           inside of the image but nothing outside. */
        CGContextClipToMask(context, rect, self.image.CGImage);

        if (self.isSelected) {
            /* draw background image */
            CGImageRef background = [UIImage imageNamed:@"UITabBarBlueGradient"].CGImage;
            CGContextDrawImage(context, rect, background);
        }
        else {
            /* draw background color to unselected items */
            CGColorRef backgroundColor = [UIColor colorWithRed:95/255.0 green:95/255.0 blue:95/255.0 alpha:1].CGColor;
            CGContextSetFillColorWithColor(context, backgroundColor);
            CGContextFillRect(context, rect);

            /* location of the gradient's colors */
            CGFloat locations[] = { 0.0, 1.0 };
            NSArray *colors = [NSArray arrayWithObjects:(id)[UIColor colorWithRed:1 green:1 blue:1 alpha:0].CGColor, (id)[UIColor colorWithRed:1 green:1 blue:1 alpha:0.6].CGColor, nil];

            /* create the gradient with colors and locations */
            CGGradientRef gradient = CGGradientCreateWithColors(colorSpace, (__bridge CFArrayRef)colors, locations);
            {
                /* start and end points of the gradient */
                CGPoint startPoint = CGPointMake(CGRectGetMidX(rect), CGRectGetMinY(rect));
                CGPoint endPoint = CGPointMake(CGRectGetMidX(rect), CGRectGetMaxY(rect));

                /* draw gradient */
                CGContextDrawLinearGradient(context, gradient, startPoint, endPoint, 0);
            }
            CGGradientRelease(gradient);
        }
    }
    CGContextRestoreGState(context);
    CGColorSpaceRelease(colorSpace);
}
I'm working on this too. An optimization you can probably make is, instead of rendering the UIImage each time drawRect: is called, to save off the UIImage objects in an ivar and just update a UIImageView's image property to display them.
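A rough sketch of that caching idea (cachedIcon and iconImageView are made-up names, not part of the original post):
// Render the icon once, keep it in an ivar, and let a UIImageView display it.
- (void)refreshIcon {
    if (cachedIcon == nil) { // cachedIcon: hypothetical UIImage ivar
        cachedIcon = [self compositeOverSlate:[self drawTabBarOverSourceWithBlend:kCGBlendModeSourceIn]];
    }
    self.iconImageView.image = cachedIcon; // iconImageView: hypothetical UIImageView subview
}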
I'm generating my image with the "shine" like this (plus_icon.png is a 30 x 30 image with a 4 px wide black cross occupying the entire thing on a transparent background, which renders like image views 2 and 4 below):
-(UIImage *)tabBarImage {
    UIGraphicsBeginImageContext(CGSizeMake(60, 60));
    UIImage *image = [UIImage imageNamed:@"plus_icon"];
    CGContextRef ctx = UIGraphicsGetCurrentContext();
    CGContextSetFillColorWithColor(ctx, [[UIColor clearColor] CGColor]);
    CGContextFillRect(ctx, CGRectMake(0, 0, 60, 60));
    CGRect imageRect = CGRectMake(15, 15, 30, 30);
    CGContextDrawImage(ctx, imageRect, [image CGImage]);
    image = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return image;
}
-(UIImage *)sourceImage {
    UIGraphicsBeginImageContext(CGSizeMake(60.0, 60.0));
    CGContextRef context = UIGraphicsGetCurrentContext();
    size_t num_locations = 2;
    CGFloat locations[2] = { 0.3, 1.0 };
    // NC() is presumably a helper macro that converts 0-255 color components to the 0.0-1.0 range
    CGFloat components[8] = { NC(72), NC(122), NC(229), 1.0, NC(110), NC(202), NC(255), 1.0 };
    CGColorSpaceRef cspace;
    CGGradientRef gradient;
    cspace = CGColorSpaceCreateDeviceRGB();
    gradient = CGGradientCreateWithColorComponents(cspace, components, locations, num_locations);
    CGPoint sPoint = CGPointMake(0.0, 15.0);
    CGPoint ePoint = CGPointMake(0.0, 45.0);
    CGContextDrawLinearGradient(context, gradient, sPoint, ePoint, kCGGradientDrawsBeforeStartLocation | kCGGradientDrawsAfterEndLocation);
    CGGradientRelease(gradient);
    CGColorSpaceRelease(cspace);
    [self addShineToContext:context];
    UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return image;
}
-(void)addShineToContext:(CGContextRef)context {
    CGContextSaveGState(context);
    size_t num_locations = 2;
    CGFloat locations[2] = { 0.3, 0.7 };
    CGFloat components[8] = { 1.0, 1.0, 1.0, 0.8, 1.0, 1.0, 1.0, 0.0 }; //{0.82, 0.82, 0.82, 0.4, 0.92, 0.92, 0.92, .8 };
    CGColorSpaceRef cspace;
    CGGradientRef gradient;
    cspace = CGColorSpaceCreateDeviceRGB();
    gradient = CGGradientCreateWithColorComponents(cspace, components, locations, num_locations);
    CGPoint sPoint = CGPointMake(25.0f, 15.0);
    CGPoint ePoint = CGPointMake(35.0f, 44.0f);
    [self addShineClip:context];
    CGContextDrawLinearGradient(context, gradient, sPoint, ePoint, kCGGradientDrawsBeforeStartLocation);
    // CGContextSetFillColorWithColor(context, [[UIColor redColor] CGColor]);
    // CGContextFillRect(context, CGRectMake(15, 15, 30, 30));
    CGColorSpaceRelease(cspace);
    CGGradientRelease(gradient);
    CGContextRestoreGState(context);
}
-(void)addShineClip:(CGContextRef)context {
    CGContextMoveToPoint(context, 15, 35);
    CGContextAddQuadCurveToPoint(context, 25, 30, 45, 28);
    CGContextAddLineToPoint(context, 45, 15);
    CGContextAddLineToPoint(context, 15, 15);
    CGContextClosePath(context);
    CGContextClip(context);
}
- (void)viewDidLoad
{
    [super viewDidLoad];
    // Do any additional setup after loading the view, typically from a nib.
    self.imageView1.image = [self compositeOverSlate:[self drawTabBarOverSourceWithBlend:kCGBlendModeSourceIn]];
    self.imageView2.image = [self compositeOverSlate:[self drawTabBarOverSourceWithBlend:kCGBlendModeDestinationIn]];
    self.imageView3.image = [self compositeOverSlate:[self drawTabBarOverSourceWithBlend:kCGBlendModeSourceAtop]];
    self.imageView4.image = [self compositeOverSlate:[self drawTabBarOverSourceWithBlend:kCGBlendModeDestinationAtop]];
}
-(UIImage *)compositeOverSlate:(UIImage *)image {
    UIGraphicsBeginImageContext(image.size);
    CGContextRef ctx = UIGraphicsGetCurrentContext();
    CGRect imageRect = CGRectMake(0, 0, 0, 0);
    imageRect.size = image.size;
    CGContextSetFillColorWithColor(ctx, [[UIColor darkGrayColor] CGColor]);
    CGContextFillRect(ctx, imageRect);
    CGContextSetShadow(ctx, CGSizeMake(-1.0, 2.0), .5);
    CGContextDrawImage(ctx, imageRect, [image CGImage]);
    UIImage *result = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return result;
}
-(UIImage *)drawTabBarOverSourceWithBlend:(CGBlendMode)blendMode {
    UIGraphicsBeginImageContext(CGSizeMake(60, 60));
    CGContextRef ctx = UIGraphicsGetCurrentContext();
    CGContextDrawImage(ctx, CGRectMake(0, 0, 60.0, 60.0), [[self sourceImage] CGImage]);
    CGContextSetBlendMode(ctx, blendMode);
    CGContextDrawImage(ctx, CGRectMake(0, 0, 60.0, 60.0), [[self tabBarImage] CGImage]);
    UIImage *result = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return result;
}
- (void)didReceiveMemoryWarning
{
    [super didReceiveMemoryWarning];
    // Dispose of any resources that can be recreated.
}
I don't have the border outline cracked yet, but I will update this if I do crack it.