I use Core Image to detect faces. I want to crop the face after face detection. I use this snippet to detect a face:
-(void)markFaces:(UIImageView *)facePicture{
CIImage* image = [CIImage imageWithCGImage:imageView.image.CGImage];
CIDetector* detector = [CIDetector detectorOfType:CIDetectorTypeFace
context:nil options:[NSDictionary dictionaryWithObject:CIDetectorAccuracyHigh forKey:CIDetectorAccuracy]];
NSArray* features = [detector featuresInImage:image];
CGAffineTransform transform = CGAffineTransformMakeScale(1, -1);
transform = CGAffineTransformTranslate(transform, 0, -imageView.bounds.size.height);
for(CIFaceFeature* faceFeature in features)
{
// Get the face rect: Translate CoreImage coordinates to UIKit coordinates
const CGRect faceRect = CGRectApplyAffineTransform(faceFeature.bounds, transform);
faceView = [[UIView alloc] initWithFrame:faceRect];
faceView.layer.borderWidth = 1;
faceView.layer.borderColor = [[UIColor redColor] CGColor];
UIGraphicsBeginImageContext(faceView.bounds.size);
[faceView.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage *viewImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
//Blur the UIImage with a CIFilter
CIImage *imageToBlur = [CIImage imageWithCGImage:viewImage.CGImage];
CIFilter *gaussianBlurFilter = [CIFilter filterWithName:@"CIGaussianBlur"];
[gaussianBlurFilter setValue:imageToBlur forKey:@"inputImage"];
[gaussianBlurFilter setValue:[NSNumber numberWithFloat:10] forKey:@"inputRadius"];
CIImage *resultImage = [gaussianBlurFilter valueForKey:@"outputImage"];
UIImage *endImage = [[UIImage alloc] initWithCIImage:resultImage];
//Place the UIImage in a UIImageView
UIImageView *newView = [[UIImageView alloc] initWithFrame:self.view.bounds];
newView.image = endImage;
[self.view addSubview:newView];
CGFloat faceWidth = faceFeature.bounds.size.width;
[imageView addSubview:faceView];
// LEFT EYE
if(faceFeature.hasLeftEyePosition)
{
const CGPoint leftEyePos = CGPointApplyAffineTransform(faceFeature.leftEyePosition, transform);
UIView *leftEyeView = [[UIView alloc] initWithFrame:CGRectMake(leftEyePos.x - faceWidth*EYE_SIZE_RATE*0.5f,
leftEyePos.y - faceWidth*EYE_SIZE_RATE*0.5f
,faceWidth*EYE_SIZE_RATE,
faceWidth*EYE_SIZE_RATE)];
NSLog(#"Left Eye X = %0.1f Y = %0.1f Width = %0.1f Height = %0.1f",leftEyePos.x - faceWidth*EYE_SIZE_RATE*0.5f,
leftEyePos.y - faceWidth*EYE_SIZE_RATE*0.5f,faceWidth*EYE_SIZE_RATE,
faceWidth*EYE_SIZE_RATE);
leftEyeView.backgroundColor = [[UIColor magentaColor] colorWithAlphaComponent:0.3];
leftEyeView.layer.cornerRadius = faceWidth*EYE_SIZE_RATE*0.5;
[imageView addSubview:leftEyeView];
}
// RIGHT EYE
if(faceFeature.hasRightEyePosition)
{
const CGPoint rightEyePos = CGPointApplyAffineTransform(faceFeature.rightEyePosition, transform);
UIView *rightEye = [[UIView alloc] initWithFrame:CGRectMake(rightEyePos.x - faceWidth*EYE_SIZE_RATE*0.5,
rightEyePos.y - faceWidth*EYE_SIZE_RATE*0.5,
faceWidth*EYE_SIZE_RATE,
faceWidth*EYE_SIZE_RATE)];
NSLog(#"Right Eye X = %0.1f Y = %0.1f Width = %0.1f Height = %0.1f",rightEyePos.x - faceWidth*EYE_SIZE_RATE*0.5f,
rightEyePos.y - faceWidth*EYE_SIZE_RATE*0.5f,faceWidth*EYE_SIZE_RATE,
faceWidth*EYE_SIZE_RATE);
rightEye.backgroundColor = [[UIColor blueColor] colorWithAlphaComponent:0.2];
rightEye.layer.cornerRadius = faceWidth*EYE_SIZE_RATE*0.5;
[imageView addSubview:rightEye];
}
// MOUTH
if(faceFeature.hasMouthPosition)
{
const CGPoint mouthPos = CGPointApplyAffineTransform(faceFeature.mouthPosition, transform);
UIView* mouth = [[UIView alloc] initWithFrame:CGRectMake(mouthPos.x - faceWidth*MOUTH_SIZE_RATE*0.5,
mouthPos.y - faceWidth*MOUTH_SIZE_RATE*0.5,
faceWidth*MOUTH_SIZE_RATE,
faceWidth*MOUTH_SIZE_RATE)];
NSLog(#"Mouth X = %0.1f Y = %0.1f Width = %0.1f Height = %0.1f",mouthPos.x - faceWidth*MOUTH_SIZE_RATE*0.5f,
mouthPos.y - faceWidth*MOUTH_SIZE_RATE*0.5f,faceWidth*MOUTH_SIZE_RATE,
faceWidth*MOUTH_SIZE_RATE);
mouth.backgroundColor = [[UIColor greenColor] colorWithAlphaComponent:0.3];
mouth.layer.cornerRadius = faceWidth*MOUTH_SIZE_RATE*0.5;
[imageView addSubview:mouth];
}
}
}
What I want is simply to crop the face.
You can easily crop the face using this function. It is tested and works properly.
-(void)faceWithFrame:(CGRect)frame{
CGRect rect = frame;
CGImageRef imageRef = CGImageCreateWithImageInRect([self.imageView.image CGImage], rect);
UIImage *cropedImage = [UIImage imageWithCGImage:imageRef];
CGImageRelease(imageRef); // release the CGImage created above to avoid a leak
self.cropedImg.image = cropedImage;
}
You just pass the face frame, and the function above will give you the cropped face image.
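For example (a sketch based on the detection code above, not part of the original answer): the bounds reported by CIFaceFeature use Core Image's bottom-left origin, while CGImageCreateWithImageInRect expects top-left pixel coordinates, so the rect should be flipped before passing it in:
// Assumes the UIImage's orientation is UIImageOrientationUp and reuses
// `faceFeature` / `imageView` from the detection code above.
CGFloat imageHeight = CGImageGetHeight(imageView.image.CGImage);
CGRect cropRect = faceFeature.bounds;
cropRect.origin.y = imageHeight - cropRect.origin.y - cropRect.size.height;
[self faceWithFrame:cropRect];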
Related
Here I am getting the image in Img, but it shows nil. However, when I print the size of the image or display the image (Img) in an image view, I do get its size and the image appears in the image view. Then, when I pass the size of the image (Img) to the renderer, the renderer is nil. I tried printing it in the log as well, and it is null. Any help will be appreciated.
My code:
UIImage* Img =_myimage;
UIImageView * imageview = [[UIImageView alloc]initWithImage:Img];
// the renderer I am getting is nil on iOS 9.3
UIGraphicsImageRenderer *renderer = [[UIGraphicsImageRenderer alloc] initWithSize:_myimage.size];
// CGSize size = CGSizeMake(Img.size.width, Img.size.height);
NSLog(@"renderer %@", renderer);
UIBezierPath *Path;
Path = [UIBezierPath bezierPath];
Path.contractionFactor = 0.8;
NSValue *mvalue = [_pointsarray objectAtIndex:0];
CGPoint mp2 = [mvalue CGPointValue];
[Path moveToPoint:mp2];
[Path addBezierThroughPoints:_pointsarray];
[Path closePath];
Path.lineWidth = 2;
CAShapeLayer*shapeLayer = [CAShapeLayer new];
shapeLayer.path=Path.CGPath;
[shapeLayer setFillColor:[UIColor redColor].CGColor];
[shapeLayer fillColor];
UIImage *shapeImage = [renderer imageWithActions:^(UIGraphicsImageRendererContext * _Nonnull context){
[shapeLayer renderInContext:context.CGContext];}];
CIImage * shapeCimage = [[CIImage alloc] initWithImage:shapeImage];
CIFilter *gaussianBlurFilter = [CIFilter filterWithName:@"CIGaussianBlur"];
[gaussianBlurFilter setValue:shapeCimage forKey:@"inputImage"];
[gaussianBlurFilter setValue:@7 forKey:@"inputRadius"];
CIImage * blurredCIImage = [gaussianBlurFilter valueForKey:kCIOutputImageKey];
UIImage * blurredImage = [UIImage imageWithCIImage:blurredCIImage];
[self.view addSubview:imageview];
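Note that UIGraphicsImageRenderer was introduced in iOS 10, which is why the renderer comes back nil on iOS 9.3. Below is a minimal sketch of a guarded fallback (my own suggestion, reusing _myimage and shapeLayer from the code above):
UIImage *shapeImage;
if (NSClassFromString(@"UIGraphicsImageRenderer") != nil) {
    // iOS 10 and later: the renderer is available.
    UIGraphicsImageRenderer *renderer = [[UIGraphicsImageRenderer alloc] initWithSize:_myimage.size];
    shapeImage = [renderer imageWithActions:^(UIGraphicsImageRendererContext * _Nonnull context) {
        [shapeLayer renderInContext:context.CGContext];
    }];
} else {
    // Pre-iOS 10 fallback: render the layer into a classic bitmap context.
    UIGraphicsBeginImageContextWithOptions(_myimage.size, NO, 0.0);
    [shapeLayer renderInContext:UIGraphicsGetCurrentContext()];
    shapeImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
}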
How can I call this function?
I am trying to call this function from -viewDidLoad.
I tried [circularImageWithImage(imageView.image, myclor, 0.2)];
static UIImage *circularImageWithImage(UIImage *inputImage,
UIColor *borderColor,
CGFloat borderWidth)
{
CGRect rect = (CGRect){ .origin=CGPointZero, .size=inputImage.size };
UIGraphicsBeginImageContextWithOptions(rect.size, NO, inputImage.scale); {
// Fill the entire circle with the border color.
[borderColor setFill];
[[UIBezierPath bezierPathWithOvalInRect:rect] fill];
// Clip to the interior of the circle (inside the border).
CGRect interiorBox = CGRectInset(rect, borderWidth, borderWidth);
UIBezierPath *interior = [UIBezierPath bezierPathWithOvalInRect:interiorBox];
[interior addClip];
[inputImage drawInRect:rect];
}
UIImage *outputImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return outputImage;
}
You can try this code.
UIImage *image = [UIImage imageNamed:@"yourImage.png"];
UIImageView *imageView = [[UIImageView alloc]initWithFrame:CGRectMake(0, 0, 100, 100)];//set your frame
//imageView.center = self.view.center;
UIImage *modifiedImage = circularImageWithImage(image, [UIColor redColor], 1.2); // a border width >= 1.0 is better; otherwise you may not see the border
imageView.image = modifiedImage;
[self.view addSubview:imageView];
Note that circularImageWithImage() is a plain C function, not an Objective-C method, so call it without square brackets:
UIImage *sample = circularImageWithImage(imageView.image, myclor, 0.2);
How can I use SVGKImage to change the SVG fill color?
SVGKImage *svgImage = [SVGKImage imageNamed:#"card.svg"];
svgImage.size = self.bounds.size;
SVGKLayeredImageView *imageLayer = [[SVGKLayeredImageView alloc] initWithSVGKImage:svgImage];
SVGKLayer *layer = (SVGKLayer *)imageLayer.layer;
self.layer.mask = layer;
Here is an easy way to get the CAShapeLayer:
SVGKImage *svgImage = [SVGKImage imageNamed:#"test.svg"];
CAShapeLayer *thisLayer = svgImage.CALayerTree;
//thisLayer has the UIBezierPath info in test.svg,so,you can do anything with it.
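For instance (a short sketch, assuming the root layer really is a CAShapeLayer with a path, as stated above; someView is a placeholder for whatever view should display the shape):
CAShapeLayer *tintedLayer = [CAShapeLayer layer];
tintedLayer.path = thisLayer.path;
tintedLayer.fillColor = [UIColor redColor].CGColor;
[someView.layer addSublayer:tintedLayer];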
You have to go through the CALayer sublayers to change the color:
SVGKImage *svgImage = [SVGKImage imageNamed:#"Anchor.svg"];
SVGKLayeredImageView *svgImageView = [[SVGKLayeredImageView alloc] initWithSVGKImage:svgImage];
[capturedImageView addSubview:svgImageView];
CALayer* layer = svgImageView.layer;
for (CALayer *subLayer in layer.sublayers) {
DLog(@"%@", [subLayer class]);
for (CALayer *subSubLayer in subLayer.sublayers) {
DLog(@"%@", [subSubLayer class]);
for (CALayer *subSubSubLayer in subSubLayer.sublayers) {
DLog(@"%@", [subSubSubLayer class]);
if( [subSubSubLayer isKindOfClass:[CAShapeLayer class]]){
CAShapeLayer* shapeLayer = (CAShapeLayer*)subSubSubLayer;
shapeLayer.fillColor = [UIColor redColor].CGColor;
}
}
}
}
Hope this helps.
Source : https://github.com/SVGKit/SVGKit/issues/98
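The loop above only descends three levels of sublayers. A recursive variant (my own sketch, not from the linked issue) handles any nesting depth:
// Recursively recolor every CAShapeLayer found under `layer`.
static void recolorShapeLayers(CALayer *layer, UIColor *fillColor) {
    if ([layer isKindOfClass:[CAShapeLayer class]]) {
        ((CAShapeLayer *)layer).fillColor = fillColor.CGColor;
    }
    for (CALayer *sublayer in layer.sublayers) {
        recolorShapeLayers(sublayer, fillColor);
    }
}
// Usage with the SVGKLayeredImageView from above:
recolorShapeLayers(svgImageView.layer, [UIColor redColor]);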
I know a method that can change the color of a UIImage.
First, we should get the UIImage from the SVGKImage.
UIImage *img = [SVGKImage imageNamed:@"imageName"].UIImage;
And then, we can define a method like this:
- (UIImage *)changeTintColorWithImage:(UIImage *)img color:(UIColor *)tintColor {
UIImage *imageIn = img;
CGRect rect = CGRectMake(0, 0, imageIn.size.width, imageIn.size.height);
CGImageAlphaInfo alphaInfo = CGImageGetAlphaInfo(imageIn.CGImage);
BOOL opaque = alphaInfo == kCGImageAlphaNoneSkipLast
|| alphaInfo == kCGImageAlphaNoneSkipFirst
|| alphaInfo == kCGImageAlphaNone;
UIGraphicsBeginImageContextWithOptions(imageIn.size, opaque, imageIn.scale);
CGContextRef context = UIGraphicsGetCurrentContext();
CGContextTranslateCTM(context, 0, imageIn.size.height);
CGContextScaleCTM(context, 1.0, -1.0);
CGContextSetBlendMode(context, kCGBlendModeNormal);
CGContextClipToMask(context, rect, imageIn.CGImage);
CGContextSetFillColorWithColor(context, tintColor.CGColor);
CGContextFillRect(context, rect);
UIImage *imageOut = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return imageOut;
}
OK, this method can help us change the color of an image; it mainly uses Core Graphics APIs.
I suggest creating a category on UIImage; it will be convenient to reuse.
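For example, a minimal sketch of such a category (file and method names are only illustrative):
// UIImage+Tint.h
#import <UIKit/UIKit.h>
@interface UIImage (Tint)
- (UIImage *)tintedImageWithColor:(UIColor *)tintColor;
@end
// UIImage+Tint.m
@implementation UIImage (Tint)
- (UIImage *)tintedImageWithColor:(UIColor *)tintColor {
    CGRect rect = CGRectMake(0, 0, self.size.width, self.size.height);
    UIGraphicsBeginImageContextWithOptions(self.size, NO, self.scale);
    CGContextRef context = UIGraphicsGetCurrentContext();
    // Flip the context so the CGImage mask is applied right side up.
    CGContextTranslateCTM(context, 0, self.size.height);
    CGContextScaleCTM(context, 1.0, -1.0);
    // Clip to the image's alpha channel, then fill with the tint color.
    CGContextClipToMask(context, rect, self.CGImage);
    CGContextSetFillColorWithColor(context, tintColor.CGColor);
    CGContextFillRect(context, rect);
    UIImage *tinted = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return tinted;
}
@end
Then: UIImage *tinted = [[SVGKImage imageNamed:@"imageName"].UIImage tintedImageWithColor:[UIColor redColor]];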
Now you can change the color of the PNG image itself.
Objective C:
UIImage *img = [UIImage imageNamed:@"imagename"];
UIImage *tintedImage = [img imageWithRenderingMode:UIImageRenderingModeAlwaysTemplate];
UIImageView *imgVw = [[UIImageView alloc] initWithImage:tintedImage];
imgVw.frame = CGRectMake(0, 20, 100, 100);
imgVw.tintColor = [UIColor greenColor];
[self.view addSubview:imgVw];
SWIFT:
let img = UIImage(named: "imagename")
let tintedImage: UIImage? = img?.withRenderingMode(.alwaysTemplate)
let imgVw = UIImageView(image: tintedImage)
imgVw.frame = CGRect(x: 0, y: 20, width: 100, height: 100)
imgVw.tintColor = UIColor.green
view.addSubview(imgVw)
cheers!!!
How can I add a circular, triangular, or square image overlay to UIImagePickerController, as in the picture above, so that when the user presses the camera button only the image inside the circle, triangle, or square is captured?
I have used the code below, but it is not giving the proper result (see the picture below); it is the exact reverse of what I want to achieve.
-(void)openCamera {
self.pickerController = [[UIImagePickerController alloc] init];
self.pickerController.delegate = self;
if ([UIImagePickerController isSourceTypeAvailable:UIImagePickerControllerSourceTypeCamera])
{
[self.pickerController setSourceType:UIImagePickerControllerSourceTypeCamera];
} else {
UIAlertView *alertView = [[UIAlertView alloc] initWithTitle:@"Tatoo App" message:@"No Camera Available" delegate:nil cancelButtonTitle:@"Ok" otherButtonTitles:nil];
[alertView show];
}
self.pickerController.allowsEditing = NO;
self.pickerController.showsCameraControls = NO;
// creating overlayView
self.overlayView = [[UIView alloc] initWithFrame:CGRectMake(0, 0, self.pickerController.view.frame.size.width-20, self.pickerController.view.frame.size.height)];
// letting png transparency be
self.overlayView.backgroundColor = [UIColor colorWithRed:0/255 green:0/255 blue:0/255 alpha:0.5];
[self.overlayView.layer setOpaque:NO];
self.overlayView.opaque = NO;
/*
UIView *circleView = [[UIView alloc]initWithFrame:CGRectMake(self.pickerController.view.frame.size.width/4, self.pickerController.view.frame.size.width/4, self.pickerController.view.frame.size.width/2, self.pickerController.view.frame.size.height/4)];
circleView.backgroundColor = [UIColor greenColor];
circleView.layer.cornerRadius = circleView.frame.size.width / 2;
circleView.clipsToBounds = YES;
[self.overlayView addSubview:circleView];
[self.overlayView bringSubviewToFront:circleView];
*/
CAShapeLayer *maskLayer = [CAShapeLayer layer];
self.overlayView.layer.mask = maskLayer;
self.maskLayer = maskLayer;
// create shape layer for circle we'll draw on top of image (the boundary of the circle)
CAShapeLayer *circleLayer = [CAShapeLayer layer];
circleLayer.lineWidth = 3.0;
circleLayer.fillColor = [[UIColor clearColor] CGColor];
circleLayer.strokeColor = [[UIColor blackColor] CGColor];
[self.overlayView.layer addSublayer:circleLayer];
self.circleLayer = circleLayer;
// create circle path
[self updateCirclePathAtLocation:CGPointMake(self.view.bounds.size.width / 2.0, self.view.bounds.size.height / 2.0) radius:self.view.bounds.size.width * 0.30];
CGFloat scale = [[self.overlayView.window screen] scale];
CGFloat radius = self.circleRadius * scale;
CGPoint center = CGPointMake(self.circleCenter.x * scale, self.circleCenter.y * scale);
CGRect frame = CGRectMake(center.x - radius,
center.y - radius,
radius * 2.0,
radius * 2.0);
// temporarily remove the circleLayer
// CALayer *circleLayer = self.circleLayer;
// [self.circleLayer removeFromSuperlayer];
// render the clipped image
UIGraphicsBeginImageContextWithOptions(self.imageOverlay.frame.size, NO, 0.0);
CGContextRef context = UIGraphicsGetCurrentContext();
if ([self.overlayView respondsToSelector:@selector(drawViewHierarchyInRect:afterScreenUpdates:)])
{
// if iOS 7, just draw it
[self.overlayView drawViewHierarchyInRect:self.overlayView.bounds afterScreenUpdates:YES];
}
else
{
// if pre iOS 7, manually clip it
CGContextAddArc(context, self.circleCenter.x, self.circleCenter.y, self.circleRadius, 0, M_PI * 2.0, YES);
CGContextClip(context);
[self.overlayView.layer renderInContext:context];
}
// capture the image and close the context
UIImage *image = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
// add the circleLayer back
[self.overlayView.layer addSublayer:circleLayer];
// crop the image
CGImageRef imageRef = CGImageCreateWithImageInRect([image CGImage], frame);
UIImage *croppedImage = [UIImage imageWithCGImage:imageRef];
self.takePicture = [UIButton buttonWithType:UIButtonTypeRoundedRect];
self.takePicture.frame = CGRectMake(80.0, self.pickerController.view.frame.size.height-100, 150.0, 30.0);
self.takePicture.tag = 0;
[self.takePicture setTitle:@"Take" forState:UIControlStateNormal];
[self.takePicture addTarget:self action:@selector(takeSnapShot:) forControlEvents:UIControlEventTouchUpInside];
[self.overlayView addSubview: self.takePicture];
self.pickerController.cameraOverlayView = self.overlayView;
[self presentViewController:self.pickerController animated:YES completion:nil];
}
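One common reason for the "reversed" result is the mask path itself: with the default fill rule, the dimmed overlay is kept inside the circle and removed outside it, which is the opposite of what is wanted. Here is a sketch (using the properties from the code above) that instead punches a circular hole in the overlay via the even-odd fill rule:
// Cover the whole overlay, then add the circle as a second subpath.
UIBezierPath *maskPath = [UIBezierPath bezierPathWithRect:self.overlayView.bounds];
[maskPath appendPath:[UIBezierPath bezierPathWithArcCenter:self.circleCenter
                                                    radius:self.circleRadius
                                                startAngle:0
                                                  endAngle:M_PI * 2.0
                                                 clockwise:YES]];
// With the even-odd rule the circle is excluded from the mask, so the dimmed
// overlay is visible everywhere except inside the circle.
self.maskLayer.fillRule = kCAFillRuleEvenOdd;
self.maskLayer.path = maskPath.CGPath;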
I am masking a UIImage in iOS. I want to remove that mask now. How can I achieve it? This is my code for masking the image:
UIColor *header1Color = [UIColor colorWithRed:0.286 green:0.286 blue:0.286 alpha:0.1];
UIImage *img = self.calorie_image.image;
// int width = img.size.width; //308
// int height = img.size.height; //67
UIGraphicsBeginImageContext(img.size);
CGContextRef context = UIGraphicsGetCurrentContext();
[header1Color setFill];
// translate/flip the graphics context (for transforming from CG* coords to UI* coords
CGContextTranslateCTM(context, 0, img.size.height);
CGContextScaleCTM(context, 1.0, -1.0);
// set the blend mode to color burn, and the original image
CGContextSetBlendMode(context, kCGBlendModeColorBurn);
CGRect rect = CGRectMake(0, 0, img.size.width, img.size.height);
CGContextDrawImage(context, rect, img.CGImage);
//calorie_value = 230;
int x = 200;
int y = 0;
int mwidth = 120;
int mheight = 67;
NSString *zone = @"";
NSArray *min = [[NSArray alloc] initWithObjects:@"0", @"1200", @"1800", @"2200", nil];
NSArray *max = [[NSArray alloc] initWithObjects:@"1200", @"1800", @"2200", @"3500", nil];
NSArray *x_values = [[NSArray alloc] initWithObjects:@"42", @"137", @"200", @"320", nil];
NSArray *mwidth_values = [[NSArray alloc] initWithObjects:@"278", @"183", @"120", @"0", nil];
NSArray *zones = [[NSArray alloc] initWithObjects:@"red", @"green", @"orange", @"red", nil];
for (int i=0; i < 4; i++) {
if(calorie_value >= [min[i] integerValue] && calorie_value <= [max[i] integerValue]) {
zone = zones[i];
x = [x_values[i] integerValue];
mwidth = [mwidth_values[i] integerValue];
break;
}
}
if([[DiabetaDbTransaction getToadyCalories] integerValue] > 0) {
CGContextClearRect(context, CGRectMake(x,y,mwidth,mheight));
}
// set a mask that matches the shape of the image, then draw (color burn) a colored rectangle
CGContextClipToMask(context, rect, img.CGImage);
CGContextAddRect(context, rect);
CGContextDrawPath(context,kCGPathFill);
// generate a new UIImage from the graphics context we drew onto
UIImage *coloredImg = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
self.calorie_image.image = coloredImg;
How about storing the original image in an ivar, and when you want to remove the mask, just do the following:
@implementation ViewController {
UIImage *coloredImg_orginal;
}
Before you add the mask to the image, set coloredImg_orginal:
coloredImg_orginal = self.calorie_image.image;
and in the function that removes the mask, just set the original image instead of the masked image:
self.calorie_image.image = coloredImg_orginal;
(Sorry for my bad English.)
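Putting the two pieces together, a small sketch using the coloredImg_orginal ivar from above (the method names are only illustrative):
// Call this once, before running the masking code, so the unmasked image is remembered.
- (void)rememberOriginalCalorieImage {
    if (coloredImg_orginal == nil) {
        coloredImg_orginal = self.calorie_image.image;
    }
}
// Call this to drop the mask and show the original image again.
- (void)removeCalorieMask {
    if (coloredImg_orginal != nil) {
        self.calorie_image.image = coloredImg_orginal;
    }
}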