UIImage masking doesn't work (Swift, iOS 10)

I'm trying to mask an image with a custom mask. I think I'm following the idea correctly, but for some reason the image doesn't get masked. Instead, the masked image produced by the masking step contains the original cropped image, as if the mask wasn't applied.
Here's a Swift playground one can use to test my code (the image and mask are attached; just drop them into the Resources folder):
import UIKit
extension UIImage {
static func resizeImage(image: UIImage, width: CGFloat) -> UIImage {
let scale = width / image.size.width
let newHeight = round(image.size.height * scale)
UIGraphicsBeginImageContextWithOptions(CGSize(width:width, height:newHeight), false, image.scale)
image.draw(in: CGRect(origin: CGPoint(x:0, y:0), size: CGSize(width: width, height: newHeight)))
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage!
}
static func resizeImage(image: UIImage, height: CGFloat) -> UIImage {
let scale = height / image.size.height
let newWidth = round(image.size.width * scale)
UIGraphicsBeginImageContextWithOptions(CGSize(width:newWidth, height:height), false, image.scale)
image.draw(in: CGRect(origin: CGPoint(x:0, y:0), size: CGSize(width: newWidth, height: height)))
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage!
}
}
let image = UIImage(named: "image.jpg")!
var mask = UIImage(named: "mask.jpg")!
let k1 = image.size.width / image.size.height
let k2 = mask.size.width / mask.size.height
if k1 >= k2
{
mask = UIImage.resizeImage(image: mask, height: image.size.height)
}
else
{
mask = UIImage.resizeImage(image: mask, width: image.size.width)
}
image
mask
let center = CGPoint(x: image.size.width/2, y: image.size.height/2)
let croppingRect = CGRect(x: abs(image.size.width-mask.size.width)/2*image.scale,
y: abs(image.size.height-mask.size.height)/2*image.scale,
width: mask.size.width*image.scale,
height: mask.size.height*image.scale).integral
let maskReference = mask.cgImage!
let imageReference = image.cgImage!.cropping(to: croppingRect)!
let imageMask = CGImage(maskWidth: maskReference.width,
height: maskReference.height,
bitsPerComponent: maskReference.bitsPerComponent,
bitsPerPixel: maskReference.bitsPerPixel,
bytesPerRow: maskReference.bytesPerRow,
provider: maskReference.dataProvider!, decode: nil, shouldInterpolate: true)
imageMask?.colorSpace
imageMask?.alphaInfo
let maskedReference = imageReference.masking(imageMask!)
let maskedImage = UIImage(cgImage:maskedReference!, scale: image.scale, orientation: image.imageOrientation)

Swift 4+
let icon = UIImageView(image: YOURIMAGE)
icon.frame = CGRect(x:100, y: 100, width: 100, height: 100)
icon.layer.masksToBounds = true
let maskView = UIImageView()
maskView.image = YOURMASKIMAGE
maskView.frame = icon.bounds
icon.mask = maskView
icon.contentMode = .scaleToFill
icon.clipsToBounds = true
view.addSubview(icon)
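If the masked result is also needed as a UIImage, a minimal sketch that snapshots the on-screen view (this assumes icon has already been added to the view hierarchy as above; UIGraphicsImageRenderer requires iOS 10):
// Snapshot the masked image view back into a UIImage.
let renderer = UIGraphicsImageRenderer(bounds: icon.bounds)
let maskedIcon = renderer.image { _ in
    icon.drawHierarchy(in: icon.bounds, afterScreenUpdates: true)
}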

Related

Saving a UIImage cropped to a CGPath

I am trying to mask a UIImage and then save the masked image. So far, I have got this working when displayed in a UIImageView preview as follows:
let maskLayer = CAShapeLayer()
let maskPath = shape.cgPath
maskLayer.path = maskPath.resized(to: imageView.frame)
maskLayer.fillRule = .evenOdd
imageView.layer.mask = maskLayer
let picture = UIImage(named: "1")!
imageView.contentMode = .scaleAspectFit
imageView.image = picture
where shape is a UIBezierPath().
The resized function is:
extension CGPath {
func resized(to rect: CGRect) -> CGPath {
let boundingBox = self.boundingBox
let boundingBoxAspectRatio = boundingBox.width / boundingBox.height
let viewAspectRatio = rect.width / rect.height
let scaleFactor = boundingBoxAspectRatio > viewAspectRatio ?
rect.width / boundingBox.width :
rect.height / boundingBox.height
let scaledSize = boundingBox.size.applying(CGAffineTransform(scaleX: scaleFactor, y: scaleFactor))
let centerOffset = CGSize(
width: (rect.width - scaledSize.width) / (scaleFactor * 2),
height: (rect.height - scaledSize.height) / (scaleFactor * 2)
)
var transform = CGAffineTransform.identity
.scaledBy(x: scaleFactor, y: scaleFactor)
.translatedBy(x: -boundingBox.minX + centerOffset.width, y: -boundingBox.minY + centerOffset.height)
return copy(using: &transform)!
}
}
So this works in terms of previewing the outcome I'd like. I'd now like to save this modified UIImage to the user's photo album, in its original size (so basically generate it again but don't resize the image to fit a UIImageView - keep it as-is and apply the mask over it).
I have tried this, but it just saves the original image - no path/mask applied:
func getMaskedImage(path: CGPath) {
let picture = UIImage(named: "1")!
UIGraphicsBeginImageContext(picture.size)
if let context = UIGraphicsGetCurrentContext() {
let pathNew = path.resized(to: CGRect(x: 0, y: 0, width: picture.size.width, height: picture.size.height))
context.addPath(pathNew)
context.clip()
picture.draw(in: CGRect(x: 0.0, y: 0.0, width: picture.size.width, height: picture.size.height))
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
UIImageWriteToSavedPhotosAlbum(newImage!, nil, nil, nil)
}
}
What am I doing wrong? Thanks.
Don't throw away your layer-based approach! You can still use that when drawing in a graphics context.
Example:
func getMaskedImage(path: CGPath) -> UIImage? {
let picture = UIImage(named: "my_image")!
let imageLayer = CALayer()
imageLayer.frame = CGRect(origin: .zero, size: picture.size)
imageLayer.contents = picture.cgImage
let maskLayer = CAShapeLayer()
let maskPath = path.resized(to: CGRect(origin: .zero, size: picture.size))
maskLayer.path = maskPath
maskLayer.fillRule = .evenOdd
imageLayer.mask = maskLayer
UIGraphicsBeginImageContext(picture.size)
defer { UIGraphicsEndImageContext() }
if let context = UIGraphicsGetCurrentContext() {
imageLayer.render(in: context)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
return newImage
}
return nil
}
Note that this doesn't actually resize the image. If you want the image resized, you should get the boundingBox of the resized path. Then do this instead:
// create a context as big as the bounding box
UIGraphicsBeginImageContext(boundingBox.size)
defer { UIGraphicsEndImageContext() }
if let context = UIGraphicsGetCurrentContext() {
// move the context to the top left of the path
context.translateBy(x: -boundingBox.origin.x, y: -boundingBox.origin.y)
imageLayer.render(in: context)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
return newImage
}
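Putting those pieces together, a sketch of a cropped variant might look like the following (it reuses the resized(to:) extension from the question; "my_image" is a placeholder asset name):
func getMaskedAndCroppedImage(path: CGPath) -> UIImage? {
    let picture = UIImage(named: "my_image")!   // placeholder asset name
    let imageLayer = CALayer()
    imageLayer.frame = CGRect(origin: .zero, size: picture.size)
    imageLayer.contents = picture.cgImage
    let maskLayer = CAShapeLayer()
    let maskPath = path.resized(to: CGRect(origin: .zero, size: picture.size))
    maskLayer.path = maskPath
    maskLayer.fillRule = .evenOdd
    imageLayer.mask = maskLayer
    // bounding box of the resized path, used to size and offset the context
    let boundingBox = maskPath.boundingBox
    UIGraphicsBeginImageContext(boundingBox.size)
    defer { UIGraphicsEndImageContext() }
    if let context = UIGraphicsGetCurrentContext() {
        // shift the context so the path's top-left lands at the origin
        context.translateBy(x: -boundingBox.origin.x, y: -boundingBox.origin.y)
        imageLayer.render(in: context)
        return UIGraphicsGetImageFromCurrentImageContext()
    }
    return nil
}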

Swift: CGImage.masking returns nil

Why might the CGImage.masking(_:) method return nil? I can't find any documentation on this. For some images my code returns nil:
extension UIImage {
func maskedImage(mask:(UIImage))->UIImage{
let croppingRect = CGRect(x: abs(self.size.width-mask.size.width)/2*self.scale,
y: abs(self.size.height-mask.size.height)/2*self.scale,
width: mask.size.width*self.scale,
height: mask.size.height*self.scale).integral
let maskReference = mask.cgImage!
let imageReference = self.cgImage!.cropping(to: croppingRect)!
let imageMask = CGImage(maskWidth: maskReference.width,
height: maskReference.height,
bitsPerComponent: maskReference.bitsPerComponent,
bitsPerPixel: maskReference.bitsPerPixel,
bytesPerRow: maskReference.bytesPerRow,
provider: maskReference.dataProvider!, decode: nil, shouldInterpolate: true)
let maskedReference = imageReference.masking(imageMask!)
// maskedReference is nil so the next line crashes
let maskedImage = UIImage(cgImage:maskedReference!, scale: self.scale, orientation: self.imageOrientation)
return maskedImage
}
}
UPDATE
This playground code with the exact same image and mask does not crash, though it doesn't mask the image properly:
import UIKit
extension UIImage {
static func resizeImage(image: UIImage, width: CGFloat) -> UIImage {
let scale = width / image.size.width
let newHeight = round(image.size.height * scale)
UIGraphicsBeginImageContextWithOptions(CGSize(width:width, height:newHeight), false, image.scale)
image.draw(in: CGRect(origin: CGPoint(x:0, y:0), size: CGSize(width: width, height: newHeight)))
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage!
}
static func resizeImage(image: UIImage, height: CGFloat) -> UIImage {
let scale = height / image.size.height
let newWidth = round(image.size.width * scale)
UIGraphicsBeginImageContextWithOptions(CGSize(width:newWidth, height:height), false, image.scale)
image.draw(in: CGRect(origin: CGPoint(x:0, y:0), size: CGSize(width: newWidth, height: height)))
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage!
}
}
let image = UIImage(named: "image.jpg")!
var mask = UIImage(named: "mask.jpg")!
let k1 = image.size.width / image.size.height
let k2 = mask.size.width / mask.size.height
if k1 >= k2
{
mask = UIImage.resizeImage(image: mask, height: image.size.height)
}
else
{
mask = UIImage.resizeImage(image: mask, width: image.size.width)
}
image
mask
let center = CGPoint(x: image.size.width/2, y: image.size.height/2)
let croppingRect = CGRect(x: abs(image.size.width-mask.size.width)/2*image.scale,
y: abs(image.size.height-mask.size.height)/2*image.scale,
width: mask.size.width*image.scale,
height: mask.size.height*image.scale).integral
let maskReference = mask.cgImage!
let imageReference = image.cgImage!.cropping(to: croppingRect)!
let imageMask = CGImage(maskWidth: maskReference.width,
height: maskReference.height,
bitsPerComponent: maskReference.bitsPerComponent,
bitsPerPixel: maskReference.bitsPerPixel,
bytesPerRow: maskReference.bytesPerRow,
provider: maskReference.dataProvider!, decode: nil, shouldInterpolate: true)
imageMask?.colorSpace
imageMask?.alphaInfo
let maskedReference = imageReference.masking(imageMask!)
let maskedImage = UIImage(cgImage:maskedReference!, scale: image.scale, orientation: image.imageOrientation)
image:
mask:
The only difference between the playground code and the real code is that the image is fetched from the network and initialized with UIImage(data: imageData, scale: UIScreen.main.scale).
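One thing worth checking in the real code: Core Graphics expects a mask image passed to masking(_:) to be in the DeviceGray color space with no alpha channel. A minimal sketch, assuming the format of the downloaded image or mask is the culprit, that redraws a UIImage into such a bitmap:
// Redraw a UIImage into an 8-bit DeviceGray bitmap with no alpha,
// the format expected of a mask image used with CGImage.masking(_:).
func grayscaleMask(from image: UIImage) -> CGImage? {
    guard let source = image.cgImage else { return nil }
    let colorSpace = CGColorSpaceCreateDeviceGray()
    guard let context = CGContext(data: nil,
                                  width: source.width,
                                  height: source.height,
                                  bitsPerComponent: 8,
                                  bytesPerRow: 0,
                                  space: colorSpace,
                                  bitmapInfo: CGImageAlphaInfo.none.rawValue) else { return nil }
    context.draw(source, in: CGRect(x: 0, y: 0, width: source.width, height: source.height))
    return context.makeImage()
}
The resulting CGImage (or a CGImage(maskWidth:...) built from its data provider) can then be passed to masking(_:).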

Resize and Crop 2 Images affected the original image quality

Suppose I have a UIImage object on a UIViewController, and I want to set the image from the controller. Basically what I want to do is merge two images together. The first image is the 5 stars in blue:
and the second image is the 5 stars in grey:
It's intended for a rating image. Since the maximum rating is 5, I multiply it by 20 to get a 100-point scale, which makes the calculation easier. Please see the code for the detailed logic.
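For example, with width = 100 and rating = 3.7 (the values used in the call further below), ratingImageWidth = (100 / 100) * (3.7 * 20) = 74, so the active stars cover the left 74 points and the inactive stars cover the remaining 26.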
So I have this (BM_RatingHelper.swift) :
static func getRatingImageBasedOnRating(rating: CGFloat, width: CGFloat, height: CGFloat) -> UIImage {
// available maximum rating is 5.0, so we have to multiply it by 20 to achieve 100.0 point
let ratingImageWidth = ( width / 100.0 ) * ( rating * 20.0 )
// get active rating image
let activeRatingImage = BM_ImageHelper.resize(UIImage(named: "StarRatingFullActive")!, targetSize: CGSize(width: width, height: height))
let activeRatingImageView = UIImageView(frame: CGRectMake(0, 0, ratingImageWidth, height));
activeRatingImageView.image = BM_ImageHelper.crop(activeRatingImage, x: 0, y: 0, width: ratingImageWidth, height: height);
// get inactive rating image
let inactiveRatingImage = BM_ImageHelper.resize(UIImage(named: "StarRatingFullInactive")!, targetSize: CGSize(width: width, height: height))
let inactiveRatingImageView = UIImageView(frame: CGRectMake(ratingImageWidth, 0, ( 100.0 - ratingImageWidth ), height));
inactiveRatingImageView.image = BM_ImageHelper.crop(inactiveRatingImage, x: ratingImageWidth, y: 0, width: ( 100.0 - ratingImageWidth ), height: height);
// combine the images
let ratingView = UIView.init(frame: CGRect(x: 0, y: 0, width: width, height: height))
ratingView.backgroundColor = BM_Color.colorForType(BM_ColorType.ColorWhiteTransparent)
ratingView.addSubview(activeRatingImageView)
ratingView.addSubview(inactiveRatingImageView)
return ratingView.capture()
}
The BM_ImageHelper.swift :
import UIKit
class BM_ImageHelper: NSObject {
// http://stackoverflow.com/questions/158914/cropping-an-uiimage
static func crop(image: UIImage, x: CGFloat, y: CGFloat, width: CGFloat, height: CGFloat) -> UIImage {
let rect = CGRect(x: x, y: y, width: width, height: height)
let imageRef = CGImageCreateWithImageInRect(image.CGImage, rect)!
let croppedImage = UIImage(CGImage: imageRef)
return croppedImage
}
// http://iosdevcenters.blogspot.com/2015/12/how-to-resize-image-in-swift-in-ios.html
static func resize(image: UIImage, targetSize: CGSize) -> UIImage {
let size = image.size
let widthRatio = targetSize.width / image.size.width
let heightRatio = targetSize.height / image.size.height
// Figure out what our orientation is, and use that to form the rectangle
var newSize: CGSize
if(widthRatio > heightRatio) {
newSize = CGSizeMake(size.width * heightRatio, size.height * heightRatio)
} else {
newSize = CGSizeMake(size.width * widthRatio, size.height * widthRatio)
}
// This is the rect that we've calculated out and this is what is actually used below
let rect = CGRectMake(0, 0, newSize.width, newSize.height)
// Actually do the resizing to the rect using the ImageContext stuff
UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
image.drawInRect(rect)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage
}
}
extension UIView {
// http://stackoverflow.com/a/34895760/897733
func capture() -> UIImage {
UIGraphicsBeginImageContextWithOptions(self.frame.size, self.opaque, UIScreen.mainScreen().scale)
self.layer.renderInContext(UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return image
}
}
I call that function like this (assuming the image that needs to be filled is ratingImage):
self.ratingImage.image =
BM_RatingHelper.getRatingImageBasedOnRating(3.7, width: 100.0, height: 20.0)
The code works, but the merged image is very low quality even though I used high-quality source images. This is the image for a 3.7 rating:
What should I do to merge the images without losing the original quality? Thanks.
In your BM_ImageHelper.resize method you're passing a scale of 1.0. It should be the device's screen scale.
Change it to:
UIGraphicsBeginImageContextWithOptions(newSize, false, UIScreen.mainScreen().scale)
UPDATE
Also change your crop method to account for the scale, like this:
static func crop(image: UIImage, x: CGFloat, y: CGFloat, width: CGFloat, height: CGFloat) -> UIImage {
let transform = CGAffineTransformMakeScale(image.scale, image.scale)
let rect = CGRect(x: x, y: y, width: width, height: height)
let transformedCropRect = CGRectApplyAffineTransform(rect, transform);
let imageRef = CGImageCreateWithImageInRect(image.CGImage, transformedCropRect)!
let croppedImage = UIImage(CGImage: imageRef, scale: image.scale, orientation: image.imageOrientation)
return croppedImage
}
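As an aside, passing 0 for the scale parameter of UIGraphicsBeginImageContextWithOptions makes UIKit substitute the device's main screen scale automatically:
// Equivalent to passing the main screen's scale on the device.
UIGraphicsBeginImageContextWithOptions(newSize, false, 0)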

How to merge two UIImages?

I am trying to merge two different images to create a new one. This is the way I would like to do it:
I have this image (A):
It's a PNG image and I would like to merge this one with another image (B) which I took from the phone to create something like this:
I need a function that merges A with B, creating C. The size must remain that of image A, and image B should automatically adapt its size to fit into the polaroid (A). Is it possible to do that? Thanks for your help!
UPDATE
Just one thing: image (A) is square and the image I took is 16:9. How can I fix that? If I use your function, the image (B) that I took becomes stretched!
Hope this may help you,
var bottomImage = UIImage(named: "bottom.png")
var topImage = UIImage(named: "top.png")
var size = CGSize(width: 300, height: 300)
UIGraphicsBeginImageContext(size)
let areaSize = CGRect(x: 0, y: 0, width: size.width, height: size.height)
bottomImage!.draw(in: areaSize)
topImage!.draw(in: areaSize, blendMode: .normal, alpha: 0.8)
var newImage:UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
All the Best :)
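For the 16:9 photo getting stretched into the square polaroid (see the UPDATE in the question), one option is to compute an aspect-fit rect for image B and draw it into that rect instead of areaSize. A minimal sketch; photoArea below is an assumed placeholder for the polaroid's photo region:
// Scale-to-fit rect for an image inside a target rectangle, centred within it.
func aspectFitRect(for imageSize: CGSize, in target: CGRect) -> CGRect {
    let scale = min(target.width / imageSize.width, target.height / imageSize.height)
    let fittedSize = CGSize(width: imageSize.width * scale, height: imageSize.height * scale)
    let origin = CGPoint(x: target.midX - fittedSize.width / 2,
                         y: target.midY - fittedSize.height / 2)
    return CGRect(origin: origin, size: fittedSize)
}
// e.g. topImage!.draw(in: aspectFitRect(for: topImage!.size, in: photoArea))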
Swift 5: Extension for UIImage
extension UIImage {
func mergeWith(topImage: UIImage) -> UIImage {
let bottomImage = self
UIGraphicsBeginImageContext(size)
let areaSize = CGRect(x: 0, y: 0, width: bottomImage.size.width, height: bottomImage.size.height)
bottomImage.draw(in: areaSize)
topImage.draw(in: areaSize, blendMode: .normal, alpha: 1.0)
let mergedImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return mergedImage
}
}
Swift 4 UIImage extension that enables easy image merging / overlaying.
extension UIImage {
func overlayWith(image: UIImage, posX: CGFloat, posY: CGFloat) -> UIImage {
let newWidth = size.width < posX + image.size.width ? posX + image.size.width : size.width
let newHeight = size.height < posY + image.size.height ? posY + image.size.height : size.height
let newSize = CGSize(width: newWidth, height: newHeight)
UIGraphicsBeginImageContextWithOptions(newSize, false, 0.0)
draw(in: CGRect(origin: CGPoint.zero, size: size))
image.draw(in: CGRect(origin: CGPoint(x: posX, y: posY), size: image.size))
let newImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return newImage
}
}
This way the overlay picture will be much cleaner:
class func mergeImages(imageView: UIImageView) -> UIImage {
UIGraphicsBeginImageContextWithOptions(imageView.frame.size, false, 0.0)
imageView.superview!.layer.renderInContext(UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return image
}
Objective-C version of this solution, with logic to re-center the top image:
-(UIImage *)getImageInclosedWithinAnotherImage
{
float innerImageSize = 20;
UIImage *finalImage;
UIImage *outerImage = [UIImage imageNamed:@"OuterImage.png"];
UIImage *innerImage = [UIImage imageNamed:@"InnerImage.png"];
CGSize outerImageSize = CGSizeMake(40, 40); // Provide custom size or size of your actual image
UIGraphicsBeginImageContext(outerImageSize);
//calculate areaSize for re-centered inner image
CGRect areSize = CGRectMake(((outerImageSize.width/2) - (innerImageSize/2)), ((outerImageSize.width/2) - (innerImageSize/2)), innerImageSize, innerImageSize);
[outerImage drawInRect:CGRectMake(0, 0, outerImageSize.width, outerImageSize.height)];
[innerImage drawInRect:areSize blendMode:kCGBlendModeNormal alpha:1.0];
finalImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return finalImage;
}
The upvoted answer stretches the background image, changing its aspect ratio. The solution below fixes that by rendering the image from a UIView that contains the two image views as subviews.
ANSWER YOU ARE LOOKING FOR (Swift 4):
func blendImages(_ img: UIImage,_ imgTwo: UIImage) -> Data? {
let bottomImage = img
let topImage = imgTwo
let imgView = UIImageView(frame: CGRect(x: 0, y: 0, width: 306, height: 306))
let imgView2 = UIImageView(frame: CGRect(x: 0, y: 0, width: 306, height: 306))
// - Set Content mode to what you desire
imgView.contentMode = .scaleAspectFill
imgView2.contentMode = .scaleAspectFit
// - Set Images
imgView.image = bottomImage
imgView2.image = topImage
// - Create UIView
let contentView = UIView(frame: CGRect(x: 0, y: 0, width: 306, height: 306))
contentView.addSubview(imgView)
contentView.addSubview(imgView2)
// - Set Size
let size = CGSize(width: 306, height: 306)
// - Where the magic happens
UIGraphicsBeginImageContextWithOptions(size, true, 0)
contentView.drawHierarchy(in: contentView.bounds, afterScreenUpdates: true)
guard let i = UIGraphicsGetImageFromCurrentImageContext(),
let data = UIImageJPEGRepresentation(i, 1.0)
else {return nil}
UIGraphicsEndImageContext()
return data
}
The returned image data doubles the size of the image, so set the size of the views to half the desired size.
EXAMPLE: I wanted the width and height of the image to be 612, so I set the view frames' width and height to 306.
// Enjoy :)
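A hypothetical usage example (the asset names and output location below are placeholders):
if let bottom = UIImage(named: "polaroid"),   // placeholder asset name
   let top = UIImage(named: "photo"),         // placeholder asset name
   let data = blendImages(bottom, top) {
    // Write the JPEG data somewhere, e.g. a temporary file.
    let url = FileManager.default.temporaryDirectory.appendingPathComponent("merged.jpg")
    try? data.write(to: url)
}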
Slightly modified version of answer by budidino. This implementation also handles negative posX and posY correctly.
extension UIImage {
func overlayWith(image: UIImage, posX: CGFloat, posY: CGFloat) -> UIImage {
let newWidth = posX < 0 ? abs(posX) + max(self.size.width, image.size.width) :
size.width < posX + image.size.width ? posX + image.size.width : size.width
let newHeight = posY < 0 ? abs(posY) + max(size.height, image.size.height) :
size.height < posY + image.size.height ? posY + image.size.height : size.height
let newSize = CGSize(width: newWidth, height: newHeight)
UIGraphicsBeginImageContextWithOptions(newSize, false, 0.0)
let originalPoint = CGPoint(x: posX < 0 ? abs(posX) : 0, y: posY < 0 ? abs(posY) : 0)
self.draw(in: CGRect(origin: originalPoint, size: self.size))
let overLayPoint = CGPoint(x: posX < 0 ? 0 : posX, y: posY < 0 ? 0 : posY)
image.draw(in: CGRect(origin: overLayPoint, size: image.size))
let newImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return newImage
}
}

How can I color a UIImage in Swift?

I have an image called arrowWhite. I want to colour this image black.
func attachDropDownArrow() -> NSMutableAttributedString {
let image:UIImage = UIImage(named: "arrowWhite.png")!
let attachment = NSTextAttachment()
attachment.image = image
attachment.bounds = CGRectMake(2.25, 2, attachment.image!.size.width - 2.25, attachment.image!.size.height - 2.25)
let attachmentString = NSAttributedString(attachment: attachment)
let myString = NSMutableAttributedString(string: NSString(format: "%@", self.privacyOptions[selectedPickerRow]) as String)
myString.appendAttributedString(attachmentString)
return myString
}
I want to get this image in black colour.
tintColor is not working...
Swift 4 and 5
extension UIImageView {
func setImageColor(color: UIColor) {
let templateImage = self.image?.withRenderingMode(.alwaysTemplate)
self.image = templateImage
self.tintColor = color
}
}
Call like this:
let imageView = UIImageView(image: UIImage(named: "your_image_name"))
imageView.setImageColor(color: UIColor.purple)
Alternative
For Swift 3, 4 or 5
extension UIImage {
func maskWithColor(color: UIColor) -> UIImage? {
let maskImage = cgImage!
let width = size.width
let height = size.height
let bounds = CGRect(x: 0, y: 0, width: width, height: height)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue)
let context = CGContext(data: nil, width: Int(width), height: Int(height), bitsPerComponent: 8, bytesPerRow: 0, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)!
context.clip(to: bounds, mask: maskImage)
context.setFillColor(color.cgColor)
context.fill(bounds)
if let cgImage = context.makeImage() {
let coloredImage = UIImage(cgImage: cgImage)
return coloredImage
} else {
return nil
}
}
}
For Swift 2.3
extension UIImage {
func maskWithColor(color: UIColor) -> UIImage? {
let maskImage = self.CGImage
let width = self.size.width
let height = self.size.height
let bounds = CGRectMake(0, 0, width, height)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedLast.rawValue)
let bitmapContext = CGBitmapContextCreate(nil, Int(width), Int(height), 8, 0, colorSpace, bitmapInfo.rawValue) //needs rawValue of bitmapInfo
CGContextClipToMask(bitmapContext, bounds, maskImage)
CGContextSetFillColorWithColor(bitmapContext, color.CGColor)
CGContextFillRect(bitmapContext, bounds)
//is it nil?
if let cImage = CGBitmapContextCreateImage(bitmapContext) {
let coloredImage = UIImage(CGImage: cImage)
return coloredImage
} else {
return nil
}
}
}
Call like this:
let image = UIImage(named: "your_image_name")
testImage.image = image?.maskWithColor(color: UIColor.blue)
There's a built in method to obtain a UIImage that is automatically rendered in template mode. This uses a view's tintColor to color the image:
let templateImage = originalImage.imageWithRenderingMode(UIImageRenderingModeAlwaysTemplate)
myImageView.image = templateImage
myImageView.tintColor = UIColor.orangeColor()
First you have to change the rendering property of the image to "Template Image" in the .xcassets folder.
You can then just change the tint color property of the instance of your UIImageView like so:
imageView.tintColor = UIColor.whiteColor()
I ended up with this because other answers either lose resolution or work with UIImageView, not UIImage, or contain unnecessary actions:
Swift 3
extension UIImage {
public func mask(with color: UIColor) -> UIImage {
UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
let context = UIGraphicsGetCurrentContext()!
let rect = CGRect(origin: CGPoint.zero, size: size)
color.setFill()
self.draw(in: rect)
context.setBlendMode(.sourceIn)
context.fill(rect)
let resultImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return resultImage
}
}
This function uses core graphics to achieve this.
func overlayImage(color: UIColor) -> UIImage {
UIGraphicsBeginImageContextWithOptions(self.size, false, UIScreen.main.scale)
let context = UIGraphicsGetCurrentContext()
color.setFill()
context!.translateBy(x: 0, y: self.size.height)
context!.scaleBy(x: 1.0, y: -1.0)
context!.setBlendMode(CGBlendMode.colorBurn)
let rect = CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height)
context!.draw(self.cgImage!, in: rect)
context!.setBlendMode(CGBlendMode.sourceIn)
context!.addRect(rect)
context!.drawPath(using: CGPathDrawingMode.fill)
let coloredImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return coloredImage
}
For Swift 4.2, to change a UIImage's color to whatever you want (solid color):
extension UIImage {
func imageWithColor(color: UIColor) -> UIImage {
UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
color.setFill()
let context = UIGraphicsGetCurrentContext()
context?.translateBy(x: 0, y: self.size.height)
context?.scaleBy(x: 1.0, y: -1.0)
context?.setBlendMode(CGBlendMode.normal)
let rect = CGRect(origin: .zero, size: CGSize(width: self.size.width, height: self.size.height))
context?.clip(to: rect, mask: self.cgImage!)
context?.fill(rect)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage!
}
}
How to use
self.imgVw.image = UIImage(named: "testImage")?.imageWithColor(UIColor.red)
I found the solution by H R to be most helpful but adapted it slightly for Swift 3
extension UIImage {
func maskWithColor( color:UIColor) -> UIImage {
UIGraphicsBeginImageContextWithOptions(self.size, false, UIScreen.main.scale)
let context = UIGraphicsGetCurrentContext()!
color.setFill()
context.translateBy(x: 0, y: self.size.height)
context.scaleBy(x: 1.0, y: -1.0)
let rect = CGRect(x: 0.0, y: 0.0, width: self.size.width, height: self.size.height)
context.draw(self.cgImage!, in: rect)
context.setBlendMode(CGBlendMode.sourceIn)
context.addRect(rect)
context.drawPath(using: CGPathDrawingMode.fill)
let coloredImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return coloredImage!
}
}
This takes into consideration scale and also does not produce a lower res image like some other solutions.
Usage :
image = image.maskWithColor(color: .green )
Create an extension on UIImage:
/// UIImage Extensions
extension UIImage {
func maskWithColor(color: UIColor) -> UIImage {
var maskImage = self.CGImage
let width = self.size.width
let height = self.size.height
let bounds = CGRectMake(0, 0, width, height)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(CGImageAlphaInfo.PremultipliedLast.rawValue)
let bitmapContext = CGBitmapContextCreate(nil, Int(width), Int(height), 8, 0, colorSpace, bitmapInfo)
CGContextClipToMask(bitmapContext, bounds, maskImage)
CGContextSetFillColorWithColor(bitmapContext, color.CGColor)
CGContextFillRect(bitmapContext, bounds)
let cImage = CGBitmapContextCreateImage(bitmapContext)
let coloredImage = UIImage(CGImage: cImage)
return coloredImage!
}
}
Then you can use it like that:
image.maskWithColor(UIColor.redColor())
For iOS 13+ there are withTintColor(_:) and withTintColor(_:renderingMode:) methods.
Example usage:
let newImage = oldImage.withTintColor(.red)
or
let newImage = oldImage.withTintColor(.red, renderingMode: .alwaysTemplate)
Swift 3 extension wrapper from @Nikolai Ruhe's answer.
extension UIImageView {
func maskWith(color: UIColor) {
guard let tempImage = image?.withRenderingMode(.alwaysTemplate) else { return }
image = tempImage
tintColor = color
}
}
It can be used for UIButton as well, e.g.:
button.imageView?.maskWith(color: .blue)
Add this extension to your code and change the image color in the storyboard itself.
Swift 4 & 5:
extension UIImageView {
@IBInspectable
var changeColor: UIColor? {
get {
let color = UIColor(cgColor: layer.borderColor!);
return color
}
set {
let templateImage = self.image?.withRenderingMode(.alwaysTemplate)
self.image = templateImage
self.tintColor = newValue
}
}
}
Storyboard Preview:
Swift 4
let image: UIImage? = #imageLiteral(resourceName: "logo-1").withRenderingMode(.alwaysTemplate)
topLogo.image = image
topLogo.tintColor = UIColor.white
Simpleminded way:
yourIcon.image = yourIcon.image?.withRenderingMode(.alwaysTemplate)
yourIcon.tintColor = .someColor
BTW it's more fun on Android!
yourIcon.setColorFilter(getColor(R.color.someColor), PorterDuff.Mode.MULTIPLY);
Add extension Function:
extension UIImageView {
func setImage(named: String, color: UIColor) {
self.image = #imageLiteral(resourceName: named).withRenderingMode(.alwaysTemplate)
self.tintColor = color
}
}
Use like:
anyImageView.setImage(named: "image_name", color: .red)
From iOS 13 you can use something like this:
arrowWhiteImage.withTintColor(.black, renderingMode: .alwaysTemplate)
Swift 3
21 June 2017
I use CALayer to mask the given image using its alpha channel.
import Foundation
extension UIImage {
func maskWithColor(color: UIColor) -> UIImage? {
let maskLayer = CALayer()
maskLayer.bounds = CGRect(x: 0, y: 0, width: size.width, height: size.height)
maskLayer.backgroundColor = color.cgColor
maskLayer.doMask(by: self)
let maskImage = maskLayer.toImage()
return maskImage
}
}
extension CALayer {
func doMask(by imageMask: UIImage) {
let maskLayer = CAShapeLayer()
maskLayer.bounds = CGRect(x: 0, y: 0, width: imageMask.size.width, height: imageMask.size.height)
bounds = maskLayer.bounds
maskLayer.contents = imageMask.cgImage
maskLayer.frame = CGRect(x: 0, y: 0, width: frame.size.width, height: frame.size.height)
mask = maskLayer
}
func toImage() -> UIImage?
{
UIGraphicsBeginImageContextWithOptions(bounds.size,
isOpaque,
UIScreen.main.scale)
guard let context = UIGraphicsGetCurrentContext() else {
UIGraphicsEndImageContext()
return nil
}
render(in: context)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return image
}
}
Swift 3 version with scale and orientation, from @kuzdu's answer
extension UIImage {
func mask(_ color: UIColor) -> UIImage? {
let maskImage = cgImage!
let width = (cgImage?.width)!
let height = (cgImage?.height)!
let bounds = CGRect(x: 0, y: 0, width: width, height: height)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue)
let context = CGContext(data: nil, width: Int(width), height: Int(height), bitsPerComponent: 8, bytesPerRow: 0, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)!
context.clip(to: bounds, mask: maskImage)
context.setFillColor(color.cgColor)
context.fill(bounds)
if let cgImage = context.makeImage() {
let coloredImage = UIImage.init(cgImage: cgImage, scale: scale, orientation: imageOrientation)
return coloredImage
} else {
return nil
}
}
}
Swift 4.
Use this extension to create a solid colored image
extension UIImage {
public func coloredImage(color: UIColor) -> UIImage? {
return coloredImage(color: color, size: CGSize(width: 1, height: 1))
}
public func coloredImage(color: UIColor, size: CGSize) -> UIImage? {
UIGraphicsBeginImageContextWithOptions(size, false, 0)
color.setFill()
UIRectFill(CGRect(origin: CGPoint(), size: size))
guard let image = UIGraphicsGetImageFromCurrentImageContext() else { return nil }
UIGraphicsEndImageContext()
return image
}
}
Here is a Swift 3 version of H R's solution.
func overlayImage(color: UIColor) -> UIImage? {
UIGraphicsBeginImageContextWithOptions(self.size, false, UIScreen.main.scale)
let context = UIGraphicsGetCurrentContext()
color.setFill()
context!.translateBy(x: 0, y: self.size.height)
context!.scaleBy(x: 1.0, y: -1.0)
context!.setBlendMode(CGBlendMode.colorBurn)
let rect = CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height)
context!.draw(self.cgImage!, in: rect)
context!.setBlendMode(CGBlendMode.sourceIn)
context!.addRect(rect)
context!.drawPath(using: CGPathDrawingMode.fill)
let coloredImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return coloredImage
}
I found Darko's answer very helpful in colorizing custom pins for mapView annotations, but had to do some conversions for Swift 3, so I thought I'd share the updated code along with my recommendation for his answer:
extension UIImage {
func maskWithColor(color: UIColor) -> UIImage {
let maskImage = self.cgImage
let width = self.size.width
let height = self.size.height
let bounds = CGRect(x: 0, y: 0, width: width, height: height)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue)
let bitmapContext = CGContext(data: nil, width: Int(width), height: Int(height), bitsPerComponent: 8, bytesPerRow: 0, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)
bitmapContext!.clip(to: bounds, mask: maskImage!)
bitmapContext!.setFillColor(color.cgColor)
bitmapContext!.fill(bounds)
let cImage = bitmapContext!.makeImage()
let coloredImage = UIImage(cgImage: cImage!)
return coloredImage
}
}
I have modified the extension found here (GitHub Gist) for Swift 3, and have tested it in the context of an extension on UIImage.
func tint(with color: UIColor) -> UIImage
{
UIGraphicsBeginImageContext(self.size)
guard let context = UIGraphicsGetCurrentContext() else { return self }
// flip the image
context.scaleBy(x: 1.0, y: -1.0)
context.translateBy(x: 0.0, y: -self.size.height)
// multiply blend mode
context.setBlendMode(.multiply)
let rect = CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height)
context.clip(to: rect, mask: self.cgImage!)
color.setFill()
context.fill(rect)
// create UIImage
guard let newImage = UIGraphicsGetImageFromCurrentImageContext() else { return self }
UIGraphicsEndImageContext()
return newImage
}
