How to apply scale when drawing and composing UIImage - ios

I have the following functions.
extension UIImage
{
    var width: CGFloat
    {
        return size.width
    }

    var height: CGFloat
    {
        return size.height
    }

    private static func circularImage(diameter: CGFloat, color: UIColor) -> UIImage
    {
        UIGraphicsBeginImageContextWithOptions(CGSize(width: diameter, height: diameter), false, 0)
        let context = UIGraphicsGetCurrentContext()!
        context.saveGState()

        let rect = CGRect(x: 0, y: 0, width: diameter, height: diameter)
        context.setFillColor(color.cgColor)
        context.fillEllipse(in: rect)
        context.restoreGState()

        let image = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return image
    }

    private func addCentered(image: UIImage, tintColor: UIColor) -> UIImage
    {
        let topImage = image.withTintColor(tintColor, renderingMode: .alwaysTemplate)
        let bottomImage = self

        UIGraphicsBeginImageContext(size)

        let bottomRect = CGRect(x: 0, y: 0, width: bottomImage.width, height: bottomImage.height)
        bottomImage.draw(in: bottomRect)

        let topRect = CGRect(x: (bottomImage.width - topImage.width) / 2.0,
                             y: (bottomImage.height - topImage.height) / 2.0,
                             width: topImage.width,
                             height: topImage.height)
        topImage.draw(in: topRect, blendMode: .normal, alpha: 1.0)

        let mergedImage = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return mergedImage
    }
}
They work fine, but how do I properly apply UIScreen.main.scale to support retina screens?
I've looked at what's been done here but can't figure it out yet.
Any ideas?

Accessing UIScreen.main.scale itself is a bit problematic, since you may only access it from the main thread (while you usually want to put heavier image processing on a background thread). So I suggest one of these ways instead.
First of all, you can replace UIGraphicsBeginImageContext(size) with

UIGraphicsBeginImageContextWithOptions(size, false, 0.0)

The last argument (0.0) is the scale; per the docs, "if you specify a value of 0.0, the scale factor is set to the scale factor of the device’s main screen."
If instead you want to retain the original image's scale in the resulting UIImage, you can do this: after topImage.draw, instead of getting the UIImage with UIGraphicsGetImageFromCurrentImageContext, get a CGImage with

let cgImage = context.makeImage()!

(where context is the result of UIGraphicsGetCurrentContext()), and then construct the UIImage with the scale and orientation of the original image (as opposed to the defaults):

let mergedImage = UIImage(
    cgImage: cgImage,
    scale: image.scale,
    orientation: image.imageOrientation)
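
For reference, here is how addCentered might look with the first suggestion applied (a sketch, not the asker's final code):

private func addCentered(image: UIImage, tintColor: UIColor) -> UIImage
{
    let topImage = image.withTintColor(tintColor, renderingMode: .alwaysTemplate)
    let bottomImage = self

    // Passing 0.0 as the scale makes the context use the main screen's scale.
    UIGraphicsBeginImageContextWithOptions(size, false, 0.0)

    bottomImage.draw(in: CGRect(x: 0, y: 0, width: bottomImage.width, height: bottomImage.height))

    let topRect = CGRect(x: (bottomImage.width - topImage.width) / 2.0,
                         y: (bottomImage.height - topImage.height) / 2.0,
                         width: topImage.width,
                         height: topImage.height)
    topImage.draw(in: topRect, blendMode: .normal, alpha: 1.0)

    let mergedImage = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext()
    return mergedImage
}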

Related

Using UIGraphicsImageRenderer to resize UIImage to fixed pixel size

func thumbImage(image: UIImage) -> UIImage {
    let cgSize: CGSize = CGSize(width: 100, height: 100)
    let thumb = UIGraphicsImageRenderer(size: cgSize)
    return thumb.image { _ in
        image.draw(in: CGRect(origin: .zero, size: cgSize))
    }
}
The final image is 300x300 pixels (the renderer defaults to the device's screen scale, 3x here, so a 100-point context produces 300 pixels).
I would like, no matter the iPhone's screen resolution, the image to be 100x100 pixels (it is a square image, of course).
How do I modify this code to achieve this result?
(I'm open to alternate ways of achieving this.)
func thumbImage(image: UIImage, pxWidth: Int, pxHeight: Int) -> UIImage {
    let cgSize = CGSize(width: pxWidth, height: pxHeight)
    let rect = CGRect(x: 0, y: 0, width: pxWidth, height: pxHeight)

    UIGraphicsBeginImageContextWithOptions(cgSize, false, 1.0)
    image.draw(in: rect)
    let thumb = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()

    let compressedThumb = thumb!.jpegData(compressionQuality: 0.70)
    return UIImage(data: compressedThumb!)!
}
This alternative with UIGraphicsBeginImageContextWithOptions works and keeps the code as short as the initial one. (I also added some compression and conversion code).
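
If you would rather keep UIGraphicsImageRenderer, a renderer format with an explicit scale of 1 should produce the same fixed pixel size (a sketch along the lines of the accepted approach, not code from the original answer):

func thumbImage(image: UIImage, pxWidth: Int, pxHeight: Int) -> UIImage {
    // A scale of 1 means one point per pixel, regardless of the screen's scale.
    let format = UIGraphicsImageRendererFormat()
    format.scale = 1

    let size = CGSize(width: pxWidth, height: pxHeight)
    let renderer = UIGraphicsImageRenderer(size: size, format: format)
    return renderer.image { _ in
        image.draw(in: CGRect(origin: .zero, size: size))
    }
}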

Set UIImage color

I am on Swift 4, and my goal is to change the color of a system UIImage in code. I declare the image with

let defaultImage = UIImage(systemName: "person.circle.fill")

But when I change its tint color with

let grayDefaultImage = defaultImage?.withTintColor(.gray)

grayDefaultImage still displays the standard blue color. Next I tried an extension:
// MARK: - UIImage extension
extension UIImage {
    /// #Todo: this blurs the image for some reason
    func imageWithColor(color: UIColor) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
        color.setFill()

        let context = UIGraphicsGetCurrentContext()
        context?.translateBy(x: 0, y: self.size.height)
        context?.scaleBy(x: 1.0, y: -1.0)
        context?.setBlendMode(CGBlendMode.normal)

        let rect = CGRect(origin: .zero, size: CGSize(width: self.size.width, height: self.size.height))
        context?.clip(to: rect, mask: self.cgImage!)
        context?.fill(rect)

        let newImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return newImage!
    }
}
And the image renders in a very "pixelated" or "low res" form.
The documentation for withTintColor(_:) says: "The new image uses the same rendering mode as the original image."
So if the original image's rendering mode is .alwaysTemplate, the image will ignore its color information. You need to set the mode to .alwaysOriginal:

defaultImage?.withTintColor(.gray, renderingMode: .alwaysOriginal)
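
Putting it together, usage might look like this (imageView is a hypothetical outlet, not part of the original question):

let defaultImage = UIImage(systemName: "person.circle.fill")
let grayDefaultImage = defaultImage?.withTintColor(.gray, renderingMode: .alwaysOriginal)
imageView.image = grayDefaultImage  // renders gray instead of the template tint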

How to make image to watermarkimage in swift [duplicate]

I know there are several other ways to do this; I don't want to import anything that I don't need to. If someone can help me with this code, that would be great.
Currently, it is only saving the original image, without the watermark image.
extension UIImage {
    class func imageWithWatermark(image1: UIImageView, image2: UIImageView) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(image1.bounds.size, false, 0.0)
        image2.layer.render(in: UIGraphicsGetCurrentContext()!)
        image1.layer.render(in: UIGraphicsGetCurrentContext()!)
        let img = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return img
    }
}

func addWatermark() {
    let newImage = UIImage.imageWithWatermark(image1: imageView, image2: watermarkImageView)
    UIImageWriteToSavedPhotosAlbum(newImage, nil, nil, nil)
}
EDIT: I've got the watermark appearing on the saved images.
I had to switch the order of the layers:

image1.layer.render(in: UIGraphicsGetCurrentContext()!)
image2.layer.render(in: UIGraphicsGetCurrentContext()!)

HOWEVER, it is not appearing in the correct place. It seems to always appear in the center of the image.
If you grab the UIImageViews' images you could use the following concept:

if let img = UIImage(named: "image.png"), let img2 = UIImage(named: "watermark.png") {
    let rect = CGRect(x: 0, y: 0, width: img.size.width, height: img.size.height)

    UIGraphicsBeginImageContextWithOptions(img.size, true, 0)
    let context = UIGraphicsGetCurrentContext()!
    context.setFillColor(UIColor.white.cgColor)
    context.fill(rect)

    img.draw(in: rect, blendMode: .normal, alpha: 1)
    // x, y, width, height: wherever you want the watermark to go
    img2.draw(in: CGRect(x: x, y: y, width: width, height: height), blendMode: .normal, alpha: 1)

    let result = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext()

    UIImageWriteToSavedPhotosAlbum(result, nil, nil, nil)
}
SWIFT 4
Use this:

let backgroundImage = imageData!
let watermarkImage = #imageLiteral(resourceName: "jodi_url_icon")

let size = backgroundImage.size
let scale = backgroundImage.scale

UIGraphicsBeginImageContextWithOptions(size, false, scale)
backgroundImage.draw(in: CGRect(x: 0.0, y: 0.0, width: size.width, height: size.height))
watermarkImage.draw(in: CGRect(x: 10, y: 10, width: size.width, height: size.height - 40))
let result = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()

Assign result to your UIImageView; tested.
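
For a modern alternative, the same composite can be built with UIGraphicsImageRenderer, which manages the context and applies the scale for you (a sketch under the same assumptions as the snippet above; watermarked(background:watermark:) is a hypothetical helper, not from the original answer):

func watermarked(background: UIImage, watermark: UIImage) -> UIImage {
    let format = UIGraphicsImageRendererFormat()
    format.scale = background.scale  // keep the background image's scale

    let renderer = UIGraphicsImageRenderer(size: background.size, format: format)
    return renderer.image { _ in
        background.draw(in: CGRect(origin: .zero, size: background.size))
        // Same placement as above: a 10pt offset from the top-left corner.
        watermark.draw(in: CGRect(x: 10, y: 10,
                                  width: background.size.width,
                                  height: background.size.height - 40))
    }
}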

Visible Artifacts Compositing two UIImages with UIGraphicsContext using Hard Light transfer mode

I am compositing two UIImages together using a UIGraphicsContext.
It works for the most part, but using the Hard Light CGBlendMode, I am getting strange artifacts that are not present if I use a Hard Light transfer mode in Photoshop.
I would try to use Core Image, but I need to be able to control the opacity of the top layer being transferred with Hard Light, and as far as I can tell that is not possible.
Here is the image created with a UIGraphicsContext:
Here it is using the same layers and opacity in Photoshop:
Here is the base image:
And the layer being composited with 60% opacity using Hard Light:
I have tried setting the context interpolation quality to high and using a transparency layer, but nothing has made any improvement.
My code is below. Does anyone know how to fix this, or alternative ways of achieving the same results that I am getting in Photoshop?
public extension UIImage {
    func compImagesWithImage(_ topImg: UIImage, blendMode: CGBlendMode, opacity: CGFloat) -> UIImage? {
        let baseImg = self
        if let cgImage = baseImg.cgImage {
            let baseW = CGFloat(cgImage.width)
            let baseH = CGFloat(cgImage.height)

            UIGraphicsBeginImageContext(CGSize(width: baseW, height: baseH))
            if let context = UIGraphicsGetCurrentContext() {
                context.saveGState()
                context.interpolationQuality = .high

                // CGContext coordinates are flipped relative to UIKit.
                let flipVertical = CGAffineTransform(a: 1, b: 0, c: 0, d: -1, tx: 0, ty: baseH)
                context.concatenate(flipVertical)

                context.setAlpha(1.0)
                context.setBlendMode(.normal)
                context.draw(cgImage, in: CGRect(origin: .zero, size: CGSize(width: baseW, height: baseH)))

                context.setAlpha(opacity)
                context.setBlendMode(blendMode)
                context.beginTransparencyLayer(auxiliaryInfo: nil)
                context.draw(topImg.cgImage!, in: CGRect(origin: .zero, size: CGSize(width: baseW, height: baseH)))
                context.endTransparencyLayer()

                context.restoreGState()
                let finalImage = UIGraphicsGetImageFromCurrentImageContext()
                UIGraphicsEndImageContext()
                return finalImage
            }
        }
        return nil
    }
}
This fixed it: Highlight Artifacts When Using Hard Light Blend Mode
I just had to set the alpha of the base layer to 0.99.
Here is the updated code. It is identical to the version above, except that the base layer is now drawn at 0.99 alpha instead of 1.0:

context.setAlpha(0.99)
context.setBlendMode(.normal)
context.draw(cgImage, in: CGRect(origin: .zero, size: CGSize(width: baseW, height: baseH)))
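
As an aside, the same composite can be written without the manual flip by drawing UIImages rather than CGImages, for example with UIGraphicsImageRenderer (a sketch that assumes the 0.99 workaround carries over; composited(with:blendMode:opacity:) is a hypothetical helper, not code from the original answer):

extension UIImage {
    func composited(with topImg: UIImage, blendMode: CGBlendMode, opacity: CGFloat) -> UIImage {
        // The renderer works in points at the image's scale; UIImage.draw also
        // handles UIKit's flipped coordinate system, so no transform is needed.
        let format = UIGraphicsImageRendererFormat()
        format.scale = scale
        let renderer = UIGraphicsImageRenderer(size: size, format: format)
        return renderer.image { _ in
            self.draw(in: CGRect(origin: .zero, size: size), blendMode: .normal, alpha: 0.99)
            topImg.draw(in: CGRect(origin: .zero, size: size), blendMode: blendMode, alpha: opacity)
        }
    }
}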

Resize and Crop 2 Images affected the original image quality

Suppose that I have a UIImage object on the UIViewController, and I want to set the image from the controller. Basically what I want to do is merge two images together. The first image is the 5 stars in blue:
and the second image is the 5 stars in gray:
It's intended as a rating image. Since the maximum rating is 5, I multiply it by 20 to get 100 points, which makes the calculation easier. Please see the code for the detailed logic.
So I have this (BM_RatingHelper.swift):

static func getRatingImageBasedOnRating(rating: CGFloat, width: CGFloat, height: CGFloat) -> UIImage {
    // available maximum rating is 5.0, so we have to multiply it by 20 to achieve 100.0 points
    let ratingImageWidth = (width / 100.0) * (rating * 20.0)

    // get active rating image
    let activeRatingImage = BM_ImageHelper.resize(image: UIImage(named: "StarRatingFullActive")!, targetSize: CGSize(width: width, height: height))
    let activeRatingImageView = UIImageView(frame: CGRect(x: 0, y: 0, width: ratingImageWidth, height: height))
    activeRatingImageView.image = BM_ImageHelper.crop(image: activeRatingImage, x: 0, y: 0, width: ratingImageWidth, height: height)

    // get inactive rating image
    let inactiveRatingImage = BM_ImageHelper.resize(image: UIImage(named: "StarRatingFullInactive")!, targetSize: CGSize(width: width, height: height))
    let inactiveRatingImageView = UIImageView(frame: CGRect(x: ratingImageWidth, y: 0, width: (100.0 - ratingImageWidth), height: height))
    inactiveRatingImageView.image = BM_ImageHelper.crop(image: inactiveRatingImage, x: ratingImageWidth, y: 0, width: (100.0 - ratingImageWidth), height: height)

    // combine the images
    let ratingView = UIView(frame: CGRect(x: 0, y: 0, width: width, height: height))
    ratingView.backgroundColor = BM_Color.colorForType(BM_ColorType.ColorWhiteTransparent)
    ratingView.addSubview(activeRatingImageView)
    ratingView.addSubview(inactiveRatingImageView)
    return ratingView.capture()
}
The BM_ImageHelper.swift:

import UIKit

class BM_ImageHelper: NSObject {

    // http://stackoverflow.com/questions/158914/cropping-an-uiimage
    static func crop(image: UIImage, x: CGFloat, y: CGFloat, width: CGFloat, height: CGFloat) -> UIImage {
        let rect = CGRect(x: x, y: y, width: width, height: height)
        let imageRef = image.cgImage!.cropping(to: rect)!
        let croppedImage = UIImage(cgImage: imageRef)
        return croppedImage
    }

    // http://iosdevcenters.blogspot.com/2015/12/how-to-resize-image-in-swift-in-ios.html
    static func resize(image: UIImage, targetSize: CGSize) -> UIImage {
        let size = image.size
        let widthRatio = targetSize.width / image.size.width
        let heightRatio = targetSize.height / image.size.height

        // Figure out what our orientation is, and use that to form the rectangle
        var newSize: CGSize
        if widthRatio > heightRatio {
            newSize = CGSize(width: size.width * heightRatio, height: size.height * heightRatio)
        } else {
            newSize = CGSize(width: size.width * widthRatio, height: size.height * widthRatio)
        }

        // This is the rect that we've calculated out and this is what is actually used below
        let rect = CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height)

        // Actually do the resizing to the rect using the ImageContext stuff
        UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
        image.draw(in: rect)
        let newImage = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return newImage
    }
}

extension UIView {

    // http://stackoverflow.com/a/34895760/897733
    func capture() -> UIImage {
        UIGraphicsBeginImageContextWithOptions(self.frame.size, self.isOpaque, UIScreen.main.scale)
        self.layer.render(in: UIGraphicsGetCurrentContext()!)
        let image = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return image
    }
}
I call that function like this (supposing the image that needs to be filled is ratingImage):

self.ratingImage.image =
    BM_RatingHelper.getRatingImageBasedOnRating(rating: 3.7, width: 100.0, height: 20.0)

The code works perfectly, but the merged image is very low quality, although I used high-quality images. This is the image for a 3.7 rating:
What should I do to merge the images without losing the original quality? Thanks.
In your BM_ImageHelper.resize method you are passing a scale of 1.0. It should be the device's screen scale.
Change it to

UIGraphicsBeginImageContextWithOptions(newSize, false, UIScreen.main.scale)
UPDATE
Also change your crop method to account for the scale:

static func crop(image: UIImage, x: CGFloat, y: CGFloat, width: CGFloat, height: CGFloat) -> UIImage {
    // The crop rect is in points; scale it to the image's pixel coordinates.
    let transform = CGAffineTransform(scaleX: image.scale, y: image.scale)
    let rect = CGRect(x: x, y: y, width: width, height: height)
    let transformedCropRect = rect.applying(transform)

    let imageRef = image.cgImage!.cropping(to: transformedCropRect)!
    let croppedImage = UIImage(cgImage: imageRef, scale: image.scale, orientation: image.imageOrientation)
    return croppedImage
}
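
On iOS 10+, the resize helper could also be written with UIGraphicsImageRenderer, which picks up the main screen's scale by default (a sketch preserving the aspect-fit logic; not part of the original answer):

static func resize(image: UIImage, targetSize: CGSize) -> UIImage {
    // Same aspect-fit computation as the original helper.
    let widthRatio = targetSize.width / image.size.width
    let heightRatio = targetSize.height / image.size.height
    let ratio = min(widthRatio, heightRatio)
    let newSize = CGSize(width: image.size.width * ratio, height: image.size.height * ratio)

    // UIGraphicsImageRenderer defaults to the device's screen scale.
    let renderer = UIGraphicsImageRenderer(size: newSize)
    return renderer.image { _ in
        image.draw(in: CGRect(origin: .zero, size: newSize))
    }
}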
