iOS memory warning in Swift

I use a timer that fires a function every 0.1 seconds to get a UIImage via UIGraphicsGetImageFromCurrentImageContext and assign it to the image of an @IBOutlet weak UIImageView (coverImageView).
var timer:Timer?
func fireTimer() {
timer = Timer.scheduledTimer(withTimeInterval: 0.1, repeats: true, block: { [weak self] _ in
self?.lightTheFier()
})
}
func invalidateTimer() {
timer?.invalidate()
timer = nil
}
func lightTheFier() {
var points = [CGPoint]()
for pin in pins {
let point = mapView.convert(pin.coordinate, toPointTo: coverImageView)
points.append(point)
}
coverImageView.image = getMaskedImage(points: points, zoomLevel: Int(mapView.zoomLevel()))
}
func getOringinalImageFromPath() -> UIImage{
UIGraphicsBeginImageContextWithOptions(coverImageView.bounds.size, false, 0.0)
let path = UIBezierPath(rect: coverImageView.bounds)
UIColor.black.setFill()
path.fill()
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return image!
}
func getMaskImageFromPath(points:[CGPoint], scale:CGFloat) -> UIImage {
UIGraphicsBeginImageContextWithOptions(coverImageView.bounds.size, false, 0.0)
let radius:CGFloat = 10.0
UIColor.blue.setFill()
for point in points {
let rect = CGRect(x: point.x - (scale*radius/2), y: point.y - (scale*radius/2), width: scale*radius, height: scale*radius)
let path = UIBezierPath(ovalIn: rect)
path.fill()
}
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return image!
}
func getMaskedImage(points:[CGPoint], zoomLevel:Int) -> UIImage{
let orImage = getOringinalImageFromPath().cgImage
let maskImage = getMaskImageFromPath(points: points, scale: CGFloat(zoomLevel)).cgImage
let mask = CGImage(maskWidth: maskImage!.width, height: maskImage!.height, bitsPerComponent: maskImage!.bitsPerComponent, bitsPerPixel: maskImage!.bitsPerPixel, bytesPerRow: maskImage!.bytesPerRow, provider: maskImage!.dataProvider!, decode: nil, shouldInterpolate: true)
let maskRef = orImage?.masking(mask!)
let image = UIImage(cgImage: maskRef!)
return image
}
All the code above is in a single ViewController.swift for testing.
It's fine that memory increases while the timer is running; what I want is for that memory to be released when I invalidate the timer.
How can I resolve this issue?

You're starting an image context with UIGraphicsBeginImageContextWithOptions, but if you never end it, those contexts pile up in memory. Make sure UIGraphicsEndImageContext() is called in your getMaskImageFromPath and getOringinalImageFromPath methods:
func getMaskImageFromPath(points:[CGPoint], scale:CGFloat) -> UIImage {
UIGraphicsBeginImageContextWithOptions(coverImageView.bounds.size, false, 0.0)
let radius:CGFloat = 10.0
UIColor.blue.setFill()
for point in points {
let rect = CGRect(x: point.x - (scale*radius/2), y: point.y - (scale*radius/2), width: scale*radius, height: scale*radius)
let path = UIBezierPath(ovalIn: rect)
path.fill()
}
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext() // Missing this line
return image!
}
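Beyond balancing the context calls, note that UIGraphicsGetImageFromCurrentImageContext() returns an autoreleased image, so the temporaries produced on every 0.1 s tick can linger until the run loop drains its pool. A minimal sketch (reusing the question's lightTheFier, pins, mapView, coverImageView and getMaskedImage names) that wraps each tick's drawing in an explicit autoreleasepool, which may release that memory sooner:
func lightTheFier() {
    autoreleasepool {
        var points = [CGPoint]()
        for pin in pins {
            // Same conversion as in the question, just collected inside the pool
            points.append(mapView.convert(pin.coordinate, toPointTo: coverImageView))
        }
        // Temporary images created while building the mask are released when
        // this pool drains rather than accumulating between run-loop turns.
        coverImageView.image = getMaskedImage(points: points, zoomLevel: Int(mapView.zoomLevel()))
    }
}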

Related

How do I set the line width of a UIBezierPath when drawing to a CGContext?

I'm trying to create a UIImage using a provided UIBezierPath. Unfortunately, no matter what I set setLineWidth to, the result is always a stroke of 1 point:
extension UIBezierPath {
func image(fillColor: UIColor, strokeColor: UIColor) -> UIImage? {
UIGraphicsBeginImageContextWithOptions(bounds.size, false, 1.0)
guard let context = UIGraphicsGetCurrentContext() else {
return nil
}
context.setLineWidth(10)
context.setFillColor(fillColor.cgColor)
context.setStrokeColor(strokeColor.cgColor)
self.fill()
self.stroke()
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return image
}
}
Trying this out in a test project with a circle, for example:
class ViewController: UIViewController {
override func viewDidLoad() {
super.viewDidLoad()
let imageView = UIImageView()
imageView.frame = CGRect(x: 100, y: 100, width: 100, height: 100)
view.addSubview(imageView)
let bezierPath = UIBezierPath(ovalIn: CGRect(x: 0, y: 0, width: 100, height: 100))
let image = bezierPath.image(fillColor: UIColor.blue, strokeColor: UIColor.red)
imageView.image = image
}
}
No matter what I set setLineWidth to, it always appears to be 1 point.
You are calling stroke on the UIBezierPath, so you need to set the lineWidth property of that using self.lineWidth = 10.
extension UIBezierPath {
func image(fillColor: UIColor, strokeColor: UIColor) -> UIImage? {
UIGraphicsBeginImageContextWithOptions(bounds.size, false, 1.0)
guard let context = UIGraphicsGetCurrentContext() else {
return nil
}
context.setFillColor(fillColor.cgColor)
self.lineWidth = 10
context.setStrokeColor(strokeColor.cgColor)
self.fill()
self.stroke()
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return image
}
}

how to add colored border to uiimage in swift

It is pretty easy to add a border to a UIImageView using layers (borderWidth, borderColor, etc.). Is there any way to add a border to the image itself, not to the image view? Does anybody know?
Update:
I tried to follow the suggestion below and used the extension. Thank you for that, but I did not get the desired result. Here is my code. What is wrong?
import UIKit
class ViewController: UIViewController {
var imageView: UIImageView!
var sizeW = CGFloat()
var sizeH = CGFloat()
override func viewDidLoad() {
super.viewDidLoad()
sizeW = view.frame.width
sizeH = view.frame.height
setImage()
}
func setImage(){
//add image view
imageView = UIImageView(frame: CGRect(x: 0, y: 0, width: sizeW/2, height: sizeH/2))
imageView.center = view.center
imageView.tintColor = UIColor.orange
imageView.contentMode = UIViewContentMode.scaleAspectFit
let imgOriginal = UIImage(named: "plum")!.withRenderingMode(.alwaysTemplate)
let borderImage = imgOriginal.imageWithBorder(width: 2, color: UIColor.blue)
imageView.image = borderImage
view.addSubview(imageView)
}
}
extension UIImage {
func imageWithBorder(width: CGFloat, color: UIColor) -> UIImage? {
let square = CGSize(width: min(size.width, size.height) + width * 2, height: min(size.width, size.height) + width * 2)
let imageView = UIImageView(frame: CGRect(origin: CGPoint(x: 0, y: 0), size: square))
imageView.contentMode = .center
imageView.image = self
imageView.layer.borderWidth = width
imageView.layer.borderColor = color.cgColor
UIGraphicsBeginImageContextWithOptions(imageView.bounds.size, false, scale)
guard let context = UIGraphicsGetCurrentContext() else { return nil }
imageView.layer.render(in: context)
let result = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return result
}
}
The second image with the red border is more or less what I need:
Strongly inspired by @herme5, refactored into more compact Swift 5 / iOS 12+ code as follows (fixes the vertical flip issue as well):
public extension UIImage {
/**
Returns the flat colorized version of the image, or self if something went wrong.
- Parameters:
- color: The color to use. Defaults to `UIColor.white`.
- Returns: The flat colorized version of the image, or self if something went wrong.
*/
func colorized(with color: UIColor = .white) -> UIImage {
UIGraphicsBeginImageContextWithOptions(size, false, scale)
defer {
UIGraphicsEndImageContext()
}
guard let context = UIGraphicsGetCurrentContext(), let cgImage = cgImage else { return self }
let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
color.setFill()
context.translateBy(x: 0, y: size.height)
context.scaleBy(x: 1.0, y: -1.0)
context.clip(to: rect, mask: cgImage)
context.fill(rect)
guard let colored = UIGraphicsGetImageFromCurrentImageContext() else { return self }
return colored
}
/**
Returns the stroked version of the transparent image with the given stroke color and thickness.
- Parameters:
- color: The color to use. Defaults to `UIColor.white`.
- thickness: The thickness of the border. Defaults to `2`.
- quality: The step in degrees (out of 360): the smaller, the better the result but the slower the rendering. Defaults to `10`.
- Returns: The stroked version of the image, or self if something went wrong.
*/
func stroked(with color: UIColor = .white, thickness: CGFloat = 2, quality: CGFloat = 10) -> UIImage {
guard let cgImage = cgImage else { return self }
// Colorize the stroke image to reflect border color
let strokeImage = colorized(with: color)
guard let strokeCGImage = strokeImage.cgImage else { return self }
/// Rendering quality of the stroke
let step = quality == 0 ? 10 : abs(quality)
let oldRect = CGRect(x: thickness, y: thickness, width: size.width, height: size.height).integral
let newSize = CGSize(width: size.width + 2 * thickness, height: size.height + 2 * thickness)
let translationVector = CGPoint(x: thickness, y: 0)
UIGraphicsBeginImageContextWithOptions(newSize, false, scale)
guard let context = UIGraphicsGetCurrentContext() else { return self }
defer {
UIGraphicsEndImageContext()
}
context.translateBy(x: 0, y: newSize.height)
context.scaleBy(x: 1.0, y: -1.0)
context.interpolationQuality = .high
for angle: CGFloat in stride(from: 0, to: 360, by: step) {
let vector = translationVector.rotated(around: .zero, byDegrees: angle)
let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
context.concatenate(transform)
context.draw(strokeCGImage, in: oldRect)
let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
context.concatenate(resetTransform)
}
context.draw(cgImage, in: oldRect)
guard let stroked = UIGraphicsGetImageFromCurrentImageContext() else { return self }
return stroked
}
}
extension CGPoint {
/**
Rotates the point around the center `origin` by `byDegrees` degrees along the Z axis.
- Parameters:
- origin: The center of the rotation;
- byDegrees: Amount of degrees to rotate around the Z axis.
- Returns: The rotated point.
*/
func rotated(around origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
let dx = x - origin.x
let dy = y - origin.y
let radius = sqrt(dx * dx + dy * dy)
let azimuth = atan2(dy, dx) // in radians
let newAzimuth = azimuth + byDegrees * .pi / 180.0 // to radians
let x = origin.x + radius * cos(newAzimuth)
let y = origin.y + radius * sin(newAzimuth)
return CGPoint(x: x, y: y)
}
}
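A minimal call site for the Swift 5 version above (the "plum" asset name comes from the question and is purely illustrative, as is the imageView):
let original = UIImage(named: "plum")!
// Draw a 4-point blue border around the non-transparent pixels
let bordered = original.stroked(with: .blue, thickness: 4)
imageView.image = bordered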
Here is a UIImage extension I wrote in Swift 4. As IOSDealBreaker said, this is all about image processing, and some particular cases may occur. You should start from a PNG image with a transparent background, and manage the size if the result is larger than the original.
First, get a colorized "shade" version of your image.
Then draw and redraw the shade image all around a given origin point (in our case around (0, 0), at a distance equal to the border thickness).
Draw your source image at the origin point so that it appears in the foreground.
You may have to enlarge your image if the border goes outside the original rect.
My method uses a lot of util methods and class extensions. Here is some maths to rotate a vector (which is actually a point) around another point: Rotating a CGPoint around another CGPoint
extension CGPoint {
func rotated(around origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
let dx = self.x - origin.x
let dy = self.y - origin.y
let radius = sqrt(dx * dx + dy * dy)
let azimuth = atan2(dy, dx) // in radians
let newAzimuth = azimuth + (byDegrees * CGFloat.pi / 180.0) // convert it to radians
let x = origin.x + radius * cos(newAzimuth)
let y = origin.y + radius * sin(newAzimuth)
return CGPoint(x: x, y: y)
}
}
I wrote a custom CIFilter to colorize an image that has a transparent background: Colorize a UIImage in Swift
class ColorFilter: CIFilter {
var inputImage: CIImage?
var inputColor: CIColor?
private let kernel: CIColorKernel = {
let kernelString =
"""
kernel vec4 colorize(__sample pixel, vec4 color) {
pixel.rgb = pixel.a * color.rgb;
pixel.a *= color.a;
return pixel;
}
"""
return CIColorKernel(source: kernelString)!
}()
override var outputImage: CIImage? {
guard let inputImage = inputImage, let inputColor = inputColor else { return nil }
let inputs = [inputImage, inputColor] as [Any]
return kernel.apply(extent: inputImage.extent, arguments: inputs)
}
}
extension UIImage {
func colorized(with color: UIColor) -> UIImage {
guard let cgInput = self.cgImage else {
return self
}
let colorFilter = ColorFilter()
colorFilter.inputImage = CIImage(cgImage: cgInput)
colorFilter.inputColor = CIColor(color: color)
if let ciOutputImage = colorFilter.outputImage {
let context = CIContext(options: nil)
let cgImg = context.createCGImage(ciOutputImage, from: ciOutputImage.extent)
return UIImage(cgImage: cgImg!, scale: self.scale, orientation: self.imageOrientation).alpha(color.rgba.alpha).withRenderingMode(self.renderingMode)
} else {
return self
}
}
}
At this point you should have everything to make this work:
extension UIImage {
func stroked(with color: UIColor, size: CGFloat) -> UIImage {
let strokeImage = self.colorized(with: color)
let oldRect = CGRect(x: size, y: size, width: self.size.width, height: self.size.height).integral
let newSize = CGSize(width: self.size.width + (2*size), height: self.size.height + (2*size))
let translationVector = CGPoint(x: size, y: 0)
UIGraphicsBeginImageContextWithOptions(newSize, false, self.scale)
if let context = UIGraphicsGetCurrentContext() {
context.interpolationQuality = .high
let step = 10 // reduce the step to increase quality
for angle in stride(from: 0, to: 360, by: step) {
let vector = translationVector.rotated(around: .zero, byDegrees: CGFloat(angle))
let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
context.concatenate(transform)
context.draw(strokeImage.cgImage!, in: oldRect)
let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
context.concatenate(resetTransform)
}
context.draw(self.cgImage!, in: oldRect)
let newImage = UIImage(cgImage: context.makeImage()!, scale: self.scale, orientation: self.imageOrientation)
UIGraphicsEndImageContext()
return newImage.withRenderingMode(self.renderingMode)
}
UIGraphicsEndImageContext()
return self
}
}
Adding borders to images belongs to the image-processing side of iOS. It's not as easy as adding a border to a UIView; it goes pretty deep, but if you're willing to go the distance, here is a library and a hint for the journey:
https://github.com/BradLarson/GPUImage
Try using GPUImageThresholdEdgeDetectionFilter,
or try OpenCV: https://docs.opencv.org/2.4/doc/tutorials/ios/image_manipulation/image_manipulation.html
Use this simple extension for UIImage
extension UIImage {
func outline() -> UIImage? {
UIGraphicsBeginImageContext(size)
let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
self.draw(in: rect, blendMode: .normal, alpha: 1.0)
let context = UIGraphicsGetCurrentContext()
context?.setStrokeColor(red: 1.0, green: 0.5, blue: 1.0, alpha: 1.0)
context?.setLineWidth(5.0)
context?.stroke(rect)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage
}
}
It will give you an image with a pink border.
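A hypothetical call site (the asset name and image view are illustrative):
let photo = UIImage(named: "plum")
// Returns a copy of the image with a 5-point pink frame drawn around its edges
imageView.image = photo?.outline()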

Cut rounded image with the face from CIDetector and CIFaceFeature

How do I cut the frame that I receive as faceViewBounds so that I get a big circle around the face? It's like a badge with the person's face.
Maybe I should get the center of faceViewBounds, find that center in theImageView.image, draw a circle with a big diameter, and then cut away everything outside the circle, but I don't know how to do that in code. Any suggestions?
func detectFaceFrom(ImageView theImageView: UIImageView) {
guard let personImage = CIImage(image: theImageView.image!) else {
return
}
let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyLow]
let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
let faces = faceDetector?.features(in: personImage)
let ciImageSize = personImage.extent.size
var transform = CGAffineTransform(scaleX: 1, y: -1)
transform = transform.translatedBy(x: 0, y: -ciImageSize.height)
if(faces?.count==1){
for face in faces as! [CIFaceFeature] {
var faceViewBounds = face.bounds.applying(transform)
let viewSize = theImageView.bounds.size
let scale = min(viewSize.width / ciImageSize.width,
viewSize.height / ciImageSize.height)
let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
let offsetY = (viewSize.height - ciImageSize.height * scale) / 2
faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
faceViewBounds.origin.x += offsetX
faceViewBounds.origin.y += offsetY
let faceBox = UIView(frame: faceViewBounds)
faceBox.layer.borderWidth = 3
faceBox.layer.borderColor = UIColor.green.cgColor
faceBox.backgroundColor = UIColor.clear
drawCircleFromCenter(faceViewBounds.center ???
}
return cuttedCircleWithFace
}else{
return theImageView.image
}
}
I just saw an ad on Facebook with the exact same thing I want to accomplish:
The problem is that you should use your image.size instead of theImageView.bounds.size. You should also pass the CIDetectorImageOrientation option when requesting features.
extension UIImage{
var faces: [UIImage] {
guard let ciimage = CIImage(image: self) else { return [] }
var orientation: NSNumber {
switch imageOrientation {
case .up: return 1
case .upMirrored: return 2
case .down: return 3
case .downMirrored: return 4
case .leftMirrored: return 5
case .right: return 6
case .rightMirrored: return 7
case .left: return 8
}
}
return CIDetector(ofType: CIDetectorTypeFace, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyLow])?
.features(in: ciimage, options: [CIDetectorImageOrientation: orientation])
.compactMap {
let rect = $0.bounds.insetBy(dx: -10, dy: -10)
UIGraphicsBeginImageContextWithOptions(rect.size, false, scale)
defer { UIGraphicsEndImageContext() }
UIImage(ciImage: ciimage.cropped(to: rect)).draw(in: CGRect(origin: .zero, size: rect.size))
guard let face = UIGraphicsGetImageFromCurrentImageContext() else { return nil }
// now that you have your face image you need to properly apply a circle mask to it
let size = face.size
let breadth = min(size.width, size.height)
let breadthSize = CGSize(width: breadth, height: breadth)
UIGraphicsBeginImageContextWithOptions(breadthSize, false, scale)
defer { UIGraphicsEndImageContext() }
guard let cgImage = face.cgImage?.cropping(to: CGRect(origin: CGPoint(x: size.width > size.height ? (size.width-size.height).rounded(.down)/2 : 0, y: size.height > size.width ? (size.height-size.width).rounded(.down)/2 : 0), size: breadthSize))
else { return nil }
let faceRect = CGRect(origin: .zero, size: CGSize(width: min(size.width, size.height), height: min(size.width, size.height)))
UIBezierPath(ovalIn: faceRect).addClip()
UIImage(cgImage: cgImage).draw(in: faceRect)
return UIGraphicsGetImageFromCurrentImageContext()
} ?? []
}
}
let profilePicture = UIImage(data: try! Data(contentsOf: URL(string:"http://i.stack.imgur.com/Xs4RX.jpg")!))!
if let face = profilePicture.faces.first {
print(face.size)
}
If you just want to focus on a face inside of image. You should first set up an image view and mask it into a circle:
let image = UIImage(named: "face.jpg")
let imageView = UIImageView(frame: CGRect(x: 0, y: 0, width: 50.0, height: 50.0))
imageView.image = image
imageView.contentMode = .scaleAspectFill
imageView.layer.cornerRadius = imageView.bounds.height * 0.5
imageView.layer.masksToBounds = true
Next you run the CIDetector
func focusOnFace(in imageView: UIImageView)
{
guard let image = imageView.image,
var personImage = CIImage(image: image) else { return }
let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyLow]
let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
// This will just take the first detected face but you can do something more sophisticated
guard let face = faceDetector?.features(in: personImage).first as? CIFaceFeature else { return }
// Make the facial rect a square so it will mask nicely to a circle (may not be strictly necessary as `CIFaceFeature` bounds is typically a square)
var rect = face.bounds
rect.size.height = max(face.bounds.height, face.bounds.width)
rect.size.width = max(face.bounds.height, face.bounds.width)
rect = rect.insetBy(dx: -30, dy: -30) // Adds padding around the face so it's not so tightly cropped
// Crop to the face detected
personImage = personImage.cropping(to: rect)
// Set the new cropped image as the image view image
imageView.image = UIImage(ciImage: personImage)
}
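With the circular image view configured above, applying the detector is then a single call (a sketch, assuming the view has already been added to the hierarchy):
view.addSubview(imageView)
focusOnFace(in: imageView)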
(The original answer included before/after screenshots demonstrating focusOnFace, plus an updated example.)

How do I draw on an image in Swift?

I need to be able to programmatically draw on an image, and save that image for later use. Say, draw a line on specific x and y coordinates on the image, save the image, and display it onto a simple view controller. How would I go about doing this in Swift? (Preferably Swift 2, I am still in development and haven't updated my mac to Sierra)
Update: Possibly something to do with converting a UIImage to a CGLayer, drawing on it, and then converting it back to a UIImage.
All you need to do is create an image context and use its powerful drawing methods through the CGContext object. You can learn more about CGContext in Apple's documentation.
This function draws a line and a circle on an UIImage and returns the modified image:
Swift 4
func drawOnImage(_ image: UIImage) -> UIImage {
// Create a context of the starting image size and set it as the current one
UIGraphicsBeginImageContext(image.size)
// Draw the starting image in the current context as background
image.draw(at: CGPoint.zero)
// Get the current context
let context = UIGraphicsGetCurrentContext()!
// Draw a red line
context.setLineWidth(2.0)
context.setStrokeColor(UIColor.red.cgColor)
context.move(to: CGPoint(x: 100, y: 100))
context.addLine(to: CGPoint(x: 200, y: 200))
context.strokePath()
// Draw a transparent green Circle
context.setStrokeColor(UIColor.green.cgColor)
context.setAlpha(0.5)
context.setLineWidth(10.0)
context.addEllipse(in: CGRect(x: 100, y: 100, width: 100, height: 100))
context.drawPath(using: .stroke) // or .fillStroke if need filling
// Save the context as a new UIImage
let myImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
// Return modified image
return myImage!
}
It's simple:
Make an image graphics context. (Before iOS 10, you would do this by calling UIGraphicsBeginImageContextWithOptions. In iOS 10 there's another way, UIGraphicsImageRenderer, but you don't have to use it if you don't want to.)
Draw (i.e. copy) the image into the context. (UIImage actually has draw... methods for this very purpose.)
Draw your line into the context. (There are CGContext functions for this.)
Extract the resulting image from the context. (For example, if you used UIGraphicsBeginImageContextWithOptions, you would use UIGraphicsGetImageFromCurrentImageContext.) Then close the context.
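A minimal sketch of those four steps using the pre-iOS 10 API (the function and parameter names are illustrative):
func image(_ image: UIImage, withLineFrom start: CGPoint, to end: CGPoint) -> UIImage? {
    // 1. Make an image graphics context
    UIGraphicsBeginImageContextWithOptions(image.size, false, image.scale)
    defer { UIGraphicsEndImageContext() } // 4b. Close the context when done
    // 2. Draw (copy) the image into the context
    image.draw(at: .zero)
    // 3. Draw the line into the context
    guard let context = UIGraphicsGetCurrentContext() else { return nil }
    context.setLineWidth(2)
    context.setStrokeColor(UIColor.red.cgColor)
    context.move(to: start)
    context.addLine(to: end)
    context.strokePath()
    // 4a. Extract the resulting image from the context
    return UIGraphicsGetImageFromCurrentImageContext()
}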
Details
Xcode 9.1, Swift 4
Solution
extension UIImage
extension UIImage {
typealias RectCalculationClosure = (_ parentSize: CGSize, _ newImageSize: CGSize)->(CGRect)
func with(image named: String, rectCalculation: RectCalculationClosure) -> UIImage {
return with(image: UIImage(named: named), rectCalculation: rectCalculation)
}
func with(image: UIImage?, rectCalculation: RectCalculationClosure) -> UIImage {
if let image = image {
UIGraphicsBeginImageContext(size)
draw(in: CGRect(origin: .zero, size: size))
image.draw(in: rectCalculation(size, image.size))
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage!
}
return self
}
}
extension UIImageView
extension UIImageView {
enum ImageAddingMode {
case changeOriginalImage
case addSubview
}
func drawOnCurrentImage(anotherImage: UIImage?, mode: ImageAddingMode, rectCalculation: UIImage.RectCalculationClosure) {
guard let image = image else {
return
}
switch mode {
case .changeOriginalImage:
self.image = image.with(image: anotherImage, rectCalculation: rectCalculation)
case .addSubview:
let newImageView = UIImageView(frame: rectCalculation(frame.size, image.size))
newImageView.image = anotherImage
addSubview(newImageView)
}
}
}
(The original answer included sample parent and child images.)
Usage example 1
func sample1(imageView: UIImageView) {
imageView.contentMode = .scaleAspectFit
imageView.image = UIImage(named: "parent")?.with(image: "child") { parentSize, newImageSize in
print("parentSize = \(parentSize)")
print("newImageSize = \(newImageSize)")
return CGRect(x: 50, y: 50, width: 90, height: 90)
}
}
Result 1
Usage example 2
func sample2(imageView: UIImageView) {
imageView.contentMode = .scaleAspectFit
imageView.image = UIImage(named: "parent")
imageView.drawOnCurrentImage(anotherImage: UIImage(named: "child"), mode: .changeOriginalImage) { parentSize, newImageSize in
print("parentSize = \(parentSize)")
print("newImageSize = \(newImageSize)")
let sideLength:CGFloat = 90
let indent:CGFloat = 50
return CGRect(x: parentSize.width-sideLength-indent, y: parentSize.height-sideLength-indent, width: sideLength, height: sideLength)
}
}
Result 2
Usage example 3
func sample3(imageView: UIImageView) {
imageView.contentMode = .scaleAspectFill
imageView.clipsToBounds = true
imageView.image = UIImage(named: "parent")
imageView.drawOnCurrentImage(anotherImage: UIImage(named: "child"), mode: .addSubview) { parentSize, newImageSize in
print("parentSize = \(parentSize)")
print("newImageSize = \(newImageSize)")
let sideLength:CGFloat = 90
let indent:CGFloat = 15
return CGRect(x: parentSize.width-sideLength-indent, y: indent, width: sideLength, height: sideLength)
}
}
Result 3
Full sample code
Don't forget to add the Solution code (the extensions above) here
import UIKit
class ViewController: UIViewController {
override func viewDidLoad() {
super.viewDidLoad()
let imageView = UIImageView(frame: UIScreen.main.bounds)
view.addSubview(imageView)
sample1(imageView: imageView)
// sample2(imageView: imageView)
// sample3(imageView: imageView)
}
func sample1(imageView: UIImageView) {
imageView.contentMode = .scaleAspectFit
imageView.image = UIImage(named: "parent")?.with(image: "child") { parentSize, newImageSize in
print("parentSize = \(parentSize)")
print("newImageSize = \(newImageSize)")
return CGRect(x: 50, y: 50, width: 90, height: 90)
}
}
func sample2(imageView: UIImageView) {
imageView.contentMode = .scaleAspectFit
imageView.image = UIImage(named: "parent")
imageView.drawOnCurrentImage(anotherImage: UIImage(named: "child"), mode: .changeOriginalImage) { parentSize, newImageSize in
print("parentSize = \(parentSize)")
print("newImageSize = \(newImageSize)")
let sideLength:CGFloat = 90
let indent:CGFloat = 50
return CGRect(x: parentSize.width-sideLength-indent, y: parentSize.height-sideLength-indent, width: sideLength, height: sideLength)
}
}
func sample3(imageView: UIImageView) {
imageView.contentMode = .scaleAspectFill
imageView.clipsToBounds = true
imageView.image = UIImage(named: "parent")
imageView.drawOnCurrentImage(anotherImage: UIImage(named: "child"), mode: .addSubview) { parentSize, newImageSize in
print("parentSize = \(parentSize)")
print("newImageSize = \(newImageSize)")
let sideLength:CGFloat = 90
let indent:CGFloat = 15
return CGRect(x: parentSize.width-sideLength-indent, y: indent, width: sideLength, height: sideLength)
}
}
}
Since iOS 10 you can use UIGraphicsImageRenderer, which has a nicer syntax and some great features!
Swift 4
let renderer = UIGraphicsImageRenderer(size: view.bounds.size)
let image = renderer.image { context in
// draw your image into your view
context.cgContext.draw(UIImage(named: "myImage")!.cgImage!, in: view.frame)
// draw even more...
context.cgContext.setFillColor(UIColor.red.cgColor)
context.cgContext.setStrokeColor(UIColor.black.cgColor)
context.cgContext.setLineWidth(10)
context.cgContext.addRect(view.frame)
context.cgContext.drawPath(using: .fillStroke)
}
Updated Answer: Once you get the From and To coordinates, here is how to draw a line in a UIImage with those coordinates. From and To coordinates are in image pixels.
func drawLineOnImage(size: CGSize, image: UIImage, from: CGPoint, to: CGPoint) -> UIImage {
// begin a graphics context of sufficient size
UIGraphicsBeginImageContext(size)
// draw original image into the context
image.drawAtPoint(CGPointZero)
// get the context for CoreGraphics
let context = UIGraphicsGetCurrentContext()
// set stroking width and color of the context
CGContextSetLineWidth(context, 1.0)
CGContextSetStrokeColorWithColor(context, UIColor.blueColor().CGColor)
// set stroking from & to coordinates of the context
CGContextMoveToPoint(context, from.x, from.y)
CGContextAddLineToPoint(context, to.x, to.y)
// apply the stroke to the context
CGContextStrokePath(context)
// get the image from the graphics context
let resultImage = UIGraphicsGetImageFromCurrentImageContext()
// end the graphics context
UIGraphicsEndImageContext()
return resultImage
}
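For reference, the same routine in current Swift syntax would look roughly like this (a sketch, not part of the original answer):
func drawLineOnImage(size: CGSize, image: UIImage, from: CGPoint, to: CGPoint) -> UIImage? {
    // Begin a graphics context of sufficient size
    UIGraphicsBeginImageContext(size)
    defer { UIGraphicsEndImageContext() }
    // Draw the original image as the background
    image.draw(at: .zero)
    guard let context = UIGraphicsGetCurrentContext() else { return nil }
    context.setLineWidth(1.0)
    context.setStrokeColor(UIColor.blue.cgColor)
    context.move(to: from)
    context.addLine(to: to)
    context.strokePath()
    return UIGraphicsGetImageFromCurrentImageContext()
}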

Cut a UIImage into a circle

I want to cut a UIImage into a circle so that I can then use it as an annotation. Every answer on this site that I've found describes creating a UIImageView, then modifying and displaying that, but you can't set the image of an annotation to a UIImageView, only a UIImage.
How should I go about this?
Xcode 11 • Swift 5.1 or later
edit/update: For iOS 10+ we can use UIGraphicsImageRenderer. For older Swift syntax, check the edit history.
extension UIImage {
var isPortrait: Bool { size.height > size.width }
var isLandscape: Bool { size.width > size.height }
var breadth: CGFloat { min(size.width, size.height) }
var breadthSize: CGSize { .init(width: breadth, height: breadth) }
var breadthRect: CGRect { .init(origin: .zero, size: breadthSize) }
var circleMasked: UIImage? {
guard let cgImage = cgImage?
.cropping(to: .init(origin: .init(x: isLandscape ? ((size.width-size.height)/2).rounded(.down) : 0,
y: isPortrait ? ((size.height-size.width)/2).rounded(.down) : 0),
size: breadthSize)) else { return nil }
let format = imageRendererFormat
format.opaque = false
return UIGraphicsImageRenderer(size: breadthSize, format: format).image { _ in
UIBezierPath(ovalIn: breadthRect).addClip()
UIImage(cgImage: cgImage, scale: format.scale, orientation: imageOrientation)
.draw(in: .init(origin: .zero, size: breadthSize))
}
}
}
Playground Testing
let profilePicture = UIImage(data: try! Data(contentsOf: URL(string:"http://i.stack.imgur.com/Xs4RX.jpg")!))!
profilePicture.circleMasked
Make sure to import QuartzCore if needed.
func maskRoundedImage(image: UIImage, radius: CGFloat) -> UIImage {
let imageView: UIImageView = UIImageView(image: image)
let layer = imageView.layer
layer.masksToBounds = true
layer.cornerRadius = radius
UIGraphicsBeginImageContext(imageView.bounds.size)
layer.render(in: UIGraphicsGetCurrentContext()!)
let roundedImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return roundedImage!
}
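A hypothetical call site, passing half the image width as the radius so a square source comes out as a circle:
let avatar = UIImage(named: "profile")!
let rounded = maskRoundedImage(image: avatar, radius: avatar.size.width / 2)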
UIImage extension:
extension UIImage {
func circularImage(size size: CGSize?) -> UIImage {
let newSize = size ?? self.size
let minEdge = min(newSize.height, newSize.width)
let size = CGSize(width: minEdge, height: minEdge)
UIGraphicsBeginImageContextWithOptions(size, false, 0.0)
let context = UIGraphicsGetCurrentContext()
self.drawInRect(CGRect(origin: CGPoint.zero, size: size), blendMode: .Copy, alpha: 1.0)
CGContextSetBlendMode(context, .Copy)
CGContextSetFillColorWithColor(context, UIColor.clearColor().CGColor)
let rectPath = UIBezierPath(rect: CGRect(origin: CGPoint.zero, size: size))
let circlePath = UIBezierPath(ovalInRect: CGRect(origin: CGPoint.zero, size: size))
rectPath.appendPath(circlePath)
rectPath.usesEvenOddFillRule = true
rectPath.fill()
let result = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return result
}
}
Usage:
UIImageView:
@IBDesignable class CircularImageView: UIImageView {
override var image: UIImage? {
didSet {
super.image = image?.circularImage(size: nil)
}
}
}
UIButton:
@IBDesignable class CircularImageButton: UIButton {
override func setImage(image: UIImage?, forState state: UIControlState) {
let circularImage = image?.circularImage(size: nil)
super.setImage(circularImage, forState: state)
}
}
Based on Nikos answer:
public extension UIImage {
func roundedImage() -> UIImage {
let imageView: UIImageView = UIImageView(image: self)
let layer = imageView.layer
layer.masksToBounds = true
layer.cornerRadius = imageView.frame.width / 2
UIGraphicsBeginImageContext(imageView.bounds.size)
layer.render(in: UIGraphicsGetCurrentContext()!)
let roundedImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return roundedImage!
}
}
//Usage
let roundedImage = image.roundedImage()
You can use this code to make a circular image:
extension UIImage {
func circleImage(_ cornerRadius: CGFloat, size: CGSize) -> UIImage? {
let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
UIGraphicsBeginImageContextWithOptions(size, false, 0)
if let context = UIGraphicsGetCurrentContext() {
var path: UIBezierPath
if size.height == size.width {
if cornerRadius == size.width/2 {
path = UIBezierPath(arcCenter: CGPoint(x: size.width/2, y: size.height/2), radius: cornerRadius, startAngle: 0, endAngle: 2.0*CGFloat(Double.pi), clockwise: true)
}else {
path = UIBezierPath(roundedRect: rect, cornerRadius: cornerRadius)
}
}else {
path = UIBezierPath(roundedRect: rect, cornerRadius: cornerRadius)
}
context.addPath(path.cgPath)
context.clip()
self.draw(in: rect)
// Get the cropped image from the context
guard let uncompressedImage = UIGraphicsGetImageFromCurrentImageContext() else {
UIGraphicsEndImageContext()
return nil
}
// Close the context
UIGraphicsEndImageContext()
return uncompressedImage
}else {
return nil
}
}
}
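A hypothetical call site, passing half the target width as the corner radius to get a fully circular result:
let source = UIImage(named: "profile")!
let diameter: CGFloat = 100
let circular = source.circleImage(diameter / 2, size: CGSize(width: diameter, height: diameter))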
All these answers seem really complex for a straightforward solution. I just replicated my Objective-C code and adjusted it for Swift:
self.myImageView?.layer.cornerRadius = (self.myImageView?.frame.size.width)! / 2;
self.myImageView?.clipsToBounds = true
Xcode 8.1, Swift 3.0.1
My code looks like this:
let image = yourImage.resize(CGSize(width: 20, height: 20))?.circled(forRadius: 20)
Add the following methods in a UIImage extension:
func resize(_ size: CGSize) -> UIImage? {
let rect = CGRect(origin: .zero, size: size)
return redraw(in: rect)
}
func redraw(in rect: CGRect) -> UIImage? {
UIGraphicsBeginImageContextWithOptions(rect.size, false, UIScreen.main.scale)
guard let context = UIGraphicsGetCurrentContext(), let cgImage = cgImage else { return nil }
let rect = CGRect(origin: .zero, size: size)
let flipVertical = CGAffineTransform(a: 1, b: 0, c: 0, d: -1, tx: 0, ty: rect.size.height)
context.concatenate(flipVertical)
context.draw(cgImage, in: rect)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return image
}
func circled(forRadius radius: CGFloat) -> UIImage? {
let radiusSize = CGSize(width: radius, height: radius)
let rect = CGRect(origin: .zero, size: size)
UIGraphicsBeginImageContextWithOptions(size, false, UIScreen.main.scale)
guard let context = UIGraphicsGetCurrentContext(), let cgImage = cgImage else { return nil }
let flipVertical = CGAffineTransform(a: 1, b: 0, c: 0, d: -1, tx: 0, ty: rect.size.height)
context.concatenate(flipVertical)
let bezierPath = UIBezierPath(roundedRect: rect, byRoundingCorners: [.allCorners], cornerRadii: radiusSize)
context.addPath(bezierPath.cgPath)
context.clip()
context.drawPath(using: .fillStroke)
context.draw(cgImage, in: rect)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return image
}
I managed to answer my own question by finding a use of BezierPath!
if let xyz = UIImage(contentsOfFile: readPath) {
var Rect: CGRect = CGRectMake(0, 0, xyz.size.width, xyz.size.height)
var x = UIBezierPath(roundedRect: Rect, cornerRadius: 200).addClip()
UIGraphicsBeginImageContextWithOptions(xyz.size, false, xyz.scale)
xyz.drawInRect(Rect)
var ImageNew = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
annotation.image = ImageNew
}
Swift 5.3, Xcode 12.2, Handles all imageOrientations
Based on the answer by Leo Dabus.
Thanks, that works perfectly, but only for images with imageOrientation .up or .down. For images with .right or .left orientation there are distortions in the result, and original photos from the iPhone/iPad camera come with .right orientation.
The code below takes the imageOrientation property into account:
extension UIImage {
func cropToCircle() -> UIImage? {
let isLandscape = size.width > size.height
let isUpOrDownImageOrientation = [0,1,4,5].contains(imageOrientation.rawValue)
let breadth: CGFloat = min(size.width, size.height)
let breadthSize = CGSize(width: breadth, height: breadth)
let breadthRect = CGRect(origin: .zero, size: breadthSize)
let xOriginPoint = CGFloat(isLandscape ?
(isUpOrDownImageOrientation ? ((size.width-size.height)/2).rounded(.down) : 0) :
(isUpOrDownImageOrientation ? 0 : ((size.height-size.width)/2).rounded(.down)))
let yOriginPoint = CGFloat(isLandscape ?
(isUpOrDownImageOrientation ? 0 : ((size.width-size.height)/2).rounded(.down)) :
(isUpOrDownImageOrientation ? ((size.height-size.width)/2).rounded(.down) : 0))
guard let cgImage = cgImage?.cropping(to: CGRect(origin: CGPoint(x: xOriginPoint, y: yOriginPoint),
size: breadthSize)) else { return nil }
let format = imageRendererFormat
format.opaque = false
return UIGraphicsImageRenderer(size: breadthSize, format: format).image {_ in
UIBezierPath(ovalIn: breadthRect).addClip()
UIImage(cgImage: cgImage, scale: format.scale, orientation: imageOrientation).draw(in: CGRect(origin: .zero, size: breadthSize))
}
}
}
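Usage mirrors circleMasked above (the asset name is illustrative):
let photo = UIImage(named: "portrait")!
imageView.image = photo.cropToCircle()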
Swift 3, conforming to the MVC pattern.
Create an external file:
@IBDesignable
class RoundImage: UIImageView{
@IBInspectable var cornerRadius: CGFloat = 0 {
didSet{
self.layer.cornerRadius = cornerRadius
}
}
// set border width
@IBInspectable var borderWidth: CGFloat = 0 {
didSet{
self.layer.borderWidth = borderWidth
}
}
// set border color
@IBInspectable var borderColor: UIColor = UIColor.clear {
didSet{
self.layer.borderColor = borderColor.cgColor
}
}
override func awakeFromNib() {
self.clipsToBounds = true
}
}// class
Set the class on the image view in Interface Builder on your storyboard, and set cornerRadius as you please (half the width if you want a circle).
Done!
I was using the RoundedImageView class, and the problem I faced was that an image browsed from the gallery did not show as a circle. I simply changed the properties of the UIImageView/RoundedImageView: View -> Content Mode -> Aspect Fill (see screenshot).
The accepted answer by @Leo Dabus is good, but here's a better approach ✅
import UIKit
public extension UIImage {
/// Returns a circle image with diameter, color and optional padding
class func circle(_ color: UIColor, diameter: CGFloat, padding: CGFloat = .zero) -> UIImage {
let rectangle = CGSize(width: diameter + padding * 2, height: diameter + padding * 2)
return UIGraphicsImageRenderer(size: rectangle).image { context in
let rect = CGRect(x: padding, y: padding, width: diameter, height: diameter)
color.setFill()
UIBezierPath(ovalIn: rect).fill()
}
}
}
How to use
let image = UIImage.circle(.black, diameter: 8.0)
Fewer code lines
Ability to add padding
Non-optional result
