UIGraphicsImageRenderer used in a loop ends with memory issue [duplicate] - ios

This question already has answers here: Swift5 MacOS ImageResize memory issue (2 answers). Closed last year.
I have a simple function like this:
let filteredImages = //array of images
let numbersArray = //array of arrays of numbers, e.g. [[1, 2], [2, 3]]; total number of elements is 57
for (index, numbers) in numbersArray.enumerated() {
print(">>>>1 INDEX: \(index)")
if let cardimage = Box.card(for: filteredImages, numbers: numbers) {
//nothing here for now
}
}
class Box {
class func card(for images: [UIImage], numbers: [Int]) -> CardImage? {
var filteredImages = [UIImage]()
for number in numbers {
guard images.count > number - 1 else {
return nil
}
filteredImages.append(images[number - 1])
}
if filteredImages.count < 8 {
return nil
}
let creator = ImageCreator(radius: 500, backgroundColor: UIColor.athensGray)
return creator.card(from: filteredImages)
}
}
class ImageCreator {
private let radius: CGFloat
private let backgroundColor: UIColor
private let sets: [Set] //array of predefined objects of my type Set (numbers like x, y, width, height, rotation); its initialization is omitted here
init(radius: CGFloat, backgroundColor: UIColor) {
self.radius = radius
self.backgroundColor = backgroundColor
}
func card(from images: [UIImage]) -> CardImage {
let renderer = UIGraphicsImageRenderer(size: CGSize(width: radius, height: radius))
var coordinates = [ImageCoordinate]()
let image = renderer.image { [weak self] context in
let size = renderer.format.bounds.size
self?.backgroundColor.setFill()
context.cgContext.fillEllipse(in: CGRect(x: 0, y: 0, width: size.width, height: size.height))
let set = self?.sets.random
for (index, image) in images.enumerated() {
if let positions = set?.positions[index], let cgimage = image.flippedVertically?.cgImage {
context.cgContext.translateBy(x: CGFloat(positions.x), y: CGFloat(positions.y))
context.cgContext.rotate(by: CGFloat(positions.r))
context.cgContext.translateBy(x: CGFloat(-positions.w/2), y: CGFloat(-positions.h/2))
let width = CGFloat(positions.w)
let height = CGFloat(positions.h)
var newWidth: CGFloat = 0
var newHeight: CGFloat = 0
let imageWidth = image.size.width
let imageHeight = image.size.height
let ratio = imageWidth / imageHeight
if ratio > 1 {
newWidth = CGFloat(width)
newHeight = newWidth / imageWidth * imageHeight
} else {
newHeight = CGFloat(height)
newWidth = newHeight / imageHeight * imageWidth
}
context.cgContext.translateBy(x: CGFloat((width - newWidth) / 2), y: CGFloat((height - newHeight) / 2))
context.cgContext.draw(cgimage, in: CGRect(x: 0, y: 0, width: newWidth, height: newHeight))
context.cgContext.translateBy(x: CGFloat(-(width - newWidth) / 2), y: CGFloat(-(height - newHeight) / 2))
context.cgContext.translateBy(x: CGFloat(positions.w/2), y: CGFloat(positions.h/2))
context.cgContext.rotate(by: CGFloat(-positions.r))
context.cgContext.translateBy(x: CGFloat(-positions.x), y: CGFloat(-positions.y))
coordinates.append(ImageCoordinate(x: positions.x, y: positions.y, w: Double(newWidth), h: Double(newHeight), r: positions.r))
}
}
}
return CardImage(image: image, coordinates: coordinates)
}
}
I don't think it is all that complicated, since all I do here is some work with the delivered set of images (always 8 images), but when I run it in a loop for i in 1...57 it ends up with a memory issue (the app is killed after ~31 iterations). Why?
How can I avoid that? Is there a way to fix that problem?

I suspect the problem you are encountering is caused by accumulating too many temporary (autoreleased) objects (in this case the CGImages in the card method) in the loop that is rendering your cards.
I would try adding an autoreleasepool around the rendering of the card images.
for (index, numbers) in numbersArray.enumerated() {
autoreleasepool {
print(">>>>1 INDEX: \(index)")
if let cardimage = Box.card(for: filteredImages, numbers: numbers) {
//nothing here for now
}
}
}
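Note that autoreleasepool returns its closure's value, so if you need to keep the generated cards you can still collect them while the temporaries are drained on every pass. A sketch using the same names as above (cards is an illustrative variable):
var cards = [CardImage]()
for (index, numbers) in numbersArray.enumerated() {
    // The pool is drained as soon as the closure returns, releasing the
    // temporaries created while rendering this one card.
    let card = autoreleasepool { () -> CardImage? in
        print(">>>>1 INDEX: \(index)")
        return Box.card(for: filteredImages, numbers: numbers)
    }
    if let card = card {
        cards.append(card)
    }
}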

Related

Want to change Text color according to Background image in swiftUI [duplicate]

I'm attempting to overlay a chevron button that will allow the user to dismiss the current view. The colour of the chevron should be light on dark images, and dark on light images. I've attached a screenshot of what I'm describing.
However, there is a significant performance impact when trying to calculate the lightness/darkness of an image, which I'm doing like so (operating on `CGImage`):
var isDark: Bool {
guard let imageData = dataProvider?.data else { return false }
guard let ptr = CFDataGetBytePtr(imageData) else { return false }
let length = CFDataGetLength(imageData)
let threshold = Int(Double(width * height) * 0.45)
var darkPixels = 0
for i in stride(from: 0, to: length, by: 4) {
let r = ptr[i]
let g = ptr[i + 1]
let b = ptr[i + 2]
let luminance = (0.299 * Double(r) + 0.587 * Double(g) + 0.114 * Double(b))
if luminance < 150 {
darkPixels += 1
if darkPixels > threshold {
return true
}
}
}
return false
}
In addition, it doesn't do well when the particular area under the chevron is dark, but the rest of the image is light, for example.
I'd like to calculate it for just a small subsection of the image, since the chevron is very small. I tried cropping the image using CGImage's cropping(to rect: CGRect), but the challenge is that the image is set to aspect fill, meaning the top of the UIImageView's frame isn't the top of the UIImage (e.g. the image might be zoomed in and centred). Is there a way that I can isolate just the part of the image that appears below the chevron's frame, after the image has been adjusted by the aspect fill?
Edit
I was able to achieve this thanks to the first link in the accepted answer. I created a series of extensions that I think should work for situations other than mine.
extension UIImage {
var isDark: Bool {
return cgImage?.isDark ?? false
}
}
extension CGImage {
var isDark: Bool {
guard let imageData = dataProvider?.data else { return false }
guard let ptr = CFDataGetBytePtr(imageData) else { return false }
let length = CFDataGetLength(imageData)
let threshold = Int(Double(width * height) * 0.45)
var darkPixels = 0
for i in stride(from: 0, to: length, by: 4) {
let r = ptr[i]
let g = ptr[i + 1]
let b = ptr[i + 2]
let luminance = (0.299 * Double(r) + 0.587 * Double(g) + 0.114 * Double(b))
if luminance < 150 {
darkPixels += 1
if darkPixels > threshold {
return true
}
}
}
return false
}
func cropping(to rect: CGRect, scale: CGFloat) -> CGImage? {
let scaledRect = CGRect(x: rect.minX * scale, y: rect.minY * scale, width: rect.width * scale, height: rect.height * scale)
return self.cropping(to: scaledRect)
}
}
extension UIImageView {
func hasDarkImage(at subsection: CGRect) -> Bool {
guard let image = image, let aspectSize = aspectFillSize() else { return false }
let scale = image.size.width / frame.size.width
let cropRect = CGRect(x: (aspectSize.width - frame.width) / 2,
y: (aspectSize.height - frame.height) / 2,
width: aspectSize.width,
height: frame.height)
let croppedImage = image.cgImage?
.cropping(to: cropRect, scale: scale)?
.cropping(to: subsection, scale: scale)
return croppedImage?.isDark ?? false
}
private func aspectFillSize() -> CGSize? {
guard let image = image else { return nil }
var aspectFillSize = CGSize(width: frame.width, height: frame.height)
let widthScale = frame.width / image.size.width
let heightScale = frame.height / image.size.height
if heightScale > widthScale {
aspectFillSize.width = heightScale * image.size.width
}
else if widthScale > heightScale {
aspectFillSize.height = widthScale * image.size.height
}
return aspectFillSize
}
}
There are a couple of options here for finding the size of your image once it's been fitted to the view: How to know the image size after applying aspect fit for the image in an UIImageView
Once you've got that, you can figure out where the chevron lies (you may need to convert its frame first: https://developer.apple.com/documentation/uikit/uiview/1622498-convert).
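For example, a one-line sketch of that conversion (chevronButton and imageView are hypothetical names):
let chevronRectInImageView = chevronButton.convert(chevronButton.bounds, to: imageView)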
If the performance was still lacking, I'd look into using CoreImage to perform the calculations: https://www.hackingwithswift.com/example-code/media/how-to-read-the-average-color-of-a-uiimage-using-ciareaaverage
There are a few ways of doing it with Core Image, but getting the average is the simplest.
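For reference, a minimal sketch of that CIAreaAverage approach (the method and parameter names are mine, and note the extent rect is in Core Image's bottom-left-origin pixel coordinates), reusing the 150 luminance threshold from the question:
import CoreImage
import UIKit

extension CGImage {
    /// Darkness check based on the average color of `region` (in pixel coordinates).
    func isDarkByAverage(in region: CGRect, threshold: Double = 150) -> Bool {
        let inputImage = CIImage(cgImage: self)
        let extent = CIVector(x: region.origin.x, y: region.origin.y,
                              z: region.size.width, w: region.size.height)
        guard let filter = CIFilter(name: "CIAreaAverage") else { return false }
        filter.setValue(inputImage, forKey: kCIInputImageKey)
        filter.setValue(extent, forKey: kCIInputExtentKey)
        guard let output = filter.outputImage else { return false }

        // CIAreaAverage reduces the region to a single pixel; render it into a 4-byte buffer.
        var bitmap = [UInt8](repeating: 0, count: 4)
        let context = CIContext(options: [.workingColorSpace: kCFNull!])
        context.render(output, toBitmap: &bitmap, rowBytes: 4,
                       bounds: CGRect(x: 0, y: 0, width: 1, height: 1),
                       format: .RGBA8, colorSpace: nil)

        // Same luminance formula and threshold as the per-pixel version above.
        let luminance = 0.299 * Double(bitmap[0]) + 0.587 * Double(bitmap[1]) + 0.114 * Double(bitmap[2])
        return luminance < threshold
    }
}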

How to set outline as per image shape?

I am stuck at this point. I want an outline that follows the image shape, and I want output like in this video.
I tried this code, but it does not work smoothly.
extension CGPoint {
/**
Rotates the point from the center `origin` by `byDegrees` degrees along the Z axis.
- Parameters:
- origin: The center of the rotation;
- byDegrees: Amount of degrees to rotate around the Z axis.
- Returns: The rotated point.
*/
func rotated(around origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
let dx = x - origin.x
let dy = y - origin.y
let radius = sqrt(dx * dx + dy * dy)
let azimuth = atan2(dy, dx) // in radians
let newAzimuth = azimuth + byDegrees * .pi / 180.0 // to radians
let x = origin.x + radius * cos(newAzimuth)
let y = origin.y + radius * sin(newAzimuth)
return CGPoint(x: x, y: y)
}
}
public extension UIImage {
/**
Returns the flat colorized version of the image, or self when something was wrong
- Parameters:
- color: The color to use. By default, uses `UIColor.white`
- Returns: the flat colorized version of the image, or self if something was wrong
*/
func colorized(with color: UIColor = .white) -> UIImage {
UIGraphicsBeginImageContextWithOptions(size, false, scale)
defer {
UIGraphicsEndImageContext()
}
guard let context = UIGraphicsGetCurrentContext(), let cgImage = cgImage else { return self }
let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
color.setFill()
context.translateBy(x: 0, y: size.height)
context.scaleBy(x: 1.0, y: -1.0)
context.clip(to: rect, mask: cgImage)
context.fill(rect)
guard let colored = UIGraphicsGetImageFromCurrentImageContext() else { return self }
return colored
}
/**
Returns the stroked version of the transparent image with the given stroke color and thickness.
- Parameters:
- color: The color to use. By default, uses `UIColor.white`
- thickness: the thickness of the border. Defaults to `2`
- quality: The number of degrees (out of 360): the smaller the better, but the slower. Defaults to `10`.
- Returns: the stroked version of the image, or self if something was wrong
*/
func stroked(with color: UIColor = .white, thickness: CGFloat = 2, quality: CGFloat = 10) -> UIImage {
guard let cgImage = cgImage else { return self }
// Colorize the stroke image to reflect border color
let strokeImage = colorized(with: color)
guard let strokeCGImage = strokeImage.cgImage else { return self }
/// Rendering quality of the stroke
let step = quality == 0 ? 10 : abs(quality)
let oldRect = CGRect(x: thickness, y: thickness, width: size.width, height: size.height).integral
let newSize = CGSize(width: size.width + 2 * thickness, height: size.height + 2 * thickness)
let translationVector = CGPoint(x: thickness, y: 0)
UIGraphicsBeginImageContextWithOptions(newSize, false, scale)
guard let context = UIGraphicsGetCurrentContext() else { return self }
defer {
UIGraphicsEndImageContext()
}
context.translateBy(x: 0, y: newSize.height)
context.scaleBy(x: 1.0, y: -1.0)
context.interpolationQuality = .high
for angle: CGFloat in stride(from: 0, to: 360, by: step) {
let vector = translationVector.rotated(around: .zero, byDegrees: angle)
let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
context.concatenate(transform)
context.draw(strokeCGImage, in: oldRect)
let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
context.concatenate(resetTransform)
}
context.draw(cgImage, in: oldRect)
guard let stroked = UIGraphicsGetImageFromCurrentImageContext() else { return self }
return stroked
}
}
Try this one.
Please do NOT add constraints to the imageView except for top/left in the Storyboard.
//
// ViewController.swift
// AddBorderANDZoom
//
// Created by ing.conti on 06/01/22.
//
import UIKit
import CoreGraphics
import AVFoundation
class ViewController: UIViewController {
@IBOutlet weak var imageView: UIImageView!
@IBOutlet weak var slider: UISlider!
let maZoom = 20.0
override func viewDidLoad() {
super.viewDidLoad()
self.slider.value = 1
self.imageView.image = UIImage(named: "apple")?.outline(borderSize: self.maZoom)
}
@IBAction func didSlide(_ sender: UISlider) {
let value = CGFloat(sender.value * 20)
self.imageView.image = UIImage(named: "apple")?.outline(borderSize: value)
}
}
/////
extension UIImage {
func outline(borderSize: CGFloat = 6.0) -> UIImage? {
let color = UIColor.black
let W = self.size.width
let H = self.size.height
let scale = 1 + (borderSize/W)
let outlinedImageRect = CGRect(x: -borderSize/2,
y: -borderSize/2,
width: W * scale,
height: H * scale)
let imageRect = CGRect(x: 0,
y: 0,
width: W,
height: H)
UIGraphicsBeginImageContextWithOptions(outlinedImageRect.size, false, scale)
self.draw(in: outlinedImageRect)
let context = UIGraphicsGetCurrentContext()!
context.setBlendMode(.sourceIn)
context.setFillColor(color.cgColor)
context.fill(outlinedImageRect)
self.draw(in: imageRect)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage
}
}

how to add colored border to uiimage in swift

It is pretty easy to add a border to a UIImageView using layers (borderWidth, borderColor, etc.). Is there any way to add a border to the image itself, not to the image view? Does somebody know?
Update:
I tried to follow the suggestion below and used an extension. Thank you for that, but I did not get the desired result. Here is my code. What is wrong?
import UIKit
class ViewController: UIViewController {
var imageView: UIImageView!
var sizeW = CGFloat()
var sizeH = CGFloat()
override func viewDidLoad() {
super.viewDidLoad()
sizeW = view.frame.width
sizeH = view.frame.height
setImage()
}
func setImage(){
//add image view
imageView = UIImageView(frame: CGRect(x: 0, y: 0, width: sizeW/2, height: sizeH/2))
imageView.center = view.center
imageView.tintColor = UIColor.orange
imageView.contentMode = UIViewContentMode.scaleAspectFit
let imgOriginal = UIImage(named: "plum")!.withRenderingMode(.alwaysTemplate)
let borderImage = imgOriginal.imageWithBorder(width: 2, color: UIColor.blue)
imageView.image = borderImage
view.addSubview(imageView)
}
}
extension UIImage {
func imageWithBorder(width: CGFloat, color: UIColor) -> UIImage? {
let square = CGSize(width: min(size.width, size.height) + width * 2, height: min(size.width, size.height) + width * 2)
let imageView = UIImageView(frame: CGRect(origin: CGPoint(x: 0, y: 0), size: square))
imageView.contentMode = .center
imageView.image = self
imageView.layer.borderWidth = width
imageView.layer.borderColor = color.cgColor
UIGraphicsBeginImageContextWithOptions(imageView.bounds.size, false, scale)
guard let context = UIGraphicsGetCurrentContext() else { return nil }
imageView.layer.render(in: context)
let result = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return result
}
}
The second image with the red border is more or less what I need:
Strongly inspired by @herme5, refactored into more compact Swift 5/iOS12+ code as follows (fixed vertical flip issue as well):
public extension UIImage {
/**
Returns the flat colorized version of the image, or self when something was wrong
- Parameters:
- color: The color to use. By default, uses `UIColor.white`
- Returns: the flat colorized version of the image, or self if something was wrong
*/
func colorized(with color: UIColor = .white) -> UIImage {
UIGraphicsBeginImageContextWithOptions(size, false, scale)
defer {
UIGraphicsEndImageContext()
}
guard let context = UIGraphicsGetCurrentContext(), let cgImage = cgImage else { return self }
let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
color.setFill()
context.translateBy(x: 0, y: size.height)
context.scaleBy(x: 1.0, y: -1.0)
context.clip(to: rect, mask: cgImage)
context.fill(rect)
guard let colored = UIGraphicsGetImageFromCurrentImageContext() else { return self }
return colored
}
/**
Returns the stroked version of the transparent image with the given stroke color and thickness.
- Parameters:
- color: The color to use. By default, uses `UIColor.white`
- thickness: the thickness of the border. Defaults to `2`
- quality: The number of degrees (out of 360): the smaller the better, but the slower. Defaults to `10`.
- Returns: the stroked version of the image, or self if something was wrong
*/
func stroked(with color: UIColor = .white, thickness: CGFloat = 2, quality: CGFloat = 10) -> UIImage {
guard let cgImage = cgImage else { return self }
// Colorize the stroke image to reflect border color
let strokeImage = colorized(with: color)
guard let strokeCGImage = strokeImage.cgImage else { return self }
/// Rendering quality of the stroke
let step = quality == 0 ? 10 : abs(quality)
let oldRect = CGRect(x: thickness, y: thickness, width: size.width, height: size.height).integral
let newSize = CGSize(width: size.width + 2 * thickness, height: size.height + 2 * thickness)
let translationVector = CGPoint(x: thickness, y: 0)
UIGraphicsBeginImageContextWithOptions(newSize, false, scale)
guard let context = UIGraphicsGetCurrentContext() else { return self }
defer {
UIGraphicsEndImageContext()
}
context.translateBy(x: 0, y: newSize.height)
context.scaleBy(x: 1.0, y: -1.0)
context.interpolationQuality = .high
for angle: CGFloat in stride(from: 0, to: 360, by: step) {
let vector = translationVector.rotated(around: .zero, byDegrees: angle)
let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
context.concatenate(transform)
context.draw(strokeCGImage, in: oldRect)
let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
context.concatenate(resetTransform)
}
context.draw(cgImage, in: oldRect)
guard let stroked = UIGraphicsGetImageFromCurrentImageContext() else { return self }
return stroked
}
}
extension CGPoint {
/**
Rotates the point from the center `origin` by `byDegrees` degrees along the Z axis.
- Parameters:
- origin: The center of the rotation;
- byDegrees: Amount of degrees to rotate around the Z axis.
- Returns: The rotated point.
*/
func rotated(around origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
let dx = x - origin.x
let dy = y - origin.y
let radius = sqrt(dx * dx + dy * dy)
let azimuth = atan2(dy, dx) // in radians
let newAzimuth = azimuth + byDegrees * .pi / 180.0 // to radians
let x = origin.x + radius * cos(newAzimuth)
let y = origin.y + radius * sin(newAzimuth)
return CGPoint(x: x, y: y)
}
}
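For the question's setImage(), a quick usage sketch with the question's "plum" asset (untested):
let imgOriginal = UIImage(named: "plum")!
imageView.image = imgOriginal.stroked(with: .blue, thickness: 4)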
Here is a UIImage extension I wrote in Swift 4. As IOSDealBreaker said, this is all about image processing, and some particular cases may occur. You should have a PNG image with a transparent background, and manage the size if the result is larger than the original.
First get a colorised "shade" version of your image.
Then draw and redraw the shade image all around a given origin point (in our case around (0,0), at a distance equal to the border thickness).
Draw your source image at the origin point so that it appears in the foreground.
You may have to enlarge your image if the borders go out of the original rect.
My method uses a lot of util methods and class extensions. Here is some maths to rotate a vector (which is actually a point) around another point: Rotating a CGPoint around another CGPoint
extension CGPoint {
func rotated(around origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
let dx = self.x - origin.x
let dy = self.y - origin.y
let radius = sqrt(dx * dx + dy * dy)
let azimuth = atan2(dy, dx) // in radians
let newAzimuth = azimuth + (byDegrees * CGFloat.pi / 180.0) // convert it to radians
let x = origin.x + radius * cos(newAzimuth)
let y = origin.y + radius * sin(newAzimuth)
return CGPoint(x: x, y: y)
}
}
I wrote my custom CIFilter to colorize an image which has a transparent background: Colorize a UIImage in Swift
class ColorFilter: CIFilter {
var inputImage: CIImage?
var inputColor: CIColor?
private let kernel: CIColorKernel = {
let kernelString =
"""
kernel vec4 colorize(__sample pixel, vec4 color) {
pixel.rgb = pixel.a * color.rgb;
pixel.a *= color.a;
return pixel;
}
"""
return CIColorKernel(source: kernelString)!
}()
override var outputImage: CIImage? {
guard let inputImage = inputImage, let inputColor = inputColor else { return nil }
let inputs = [inputImage, inputColor] as [Any]
return kernel.apply(extent: inputImage.extent, arguments: inputs)
}
}
extension UIImage {
func colorized(with color: UIColor) -> UIImage {
guard let cgInput = self.cgImage else {
return self
}
let colorFilter = ColorFilter()
colorFilter.inputImage = CIImage(cgImage: cgInput)
colorFilter.inputColor = CIColor(color: color)
if let ciOutputImage = colorFilter.outputImage {
let context = CIContext(options: nil)
let cgImg = context.createCGImage(ciOutputImage, from: ciOutputImage.extent)
return UIImage(cgImage: cgImg!, scale: self.scale, orientation: self.imageOrientation).alpha(color.rgba.alpha).withRenderingMode(self.renderingMode)
} else {
return self
}
}
}
At this point you should have everything to make this work:
extension UIImage {
func stroked(with color: UIColor, size: CGFloat) -> UIImage {
let strokeImage = self.colorized(with: color)
let oldRect = CGRect(x: size, y: size, width: self.size.width, height: self.size.height).integral
let newSize = CGSize(width: self.size.width + (2*size), height: self.size.height + (2*size))
let translationVector = CGPoint(x: size, y: 0)
UIGraphicsBeginImageContextWithOptions(newSize, false, self.scale)
if let context = UIGraphicsGetCurrentContext() {
context.interpolationQuality = .high
let step = 10 // reduce the step to increase quality
for angle in stride(from: 0, to: 360, by: step) {
let vector = translationVector.rotated(around: .zero, byDegrees: CGFloat(angle))
let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
context.concatenate(transform)
context.draw(strokeImage.cgImage!, in: oldRect)
let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
context.concatenate(resetTransform)
}
context.draw(self.cgImage!, in: oldRect)
let newImage = UIImage(cgImage: context.makeImage()!, scale: self.scale, orientation: self.imageOrientation)
UIGraphicsEndImageContext()
return newImage.withRenderingMode(self.renderingMode)
}
UIGraphicsEndImageContext()
return self
}
}
Adding borders to images belongs to the image-processing area of iOS. It's not as easy as borders on a UIView; it's a pretty deep topic, but if you're willing to go the distance, here is a library and a hint for the journey:
https://github.com/BradLarson/GPUImage
Try using GPUImageThresholdEdgeDetectionFilter.
Or try OpenCV: https://docs.opencv.org/2.4/doc/tutorials/ios/image_manipulation/image_manipulation.html
Use this simple extension for UIImage
extension UIImage {
func outline() -> UIImage? {
UIGraphicsBeginImageContext(size)
let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
self.draw(in: rect, blendMode: .normal, alpha: 1.0)
let context = UIGraphicsGetCurrentContext()
context?.setStrokeColor(red: 1.0, green: 0.5, blue: 1.0, alpha: 1.0)
context?.setLineWidth(5.0)
context?.stroke(rect)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage
}
}
It will give you an image with a pink border.

Divide image in array of images with swift

I'm trying to divide an image to create 16 images out of it (in a matrix). I'm using Swift 2.1. Here's the code:
let cellSize = Int(originalImage.size.height) / 4
for i in 0..<4 {
for p in 0..<4 {
let tmpImgRef: CGImage = originalImage.CGImage!
let rect: CGImage = CGImageCreateWithImageInRect(tmpImgRef, CGRectMake(CGFloat(i * cellSize), CGFloat(p * cellSize), CGFloat(cellSize), CGFloat(cellSize)))!
gameCells.append(UIImage(CGImage: rect))
}
}
This works, but the images that are returned are only part of the original image. I've been searching and I know that's because when I create a CGImage it has a different size than the UIImage, but I don't know how to fix it. If I could compute cellSize from the height of the CGImage instead of the UIImage I suppose it would be fixed, but I can't get the CGImage height.
Thanks for the help!
The fundamental issue is the difference between how UIImage and CGImage interpret their size. UIImage uses "points" and CGImage uses pixels. And the conversion factor is the scale.
For example, if a UIImage has a scale of 3, then for every "point" in any given direction of the UIImage there are three pixels in that direction in the underlying CGImage. So for a UIImage that has a scale of 3 and a size of 100x100 points, the underlying CGImage has a size of 300x300 pixels.
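In other words, a crop rect measured in points has to be multiplied by the scale before it is handed to the CGImage; a minimal illustration (hypothetical helper name):
func pixelRect(for pointRect: CGRect, in image: UIImage) -> CGRect {
    // UIImage points -> CGImage pixels: multiply every component by the scale.
    return CGRect(x: pointRect.origin.x * image.scale,
                  y: pointRect.origin.y * image.scale,
                  width: pointRect.width * image.scale,
                  height: pointRect.height * image.scale)
}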
To return a simple array of images sliced by n x n (e.g. if n is three, there will be nine images in the array), you can do something like the following in Swift 3:
/// Slice image into array of tiles
///
/// - Parameters:
/// - image: The original image.
/// - howMany: How many rows/columns to slice the image up into.
///
/// - Returns: An array of images.
///
/// - Note: The order of the images that are returned will correspond
/// to the `imageOrientation` of the image. If the image's
/// `imageOrientation` is not `.up`, take care interpreting
/// the order in which the tiled images are returned.
func slice(image: UIImage, into howMany: Int) -> [UIImage] {
let width: CGFloat
let height: CGFloat
switch image.imageOrientation {
case .left, .leftMirrored, .right, .rightMirrored:
width = image.size.height
height = image.size.width
default:
width = image.size.width
height = image.size.height
}
let tileWidth = Int(width / CGFloat(howMany))
let tileHeight = Int(height / CGFloat(howMany))
let scale = Int(image.scale)
var images = [UIImage]()
let cgImage = image.cgImage!
var adjustedHeight = tileHeight
var y = 0
for row in 0 ..< howMany {
if row == (howMany - 1) {
adjustedHeight = Int(height) - y
}
var adjustedWidth = tileWidth
var x = 0
for column in 0 ..< howMany {
if column == (howMany - 1) {
adjustedWidth = Int(width) - x
}
let origin = CGPoint(x: x * scale, y: y * scale)
let size = CGSize(width: adjustedWidth * scale, height: adjustedHeight * scale)
let tileCgImage = cgImage.cropping(to: CGRect(origin: origin, size: size))!
images.append(UIImage(cgImage: tileCgImage, scale: image.scale, orientation: image.imageOrientation))
x += tileWidth
}
y += tileHeight
}
return images
}
Or, in Swift 2.3:
func slice(image image: UIImage, into howMany: Int) -> [UIImage] {
let width: CGFloat
let height: CGFloat
switch image.imageOrientation {
case .Left, .LeftMirrored, .Right, .RightMirrored:
width = image.size.height
height = image.size.width
default:
width = image.size.width
height = image.size.height
}
let tileWidth = Int(width / CGFloat(howMany))
let tileHeight = Int(height / CGFloat(howMany))
let scale = Int(image.scale)
var images = [UIImage]()
let cgImage = image.CGImage!
var adjustedHeight = tileHeight
var y = 0
for row in 0 ..< howMany {
if row == (howMany - 1) {
adjustedHeight = Int(height) - y
}
var adjustedWidth = tileWidth
var x = 0
for column in 0 ..< howMany {
if column == (howMany - 1) {
adjustedWidth = Int(width) - x
}
let origin = CGPoint(x: x * scale, y: y * scale)
let size = CGSize(width: adjustedWidth * scale, height: adjustedHeight * scale)
let tileCgImage = CGImageCreateWithImageInRect(cgImage, CGRect(origin: origin, size: size))!
images.append(UIImage(CGImage: tileCgImage, scale: image.scale, orientation: image.imageOrientation))
x += tileWidth
}
y += tileHeight
}
return images
}
This makes sure that the resulting images are at the correct scale (this is why the above strides through the image in "points" and then multiplies by the scale to get the correct pixels in the CGImage). Also, if the dimensions (measured in "points") are not evenly divisible by n, it makes up the difference in the last image for that row or column, respectively. E.g., when you make three tiles for an image with a height of 736 points, the first two will be 245 points tall, but the last one will be 246 points.
There is one exception that this does not (entirely) handle gracefully. Namely, if the UIImage has an imageOrientation of something other than .up, the order in which the images are retrieved corresponds to that orientation, not to the upper-left corner of the image as you view it.
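If you need the tiles in visual order regardless of imageOrientation, one option (not part of the code above) is to redraw the image into an .up-oriented copy before slicing; a sketch, assuming iOS 10+ for UIGraphicsImageRenderer:
extension UIImage {
    /// Redraws the image so the underlying CGImage matches what you see on screen,
    /// i.e. the result always reports imageOrientation == .up.
    func orientedUp() -> UIImage {
        guard imageOrientation != .up else { return self }
        let format = UIGraphicsImageRendererFormat()
        format.scale = scale
        return UIGraphicsImageRenderer(size: size, format: format).image { _ in
            draw(in: CGRect(origin: .zero, size: size))
        }
    }
}
You would then call slice(image: image.orientedUp(), into: howMany) instead.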
You can split your image in two parts vertically and horizontally and sub-split the result as needed:
extension UIImage {
var topHalf: UIImage? {
guard let cgImage = cgImage, let image = cgImage.cropping(to: CGRect(origin: .zero, size: CGSize(width: size.width, height: size.height/2))) else { return nil }
return UIImage(cgImage: image, scale: scale, orientation: imageOrientation)
}
var bottomHalf: UIImage? {
guard let cgImage = cgImage, let image = cgImage.cropping(to: CGRect(origin: CGPoint(x: 0, y: CGFloat(Int(size.height)-Int(size.height/2))), size: CGSize(width: size.width, height: CGFloat(Int(size.height) - Int(size.height/2))))) else { return nil }
return UIImage(cgImage: image, scale: scale, orientation: imageOrientation)
}
var leftHalf: UIImage? {
guard let cgImage = cgImage, let image = cgImage.cropping(to: CGRect(origin: .zero, size: CGSize(width: size.width/2, height: size.height))) else { return nil }
return UIImage(cgImage: image, scale: scale, orientation: imageOrientation)
}
var rightHalf: UIImage? {
guard let cgImage = cgImage, let image = cgImage.cropping(to: CGRect(origin: CGPoint(x: CGFloat(Int(size.width)-Int((size.width/2))), y: 0), size: CGSize(width: CGFloat(Int(size.width)-Int((size.width/2))), height: size.height)))
else { return nil }
return UIImage(cgImage: image, scale: scale, orientation: imageOrientation)
}
var splitedInFourParts: [UIImage] {
guard case let topHalf = topHalf,
case let bottomHalf = bottomHalf,
let topLeft = topHalf?.leftHalf,
let topRight = topHalf?.rightHalf,
let bottomLeft = bottomHalf?.leftHalf,
let bottomRight = bottomHalf?.rightHalf else{ return [] }
return [topLeft, topRight, bottomLeft, bottomRight]
}
var splitedInSixteenParts: [UIImage] {
var array = splitedInFourParts.flatMap({$0.splitedInFourParts})
// if you need it in reading order you need to swap some image positions
swap(&array[2], &array[4])
swap(&array[3], &array[5])
swap(&array[10], &array[12])
swap(&array[11], &array[13])
return array
}
}
Splitting the image by columns and rows:
extension UIImage {
func matrix(_ rows: Int, _ columns: Int) -> [UIImage] {
let y = (size.height / CGFloat(rows)).rounded()
let x = (size.width / CGFloat(columns)).rounded()
var images: [UIImage] = []
images.reserveCapacity(rows * columns)
guard let cgImage = cgImage else { return [] }
(0..<rows).forEach { row in
(0..<columns).forEach { column in
var width = Int(x)
var height = Int(y)
if row == rows-1 && size.height.truncatingRemainder(dividingBy: CGFloat(rows)) != 0 {
height = Int(size.height - size.height / CGFloat(rows) * (CGFloat(rows)-1))
}
if column == columns-1 && size.width.truncatingRemainder(dividingBy: CGFloat(columns)) != 0 {
width = Int(size.width - (size.width / CGFloat(columns) * (CGFloat(columns)-1)))
}
if let image = cgImage.cropping(to: CGRect(origin: CGPoint(x: column * Int(x), y: row * Int(y)), size: CGSize(width: width, height: height))) {
images.append(UIImage(cgImage: image, scale: scale, orientation: imageOrientation))
}
}
}
return images
}
}
let myPicture = UIImage(data: try! Data(contentsOf: URL(string:"https://i.stack.imgur.com/Xs4RX.jpg")!))!
let images = myPicture.matrix(4, 4)
images.count // 16
I've used this to slice an image into a matrix. The matrix is represented as a 1D array.
func snapshotImage(image: UIImage, rect: CGRect) -> UIImage {
var imageRect: CGRect! = rect
if image.scale > 1.0 {
imageRect = CGRect(origin: CGPoint(x: rect.origin.x * image.scale, y: rect.origin.y * image.scale), size: CGSize(width: rect.size.width * image.scale, height: rect.size.height * image.scale))
}
let imageRef: CGImage = image.cgImage!.cropping(to: imageRect)!
let result: UIImage = UIImage(cgImage: imageRef, scale: image.scale, orientation: image.imageOrientation)
return result
}
func sliceImage(image: UIImage, size: CGSize) -> [UIImage] {
var slices: [UIImage] = [UIImage]()
var rect = CGRect(x: 0.0, y: 0.0, width: size.width, height: size.height)
var y: Float = 0.0
let width: Int = Int(image.size.width / size.width)
let height: Int = Int(image.size.height / size.height)
for _ in 0...height {
var x: Float = 0.0
for _ in 0...width {
rect.origin.x = CGFloat(x);
slices.append(self.snapshotImage(image: image, rect: rect))
x += Float(size.width)
}
y += Float(size.height)
rect.origin.y = CGFloat(y)
}
return slices
}

Get grid location from click in Swift with Spritekit

I have a dynamically generated grid that can give me the location of the center of one of its tiles given the row and column, using my gridPosition function, but I can't figure out the opposite. I am trying to keep everything dynamic so this will work on all screens and devices.
How do I get the row and column from the location? I tried reversing how I get the gridPosition, but it is off by a lot. Is there an easier way to do this, or did I make an error?
import SpriteKit
import Foundation
class Grid:SKSpriteNode {
var rows:Int!
var cols:Int!
var tileIsValid:[[Bool]]!
var blockSize:CGFloat!
convenience init(blockSize:CGFloat,rows:Int,cols:Int) {
let texture = Grid.gridTexture(blockSize,rows: rows, cols:cols)
self.init(texture: texture, color:SKColor.clearColor(), size: texture.size())
self.blockSize = blockSize
self.rows = rows
self.cols = cols
self.tileIsValid = Array.init(count: rows, repeatedValue: Array.init(count: cols, repeatedValue: false))
}
override init(texture: SKTexture!, color: SKColor, size: CGSize) {
super.init(texture: texture, color: color, size: size)
}
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
class func gridTexture(blockSize:CGFloat,rows:Int,cols:Int) -> SKTexture {
// Add 1 to the height and width to ensure the borders are within the sprite
let size = CGSize(width: CGFloat(cols)*blockSize+1.0, height: CGFloat(rows)*blockSize+1.0)
UIGraphicsBeginImageContext(size)
let context = UIGraphicsGetCurrentContext()
let bezierPath = UIBezierPath()
let offset:CGFloat = 0.5
// Draw vertical lines
for i in 0...cols {
let x = CGFloat(i)*blockSize + offset
bezierPath.moveToPoint(CGPoint(x: x, y: 0))
bezierPath.addLineToPoint(CGPoint(x: x, y: size.height))
}
// Draw horizontal lines
for i in 0...rows {
let y = CGFloat(i)*blockSize + offset
bezierPath.moveToPoint(CGPoint(x: 0, y: y))
bezierPath.addLineToPoint(CGPoint(x: size.width, y: y))
}
SKColor.blackColor().setStroke()
bezierPath.lineWidth = 1.0
bezierPath.stroke()
CGContextAddPath(context, bezierPath.CGPath)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return SKTexture(image: image)
}
// give row, col and get cgpoint of tile
func gridPosition(row:Int, col:Int) -> CGPoint {
let offset = blockSize / 2.0 + 0.5
let colsMiddle = (blockSize * (CGFloat(cols)) / 2.0)
let tileWidth = (CGFloat(col) * blockSize)
let x = tileWidth - colsMiddle + offset
let rowsMiddle = ((blockSize * CGFloat(rows)) / 2.0)
let gridHeightNum = CGFloat(rows - row - 1)
let gridHeight = (gridHeightNum * blockSize)
let y = gridHeight - rowsMiddle + offset
return CGPoint(x:x, y:y)
}
func gridIsValid(location: CGPoint) -> Bool {
let offset = blockSize / 2.0 + 0.5
let colsMiddle = (blockSize * (CGFloat(cols)) / 2.0)
let midColCalc = location.x + colsMiddle - offset
let midColCalc2 = midColCalc / blockSize
let col = Int(round(abs(midColCalc2)))
let rowsMiddle = ((blockSize * CGFloat(rows)) / 2.0)
let midRowCalc = location.y + rowsMiddle - offset
let midRowCalc2 = midRowCalc / blockSize
let midRowCalc3 = midRowCalc2 + 1 + CGFloat(rows)
let row = Int(round(abs(midRowCalc3)))
print ("\(col) \(row)")
return self.tileIsValid[col][row]
}
}
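One way to get the reverse mapping is to invert the gridPosition math directly. A rough sketch (untested) of a method that could be added to Grid, assuming location is in the grid node's coordinate space (the same space gridPosition returns points in); note also that tileIsValid is created rows-first, so it should likely be indexed as tileIsValid[row][col]:
// Given a point in the grid node's coordinate space, get back (row, col).
// Solves the same equations gridPosition(row:col:) uses for x and y.
func gridCoordinate(location: CGPoint) -> (row: Int, col: Int)? {
    let offset = blockSize / 2.0 + 0.5
    let colsMiddle = blockSize * CGFloat(cols) / 2.0
    let rowsMiddle = blockSize * CGFloat(rows) / 2.0
    let col = Int(round((location.x + colsMiddle - offset) / blockSize))
    let row = rows - 1 - Int(round((location.y + rowsMiddle - offset) / blockSize))
    guard row >= 0 && row < rows && col >= 0 && col < cols else { return nil }
    return (row: row, col: col)
}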
