Iterating through all pixels of a UIImage - ios

I need some help with how to iterate through every pixel in a UIImage. I am finding the red, green, blue, and alpha color values in every pixel then comparing them to see if a certain color exists in the photo.
I have an extension that returns these color values of a pixel:
extension UIImage { func getPixelColor(pos: CGPoint) -> UIColor {
let pixelData = self.cgImage!.dataProvider!.data
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let pixelInfo: Int = ((Int(self.size.width) * Int(pos.y)) + Int(pos.x)) * 4
let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
let b = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
return UIColor(red: r, green: g, blue: b, alpha: a)
}
}
I am able to find the color value of a specific pixel in the image and check it to see if it matches a specific color value:
let specificPoint = CGPoint(x: 25, y: 25)
let findColorValue = displayImage.image?.getPixelColor(pos: specificPoint)
var redval: CGFloat = 0
var greenval: CGFloat = 0
var blueval: CGFloat = 0
var alphaval: CGFloat = 0
resultLabel.text = "r: \(redval) g: \(greenval) b: \(blueval) a: \(alphaval)"
findColorValue?.getRed(&redval, green: &greenval, blue: &blueval, alpha: &alphaval)
if resultLabel.text == "r: 1.0 g: 0.792156862745098 b: 0.474509803921569 a: 0.0156862745098039" {
resultLabel.text = "YES"
} else {
resultLabel.text = "NO"
}
I need help creating a for loop to loop through all the pixels in the image and being able to test them for the specific color value.
I'm also not sure if how I get the color values of a pixel is 100% right so feel free to tell me if I'm doing something wrong.
Thanks

Use a nested for loop:
for yCo in 0 ..< Int(image.size.height) {
for xCo in 0 ..< Int(image.size.width) {
if image.getPixelColor(pos: CGPoint(x: xCo, y: yCo)) == UIColor(red: 1, green: 1, blue: 1, alpha: 1) {
//Next step
}
}
}

Related

How to apply CIFilter on Image Pixel

I know it is possible to change color of pixel using pixel buffer, like in the below code, but I just want to blur a pixel using 'CIFilter' rather than changing color. I don't want to apply a 'CIFilter' on whole image.
//data pointer – stores an array of the pixel components. For example (r0, b0, g0, a0, r1, g1, b1, a1 .... rn, gn, bn, an)
let data : UnsafeMutablePointer<UInt8> = calloc(bytesPerRow, height)!.assumingMemoryBound(to: UInt8.self)
//get the index of the pixel (4 components times the x position plus the y position times the row width)
let pixelIndex = 4 * (location.x + (location.y * width))
//set the pixel components to the color components
data[pixelIndex] = red
data[pixelIndex+1] = green
data[pixelIndex+2] = blue
data[pixelIndex+3] = alpha
Also can we use below code for applying CIFilter on Pixel?
if let pixelData = self.cgImage?.dataProvider?.data {
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let pixelInfo: Int = ((Int(self.size.width) * Int(pos.y)) + Int(pos.x)) * 4
let red = CGFloat(data[pixelInfo]) / CGFloat(255.0)
let green = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
let blue = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
let alpha = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
}
Check out the CIBlendWithMask filter. It allows you to create a mask of any shape (even a single pixel) and use that to blend the input with another input. If you make inputBackgroundImage be the original image, and make inputImage be the original image with your desired filter applied, the inputImageMask is an all-black image with only the single pixel you want to change in white.
I typed this up pretty quick without testing code - could be a few errors. I have done something very similar recently, so shouldn't be too off. I'd like to know whatcha get and if this doesn't work, I bet it's close.
/*
Implementations
Notes:
- I don't know what kind of `CIFilter` blur you'd like to apply, so I'm just using one from here
- https://developer.apple.com/library/archive/documentation/GraphicsImaging/Reference/CoreImageFilterReference/#//apple_ref/doc/filter/ci/CIBoxBlur
*/
//Normal Image
let inputImage:UIImage = originalImage
//Blurred Image of only the BLURRED PIXELS -- we change the rest of the pixels to clear -- thus we can use this as the backgroundImage and the maskedImage
let unblurredImage = getBackgroundImage()
let filter = CIFilter(name: "CIBoxBlur")
filter?.setValue(unblurredImage, kCIInputImageKey)
let blurredImage = filter?.outputImage
//Now we can blend the 2 images
let blendFilter = CIFilter(name: "CIBlendWithAlphaMask")
blendFilter?.setValue(CIImage(image: inputImage), kCIInputImageKey)
blendFilter?.setValue(blurredImage, "inputBackgroundImage")
blendFilter?.setValue(blurredImage, "inputMaskImage")
let finalCIImage = blendFilter?.outputImage
let finalImage = UIImage(ciImage: finalCIImage)
/*
Functions used in the process
*/
//Create an image of only the pixels we want to blur
func getBackgroundImage(ciimage: CIImage) -> UIImage {
let inputCGImage = ciimage.convertCIImageToCGImage()!
let colorSpace = CGColorSpaceCreateDeviceRGB()
let width = inputCGImage.width
let height = inputCGImage.height
let bytesPerPixel = 4
let bitsPerComponent = 8
let bytesPerRow = bytesPerPixel * width
let bitmapInfo = RGBA32.bitmapInfo
guard let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo) else {
print("Couldn't create CGContext")
return nil
}
context.draw(inputCGImage, in: CGRect(x: 0, y: 0, width: width, height: height))
let pixelBuffer = buffer.bindMemory(to: RGBA32.self, capacity: width * height)
for row in 0 ..< Int(height) {
for column in 0 ..< Int(width) {
let offset = row * width + column
/*
You need to define aPixelIWantBlurred however you desire
Also, we don't edit the pixels we want to blur - we edit the other pixels to a transparent value. This allows us to use this as the background image and the masked image
*/
if pixelBuffer[offset] != aPixelIWantBlurred {
pixelBuffer[offset] = RGBA32.init(red: 0, green: 0, blue: 0, alpha: 0)
}
}
}
let outputCGImage = context.makeImage()!
let outputImage = UIImage(cgImage: outputCGImage, scale: image.scale, orientation: image.imageOrientation)
return outputImage
}
extension CIImage {
func convertCIImageToCGImage() -> CGImage! {
let context = CIContext(options: nil)
return context.createCGImage(self, from: self.extent)
}
}
struct RGBA32: Equatable {
private var color: UInt32
var redComponent: UInt8 {
return UInt8((color >> 24) & 255)
}
var greenComponent: UInt8 {
return UInt8((color >> 16) & 255)
}
var blueComponent: UInt8 {
return UInt8((color >> 8) & 255)
}
var alphaComponent: UInt8 {
return UInt8((color >> 0) & 255)
}
init(red: UInt8, green: UInt8, blue: UInt8, alpha: UInt8) {
let red = UInt32(red)
let green = UInt32(green)
let blue = UInt32(blue)
let alpha = UInt32(alpha)
color = (red << 24) | (green << 16) | (blue << 8) | (alpha << 0)
}
static let red = RGBA32(red: 255, green: 0, blue: 0, alpha: 255)
static let green = RGBA32(red: 0, green: 255, blue: 0, alpha: 255)
static let blue = RGBA32(red: 0, green: 0, blue: 255, alpha: 255)
static let white = RGBA32(red: 255, green: 255, blue: 255, alpha: 255)
static let black = RGBA32(red: 0, green: 0, blue: 0, alpha: 255)
static let magenta = RGBA32(red: 255, green: 0, blue: 255, alpha: 255)
static let yellow = RGBA32(red: 255, green: 255, blue: 0, alpha: 255)
static let cyan = RGBA32(red: 0, green: 255, blue: 255, alpha: 255)
static let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Little.rawValue
static func ==(lhs: RGBA32, rhs: RGBA32) -> Bool {
return lhs.color == rhs.color
}
}

Get the coordinates of dominant colours from UIImageView

I'm not able to find the coordinates from UIImageView possessing dominant colours. This is the code:
for yCo in 0 ..< Int(imageView.frame.height) {
for xCo in 0 ..< Int(imageView.frame.width) where image.getPixelColor(pos: CGPoint(x: xCo, y: yCo)) == dominantColorFirst {
print(CGPoint(x: xCo, y: yCo)) // uses 99% CPU -> Leads to hang app
}
}
extension UIImage {
func getPixelColor(pos: CGPoint) -> UIColor { // get pixel color
let pixelData = self.cgImage!.dataProvider!.data
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let pixelInfo: Int = ((Int(self.size.width) * Int(pos.y)) + Int(pos.x)) * 4
let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
let b = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
return UIColor(red: r, green: g, blue: b, alpha: a)}
Due to large no. of iterations, Im not able to find out the coordinates. It uses more memory and hangs the app, Is there any other way to work around?
I did a image process project before. I get alpha of all pixel and cache it. (my image only 1200x1200px)
Don't know why but when I using print on the for loop, it take too much memory and delay the app.
I must walk around with
var cachePointArray = [CGPoint]()
for yCo in 0 ..< Int(imageView.frame.height) {
for xCo in 0 ..< Int(imageView.frame.width) {
let point = CGPoint(x: xCo, y: yCo)
if image.getPixelColor(pos: point) == dominantColorFirst {
cachePointArray.append(point)
}
}
}
print(cachePointArray)
and in run much faster
You can try this for test
(Sorry I dont enough point to comment, if it doesn't help, ignore it)

change an UI Image into a String array

I have an UIImage made of only 4 types of pixel.
static let black = RGBA32(red: 0, green: 0, blue: 0, alpha: 255)
static let red = RGBA32(red: 255, green: 0, blue: 0, alpha: 255)
static let green = RGBA32(red: 0, green: 255, blue: 0, alpha: 255)
static let blue = RGBA32(red: 0, green: 0, blue: 255, alpha: 255)
( reference to the code used to create the Image https://stackoverflow.com/a/40207142/7010252 )
I saved the image in the phone's photo library then I open the image in my app again. I know that the number of pixel is the same.
So how can I get the RGBA data of all the pixels?
The first thing done here is to loop through your image and create a map of all the points in it.
func getPixelColor(image: UIImage) {
let width = image.size.width
let height = image.size.height
for x in 0..<Int(width) {
for y in 0..<Int(height) {
let color = image.pixelColor(CGPoint(x: CGFloat(x), y: CGFloat(y)))
print(color.RGBA)
}
}
}
Once it has the color it wants, it then calls the RGBA function in the UIColor extension to provide the string you want.
Next, we use an extension of UIImage to then take the points provided by the above function and query the image data for its UIColor data.
extension UIImage {
func pixelColor(_ point: CGPoint) -> UIColor {
let pixelData = cgImage?.dataProvider?.data
let pointerData: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let pixelInfo = Int(((self.size.width * point.y) + point.x)) * 4
let maxValue: CGFloat = 255.0
let compare: CGFloat = 0.99
let r: CGFloat = (CGFloat(pointerData[pixelInfo]) / maxValue) > compare ? 1.0 : 0.0
let g: CGFloat = (CGFloat(pointerData[pixelInfo + 1]) / maxValue) > compare ? 1.0 : 0.0
let b: CGFloat = (CGFloat(pointerData[pixelInfo + 2]) / maxValue) > compare ? 1.0 : 0.0
let a = CGFloat(pointerData[pixelInfo + 3]) / maxValue
return UIColor(red: r, green: g, blue: b, alpha: a)
}
}
This extension uses the raw data to determine the RGB values of each pixel. It then performs a crude check to play safe with CGFloats to see if the value is 255.0 for that particular color. If it is, it will return a value of 1.0, otherwise it returns 0.0.
Next, there is an extension for UIColor that will provide the formatted string you are looking for.
extension UIColor {
var RGBA: String {
guard let components = cgColor.components, components.count == 4 else {
return ""
}
return "\(components[0])-\(components[1])-\(components[2])-\(components[3])"
}
}
This should provide the 1-0-0-1 type of values you seek. You can also modify this to include any additional information you need.

How do I get the color of a pixel in a UIImage with Swift?

I'm trying to get the color of a pixel in a UIImage with Swift, but it seems to always return 0. Here is the code, translated from #Minas' answer on this thread:
func getPixelColor(pos: CGPoint) -> UIColor {
var pixelData = CGDataProviderCopyData(CGImageGetDataProvider(self.CGImage))
var data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
var pixelInfo: Int = ((Int(self.size.width) * Int(pos.y)) + Int(pos.x)) * 4
var r = CGFloat(data[pixelInfo])
var g = CGFloat(data[pixelInfo+1])
var b = CGFloat(data[pixelInfo+2])
var a = CGFloat(data[pixelInfo+3])
return UIColor(red: r, green: g, blue: b, alpha: a)
}
Thanks in advance!
A bit of searching leads me here since I was facing the similar problem.
You code works fine. The problem might be raised from your image.
Code:
//On the top of your swift
extension UIImage {
func getPixelColor(pos: CGPoint) -> UIColor {
let pixelData = CGDataProviderCopyData(CGImageGetDataProvider(self.CGImage))
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let pixelInfo: Int = ((Int(self.size.width) * Int(pos.y)) + Int(pos.x)) * 4
let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
let b = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
return UIColor(red: r, green: g, blue: b, alpha: a)
}
}
What happens is this method will pick the pixel colour from the image's CGImage. So make sure you are picking from the right image. e.g. If you UIImage is 200x200, but the original image file from Imgaes.xcassets or wherever it came from, is 400x400, and you are picking point (100,100), you are actually picking the point on the upper left section of the image, instead of middle.
Two Solutions:
1, Use image from Imgaes.xcassets, and only put one #1x image in 1x field. Leave the #2x, #3x blank. Make sure you know the image size, and pick a point that is within the range.
//Make sure only 1x image is set
let image : UIImage = UIImage(named:"imageName")
//Make sure point is within the image
let color : UIColor = image.getPixelColor(CGPointMake(xValue, yValue))
2, Scale you CGPoint up/down the proportion to match the UIImage. e.g. let point = CGPoint(100,100) in the example above,
let xCoordinate : Float = Float(point.x) * (400.0/200.0)
let yCoordinate : Float = Float(point.y) * (400.0/200.0)
let newCoordinate : CGPoint = CGPointMake(CGFloat(xCoordinate), CGFloat(yCoordinate))
let image : UIImage = largeImage
let color : UIColor = image.getPixelColor(CGPointMake(xValue, yValue))
I've only tested the first method, and I am using it to get a colour off a colour palette. Both should work.
Happy coding :)
SWIFT 3, XCODE 8 Tested and working
extension UIImage {
func getPixelColor(pos: CGPoint) -> UIColor {
let pixelData = self.cgImage!.dataProvider!.data
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let pixelInfo: Int = ((Int(self.size.width) * Int(pos.y)) + Int(pos.x)) * 4
let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
let b = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
return UIColor(red: r, green: g, blue: b, alpha: a)
}
}
If you are calling the answered question more than once, than you should not use the function on every pixel, because you are recreating the same set of data. If you want all of the colors in an image, do something more like this:
func findColors(_ image: UIImage) -> [UIColor] {
let pixelsWide = Int(image.size.width)
let pixelsHigh = Int(image.size.height)
guard let pixelData = image.cgImage?.dataProvider?.data else { return [] }
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
var imageColors: [UIColor] = []
for x in 0..<pixelsWide {
for y in 0..<pixelsHigh {
let point = CGPoint(x: x, y: y)
let pixelInfo: Int = ((pixelsWide * Int(point.y)) + Int(point.x)) * 4
let color = UIColor(red: CGFloat(data[pixelInfo]) / 255.0,
green: CGFloat(data[pixelInfo + 1]) / 255.0,
blue: CGFloat(data[pixelInfo + 2]) / 255.0,
alpha: CGFloat(data[pixelInfo + 3]) / 255.0)
imageColors.append(color)
}
}
return imageColors
}
Here is an Example Project
As a side note, this function is significantly faster than the accepted answer, but it gives a less defined result.. I just put the UIImageView in the sourceView parameter.
func getPixelColorAtPoint(point: CGPoint, sourceView: UIView) -> UIColor {
let pixel = UnsafeMutablePointer<CUnsignedChar>.allocate(capacity: 4)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue)
let context = CGContext(data: pixel, width: 1, height: 1, bitsPerComponent: 8, bytesPerRow: 4, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)
context!.translateBy(x: -point.x, y: -point.y)
sourceView.layer.render(in: context!)
let color: UIColor = UIColor(red: CGFloat(pixel[0])/255.0,
green: CGFloat(pixel[1])/255.0,
blue: CGFloat(pixel[2])/255.0,
alpha: CGFloat(pixel[3])/255.0)
pixel.deallocate(capacity: 4)
return color
}
I was getting swapped colors for red and blue.
The original function also did not account for the actual bytes per row and bytes per pixel.
I also avoid unwrapping optionals whenever possible.
Here's an updated function.
import UIKit
extension UIImage {
/// Get the pixel color at a point in the image
func pixelColor(atLocation point: CGPoint) -> UIColor? {
guard let cgImage = cgImage, let pixelData = cgImage.dataProvider?.data else { return nil }
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let bytesPerPixel = cgImage.bitsPerPixel / 8
let pixelInfo: Int = ((cgImage.bytesPerRow * Int(point.y)) + (Int(point.x) * bytesPerPixel))
let b = CGFloat(data[pixelInfo]) / CGFloat(255.0)
let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
let r = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
return UIColor(red: r, green: g, blue: b, alpha: a)
}
}
Swift3 (IOS 10.3)
Important: - This will works only for #1x image.
Request: -
if you have solution for #2x and #3x images please share. Thank you :)
extension UIImage {
func getPixelColor(atLocation location: CGPoint, withFrameSize size: CGSize) -> UIColor {
let x: CGFloat = (self.size.width) * location.x / size.width
let y: CGFloat = (self.size.height) * location.y / size.height
let pixelPoint: CGPoint = CGPoint(x: x, y: y)
let pixelData = self.cgImage!.dataProvider!.data
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let pixelIndex: Int = ((Int(self.size.width) * Int(pixelPoint.y)) + Int(pixelPoint.x)) * 4
let r = CGFloat(data[pixelIndex]) / CGFloat(255.0)
let g = CGFloat(data[pixelIndex+1]) / CGFloat(255.0)
let b = CGFloat(data[pixelIndex+2]) / CGFloat(255.0)
let a = CGFloat(data[pixelIndex+3]) / CGFloat(255.0)
return UIColor(red: r, green: g, blue: b, alpha: a)
}
}
Usage
print(yourImageView.image!.getPixelColor(atLocation: location, withFrameSize: yourImageView.frame.size))
You can use tapGestureRecognizer for location.
Your code works fine for me, as an extension to UIImage. How are your testing your colour? here's my example:
let green = UIImage(named: "green.png")
let topLeft = CGPoint(x: 0, y: 0)
// Use your extension
let greenColour = green.getPixelColor(topLeft)
// Dump RGBA values
var redval: CGFloat = 0
var greenval: CGFloat = 0
var blueval: CGFloat = 0
var alphaval: CGFloat = 0
greenColour.getRed(&redval, green: &greenval, blue: &blueval, alpha: &alphaval)
println("Green is r: \(redval) g: \(greenval) b: \(blueval) a: \(alphaval)")
This prints:
Green is r: 0.0 g: 1.0 b: 1.0 a: 1.0
...which is correct, given that my image is a solid green square.
(What do you mean by "it always seems to return 0"? You don't happen to be testing on a black pixel, do you?)
Im getting backwards colours in terms of R and B being swapped, not sure why this I thought the order was RGBA.
func testGeneratedColorImage() {
let color = UIColor(red: 0.5, green: 0, blue: 1, alpha: 1)
let size = CGSize(width: 10, height: 10)
let image = UIImage.image(fromColor: color, size: size)
XCTAssert(image.size == size)
XCTAssertNotNil(image.cgImage)
XCTAssertNotNil(image.cgImage!.dataProvider)
let pixelData = image.cgImage!.dataProvider!.data
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let position = CGPoint(x: 1, y: 1)
let pixelInfo: Int = ((Int(size.width) * Int(position.y)) + Int(position.x)) * 4
let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
let b = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
let testColor = UIColor(red: r, green: g, blue: b, alpha: a)
XCTAssert(testColor == color, "Colour: \(testColor) does not match: \(color)")
}
Where color looks like this:
image looks like this:
and testColor looks like:
(I can understand that the blue value might be off a little bit and be 0.502 with floating point inaccuracy)
With the code switched to:
let b = CGFloat(data[pixelInfo]) / CGFloat(255.0)
let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
let r = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
I get testColor as:
I think you need to divide each component by 255:
var r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
var g = CGFloat(data[pixelInfo + 1]) / CGFloat(255.0)
var b = CGFloat(data[pixelInfo + 2]) / CGFloat(255.0)
var a = CGFloat(data[pixelInfo + 3]) / CGFloat(255.0)
I was trying to find the colors of all four corners of an image and was getting unexpected results, including UIColor.clear.
The issue is that the pixels start at 0, so requesting a pixel at the width of the image would actually wrap back around and give me the first pixel of the second row.
For example, the top right pixel of a 640 x 480 image would actually be x: 639, y: 0, and the bottom right pixel would be x: 639, y: 479.
Here's my implementation of the UIImage extension with this adjustment:
func getPixelColor(pos: CGPoint) -> UIColor {
guard let cgImage = cgImage, let pixelData = cgImage.dataProvider?.data else { return UIColor.clear }
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let bytesPerPixel = cgImage.bitsPerPixel / 8
// adjust the pixels to constrain to be within the width/height of the image
let y = pos.y > 0 ? pos.y - 1 : 0
let x = pos.x > 0 ? pos.x - 1 : 0
let pixelInfo = ((Int(self.size.width) * Int(y)) + Int(x)) * bytesPerPixel
let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
let b = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
return UIColor(red: r, green: g, blue: b, alpha: a)
}
I found no answer anywhere on the internet that supplied
Simple code
HDR support
Color profile support for bgr etc.
Scale support for #2x #3x
So here it is. The as far as I can tell definitive solution:
Swift 5
import UIKit
public extension CGBitmapInfo {
// https://stackoverflow.com/a/60247693/2585092
enum ComponentLayout {
case bgra
case abgr
case argb
case rgba
case bgr
case rgb
var count: Int {
switch self {
case .bgr, .rgb: return 3
default: return 4
}
}
}
var componentLayout: ComponentLayout? {
guard let alphaInfo = CGImageAlphaInfo(rawValue: rawValue & Self.alphaInfoMask.rawValue) else { return nil }
let isLittleEndian = contains(.byteOrder32Little)
if alphaInfo == .none {
return isLittleEndian ? .bgr : .rgb
}
let alphaIsFirst = alphaInfo == .premultipliedFirst || alphaInfo == .first || alphaInfo == .noneSkipFirst
if isLittleEndian {
return alphaIsFirst ? .bgra : .abgr
} else {
return alphaIsFirst ? .argb : .rgba
}
}
var chromaIsPremultipliedByAlpha: Bool {
let alphaInfo = CGImageAlphaInfo(rawValue: rawValue & Self.alphaInfoMask.rawValue)
return alphaInfo == .premultipliedFirst || alphaInfo == .premultipliedLast
}
}
extension UIImage {
// https://stackoverflow.com/a/68103748/2585092
subscript(_ point: CGPoint) -> UIColor? {
guard
let cgImage = cgImage,
let space = cgImage.colorSpace,
let pixelData = cgImage.dataProvider?.data,
let layout = cgImage.bitmapInfo.componentLayout
else {
return nil
}
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let comp = CGFloat(layout.count)
let isHDR = CGColorSpaceUsesITUR_2100TF(space)
let hdr = CGFloat(isHDR ? 2 : 1)
let pixelInfo = Int((size.width * point.y * scale + point.x * scale) * comp * hdr)
let i = Array(0 ... Int(comp - 1)).map {
CGFloat(data[pixelInfo + $0 * Int(hdr)]) / CGFloat(255)
}
switch layout {
case .bgra:
return UIColor(red: i[2], green: i[1], blue: i[0], alpha: i[3])
case .abgr:
return UIColor(red: i[3], green: i[2], blue: i[1], alpha: i[0])
case .argb:
return UIColor(red: i[1], green: i[2], blue: i[3], alpha: i[0])
case .rgba:
return UIColor(red: i[0], green: i[1], blue: i[2], alpha: i[3])
case .bgr:
return UIColor(red: i[2], green: i[1], blue: i[0], alpha: 1)
case .rgb:
return UIColor(red: i[0], green: i[1], blue: i[2], alpha: 1)
}
}
}
Swift 5, includes solution for #2x & #3x image
extension UIImage {
subscript(_ point: CGPoint) -> UIColor? {
guard let pixelData = self.cgImage?.dataProvider?.data else { return nil }
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let pixelInfo: Int = Int((size.width * point.y + point.x) * 4.0 * scale * scale)
let i = Array(0 ... 3).map { CGFloat(data[pixelInfo + $0]) / CGFloat(255) }
return UIColor(red: i[0], green: i[1], blue: i[2], alpha: i[3])
}
}
I use this extension :
public extension UIImage {
var pixelWidth: Int {
return cgImage?.width ?? 0
}
var pixelHeight: Int {
return cgImage?.height ?? 0
}
func pixelColor(x: Int, y: Int) -> UIColor {
if 0..<pixelWidth ~= x && 0..<pixelHeight ~= y {
log.info("Pixel coordinates are in bounds")
}else {
log.info("Pixel coordinates are out of bounds")
return .black
}
guard
let cgImage = cgImage,
let data = cgImage.dataProvider?.data,
let dataPtr = CFDataGetBytePtr(data),
let colorSpaceModel = cgImage.colorSpace?.model,
let componentLayout = cgImage.bitmapInfo.componentLayout
else {
assertionFailure("Could not get a pixel of an image")
return .clear
}
assert(
colorSpaceModel == .rgb,
"The only supported color space model is RGB")
assert(
cgImage.bitsPerPixel == 32 || cgImage.bitsPerPixel == 24,
"A pixel is expected to be either 4 or 3 bytes in size")
let bytesPerRow = cgImage.bytesPerRow
let bytesPerPixel = cgImage.bitsPerPixel/8
let pixelOffset = y*bytesPerRow + x*bytesPerPixel
if componentLayout.count == 4 {
let components = (
dataPtr[pixelOffset + 0],
dataPtr[pixelOffset + 1],
dataPtr[pixelOffset + 2],
dataPtr[pixelOffset + 3]
)
var alpha: UInt8 = 0
var red: UInt8 = 0
var green: UInt8 = 0
var blue: UInt8 = 0
switch componentLayout {
case .bgra:
alpha = components.3
red = components.2
green = components.1
blue = components.0
case .abgr:
alpha = components.0
red = components.3
green = components.2
blue = components.1
case .argb:
alpha = components.0
red = components.1
green = components.2
blue = components.3
case .rgba:
alpha = components.3
red = components.0
green = components.1
blue = components.2
default:
return .clear
}
// If chroma components are premultiplied by alpha and the alpha is `0`,
// keep the chroma components to their current values.
if cgImage.bitmapInfo.chromaIsPremultipliedByAlpha && alpha != 0 {
let invUnitAlpha = 255/CGFloat(alpha)
red = UInt8((CGFloat(red)*invUnitAlpha).rounded())
green = UInt8((CGFloat(green)*invUnitAlpha).rounded())
blue = UInt8((CGFloat(blue)*invUnitAlpha).rounded())
}
return .init(red: red, green: green, blue: blue, alpha: alpha)
} else if componentLayout.count == 3 {
let components = (
dataPtr[pixelOffset + 0],
dataPtr[pixelOffset + 1],
dataPtr[pixelOffset + 2]
)
var red: UInt8 = 0
var green: UInt8 = 0
var blue: UInt8 = 0
switch componentLayout {
case .bgr:
red = components.2
green = components.1
blue = components.0
case .rgb:
red = components.0
green = components.1
blue = components.2
default:
return .clear
}
return .init(red: red, green: green, blue: blue, alpha: UInt8(255))
} else {
assertionFailure("Unsupported number of pixel components")
return .clear
}
}
}
But for a right pixel color you need use only a image in xcasset in x1 otherwise your reference is wrong and you need to use this: let correctedImage = UIImage(data: image.pngData()!) for retrive the correct origin for your point .
The solution of https://stackoverflow.com/a/40237504/3286489, only works on sRGB colorspace type of image. However, for a different colorspace (extended sRGB??), it doesn't work.
So to make it work, need to convert it to a normal sRGB image type first, before getting the color from the cgImage. Note we need to add padding to the calculation to ensure the width is always a factor of 8
public extension UIImage {
func getPixelColor(pos: CGPoint) -> UIColor {
// convert to standard sRGB image
guard let cgImage = cgImage,
let colorSpace = CGColorSpace(name: CGColorSpace.sRGB),
let context = CGContext(data: nil,
width: Int(size.width), height: Int(size.height),
bitsPerComponent: 8, bytesPerRow: 0, space: colorSpace,
bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)
else { return .white }
context.draw(cgImage, in: CGRect(origin: .zero, size: size))
// Get the newly converted cgImage
guard let newCGImage = context.makeImage(),
let newDataProvider = newCGImage.dataProvider,
let data = newDataProvider.data
else { return .white }
let pixelData: UnsafePointer<UInt8> = CFDataGetBytePtr(data)
// Calculate the pixel position based on point given
let remaining = 8 - ((Int(size.width)) % 8)
let padding = (remaining < 8) ? remaining : 0
let pixelInfo: Int = (((Int(size.width) + padding) * Int(pos.y)) + Int(pos.x)) * 4
let r = CGFloat(pixelData[pixelInfo]) / CGFloat(255.0)
let g = CGFloat(pixelData[pixelInfo+1]) / CGFloat(255.0)
let b = CGFloat(pixelData[pixelInfo+2]) / CGFloat(255.0)
let a = CGFloat(pixelData[pixelInfo+3]) / CGFloat(255.0)
return UIColor(red: r, green: g, blue: b, alpha: a)
}
}
Optionally, if one doesn't want to convert to cgImage, just replace
// Get the newly converted cgImage
guard let newCGImage = context.makeImage(),
let newDataProvider = newCGImage.dataProvider,
let newData = newDataProvider.data
else { return .white }
let pixelData: UnsafePointer<UInt8> = CFDataGetBytePtr(newData)
With
// Get the data and bind it from UnsafeMutableRawPointer to UInt8
guard let data = context.data else { return .white }
let pixelData = data.bindMemory(
to: UInt8.self, capacity: Int(size.width * size.height * 4))
Updated
To get an even more concise code, we can improve the convert to sRGB using UIGraphicsImageRenderer directly. The calculation does changes a bit as due such redrawing refine the pixel to be 2x further.
func getPixelColor(pos: CGPoint) -> UIColor {
let newImage = UIGraphicsImageRenderer(size: size).image { _ in
draw(in: CGRect(origin: .zero, size: size))
}
guard let cgImage = newImage.cgImage,
let dataProvider = cgImage.dataProvider,
let data = dataProvider.data else { return .white }
let pixelData: UnsafePointer<UInt8> = CFDataGetBytePtr(data)
let remaining = 8 - ((Int(size.width) * 2) % 8)
let padding = (remaining < 8) ? remaining : 0
let pixelInfo: Int = (((Int(size.width * 2) + padding) * Int(pos.y * 2)) + Int(pos.x * 2)) * 4
let r = CGFloat(pixelData[pixelInfo]) / CGFloat(255.0)
let g = CGFloat(pixelData[pixelInfo+1]) / CGFloat(255.0)
let b = CGFloat(pixelData[pixelInfo+2]) / CGFloat(255.0)
let a = CGFloat(pixelData[pixelInfo+3]) / CGFloat(255.0)
return UIColor(red: r, green: g, blue: b, alpha: a)
}
This is as per solution of convert to sRGB in https://stackoverflow.com/a/64538344/3286489
As usual, late to the party, but I wanted to mention that the method indicated above, doesn't always work. If the image is not RGBA, then it can crash. In my experience, running release (optimized) code, can crash, when the debug code works fine.
I tend to use a lot of vector images in my apps, and iOS can sometimes render them in monochrome color spaces. I have experienced a number of crashes, with the code given here.
Also, we should use bytesPerRow, when skipping on the vertical. Apple tends to add padding to bitmaps, and a simple 4-byte pixel offset may not work.
I draw the image into an offscreen context, then take the sample from there.
Here's what I did. It works, but is not exactly performant. In my case, it's fine, because I only use it once, at startup:
extension UIImage {
/* ################################################################## */
/**
This returns the RGB color (as a UIColor) of the pixel in the image, at the given point. It is restricted to 32-bit (RGBA/8-bit pixel) values.
This was inspired by several of the answers [in this StackOverflow Question](https://stackoverflow.com/questions/25146557/how-do-i-get-the-color-of-a-pixel-in-a-uiimage-with-swift).
**NOTE:** This is unlikely to be highly performant!
- parameter at: The point in the image to sample (NOTE: Must be within image bounds, or nil is returned).
- returns: A UIColor (or nil).
*/
func getRGBColorOfThePixel(at inPoint: CGPoint) -> UIColor? {
guard (0..<size.width).contains(inPoint.x),
(0..<size.height).contains(inPoint.y)
else { return nil }
// We draw the image into a context, in order to be sure that we are accessing image data in our required format (RGBA).
UIGraphicsBeginImageContextWithOptions(size, false, 0)
draw(at: .zero)
let imageData = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
guard let cgImage = imageData?.cgImage,
let pixelData = cgImage.dataProvider?.data
else { return nil }
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let bytesPerPixel = (cgImage.bitsPerPixel + 7) / 8
let pixelByteOffset: Int = (cgImage.bytesPerRow * Int(inPoint.y)) + (Int(inPoint.x) * bytesPerPixel)
let divisor = CGFloat(255.0)
let r = CGFloat(data[pixelByteOffset]) / divisor
let g = CGFloat(data[pixelByteOffset + 1]) / divisor
let b = CGFloat(data[pixelByteOffset + 2]) / divisor
let a = CGFloat(data[pixelByteOffset + 3]) / divisor
return UIColor(red: r, green: g, blue: b, alpha: a)
}
}
If you use image from Imgaes.xcassets, and only put one #1x image in 1x field. Leave the #2x, #3x blank.

iOS Find Color at Point Between Two Colors

I have a problem: I need to be able to take two colors and make a 'virtual gradient' out of them. I then need to be able to find the color at any point on this line. My current approach is this:
if (fahrenheit < kBottomThreshold)
{
return [UIColor colorWithRed:kBottomR/255.0f green:kBottomG/255.0f blue:kBottomB/255.0f alpha:1];
}
if (fahrenheit > kTopThreshold)
{
return [UIColor colorWithRed:kTopR/255.0f green:kTopG/255.0f blue:kTopB/255.0f alpha:1];
}
double rDiff = kTopR - kBottomR;
double gDiff = kTopG - kBottomG;
double bDiff = kTopB - kBottomB;
double tempDiff = kTopThreshold - kBottomThreshold;
double rValue;
double gValue;
double bValue;
rValue = kBottomR + ((rDiff/tempDiff) * fahrenheit);
gValue = kBottomG + ((gDiff/tempDiff) * fahrenheit);
bValue = kBottomB + ((bDiff/tempDiff) * fahrenheit);
return [UIColor colorWithRed:rValue/255.0f green:gValue/255.0f blue:bValue/255.0f alpha:1];
Variables:
fahrenheit is a variable passed into my function that is the number on this virtual line that I want to find the color for.
kTopR, kTopB, and kTopG are the RGB values for one end of the gradient. Same for their kBottom counterparts.
kBottomThreshold and kTopThreshold are the endpoints of my gradient.
Here's my problem: When fahrenheit goes over either end of the gradient, the gradient seems to 'jump' to a different value.
I've included an example project, hosted on my S3 server, here.
You really need to download the project and try it on the simulator/device to see what I mean (unless you are crazy smart and can tell just by looking at the code)
Swift - 3.0 && 4.0
extension UIColor {
func toColor(_ color: UIColor, percentage: CGFloat) -> UIColor {
let percentage = max(min(percentage, 100), 0) / 100
switch percentage {
case 0: return self
case 1: return color
default:
var (r1, g1, b1, a1): (CGFloat, CGFloat, CGFloat, CGFloat) = (0, 0, 0, 0)
var (r2, g2, b2, a2): (CGFloat, CGFloat, CGFloat, CGFloat) = (0, 0, 0, 0)
guard self.getRed(&r1, green: &g1, blue: &b1, alpha: &a1) else { return self }
guard color.getRed(&r2, green: &g2, blue: &b2, alpha: &a2) else { return self }
return UIColor(red: CGFloat(r1 + (r2 - r1) * percentage),
green: CGFloat(g1 + (g2 - g1) * percentage),
blue: CGFloat(b1 + (b2 - b1) * percentage),
alpha: CGFloat(a1 + (a2 - a1) * percentage))
}
}
}
Usage:-
let colorRed = UIColor.red
let colorBlue = UIColor.blue
let colorOutput = colorRed.toColor(colorBlue, percentage: 50)
Result
The problem is that you're not subtracting kBottomThreshold from farenheit.
But let's simplify.
First, we want to map the input temperature to a parameter t in the range [0 ... 1]. Then, we want to map t to an output in the range [kBottomR ... kTopR], and also to an output in the range [kBottomG ... kTopG], and also to an output in the range [kBottomB ... kTopB].
UIColor *colorForDegreesFahrenheit(double fahrenheit) {
double t = (fahrenheit - kBottomThreshold) / (kTopThreshold - kBottomThreshold);
// Clamp t to the range [0 ... 1].
t = MAX(0.0, MIN(t, 1.0));
double r = kBottomR + t * (kTopR - kBottomR);
double g = kBottomG + t * (kTopG - kBottomG);
double b = kBottomB + t * (kTopB - kBottomB);
return [UIColor colorWithRed:r/255 green:g/255 blue:b/255 alpha:1];
}
Thanks #ramchandra-n I implemented the extension to get the intermediate color from an array of colors by percentage
extension Array where Element: UIColor {
func intermediate(percentage: CGFloat) -> UIColor {
let percentage = Swift.max(Swift.min(percentage, 100), 0) / 100
switch percentage {
case 0: return first ?? .clear
case 1: return last ?? .clear
default:
let approxIndex = percentage / (1 / CGFloat(count - 1))
let firstIndex = Int(approxIndex.rounded(.down))
let secondIndex = Int(approxIndex.rounded(.up))
let fallbackIndex = Int(approxIndex.rounded())
let firstColor = self[firstIndex]
let secondColor = self[secondIndex]
let fallbackColor = self[fallbackIndex]
var (r1, g1, b1, a1): (CGFloat, CGFloat, CGFloat, CGFloat) = (0, 0, 0, 0)
var (r2, g2, b2, a2): (CGFloat, CGFloat, CGFloat, CGFloat) = (0, 0, 0, 0)
guard firstColor.getRed(&r1, green: &g1, blue: &b1, alpha: &a1) else { return fallbackColor }
guard secondColor.getRed(&r2, green: &g2, blue: &b2, alpha: &a2) else { return fallbackColor }
let intermediatePercentage = approxIndex - CGFloat(firstIndex)
return UIColor(
red: CGFloat(r1 + (r2 - r1) * intermediatePercentage),
green: CGFloat(g1 + (g2 - g1) * intermediatePercentage),
blue: CGFloat(b1 + (b2 - b1) * intermediatePercentage),
alpha: CGFloat(a1 + (a2 - a1) * intermediatePercentage)
)
}
}
}
You can use it to get an intermediate color between two or more colors:
let color = [.green, .yellow, .red].intermediate(percentage: 70)
In case your gradient is more complex than a 2 color gradient, you may consider drawing a CGGradientRef into a temporary CGImageRef and directly read RGBA values from the image buffer.
Here is something that I had to do with a 5 gradient stops and colors:
CGFloat tmpImagewidth = 1000.0f; // Make this bigger or smaller if you need more or less resolution (number of different colors).
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
// create a gradient
CGFloat locations[] = { 0.0,
0.35,
0.55,
0.8,
1.0 };
NSArray *colors = #[(__bridge id) [UIColor redColor].CGColor,
(__bridge id) [UIColor greenColor].CGColor,
(__bridge id) [UIColor blueColor].CGColor,
(__bridge id) [UIColor yellowColor].CGColor,
(__bridge id) [UIColor redColor].CGColor,
];
CGGradientRef gradient = CGGradientCreateWithColors(colorSpace, (__bridge CFArrayRef) colors, locations);
CGPoint startPoint = CGPointMake(0, 0);
CGPoint endPoint = CGPointMake(tmpImagewidth, 0);
// create a bitmap context to draw the gradient to, 1 pixel high.
CGContextRef context = CGBitmapContextCreate(NULL, tmpImagewidth, 1, 8, 0, colorSpace, kCGImageAlphaPremultipliedLast);
// draw the gradient into it
CGContextAddRect(context, CGRectMake(0, 0, tmpImagewidth, 1));
CGContextClip(context);
CGContextDrawLinearGradient(context, gradient, startPoint, endPoint, 0);
// Get our RGB bytes into a buffer with a couple of intermediate steps...
// CGImageRef -> CFDataRef -> byte array
CGImageRef cgImage = CGBitmapContextCreateImage(context);
CGDataProviderRef provider = CGImageGetDataProvider(cgImage);
CFDataRef pixelData = CGDataProviderCopyData(provider);
// cleanup:
CGGradientRelease(gradient);
CGColorSpaceRelease(colorSpace);
CGImageRelease(cgImage);
CGContextRelease(context);
const UInt8* data = CFDataGetBytePtr(pixelData);
// we got all the data we need.
// bytes in the data buffer are a succession of R G B A bytes
// For instance, the color of the point 27% in our gradient is:
CGFloat x = tmpImagewidth * .27;
int pixelIndex = (int)x * 4; // 4 bytes per color
UIColor *color = [UIColor colorWithRed:data[pixelIndex + 0]/255.0f
green:data[pixelIndex + 1]/255.0f
blue:data[pixelIndex + 2]/255.0f
alpha:data[pixelIndex + 3]/255.0f];
// done fetching color data, finally release the buffer
CGDataProviderRelease(provider);
I am not saying this is better than the "math way" in the answer above, certainly there is a memory and cpu tax that goes into producing the temporary image.
The advantage of this however, is that the code complexity stays the same no matter how many gradient stops you need...
I would like to extend/modify #Sebastien Windal´s answer, as his answer works great for my use case, but can be further improved by getting the pixelData directly from the context (see Get pixel data as array from UIImage/CGImage in swift).
I implemented his answer in swift, therefore the changes are also written in swift.
Just pass the Int Array to the context before drawing the gradient. The array will then be filled with the pixelData when drawing the context.
let dataSize = tmpImagewidth * 1 * 4
var pixelData = [UInt8](repeating: 0, count: Int(dataSize))
let context = CGContext(data: &pixelData, width: Int(tmpImagewidth), height: 1, bitsPerComponent: 8, bytesPerRow: 4 * Int(tmpImagewidth), space: colorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)
Then we can skip creating an image first to read the pixelData from there.
Swift 5.3
Get n colors between two colors:
import UIKit
extension CGFloat {
var getPercentageValues: [CGFloat] {
let increment: CGFloat = 100/(self-1)
var values = [CGFloat]()
let last: CGFloat = 100
var value: CGFloat = 0
while value <= last {
values.append(value)
value += increment
}
return values
}
}
extension UIColor {
func toColor(_ color: UIColor, percentage: CGFloat) -> UIColor {
let percentage = max(min(percentage, 100), 0) / 100
switch percentage {
case 0: return self
case 1: return color
default:
var (r1, g1, b1, a1): (CGFloat, CGFloat, CGFloat, CGFloat) = (.zero, .zero, .zero, .zero)
var (r2, g2, b2, a2): (CGFloat, CGFloat, CGFloat, CGFloat) = (.zero, .zero, .zero, .zero)
guard self.getRed(&r1, green: &g1, blue: &b1, alpha: &a1) else { return self }
guard color.getRed(&r2, green: &g2, blue: &b2, alpha: &a2) else { return self }
return UIColor(red: CGFloat(r1 + (r2 - r1) * percentage),
green: CGFloat(g1 + (g2 - g1) * percentage),
blue: CGFloat(b1 + (b2 - b1) * percentage),
alpha: CGFloat(a1 + (a2 - a1) * percentage))
}
}
func getColors(to color: UIColor, with quantity: CGFloat) -> [UIColor] {
quantity.getPercentageValues.map { self.toColor(color, percentage: $0 ) }
}
}
let redColor = UIColor.red
let blueColor = UIColor.blue
print(redColor.getColors(to: blueColor, with: 10))
Result
SwiftUI Color operator + overload
func +(lhs: Color, rhs: Color) -> Color {
let lhsUIColor = UIColor(lhs)
let rhsUIColor = UIColor(rhs)
var (r1, g1, b1, a1): (CGFloat, CGFloat, CGFloat, CGFloat) = (0, 0, 0, 0)
var (r2, g2, b2, a2): (CGFloat, CGFloat, CGFloat, CGFloat) = (0, 0, 0, 0)
guard lhsUIColor.getRed(&r1, green: &g1, blue: &b1, alpha: &a1) else { return Color(lhsUIColor) }
guard rhsUIColor.getRed(&r2, green: &g2, blue: &b2, alpha: &a2) else { return Color(rhsUIColor) }
return Color(Color.RGBColorSpace.sRGB, red: Double(r1 + r2) / 2, green: Double(g1 + g2) / 2, blue: Double(b1 + b2) / 2, opacity: Double(a1 + a2) / 2)
}

Resources