Swift self image creation and change - ios

In Swift I have an image: (but {I think} the problem is how the 'self' is implemented)
(in my viewDidLoad)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bytesPerPixel = 4
let bitsPerComponent = 8
let bytesPerRow = bytesPerPixel * self.cols // width
let bitmapInfo = RGBA32.bitmapInfo
let minimapContext = CGContext(data: nil, width: self.cols, height: self.rows, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo)
let buffer = minimapContext!.data
self.pixelBuffer = buffer!.bindMemory(to: RGBA32.self, capacity: self.cols * self.rows) // was let
var pixelIndex = 1
Then I loop over an array of like:
self.pixelBuffer[pixelIndex] = .blue
This all works fine without the 'self'.
I want to change some of the pixels, so I added the 'self' and defined it at the top of the ViewController class
Now change the pixels like:
self.pixelBuffer[newPixelIndex] = .lightblue
But I get a variety of errors:
Cannot assign value of type 'UnsafeMutablePointer' to type 'RGBA32'
Cannot infer contextual base in reference to member 'lightblue'
Value of type 'RGBA32?' has no subscripts
Don't know if this helps, but the pixelBuffer is defined like:
var pixelBuffer: RGBA32? = nil
and here is RGBA32:
/// A 32-bit RGBA pixel packed into a `UInt32` as (R << 24) | (G << 16) | (B << 8) | A,
/// intended for CGContexts created with `premultipliedLast | byteOrder32Little`.
struct RGBA32: Equatable {

    /// Raw packed pixel value.
    private var color: UInt32

    /// Packs the four 8-bit channels into the 32-bit value, red in the top byte.
    init(red: UInt8, green: UInt8, blue: UInt8, alpha: UInt8) {
        let r = UInt32(red) << 24
        let g = UInt32(green) << 16
        let b = UInt32(blue) << 8
        let a = UInt32(alpha)
        color = r | g | b | a
    }

    /// CGContext bitmap layout that matches this packing.
    static let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Little.rawValue

    /// Two pixels are equal when their packed values match.
    static func ==(lhs: RGBA32, rhs: RGBA32) -> Bool {
        return lhs.color == rhs.color
    }

    // Preset palette used by the minimap drawing code.
    static let black     = RGBA32(red: 0,   green: 0,   blue: 0,   alpha: 255)
    static let red       = RGBA32(red: 255, green: 0,   blue: 0,   alpha: 255)
    static let green     = RGBA32(red: 24,  green: 183, blue: 3,   alpha: 255)
    static let darkgreen = RGBA32(red: 70,  green: 105, blue: 35,  alpha: 255)
    static let blue      = RGBA32(red: 0,   green: 127, blue: 255, alpha: 255)
    static let lightblue = RGBA32(red: 33,  green: 255, blue: 255, alpha: 255)
    static let brown     = RGBA32(red: 127, green: 63,  blue: 0,   alpha: 255)
}
THANKS!!

The main problem is that you have declared self.pixelBuffer to be an Optional RGBA32. But that is not what pixelBuffer was before. It was a UnsafeMutablePointer<RGBA32>. Here is the declaration you need:
var pixelBuffer : UnsafeMutablePointer<RGBA32>!
All your issues will then go away, I think. Your
self.pixelBuffer[newPixelIndex] = .lightblue
then compiles for me.

Related

How convert RGBA to UIColor and back

How can i convert my struct RGBA to UIColor and back
/// An 8-bit-per-channel RGBA pixel value.
struct RGBA {
    var r: UInt8
    var g: UInt8
    var b: UInt8
    var a: UInt8

    /// Converts this pixel to a `UIColor` in the sRGB color space.
    ///
    /// FIX: the original used `UIColor(displayP3Red:...)`, but the pixel data is
    /// produced and consumed by a `CGColorSpaceCreateDeviceRGB()` (sRGB) context,
    /// so round-tripping through Display P3 shifted every channel value — the
    /// reported loss of color accuracy. `UIColor(red:green:blue:alpha:)` keeps
    /// both conversion directions in the same color space.
    var toUIColor: UIColor {
        let red = CGFloat(r) / 255
        let green = CGFloat(g) / 255
        let blue = CGFloat(b) / 255
        let alpha = CGFloat(a) / 255
        return UIColor(red: red, green: green, blue: blue, alpha: alpha)
    }
}
extension UIColor {
    /// Converts this color to an 8-bit RGBA pixel.
    ///
    /// FIX: the original truncated with `UInt8(value * 255)`, which biases every
    /// channel downward by up to one step per conversion and compounds across
    /// UIColor -> RGBA -> UIColor round trips. Rounding to the nearest integer
    /// keeps the round trip stable.
    var toRGBA: RGBA {
        var red: CGFloat = 0
        var green: CGFloat = 0
        var blue: CGFloat = 0
        var alpha: CGFloat = 0
        getRed(&red, green: &green, blue: &blue, alpha: &alpha)
        // Clamp guards against extended-range components outside 0...1.
        func byte(_ component: CGFloat) -> UInt8 {
            return UInt8(max(0, min(255, (component * 255).rounded())))
        }
        return RGBA(r: byte(red), g: byte(green), b: byte(blue), a: byte(alpha))
    }
}
When I use this code, I lose color accuracy.
Draw pipline
select UIColor -> toRGBA ->
0.7490196078431373 0.35294117647058826 0.9490196078431372 1.0
select color on canvas -> toUIColor
draw line -> toRGBA ->
0.803921568627451 0.3215686274509804 0.9803921568627451 1.0
var canvasSize = PixelSize(width: 16, height: 16) // (Int, Int)
/// Renders a flat, row-major array of RGBA pixels (canvasSize.width x canvasSize.height)
/// into a `UIImage`.
///
/// FIX: the original passed `UnsafeMutableRawPointer(mutating: pixels)` directly to
/// `CGContext(data:...)`. A pointer obtained that way is only valid momentarily; the
/// array's buffer may move or be freed while the context still references it, which is
/// undefined behavior. We instead let the context allocate its own buffer and copy the
/// pixel bytes into it.
func createImage(by pixels: [RGBA]) -> UIImage? {
    let width = canvasSize.width
    let height = canvasSize.height
    // Refuse degenerate sizes and short input rather than reading past the array.
    guard width > 0, height > 0, pixels.count >= width * height else { return nil }
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    guard let bitmapContext = CGContext(data: nil,
                                        width: width,
                                        height: height,
                                        bitsPerComponent: 8,
                                        bytesPerRow: 4 * width,
                                        space: colorSpace,
                                        bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue),
          let destination = bitmapContext.data else { return nil }
    // Copy the pixel data into the context-owned buffer (4 bytes per RGBA pixel).
    pixels.withUnsafeBytes { source in
        destination.copyMemory(from: source.baseAddress!, byteCount: 4 * width * height)
    }
    guard let image = bitmapContext.makeImage() else { return nil }
    return UIImage(cgImage: image)
}
Whats wrong?
Welcome!
The issue is that you use two different color spaces:
When converting from RGBA to UIColor, you assume the RGBA values are in Display P3 (UIColor(displayP3Red:...).
But you create the CGContext with the "device RGB" color space, which is an sRGB color space as far as I know.
If you'd use the init(red:, green:, blue:, alpha:) initializer of UIColor it should work.

How to apply CIFilter on Image Pixel

I know it is possible to change color of pixel using pixel buffer, like in the below code, but I just want to blur a pixel using 'CIFilter' rather than changing color. I don't want to apply a 'CIFilter' on whole image.
//data pointer – stores an array of the pixel components. For example (r0, b0, g0, a0, r1, g1, b1, a1 .... rn, gn, bn, an)
let data : UnsafeMutablePointer<UInt8> = calloc(bytesPerRow, height)!.assumingMemoryBound(to: UInt8.self)
//get the index of the pixel (4 components times the x position plus the y position times the row width)
let pixelIndex = 4 * (location.x + (location.y * width))
//set the pixel components to the color components
data[pixelIndex] = red
data[pixelIndex+1] = green
data[pixelIndex+2] = blue
data[pixelIndex+3] = alpha
Also can we use below code for applying CIFilter on Pixel?
if let pixelData = self.cgImage?.dataProvider?.data {
let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
let pixelInfo: Int = ((Int(self.size.width) * Int(pos.y)) + Int(pos.x)) * 4
let red = CGFloat(data[pixelInfo]) / CGFloat(255.0)
let green = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
let blue = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
let alpha = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
}
Check out the CIBlendWithMask filter. It allows you to create a mask of any shape (even a single pixel) and use that to blend the input with another input. If you make inputBackgroundImage be the original image, and make inputImage be the original image with your desired filter applied, the inputImageMask is an all-black image with only the single pixel you want to change in white.
I typed this up pretty quick without testing code - could be a few errors. I have done something very similar recently, so shouldn't be too off. I'd like to know whatcha get and if this doesn't work, I bet it's close.
/*
Implementations
Notes:
- I don't know what kind of `CIFilter` blur you'd like to apply, so I'm just using one from here
- https://developer.apple.com/library/archive/documentation/GraphicsImaging/Reference/CoreImageFilterReference/#//apple_ref/doc/filter/ci/CIBoxBlur
*/
// Normal image.
let inputImage: UIImage = originalImage

// Image containing only the pixels to blur — all other pixels cleared to transparent —
// so it can serve as both the background image and the alpha mask of the blend.
let unblurredImage = getBackgroundImage()

// FIX: KVC setters must be called as `setValue(_:forKey:)`; the original omitted the
// `forKey:` label on every call, which does not compile.
let filter = CIFilter(name: "CIBoxBlur")
filter?.setValue(unblurredImage, forKey: kCIInputImageKey)
let blurredImage = filter?.outputImage

// Blend the blurred pixels over the original, using the blurred image's alpha as mask.
let blendFilter = CIFilter(name: "CIBlendWithAlphaMask")
blendFilter?.setValue(CIImage(image: inputImage), forKey: kCIInputImageKey)
blendFilter?.setValue(blurredImage, forKey: "inputBackgroundImage")
blendFilter?.setValue(blurredImage, forKey: "inputMaskImage")

// FIX: `UIImage(ciImage:)` takes a non-Optional CIImage; unwrap the filter output
// via Optional.map instead of passing an Optional straight in.
let finalCIImage = blendFilter?.outputImage
let finalImage = finalCIImage.map { UIImage(ciImage: $0) }
/*
Functions used in the process
*/
//Create an image of only the pixels we want to blur
/// Builds an image in which every pixel EXCEPT the ones to blur is made transparent,
/// so the result can serve as both the background image and the alpha mask of a blend.
///
/// FIXES vs. the original sketch:
/// - it returned `nil` from a non-Optional `UIImage`; the return type is now `UIImage?`
/// - it read an undefined `buffer` variable; the code now unwraps `context.data`
/// - it referenced an undefined `image` for scale/orientation; the CGImage is wrapped directly
func getBackgroundImage(ciimage: CIImage) -> UIImage? {
    guard let inputCGImage = ciimage.convertCIImageToCGImage() else {
        print("Couldn't convert CIImage to CGImage")
        return nil
    }
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let width = inputCGImage.width
    let height = inputCGImage.height
    let bytesPerPixel = 4
    let bitsPerComponent = 8
    let bytesPerRow = bytesPerPixel * width
    let bitmapInfo = RGBA32.bitmapInfo
    guard let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo) else {
        print("Couldn't create CGContext")
        return nil
    }
    context.draw(inputCGImage, in: CGRect(x: 0, y: 0, width: width, height: height))
    guard let buffer = context.data else {
        print("Couldn't get CGContext data")
        return nil
    }
    let pixelBuffer = buffer.bindMemory(to: RGBA32.self, capacity: width * height)
    for row in 0 ..< height {
        for column in 0 ..< width {
            let offset = row * width + column
            // `aPixelIWantBlurred` must be defined by the caller's own logic.
            // We do NOT touch the pixels to blur; every other pixel is cleared to
            // transparent so this image works as both background and mask.
            if pixelBuffer[offset] != aPixelIWantBlurred {
                pixelBuffer[offset] = RGBA32(red: 0, green: 0, blue: 0, alpha: 0)
            }
        }
    }
    guard let outputCGImage = context.makeImage() else { return nil }
    return UIImage(cgImage: outputCGImage)
}
extension CIImage {
    /// Renders this CIImage into a CGImage via a freshly created CIContext.
    /// Returns an implicitly-unwrapped optional: `createCGImage` can return nil
    /// (e.g. for an image with infinite extent), so callers that force-unwrap
    /// must be sure the image has a finite, renderable extent.
    func convertCIImageToCGImage() -> CGImage! {
        // NOTE(review): creating a CIContext on every call is expensive;
        // consider caching a shared context if this is called in a loop.
        let context = CIContext(options: nil)
        return context.createCGImage(self, from: self.extent)
    }
}
/// A 32-bit RGBA pixel whose channels are packed into a `UInt32` as
/// red in the top byte, then green, blue, and alpha in the low byte.
struct RGBA32: Equatable {

    /// Packed channel data: (R << 24) | (G << 16) | (B << 8) | A.
    private var color: UInt32

    init(red: UInt8, green: UInt8, blue: UInt8, alpha: UInt8) {
        color = (UInt32(red) << 24)
              | (UInt32(green) << 16)
              | (UInt32(blue) << 8)
              | UInt32(alpha)
    }

    // MARK: Channel accessors

    var redComponent: UInt8   { return UInt8((color >> 24) & 255) }
    var greenComponent: UInt8 { return UInt8((color >> 16) & 255) }
    var blueComponent: UInt8  { return UInt8((color >> 8) & 255) }
    var alphaComponent: UInt8 { return UInt8(color & 255) }

    // MARK: CoreGraphics interop

    /// The CGContext bitmap layout that matches this packing.
    static let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Little.rawValue

    // MARK: Equatable

    static func ==(lhs: RGBA32, rhs: RGBA32) -> Bool {
        return lhs.color == rhs.color
    }

    // MARK: Common colors

    static let red     = RGBA32(red: 255, green: 0,   blue: 0,   alpha: 255)
    static let green   = RGBA32(red: 0,   green: 255, blue: 0,   alpha: 255)
    static let blue    = RGBA32(red: 0,   green: 0,   blue: 255, alpha: 255)
    static let white   = RGBA32(red: 255, green: 255, blue: 255, alpha: 255)
    static let black   = RGBA32(red: 0,   green: 0,   blue: 0,   alpha: 255)
    static let magenta = RGBA32(red: 255, green: 0,   blue: 255, alpha: 255)
    static let yellow  = RGBA32(red: 255, green: 255, blue: 0,   alpha: 255)
    static let cyan    = RGBA32(red: 0,   green: 255, blue: 255, alpha: 255)
}

Image color bug?

I'd like to generate this cool line effect on a UIImage using the code I've added below. But there seems to be a crazy bug in relation to the color:
So I'd like my function to draw lines to the image with a given RGB value (in my case r:76, g:255, b:0 to get this green look) as it's color:
But the code I'm using just colors the image kind of magenta instead of green:
Please have a look at my code:
/// Draws a horizontal line of `color` across the image every 10 rows and returns the result.
///
/// FIXES vs. the original:
/// 1. The context was created with only `premultipliedLast`, while RGBA32 packs red
///    into the *high* byte of a UInt32. On a little-endian CPU the channel bytes land
///    in reversed order, which is why the intended green came out magenta. Adding
///    `byteOrder32Little` makes the context's pixel layout match RGBA32's packing.
/// 2. `stride(from:through:by:)` included `img.height` itself, so whenever the height
///    is a multiple of 10 the loop wrote a full row *past the end* of the pixel buffer
///    (heap corruption). `stride(from:to:by:)` excludes the upper bound.
func colorImage(image: UIImage) -> UIImage {
    let color = RGBA32(red: 76, green: 255, blue: 0, alpha: 255)
    let img: CGImage = image.cgImage!
    let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Little.rawValue
    let context = CGContext(data: nil, width: img.width, height: img.height, bitsPerComponent: 8, bytesPerRow: 4 * img.width, space: CGColorSpaceCreateDeviceRGB(), bitmapInfo: bitmapInfo)!
    context.draw(img, in: CGRect(x: 0, y: 0, width: img.width, height: img.height))
    let binaryData = context.data!.bindMemory(to: RGBA32.self, capacity: img.width * img.height)
    for y in stride(from: 0, to: img.height, by: 10) {
        for x in 0..<img.width {
            let pixel = (y * img.width) + x
            binaryData[pixel] = color
        }
    }
    let output = context.makeImage()!
    return UIImage(cgImage: output, scale: image.scale, orientation: image.imageOrientation)
}
/// 32-bit RGBA pixel; channels packed high-to-low as R, G, B, A in one UInt32.
struct RGBA32: Equatable {
    /// Packed channel storage.
    private var color: UInt32

    init(red: UInt8, green: UInt8, blue: UInt8, alpha: UInt8) {
        // Widen each channel and fold it into the packed value, one byte at a time:
        // ((((r << 8) | g) << 8 | b) << 8) | a  ==  r<<24 | g<<16 | b<<8 | a
        let channels: [UInt32] = [UInt32(red), UInt32(green), UInt32(blue), UInt32(alpha)]
        color = channels.reduce(0) { ($0 << 8) | $1 }
    }

    static func ==(lhs: RGBA32, rhs: RGBA32) -> Bool {
        return lhs.color == rhs.color
    }
}
Tbh I'm absolutely clueless why this issue appears - so any help how to fix this would be very appreciated. Thanks :)
RGBA is a confusing word. It is not clear if R is the most significant byte in the 32-bit data or the first byte in the 4-byte memory region.
But with your bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue,
R takes the first place and G, B, and then Alpha last.
In little-endian representation of UInt32, R for the least significant byte.
Try this version of RGBA:
/// 32-bit RGBA pixel stored as four separate bytes rather than a packed UInt32,
/// so the in-memory byte order is red, green, blue, alpha regardless of the
/// CPU's endianness.
struct RGBA32: Equatable {
    // NOTE(review): Swift does not formally guarantee the memory layout of structs
    // or tuples (as the answer itself notes); this relies on the currently observed
    // sequential layout when the struct is bound over raw bitmap memory.
    private var color: (red: UInt8, green: UInt8, blue: UInt8, alpha: UInt8)
    init(red: UInt8, green: UInt8, blue: UInt8, alpha: UInt8) {
        color = (red: red, green: green, blue: blue, alpha: alpha)
    }
    static func ==(lhs: RGBA32, rhs: RGBA32) -> Bool {
        // Tuples of Equatable elements compare element-wise.
        return lhs.color == rhs.color
    }
}
(Memory allocations of Swift structs or tuples are not clearly defined, but the above code works as expected.)

Change color of certain pixels in a UIImage

For a given multi-color PNG UIImage (with transparency), what is the best/Swift-idiomatic way to:
create a duplicate UIImage
find all black pixels in the copy and change them to red
(return the modified copy)
There are a few related questions on SO but I haven't been able to find something that works.
You have to extract the pixel buffer of the image, at which point you can loop through, changing pixels as you see fit. At the end, create a new image from the buffer.
In Swift 3, this looks like:
/// Returns a copy of `image` in which every pure-black pixel has been recolored red.
/// Works by drawing the image into an RGBA32-layout bitmap context, rewriting the
/// pixel buffer in place, then snapshotting the context back into a UIImage.
func processPixels(in image: UIImage) -> UIImage? {
    guard let inputCGImage = image.cgImage else {
        print("unable to get cgImage")
        return nil
    }

    let pixelWidth = inputCGImage.width
    let pixelHeight = inputCGImage.height

    // Create a context whose memory layout matches RGBA32's packing.
    guard let context = CGContext(data: nil,
                                  width: pixelWidth,
                                  height: pixelHeight,
                                  bitsPerComponent: 8,
                                  bytesPerRow: 4 * pixelWidth,
                                  space: CGColorSpaceCreateDeviceRGB(),
                                  bitmapInfo: RGBA32.bitmapInfo) else {
        print("unable to create context")
        return nil
    }

    context.draw(inputCGImage, in: CGRect(x: 0, y: 0, width: pixelWidth, height: pixelHeight))

    guard let rawBuffer = context.data else {
        print("unable to get context data")
        return nil
    }

    // Reinterpret the raw bytes as RGBA32 pixels and recolor black ones in place.
    let pixels = rawBuffer.bindMemory(to: RGBA32.self, capacity: pixelWidth * pixelHeight)
    for offset in 0 ..< pixelWidth * pixelHeight where pixels[offset] == .black {
        pixels[offset] = .red
    }

    let outputCGImage = context.makeImage()!
    return UIImage(cgImage: outputCGImage, scale: image.scale, orientation: image.imageOrientation)
}
/// A 32-bit RGBA pixel whose channels are packed into a UInt32 as
/// (red << 24) | (green << 16) | (blue << 8) | alpha, to be used with a CGContext
/// created with the matching `bitmapInfo` below.
struct RGBA32: Equatable {
    // Packed channel storage.
    private var color: UInt32
    // Red channel (top byte of the packed value).
    var redComponent: UInt8 {
        return UInt8((color >> 24) & 255)
    }
    // Green channel.
    var greenComponent: UInt8 {
        return UInt8((color >> 16) & 255)
    }
    // Blue channel.
    var blueComponent: UInt8 {
        return UInt8((color >> 8) & 255)
    }
    // Alpha channel (low byte).
    var alphaComponent: UInt8 {
        return UInt8((color >> 0) & 255)
    }
    /// Packs the four 8-bit channels into the 32-bit value.
    init(red: UInt8, green: UInt8, blue: UInt8, alpha: UInt8) {
        let red = UInt32(red)
        let green = UInt32(green)
        let blue = UInt32(blue)
        let alpha = UInt32(alpha)
        color = (red << 24) | (green << 16) | (blue << 8) | (alpha << 0)
    }
    // Common opaque colors.
    static let red = RGBA32(red: 255, green: 0, blue: 0, alpha: 255)
    static let green = RGBA32(red: 0, green: 255, blue: 0, alpha: 255)
    static let blue = RGBA32(red: 0, green: 0, blue: 255, alpha: 255)
    static let white = RGBA32(red: 255, green: 255, blue: 255, alpha: 255)
    static let black = RGBA32(red: 0, green: 0, blue: 0, alpha: 255)
    static let magenta = RGBA32(red: 255, green: 0, blue: 255, alpha: 255)
    static let yellow = RGBA32(red: 255, green: 255, blue: 0, alpha: 255)
    static let cyan = RGBA32(red: 0, green: 255, blue: 255, alpha: 255)
    /// CGContext layout matching this packing.
    static let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Little.rawValue
    static func ==(lhs: RGBA32, rhs: RGBA32) -> Bool {
        return lhs.color == rhs.color
    }
}
For Swift 2 rendition, see previous revision of this answer.
To get a better result, we can search for a color range in the image pixels. Referring to @Rob's answer, I made an update and now the result is better.
/// Returns a copy of `image` in which every pixel within ±1 per channel of
/// RGBA32(red: 231, green: 239, blue: 247, alpha: 255) is made fully transparent.
///
/// FIX: the original assigned `.transparent`, a static member that the RGBA32 struct
/// shown alongside it never defines (compile error). The transparent pixel is now
/// built explicitly, and the per-channel bounds are hoisted into named ranges so the
/// "within tolerance of the target color" intent is readable.
func processByPixel(in image: UIImage) -> UIImage? {
    guard let inputCGImage = image.cgImage else { print("unable to get cgImage"); return nil }
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let width = inputCGImage.width
    let height = inputCGImage.height
    let bytesPerPixel = 4
    let bitsPerComponent = 8
    let bytesPerRow = bytesPerPixel * width
    let bitmapInfo = RGBA32.bitmapInfo
    guard let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo) else {
        print("Cannot create context!"); return nil
    }
    context.draw(inputCGImage, in: CGRect(x: 0, y: 0, width: width, height: height))
    guard let buffer = context.data else { print("Cannot get context data!"); return nil }
    let pixelBuffer = buffer.bindMemory(to: RGBA32.self, capacity: width * height)

    // Target color RGBA32(231, 239, 247, 255) with a ±1 tolerance per channel.
    let redRange: ClosedRange<UInt8> = 230...232
    let greenRange: ClosedRange<UInt8> = 238...240
    let blueRange: ClosedRange<UInt8> = 246...248
    let transparent = RGBA32(red: 0, green: 0, blue: 0, alpha: 0)

    for offset in 0 ..< width * height {
        let pixel = pixelBuffer[offset]
        if redRange.contains(pixel.redComponent),
           greenRange.contains(pixel.greenComponent),
           blueRange.contains(pixel.blueComponent),
           pixel.alphaComponent == 255 {
            pixelBuffer[offset] = transparent
        }
    }
    let outputCGImage = context.makeImage()!
    let outputImage = UIImage(cgImage: outputCGImage, scale: image.scale, orientation: image.imageOrientation)
    return outputImage
}
I hope this helps someone 🎉

Get average color of UIImage in Swift

I was recently attempting to convert the code from here into Swift. However, I keep getting a white color, no matter the image. Here's my code:
// Playground - noun: a place where people can play
import UIKit
// Computes an "average" color by drawing the whole image into a 1x1 bitmap context.
// (Swift 1.x-era API: CGBitmapContextCreate, fromRaw/toRaw.)
extension UIImage {
    func averageColor() -> UIColor {
        var colorSpace = CGColorSpaceCreateDeviceRGB()
        // NOTE(review): this buffer is declared [CGFloat], but the context below is
        // told it has 8 bits per component and only 4 bytes per row — CoreGraphics
        // writes four *UInt8* samples into CGFloat-typed storage. Reading rgba[i]
        // afterwards yields nonsense (the "always white" symptom). The buffer should
        // be four UInt8s, converted via CGFloat(...) when building the UIColor.
        var rgba: [CGFloat] = [0,0,0,0]
        var context = CGBitmapContextCreate(&rgba, 1, 1, 8, 4, colorSpace, CGBitmapInfo.fromRaw(CGImageAlphaInfo.PremultipliedLast.toRaw())!)
        // NOTE(review): bare expression — presumably a playground inspection line; it has no effect.
        rgba
        CGContextDrawImage(context, CGRectMake(0, 0, 1, 1), self.CGImage)
        if rgba[3] > 0 {
            // Alpha-premultiplied path: scale the color channels by alpha.
            var alpha = rgba[3] / 255
            var multiplier = alpha / 255
            return UIColor(red: rgba[0] * multiplier, green: rgba[1] * multiplier, blue: rgba[2] * multiplier, alpha: alpha)
        } else {
            return UIColor(red: rgba[0] / 255, green: rgba[1] / 255, blue: rgba[2] / 255, alpha: rgba[3] / 255)
        }
    }
}
var img = UIImage(data: NSData(contentsOfURL: NSURL(string: "http://upload.wikimedia.org/wikipedia/commons/c/c3/Aurora_as_seen_by_IMAGE.PNG")))
img.averageColor()
Thanks in advance.
CoreImage in iOS 9: use the CIAreaAverage filter and pass the extent of your entire image to be averaged.
Plus, it's much faster since it'll either be running on the GPU or as a highly-optimized CPU CIKernel.
import UIKit
extension UIImage {
    /// Averages the whole image into a single color.
    /// On iOS 9+ this uses the GPU-accelerated CIAreaAverage filter; earlier systems
    /// fall back to drawing into an interpolating 1x1 bitmap context.
    /// (Swift 2-era API names.)
    func areaAverage() -> UIColor {
        // Destination buffer for the single RGBA pixel.
        var bitmap = [UInt8](count: 4, repeatedValue: 0)
        if #available(iOS 9.0, *) {
            // Get average color via CIAreaAverage over the image's full extent.
            let context = CIContext()
            // Prefer the backing CIImage; otherwise wrap the CGImage.
            let inputImage = CIImage ?? CoreImage.CIImage(CGImage: CGImage!)
            let extent = inputImage.extent
            let inputExtent = CIVector(x: extent.origin.x, y: extent.origin.y, z: extent.size.width, w: extent.size.height)
            let filter = CIFilter(name: "CIAreaAverage", withInputParameters: [kCIInputImageKey: inputImage, kCIInputExtentKey: inputExtent])!
            let outputImage = filter.outputImage!
            let outputExtent = outputImage.extent
            // CIAreaAverage is expected to produce a 1x1 image.
            assert(outputExtent.size.width == 1 && outputExtent.size.height == 1)
            // Render to bitmap.
            context.render(outputImage, toBitmap: &bitmap, rowBytes: 4, bounds: CGRect(x: 0, y: 0, width: 1, height: 1), format: kCIFormatRGBA8, colorSpace: CGColorSpaceCreateDeviceRGB())
        } else {
            // Create 1x1 context that interpolates pixels when drawing to it.
            let context = CGBitmapContextCreate(&bitmap, 1, 1, 8, 4, CGColorSpaceCreateDeviceRGB(), CGBitmapInfo.ByteOrderDefault.rawValue | CGImageAlphaInfo.PremultipliedLast.rawValue)!
            let inputImage = CGImage ?? CIContext().createCGImage(CIImage!, fromRect: CIImage!.extent)
            // Render to bitmap.
            CGContextDrawImage(context, CGRect(x: 0, y: 0, width: 1, height: 1), inputImage)
        }
        // Compute result.
        let result = UIColor(red: CGFloat(bitmap[0]) / 255.0, green: CGFloat(bitmap[1]) / 255.0, blue: CGFloat(bitmap[2]) / 255.0, alpha: CGFloat(bitmap[3]) / 255.0)
        return result
    }
}
Swift 3
/// Averages the entire image into a single color.
/// Uses the GPU-backed CIAreaAverage filter on iOS 9+, otherwise falls back to
/// drawing into an interpolating 1x1 bitmap context. (Swift 3 API.)
func areaAverage() -> UIColor {
    // Destination buffer for one RGBA pixel.
    var bitmap = [UInt8](repeating: 0, count: 4)
    if #available(iOS 9.0, *) {
        // Get average color.
        let context = CIContext()
        // Prefer the backing CIImage; otherwise wrap the CGImage.
        // NOTE(review): force-unwraps cgImage; crashes if the image has neither a
        // ciImage nor a cgImage backing — confirm that can't happen for callers.
        let inputImage: CIImage = ciImage ?? CoreImage.CIImage(cgImage: cgImage!)
        let extent = inputImage.extent
        let inputExtent = CIVector(x: extent.origin.x, y: extent.origin.y, z: extent.size.width, w: extent.size.height)
        let filter = CIFilter(name: "CIAreaAverage", withInputParameters: [kCIInputImageKey: inputImage, kCIInputExtentKey: inputExtent])!
        let outputImage = filter.outputImage!
        let outputExtent = outputImage.extent
        // CIAreaAverage is expected to produce a 1x1 image.
        assert(outputExtent.size.width == 1 && outputExtent.size.height == 1)
        // Render to bitmap.
        context.render(outputImage, toBitmap: &bitmap, rowBytes: 4, bounds: CGRect(x: 0, y: 0, width: 1, height: 1), format: kCIFormatRGBA8, colorSpace: CGColorSpaceCreateDeviceRGB())
    } else {
        // Create 1x1 context that interpolates pixels when drawing to it.
        let context = CGContext(data: &bitmap, width: 1, height: 1, bitsPerComponent: 8, bytesPerRow: 4, space: CGColorSpaceCreateDeviceRGB(), bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)!
        let inputImage = cgImage ?? CIContext().createCGImage(ciImage!, from: ciImage!.extent)
        // Render to bitmap.
        context.draw(inputImage!, in: CGRect(x: 0, y: 0, width: 1, height: 1))
    }
    // Compute result.
    let result = UIColor(red: CGFloat(bitmap[0]) / 255.0, green: CGFloat(bitmap[1]) / 255.0, blue: CGFloat(bitmap[2]) / 255.0, alpha: CGFloat(bitmap[3]) / 255.0)
    return result
}
Here's a solution:
/// Averages the image by drawing it into a 1x1 bitmap and reading the pixel back.
/// (Swift 2-era CoreGraphics API.)
///
/// FIX: the original `alloc`ed a 4-byte buffer and never deallocated it, leaking
/// 4 bytes per call; `defer` now releases the buffer on every exit path.
func averageColor() -> UIColor {
    let rgba = UnsafeMutablePointer<CUnsignedChar>.alloc(4)
    defer { rgba.dealloc(4) }
    let colorSpace: CGColorSpaceRef = CGColorSpaceCreateDeviceRGB()
    let info = CGBitmapInfo(CGImageAlphaInfo.PremultipliedLast.rawValue)
    let context: CGContextRef = CGBitmapContextCreate(rgba, 1, 1, 8, 4, colorSpace, info)
    CGContextDrawImage(context, CGRectMake(0, 0, 1, 1), self.CGImage)
    if rgba[3] > 0 {
        // Alpha-premultiplied data: scale the color channels accordingly.
        let alpha: CGFloat = CGFloat(rgba[3]) / 255.0
        let multiplier: CGFloat = alpha / 255.0
        return UIColor(red: CGFloat(rgba[0]) * multiplier, green: CGFloat(rgba[1]) * multiplier, blue: CGFloat(rgba[2]) * multiplier, alpha: alpha)
    } else {
        return UIColor(red: CGFloat(rgba[0]) / 255.0, green: CGFloat(rgba[1]) / 255.0, blue: CGFloat(rgba[2]) / 255.0, alpha: CGFloat(rgba[3]) / 255.0)
    }
}
Swift 3:
/// Averages every pixel of the image into one color using the CIAreaAverage
/// Core Image filter, then reads the 1x1 result back as four RGBA bytes.
func areaAverage() -> UIColor {
    var rgba = [UInt8](repeating: 0, count: 4)
    let context = CIContext(options: nil)

    // Round-trip through a CGImage so the filter input has a concrete extent.
    let sourceCIImage = CoreImage.CIImage(cgImage: self.cgImage!)
    let cgImg = context.createCGImage(sourceCIImage, from: sourceCIImage.extent)
    let inputImage = CIImage(cgImage: cgImg!)

    // CIAreaAverage collapses the requested extent to a single averaged pixel.
    let extent = inputImage.extent
    let inputExtent = CIVector(x: extent.origin.x, y: extent.origin.y, z: extent.size.width, w: extent.size.height)
    let filter = CIFilter(name: "CIAreaAverage", withInputParameters: [kCIInputImageKey: inputImage, kCIInputExtentKey: inputExtent])!
    let outputImage = filter.outputImage!
    assert(outputImage.extent.size.width == 1 && outputImage.extent.size.height == 1)

    // Render the 1x1 output into the 4-byte RGBA buffer.
    context.render(outputImage, toBitmap: &rgba, rowBytes: 4, bounds: CGRect(x: 0, y: 0, width: 1, height: 1), format: kCIFormatRGBA8, colorSpace: CGColorSpaceCreateDeviceRGB())

    return UIColor(red: CGFloat(rgba[0]) / 255.0, green: CGFloat(rgba[1]) / 255.0, blue: CGFloat(rgba[2]) / 255.0, alpha: CGFloat(rgba[3]) / 255.0)
}
Are you setting up your context correctly? If I look at the documentation for the CGBitmapContext Reference:
https://developer.apple.com/library/ios/documentation/graphicsimaging/Reference/CGBitmapContext/index.html#//apple_ref/c/func/CGBitmapContextCreate
it looks like you are only allocating enough memory for the image that would be contained in the CGFloat array. It also looks like you are telling the compiler that your image is only going to be one pixel by one pixel.
It looks like that size is also being confirmed as one pixel by one pixel when you are setting your CGRect in CGContextDrawImage.
If the Playground is only creating an image one pixel by one pixel, that would explain why you are only seeing a white screen.

Resources