Swift color detection in center of screen - iOS

I want to find the pixel color in the middle of the screen.
extension UIImage {
    func getPixelColor(atLocation location: CGPoint, withFrameSize size: CGSize) -> UIColor {
        let pixelPlace: CGPoint = CGPoint(x: location.x, y: location.y)
        let pixelData = self.cgImage!.dataProvider!.data
        let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
        let pixelIndex: Int = ((Int(self.size.width) * Int(pixelPlace.y)) + Int(pixelPlace.x)) * 4
        let r = CGFloat(data[pixelIndex])   // / CGFloat(255.0)
        let g = CGFloat(data[pixelIndex+1]) // / CGFloat(255.0)
        let b = CGFloat(data[pixelIndex+2]) // / CGFloat(255.0)
        let a = CGFloat(data[pixelIndex+3]) // / CGFloat(255.0)
        return UIColor(red: r, green: g, blue: b, alpha: a)
    }
}
Then I print the color with this:
let place = CGPoint(x: self.view.center.x, y: self.view.center.y)
print(yourImageView.image!.getPixelColor(atLocation: place, withFrameSize: self.view.frame.size))
How do I get the center pixel's color? I keep getting color values other than what is actually in the center. The color detection itself is functional, so I don't believe the problem comes from that.
What I changed (the same problem persists):
extension UIView {
    func getColor(at point: CGPoint) -> UIColor {
        let pixel = UnsafeMutablePointer<CUnsignedChar>.allocate(capacity: 4)
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue)
        let context = CGContext(data: pixel, width: 1, height: 1, bitsPerComponent: 8, bytesPerRow: 4, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)!
        context.translateBy(x: -point.x, y: -point.y)
        self.layer.render(in: context)
        let color = UIColor(red: CGFloat(pixel[0]),
                            green: CGFloat(pixel[1]),
                            blue: CGFloat(pixel[2]),
                            alpha: CGFloat(pixel[3]))
        pixel.deallocate(capacity: 4)
        return color
    }
}
And I access it this way:
let loc = CGPoint(x: yourImageView.center.x, y: yourImageView.center.y)
var color = yourImageView.getColor(at: loc)
print(color)
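One plausible cause, offered as a hedged note: self.view.center is expressed in the superview's coordinate space, and the image's pixel grid is a different space again (pixels, not points), so the view's center only coincides with the image's center pixel if the image, the image view, and the screen all share the same dimensions. A minimal sketch of the mapping, assuming a .scaleToFill content mode and a point already converted into the image view's own coordinates (the helper name is illustrative, not from the post):

import UIKit

// Hypothetical helper: map a point in the image view's coordinate space
// (points) to the backing image's pixel grid (pixels).
func imagePixelPoint(for viewPoint: CGPoint, in imageView: UIImageView) -> CGPoint? {
    guard let cgImage = imageView.image?.cgImage else { return nil }
    // cgImage dimensions are in pixels; the view's bounds are in points.
    let scaleX = CGFloat(cgImage.width) / imageView.bounds.width
    let scaleY = CGFloat(cgImage.height) / imageView.bounds.height
    return CGPoint(x: viewPoint.x * scaleX, y: viewPoint.y * scaleY)
}

// Usage: take the image view's center in its own bounds, not the superview's.
// let local = CGPoint(x: yourImageView.bounds.midX, y: yourImageView.bounds.midY)
// let pixelPoint = imagePixelPoint(for: local, in: yourImageView)

Note also that the index math in the extension assumes each row is exactly width * 4 bytes; using cgImage.bytesPerRow is safer, since rows are often padded.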

Related

CIAreaHistogram does not return expected result

I want to get the CIAreaHistogram of a UIImage.
Below is the code I run. The print() statement currently prints 255 lines of 0.0 0.0 0.0, followed by a last line of 0.0 0.0 0.058823529.
My UIImage input is also displayed on screen, so I'm sure there is an image.
I've also tried this filter in the CIFilter.io app. Strangely enough, given a colourful input image, the output image was a single solid grey line. Of course I expected a line, but not a solid grey one.
What could I be doing wrong?
let inputImage = CIImage(cgImage: outputImage!.cgImage!)
let histogramFilter = CIFilter(name: "CIAreaHistogram",
                               parameters: [kCIInputImageKey: inputImage,
                                            "inputExtent": inputImage.extent,
                                            "inputScale": NSNumber(value: 1.0),
                                            "inputCount": NSNumber(value: 256)])!

func processHistogram(ciImage: CIImage) {
    let image = renderImage(ciImage: ciImage)
    for x in 0 ... 255 {
        getPixelColor(fromCGImage: (image?.cgImage!)!, pos: CGPoint(x: x, y: 0))
    }
}

func getPixelColor(fromCGImage cgImage: CGImage, pos: CGPoint) -> UIColor {
    let pixelData = cgImage.dataProvider!.data
    let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
    let pixelInfo: Int = ((Int(cgImage.width) * Int(pos.y)) + Int(pos.x)) * 4
    let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
    let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
    let b = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
    let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
    print("\(r) \(g) \(b)")
    return UIColor(red: r, green: g, blue: b, alpha: a)
}

func renderImage(ciImage: CIImage) -> UIImage? {
    var outputImage: UIImage?
    let size = ciImage.extent.size
    UIGraphicsBeginImageContext(size)
    if let context = UIGraphicsGetCurrentContext() {
        context.interpolationQuality = .high
        context.setShouldAntialias(true)
        let inputImage = UIImage(ciImage: ciImage)
        inputImage.draw(in: CGRect(x: 0, y: 0, width: size.width, height: size.height))
        outputImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
    }
    return outputImage
}
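A hedged aside on the code above: inputExtent is documented as a CIVector (e.g. CIVector(cgRect: inputImage.extent)), so passing a CGRect may not configure the filter at all. Also, CIAreaHistogram emits normalized bin values scaled by inputScale, so most bins of a 256-bin histogram are tiny fractions; rounding them through an 8-bit UIImage round trip flattens nearly everything to 0.0, which matches the output described. A minimal sketch that reads the 256x1 output directly into a float buffer instead (the helper name is illustrative):

import CoreImage

// Render the CIAreaHistogram output (a 256x1 image) straight into a
// Float32 RGBA buffer, avoiding the 8-bit UIImage round trip.
func readHistogram(_ histogramOutput: CIImage, context: CIContext = CIContext()) -> [Float32] {
    let binCount = 256
    var bitmap = [Float32](repeating: 0, count: binCount * 4) // RGBA per bin
    context.render(histogramOutput,
                   toBitmap: &bitmap,
                   rowBytes: binCount * 4 * MemoryLayout<Float32>.size,
                   bounds: CGRect(x: 0, y: 0, width: binCount, height: 1),
                   format: kCIFormatRGBAf,
                   colorSpace: nil) // nil avoids colour-matching the raw counts
    return bitmap
}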

Detect Colour in a UIImage and save CGPoints

I have a UIImage and I want all the CGPoints of a specific colour in the image. For example, I have this image:
Now I want to get the RED-coloured CGPoints from this UIImage. The red colour may be a straight horizontal/vertical line; it may also be a curved/zigzag line.
This is what I have tried, but I'm unable to detect the required RED-coloured CGPoints.
// Loop through the image's pixels to detect the colour
var requiredPointsInImage = [CGPoint]()
let testImage = UIImage.init(named: "imgToTest1")
for heightIteration in 0..<Int(testImage!.size.height) {
    for widthIteration in 0..<Int(testImage!.size.width) {
        let colorOfPoints = testImage!.getPixelColor(pos: CGPoint(x: CGFloat(widthIteration), y: CGFloat(heightIteration)), withFrameSize: testImage!.size)
        if colorOfPoints == MYColor {
            print(colorOfPoints)
            requiredPointsInImage.append(CGPoint(x: CGFloat(widthIteration), y: CGFloat(heightIteration)))
        }
    }
}
let newImage = drawShapesOnImage(image: testImage!, points: requiredPointsInImage)

// Colour detection
extension UIImage {
    func getPixelColor(pos: CGPoint, withFrameSize size: CGSize) -> UIColor {
        let x: CGFloat = (self.size.width) * pos.x / size.width
        let y: CGFloat = (self.size.height) * pos.y / size.height
        let pixelPoint: CGPoint = CGPoint(x: x, y: y)
        let pixelData = self.cgImage!.dataProvider!.data
        let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
        let pixelInfo: Int = ((Int(self.size.width) * Int(pixelPoint.y)) + Int(pixelPoint.x)) * 3 //4
        let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
        let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
        let b = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
        // let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
        return UIColor(red: r, green: g, blue: b, alpha: 1)
    }
}

// Drawing on detected points
func drawShapesOnImage(image: UIImage, points: [CGPoint]) -> UIImage {
    UIGraphicsBeginImageContext(image.size)
    image.draw(at: CGPoint.zero)
    let context = UIGraphicsGetCurrentContext()
    context!.setLineWidth(2.0)
    context!.setStrokeColor(UIColor.green.cgColor)
    let radius: CGFloat = 5.0
    for point in points {
        let center = CGPoint(x: point.x, y: point.y)
        context!.addArc(center: CGPoint(x: center.x, y: center.y), radius: radius, startAngle: 0, endAngle: 360, clockwise: true)
        context!.strokePath()
    }
    let resultImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return resultImage!
}
This is what I got:
Note: the background may not always be white, but of course it will never be RED.
Using the same getPixelColor() function I can get the exact colour of any CGPoint from the image below, but only with y held constant (say y: 1).
If you think there is any other, better approach to detect all these points, please suggest it.
So I have found the solution. The problem was that I was ignoring the alpha channel in the UIImage extension getPixelColor(); using the alpha channel solved my problem.
Here is the updated code!
extension UIImage {
    func getPixelColor(pos: CGPoint, withFrameSize size: CGSize) -> UIColor {
        let x: CGFloat = (self.size.width) * pos.x / size.width
        let y: CGFloat = (self.size.height) * pos.y / size.height
        let pixelPoint: CGPoint = CGPoint(x: x, y: y)
        let pixelData = self.cgImage!.dataProvider!.data
        let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
        let pixelInfo: Int = ((Int(self.size.width) * Int(pixelPoint.y)) + Int(pixelPoint.x)) * 4
        let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
        let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
        let b = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
        let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
        return UIColor(red: r, green: g, blue: b, alpha: a)
    }
}
And the result is:
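A hedged performance note on the approach above: getPixelColor() re-fetches the data provider for every pixel, and the index math assumes rows are exactly width * 4 bytes, which padded rows break. Reading the byte buffer once, using the image's actual bytesPerRow, is faster and safer. A minimal sketch, assuming a 4-byte-per-pixel RGBA image (function and parameter names are illustrative):

import UIKit

// Scan the pixel buffer once, collecting points whose RGB is within a
// tolerance of a target colour. Assumes an RGBA8 layout.
func points(matching target: (r: UInt8, g: UInt8, b: UInt8), in cgImage: CGImage, tolerance: Int = 10) -> [CGPoint] {
    guard let pixelData = cgImage.dataProvider?.data,
          let data = CFDataGetBytePtr(pixelData) else { return [] }
    let bytesPerRow = cgImage.bytesPerRow // may be larger than width * 4
    var result = [CGPoint]()
    for y in 0..<cgImage.height {
        for x in 0..<cgImage.width {
            let i = y * bytesPerRow + x * 4
            if abs(Int(data[i]) - Int(target.r)) <= tolerance,
               abs(Int(data[i + 1]) - Int(target.g)) <= tolerance,
               abs(Int(data[i + 2]) - Int(target.b)) <= tolerance {
                result.append(CGPoint(x: x, y: y))
            }
        }
    }
    return result
}

A tolerance also tends to work better than == on UIColor, since anti-aliased edges rarely match a colour exactly.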

reading colour of pixel on sprite

I am using the extension below to read the colour of a pixel on touch in SpriteKit.
It's working for R, G, and B, but alpha always returns 1, even though my sprites have alpha channels.
I've tried setting the background of the view in the extension to .clear, but this doesn't help.
Is there somewhere in the code where I can ensure the alpha is preserved?
extension SKTexture {
    func getPixelColorFromTexture(position: CGPoint) -> SKColor {
        let view = SKView(frame: CGRectMake(0, 0, 1, 1))
        let scene = SKScene(size: CGSize(width: 1, height: 1))
        let sprite = SKSpriteNode(texture: self)
        sprite.anchorPoint = CGPointZero
        sprite.position = CGPoint(x: -floor(position.x), y: -floor(position.y))
        scene.anchorPoint = CGPointZero
        scene.addChild(sprite)
        view.presentScene(scene)
        var pixel: [UInt8] = [0, 0, 0, 0]
        let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedLast.rawValue).rawValue
        let context = CGBitmapContextCreate(&pixel, 1, 1, 8, 4, CGColorSpaceCreateDeviceRGB(), bitmapInfo)
        UIGraphicsPushContext(context!)
        view.drawViewHierarchyInRect(view.bounds, afterScreenUpdates: true)
        UIGraphicsPopContext()
        return SKColor(red: CGFloat(pixel[0]) / 255.0, green: CGFloat(pixel[1]) / 255.0, blue: CGFloat(pixel[2]) / 255.0, alpha: CGFloat(pixel[3]) / 255.0)
    }
}
This is the alternative code, which sometimes crashes:
extension UIImage {
    func getPixelColor(tex: SKTexture, pos: CGPoint) -> CGFloat? {
        guard let provider = cgImage?.dataProvider else {
            return nil
        }
        // let provider = cgImage?.dataProvider
        var posi = CGPoint()
        let width = (tex.size().width)
        let height = (tex.size().height)
        let pixelData = provider.data
        // sometimes crashes at this point --- CFDataGetBytePtr(pixelData)
        let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
        let pixelInfo: Int = ((Int(self.size.width) * Int(pos.y)) + Int(pos.x)) * 4
        // let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
        // let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
        // let b = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
        let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
        return (a)
    }
}
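A hedged suggestion, not from the original post: snapshotting an SKView composites the scene against the view's opaque backing store, which is one plausible reason alpha always comes back as 1. Reading the texture's own CGImage into a bitmap context you control sidesteps the view entirely. A minimal sketch, assuming iOS 9+ for SKTexture.cgImage() (the method name pixelColor(at:) is my own):

import SpriteKit

extension SKTexture {
    // Draw the texture's CGImage into a 1x1 RGBA8 context positioned so the
    // requested pixel lands on the context's single pixel, then read RGBA.
    func pixelColor(at position: CGPoint) -> SKColor? {
        let image = cgImage() // iOS 9+ / macOS 10.11+
        var pixel: [UInt8] = [0, 0, 0, 0]
        let drewOK: Bool = pixel.withUnsafeMutableBytes { buffer in
            guard let context = CGContext(data: buffer.baseAddress, width: 1, height: 1,
                                          bitsPerComponent: 8, bytesPerRow: 4,
                                          space: CGColorSpaceCreateDeviceRGB(),
                                          bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)
            else { return false }
            // Core Graphics uses a bottom-left origin; position is taken in that space.
            context.draw(image, in: CGRect(x: -position.x, y: -position.y,
                                           width: CGFloat(image.width), height: CGFloat(image.height)))
            return true
        }
        guard drewOK else { return nil }
        let a = CGFloat(pixel[3]) / 255
        guard a > 0 else { return SKColor(red: 0, green: 0, blue: 0, alpha: 0) }
        // Components are premultiplied by alpha; divide it back out.
        return SKColor(red: CGFloat(pixel[0]) / 255 / a,
                       green: CGFloat(pixel[1]) / 255 / a,
                       blue: CGFloat(pixel[2]) / 255 / a,
                       alpha: a)
    }
}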

Why do I get the wrong color of a pixel with the following code?

I create a UIImage with background color RED:
let theimage: UIImage = imageWithColor(UIColor(red: 1, green: 0, blue: 0, alpha: 1))

func imageWithColor(color: UIColor) -> UIImage {
    let rect = CGRectMake(0.0, 0.0, 200.0, 200.0)
    UIGraphicsBeginImageContext(rect.size)
    let context = UIGraphicsGetCurrentContext()
    CGContextSetFillColorWithColor(context, color.CGColor)
    CGContextFillRect(context, rect)
    let image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return image
}
I retrieve the color in the middle of the image as follows:
let h: CGFloat = theimage.size.height
let w: CGFloat = theimage.size.width
let test: UIColor = theimage.getPixelColor(CGPoint(x: 100, y: 100))
var rvalue: CGFloat = 0
var gvalue: CGFloat = 0
var bvalue: CGFloat = 0
var alfaval: CGFloat = 0
test.getRed(&rvalue, green: &gvalue, blue: &bvalue, alpha: &alfaval)
print("Blue Value : " + String(bvalue))
print("Red Value : " + String(rvalue))

extension UIImage {
    func getPixelColor(pos: CGPoint) -> UIColor {
        let pixelData = CGDataProviderCopyData(CGImageGetDataProvider(self.CGImage))
        let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
        let pixelInfo: Int = ((Int(self.size.width) * Int(pos.y)) + Int(pos.x)) * 4
        let r = CGFloat(data[pixelInfo]) / CGFloat(255.0)
        let g = CGFloat(data[pixelInfo+1]) / CGFloat(255.0)
        let b = CGFloat(data[pixelInfo+2]) / CGFloat(255.0)
        let a = CGFloat(data[pixelInfo+3]) / CGFloat(255.0)
        return UIColor(red: r, green: g, blue: b, alpha: a)
    }
}
As a result I get:
Blue Value : 1.0
Red Value : 0.0
Why is this? I couldn't find the mistake.
The problem is not the built-in getRed function, but rather the function that builds the UIColor object from the individual color components in the provider data. Your code is assuming that the provider data is stored in RGBA format, but it apparently is not. It would appear to be in ARGB format. Also, I'm not sure you have the byte order right, either.
When you have an image, there are a variety of ways of packing those pixels into the provider data. A few examples are shown in the Quartz 2D Programming Guide.
If you're going to have a getPixelColor routine that is hard-coded for a particular format, I might check the alphaInfo and bitmapInfo like so (in Swift 4.2):
extension UIImage {
    func getPixelColor(point: CGPoint) -> UIColor? {
        guard let cgImage = cgImage,
              let pixelData = cgImage.dataProvider?.data
        else { return nil }

        let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)

        let alphaInfo = cgImage.alphaInfo
        assert(alphaInfo == .premultipliedFirst || alphaInfo == .first || alphaInfo == .noneSkipFirst, "This routine expects alpha to be first component")

        let byteOrderInfo = cgImage.byteOrderInfo
        assert(byteOrderInfo == .order32Little || byteOrderInfo == .orderDefault, "This routine expects little-endian 32bit format")

        let bytesPerRow = cgImage.bytesPerRow
        let pixelInfo = Int(point.y) * bytesPerRow + Int(point.x) * 4
        let a: CGFloat = CGFloat(data[pixelInfo+3]) / 255
        let r: CGFloat = CGFloat(data[pixelInfo+2]) / 255
        let g: CGFloat = CGFloat(data[pixelInfo+1]) / 255
        let b: CGFloat = CGFloat(data[pixelInfo  ]) / 255
        return UIColor(red: r, green: g, blue: b, alpha: a)
    }
}
And if you were to always build this image programmatically for code that is dependent upon the bit map info, I'd explicitly specify these details when I created the image:
func image(with color: UIColor, size: CGSize) -> UIImage? {
    let rect = CGRect(origin: .zero, size: size)
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    guard let context = CGContext(data: nil,
                                  width: Int(rect.width),
                                  height: Int(rect.height),
                                  bitsPerComponent: 8,
                                  bytesPerRow: Int(rect.width) * 4,
                                  space: colorSpace,
                                  bitmapInfo: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue) else {
        return nil
    }
    context.setFillColor(color.cgColor)
    context.fill(rect)
    return context.makeImage().flatMap { UIImage(cgImage: $0) }
}
Perhaps even better, as shown in Technical Q&A 1509, you might want to have getPixelData explicitly create its own context of a predetermined format, draw the image to that context, and now the code is not contingent upon the format of the original image to which you are applying this.
extension UIImage {
    func getPixelColor(point: CGPoint) -> UIColor? {
        guard let cgImage = cgImage else { return nil }

        let width = Int(size.width)
        let height = Int(size.height)
        let colorSpace = CGColorSpaceCreateDeviceRGB()

        guard let context = CGContext(data: nil,
                                      width: width,
                                      height: height,
                                      bitsPerComponent: 8,
                                      bytesPerRow: width * 4,
                                      space: colorSpace,
                                      bitmapInfo: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue)
        else {
            return nil
        }

        context.draw(cgImage, in: CGRect(origin: .zero, size: size))

        guard let pixelBuffer = context.data else { return nil }

        let pointer = pixelBuffer.bindMemory(to: UInt32.self, capacity: width * height)
        let pixel = pointer[Int(point.y) * width + Int(point.x)]

        let r: CGFloat = CGFloat(red(for: pixel)) / 255
        let g: CGFloat = CGFloat(green(for: pixel)) / 255
        let b: CGFloat = CGFloat(blue(for: pixel)) / 255
        let a: CGFloat = CGFloat(alpha(for: pixel)) / 255

        return UIColor(red: r, green: g, blue: b, alpha: a)
    }

    private func alpha(for pixelData: UInt32) -> UInt8 {
        return UInt8((pixelData >> 24) & 255)
    }

    private func red(for pixelData: UInt32) -> UInt8 {
        return UInt8((pixelData >> 16) & 255)
    }

    private func green(for pixelData: UInt32) -> UInt8 {
        return UInt8((pixelData >> 8) & 255)
    }

    private func blue(for pixelData: UInt32) -> UInt8 {
        return UInt8((pixelData >> 0) & 255)
    }

    private func rgba(red: UInt8, green: UInt8, blue: UInt8, alpha: UInt8) -> UInt32 {
        return (UInt32(alpha) << 24) | (UInt32(red) << 16) | (UInt32(green) << 8) | (UInt32(blue) << 0)
    }
}
Clearly, if you're going to check a bunch of pixels, you'll want to refactor this (decouple the creation of the standardized pixel buffer from the code that checks the color), but hopefully this illustrates the idea.
For earlier versions of Swift, see previous revision of this answer.
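As a hedged illustration of that refactor (names here are mine, not from the answer): render once into a known ARGB little-endian buffer, then answer any number of pixel queries from it.

extension UIImage {
    // Build a standardized pixel buffer once; callers index into it directly.
    func standardizedPixelBuffer() -> (pixels: [UInt32], width: Int, height: Int)? {
        guard let cgImage = cgImage else { return nil }
        let width = Int(size.width)
        let height = Int(size.height)
        var pixels = [UInt32](repeating: 0, count: width * height)
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let ok: Bool = pixels.withUnsafeMutableBytes { buffer in
            guard let context = CGContext(data: buffer.baseAddress,
                                          width: width, height: height,
                                          bitsPerComponent: 8, bytesPerRow: width * 4,
                                          space: colorSpace,
                                          bitmapInfo: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue)
            else { return false }
            context.draw(cgImage, in: CGRect(origin: .zero, size: CGSize(width: width, height: height)))
            return true
        }
        return ok ? (pixels, width, height) : nil
    }
}

// Usage: one render, many lookups.
// if let buffer = image.standardizedPixelBuffer() {
//     let pixel = buffer.pixels[y * buffer.width + x]
//     let red = UInt8((pixel >> 16) & 255)
// }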

Get average color of UIImage in Swift

I was recently attempting to convert the code from here into Swift. However, I keep getting a white color, no matter the image. Here's my code:
// Playground - noun: a place where people can play
import UIKit

extension UIImage {
    func averageColor() -> UIColor {
        var colorSpace = CGColorSpaceCreateDeviceRGB()
        var rgba: [CGFloat] = [0, 0, 0, 0]
        var context = CGBitmapContextCreate(&rgba, 1, 1, 8, 4, colorSpace, CGBitmapInfo.fromRaw(CGImageAlphaInfo.PremultipliedLast.toRaw())!)
        rgba
        CGContextDrawImage(context, CGRectMake(0, 0, 1, 1), self.CGImage)
        if rgba[3] > 0 {
            var alpha = rgba[3] / 255
            var multiplier = alpha / 255
            return UIColor(red: rgba[0] * multiplier, green: rgba[1] * multiplier, blue: rgba[2] * multiplier, alpha: alpha)
        } else {
            return UIColor(red: rgba[0] / 255, green: rgba[1] / 255, blue: rgba[2] / 255, alpha: rgba[3] / 255)
        }
    }
}

var img = UIImage(data: NSData(contentsOfURL: NSURL(string: "http://upload.wikimedia.org/wikipedia/commons/c/c3/Aurora_as_seen_by_IMAGE.PNG")))
img.averageColor()
Thanks in advance.
CoreImage in iOS 9: use the CIAreaAverage filter and pass the extent of your entire image to be averaged.
Plus, it's much faster since it'll either be running on the GPU or as a highly-optimized CPU CIKernel.
import UIKit

extension UIImage {
    func areaAverage() -> UIColor {
        var bitmap = [UInt8](count: 4, repeatedValue: 0)

        if #available(iOS 9.0, *) {
            // Get average color.
            let context = CIContext()
            let inputImage = CIImage ?? CoreImage.CIImage(CGImage: CGImage!)
            let extent = inputImage.extent
            let inputExtent = CIVector(x: extent.origin.x, y: extent.origin.y, z: extent.size.width, w: extent.size.height)
            let filter = CIFilter(name: "CIAreaAverage", withInputParameters: [kCIInputImageKey: inputImage, kCIInputExtentKey: inputExtent])!
            let outputImage = filter.outputImage!
            let outputExtent = outputImage.extent
            assert(outputExtent.size.width == 1 && outputExtent.size.height == 1)

            // Render to bitmap.
            context.render(outputImage, toBitmap: &bitmap, rowBytes: 4, bounds: CGRect(x: 0, y: 0, width: 1, height: 1), format: kCIFormatRGBA8, colorSpace: CGColorSpaceCreateDeviceRGB())
        } else {
            // Create 1x1 context that interpolates pixels when drawing to it.
            let context = CGBitmapContextCreate(&bitmap, 1, 1, 8, 4, CGColorSpaceCreateDeviceRGB(), CGBitmapInfo.ByteOrderDefault.rawValue | CGImageAlphaInfo.PremultipliedLast.rawValue)!
            let inputImage = CGImage ?? CIContext().createCGImage(CIImage!, fromRect: CIImage!.extent)

            // Render to bitmap.
            CGContextDrawImage(context, CGRect(x: 0, y: 0, width: 1, height: 1), inputImage)
        }

        // Compute result.
        let result = UIColor(red: CGFloat(bitmap[0]) / 255.0, green: CGFloat(bitmap[1]) / 255.0, blue: CGFloat(bitmap[2]) / 255.0, alpha: CGFloat(bitmap[3]) / 255.0)
        return result
    }
}
Swift 3
func areaAverage() -> UIColor {
    var bitmap = [UInt8](repeating: 0, count: 4)

    if #available(iOS 9.0, *) {
        // Get average color.
        let context = CIContext()
        let inputImage: CIImage = ciImage ?? CoreImage.CIImage(cgImage: cgImage!)
        let extent = inputImage.extent
        let inputExtent = CIVector(x: extent.origin.x, y: extent.origin.y, z: extent.size.width, w: extent.size.height)
        let filter = CIFilter(name: "CIAreaAverage", withInputParameters: [kCIInputImageKey: inputImage, kCIInputExtentKey: inputExtent])!
        let outputImage = filter.outputImage!
        let outputExtent = outputImage.extent
        assert(outputExtent.size.width == 1 && outputExtent.size.height == 1)

        // Render to bitmap.
        context.render(outputImage, toBitmap: &bitmap, rowBytes: 4, bounds: CGRect(x: 0, y: 0, width: 1, height: 1), format: kCIFormatRGBA8, colorSpace: CGColorSpaceCreateDeviceRGB())
    } else {
        // Create 1x1 context that interpolates pixels when drawing to it.
        let context = CGContext(data: &bitmap, width: 1, height: 1, bitsPerComponent: 8, bytesPerRow: 4, space: CGColorSpaceCreateDeviceRGB(), bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)!
        let inputImage = cgImage ?? CIContext().createCGImage(ciImage!, from: ciImage!.extent)

        // Render to bitmap.
        context.draw(inputImage!, in: CGRect(x: 0, y: 0, width: 1, height: 1))
    }

    // Compute result.
    let result = UIColor(red: CGFloat(bitmap[0]) / 255.0, green: CGFloat(bitmap[1]) / 255.0, blue: CGFloat(bitmap[2]) / 255.0, alpha: CGFloat(bitmap[3]) / 255.0)
    return result
}
Here's a solution:
func averageColor() -> UIColor {
    let rgba = UnsafeMutablePointer<CUnsignedChar>.alloc(4)
    let colorSpace: CGColorSpaceRef = CGColorSpaceCreateDeviceRGB()
    let info = CGBitmapInfo(CGImageAlphaInfo.PremultipliedLast.rawValue)
    let context: CGContextRef = CGBitmapContextCreate(rgba, 1, 1, 8, 4, colorSpace, info)

    CGContextDrawImage(context, CGRectMake(0, 0, 1, 1), self.CGImage)

    if rgba[3] > 0 {
        let alpha: CGFloat = CGFloat(rgba[3]) / 255.0
        let multiplier: CGFloat = alpha / 255.0
        return UIColor(red: CGFloat(rgba[0]) * multiplier, green: CGFloat(rgba[1]) * multiplier, blue: CGFloat(rgba[2]) * multiplier, alpha: alpha)
    } else {
        return UIColor(red: CGFloat(rgba[0]) / 255.0, green: CGFloat(rgba[1]) / 255.0, blue: CGFloat(rgba[2]) / 255.0, alpha: CGFloat(rgba[3]) / 255.0)
    }
}
Swift 3:
func areaAverage() -> UIColor {
    var bitmap = [UInt8](repeating: 0, count: 4)

    let context = CIContext(options: nil)
    let cgImg = context.createCGImage(CoreImage.CIImage(cgImage: self.cgImage!), from: CoreImage.CIImage(cgImage: self.cgImage!).extent)

    let inputImage = CIImage(cgImage: cgImg!)
    let extent = inputImage.extent
    let inputExtent = CIVector(x: extent.origin.x, y: extent.origin.y, z: extent.size.width, w: extent.size.height)
    let filter = CIFilter(name: "CIAreaAverage", withInputParameters: [kCIInputImageKey: inputImage, kCIInputExtentKey: inputExtent])!
    let outputImage = filter.outputImage!
    let outputExtent = outputImage.extent
    assert(outputExtent.size.width == 1 && outputExtent.size.height == 1)

    // Render to bitmap.
    context.render(outputImage, toBitmap: &bitmap, rowBytes: 4, bounds: CGRect(x: 0, y: 0, width: 1, height: 1), format: kCIFormatRGBA8, colorSpace: CGColorSpaceCreateDeviceRGB())

    // Compute result.
    let result = UIColor(red: CGFloat(bitmap[0]) / 255.0, green: CGFloat(bitmap[1]) / 255.0, blue: CGFloat(bitmap[2]) / 255.0, alpha: CGFloat(bitmap[3]) / 255.0)
    return result
}
Are you setting up your context correctly? If I look at the documentation for the CGBitmapContext Reference:
https://developer.apple.com/library/ios/documentation/graphicsimaging/Reference/CGBitmapContext/index.html#//apple_ref/c/func/CGBitmapContextCreate
it looks like you are only allocating enough memory in the CGFloat array to hold a single pixel, and telling the compiler that your image is going to be one pixel by one pixel.
That one-pixel-by-one-pixel size is also being confirmed when you set your CGRect in CGContextDrawImage.
If the playground is only creating a one-pixel-by-one-pixel image, that would explain why you are only seeing a white screen.
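One further hedged observation of my own: the playground code above draws into a buffer declared as [CGFloat] while telling the context that each component is 8 bits, so the bytes the context writes do not line up with the CGFloats read back. Declaring the buffer as four 8-bit values, as the accepted answer does, avoids this. A minimal modern-Swift sketch of the same 1x1 averaging trick:

import UIKit

extension UIImage {
    // Average by drawing the whole image into a 1x1 RGBA8 context; the
    // interpolated single pixel approximates the mean colour.
    func averagePixelColor() -> UIColor? {
        guard let cgImage = cgImage else { return nil }
        var rgba: [UInt8] = [0, 0, 0, 0] // 8-bit components, matching bitsPerComponent below
        let ok: Bool = rgba.withUnsafeMutableBytes { buffer in
            guard let context = CGContext(data: buffer.baseAddress, width: 1, height: 1,
                                          bitsPerComponent: 8, bytesPerRow: 4,
                                          space: CGColorSpaceCreateDeviceRGB(),
                                          bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)
            else { return false }
            context.draw(cgImage, in: CGRect(x: 0, y: 0, width: 1, height: 1))
            return true
        }
        guard ok else { return nil }
        return UIColor(red: CGFloat(rgba[0]) / 255,
                       green: CGFloat(rgba[1]) / 255,
                       blue: CGFloat(rgba[2]) / 255,
                       alpha: CGFloat(rgba[3]) / 255)
    }
}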
