Using functions in swift classes - ios

I have been trying to write a code to apply a filter for an image in swift. I am trying out the following code in the playground. It keeps failing for some reason, and I believe it is because I messed up somewhere in the variable referencing for functions. I have no idea if I am supposed to use inout in the initializer to make the values mutable or something like that. Please point out where I am making a mistake. THe code is giving no error whatsoever, but It is not resulting in an Image ouput that I desire.
//: Playground - noun: a place where people can play
import UIKit
class imageProcessor{
var avgRed = 0
var avgBlue = 0
var avgGreen = 0
var avgPixelValue = 0
var rgbaImage:RGBAImage? = nil
func averagePixelValue ( input inputImage: RGBAImage ) -> ( Int , Int , Int , Int ){
var totalRed = 0
var totalBlue = 0
var totalGreen = 0
for y in 0..<inputImage.height{
for x in 0..<inputImage.width{
let index = y * inputImage.height + x
let pixel = inputImage.pixels[index]
totalBlue += Int(pixel.blue)
totalRed += Int(pixel.red)
totalGreen += Int(pixel.green)
}
}
let totalPixels = inputImage.height * inputImage.width
let avgRed = totalRed/totalPixels
let avgBlue = totalBlue/totalPixels
let avgGreen = totalGreen/totalPixels
let avgValue = ( avgRed + avgGreen + avgBlue )/3
return ( avgRed , avgGreen , avgBlue , avgValue )
}
init ( inputImage: RGBAImage ) {
rgbaImage = inputImage
( avgRed , avgGreen , avgBlue , avgPixelValue ) = averagePixelValue(input: rgbaImage!)
}
func addColorTint (color:String , intensity: Int=1)->RGBAImage{
for i in 0..<self.rgbaImage!.height{
for j in 0..<self.rgbaImage!.width{
var pixel = self.rgbaImage!.pixels[i*self.rgbaImage!.height + j]
let avgPixel = (Int(pixel.red) + Int(pixel.green) + Int(pixel.blue))/3
if ( avgPixel > avgValue ){
switch color{
case "red" :
self.rgbaImage!.pixels[i*self.rgbaImage!.height + j].red = UInt8(max(0,min(255,Int(pixel.red) * intensity)))
case "blue":
self.rgbaImage!.pixels[i*self.rgbaImage!.height + j].blue = UInt8(max(0,min(255,Int(pixel.blue) * intensity)))
case "green":
self.rgbaImage!.pixels[i*self.rgbaImage!.height + j].green = UInt8(max(0,min(255,Int(pixel.green) * intensity)))
default:
print ("0")
}
}
}
}
return self.rgbaImage!
}
}
let image = UIImage(named: "sample")
var rgbaImage = RGBAImage(image: image!)
// Process the image!
var newInstance = imageProcessor(inputImage: rgbaImage!)
let newrgbaInstance = newInstance.addColorTint("red", intensity: 2)
let newImage = newrgbaInstance.toUIImage()
newImage
The code seems to be running fine, but it shows no output in the playground output panel ( Right side of the window ). Any suggestions on to how I am supposed to make this work is highly appreciated. Also It worked when I used it as a simple function but not as a class.
Also, the library/class I am using is included below
import UIKit
public struct Pixel {
public var value: UInt32
public var red: UInt8 {
get {
return UInt8(value & 0xFF)
}
set {
value = UInt32(newValue) | (value & 0xFFFFFF00)
}
}
public var green: UInt8 {
get {
return UInt8((value >> 8) & 0xFF)
}
set {
value = (UInt32(newValue) << 8) | (value & 0xFFFF00FF)
}
}
public var blue: UInt8 {
get {
return UInt8((value >> 16) & 0xFF)
}
set {
value = (UInt32(newValue) << 16) | (value & 0xFF00FFFF)
}
}
public var alpha: UInt8 {
get {
return UInt8((value >> 24) & 0xFF)
}
set {
value = (UInt32(newValue) << 24) | (value & 0x00FFFFFF)
}
}
}
public struct RGBAImage {
public var pixels: UnsafeMutableBufferPointer<Pixel>
public var width: Int
public var height: Int
public init?(image: UIImage) {
guard let cgImage = image.CGImage else { return nil }
// Redraw image for correct pixel format
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.ByteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.PremultipliedLast.rawValue & CGBitmapInfo.AlphaInfoMask.rawValue
width = Int(image.size.width)
height = Int(image.size.height)
let bytesPerRow = width * 4
let imageData = UnsafeMutablePointer<Pixel>.alloc(width * height)
guard let imageContext = CGBitmapContextCreate(imageData, width, height, 8, bytesPerRow, colorSpace, bitmapInfo) else { return nil }
CGContextDrawImage(imageContext, CGRect(origin: CGPointZero, size: image.size), cgImage)
pixels = UnsafeMutableBufferPointer<Pixel>(start: imageData, count: width * height)
}
public func toUIImage() -> UIImage? {
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.ByteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.PremultipliedLast.rawValue & CGBitmapInfo.AlphaInfoMask.rawValue
let bytesPerRow = width * 4
let imageContext = CGBitmapContextCreateWithData(pixels.baseAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo, nil, nil)
guard let cgImage = CGBitmapContextCreateImage(imageContext) else {return nil}
let image = UIImage(CGImage: cgImage)
return image
}
}

I found the error. It was that I have been using an unidentified variable avgValue, which I renamed to avgPixelValue.

Related

EXC_BAD_INSTRUCTION Swift Dithering Function

I've been trying to create a dithering function in Swift but I keep running into issues. I've noticed the code loops just fine for the first 9000 or so pixels of a random image I selected. But then it gives me a runtime error, I've looked everywhere and I can't seem to solve the issue. Please help.
file:///Users/jeffn/Desktop/MyPlayground34.playground/: error: Playground execution aborted: error: Execution was interrupted, reason: EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP, subcode=0x0).
The process has been left at the point where it was interrupted, use "thread return -x" to return to the state before expression evaluation.
import UIKit
struct Pixel {
var value: UInt32
var red: UInt8 {
get { return UInt8(value & 0xFF) }
set { value = UInt32(newValue) | (value & 0xFFFFFF00) }
}
var green: UInt8 {
get { return UInt8((value >> 8) & 0xFF) }
set { value = (UInt32(newValue) << 8) | (value & 0xFFFF00FF) }
}
var blue: UInt8 {
get { return UInt8((value >> 16) & 0xFF) }
set { value = (UInt32(newValue) << 16) | (value & 0xFF00FFFF) }
}
var alpha: UInt8 {
get { return UInt8((value >> 24) & 0xFF) }
set { value = (UInt32(newValue) << 24) | (value & 0x00FFFFFF) }
}}
public struct RGBA {
var pixels: UnsafeMutableBufferPointer<Pixel>
var width: Int
var height: Int
init?(image: UIImage) {
guard let cgImage = image.cgImage else { return nil }
width = Int(image.size.width)
height = Int(image.size.height)
let bitsPerComponent = 8
let bytesPerPixel = 4
let bytesPerRow = width * bytesPerPixel
let imageData = UnsafeMutablePointer<Pixel>.allocate(capacity: width * height)
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.premultipliedLast.rawValue & CGBitmapInfo.alphaInfoMask.rawValue
guard let imageContext = CGContext(data: imageData, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo) else { return nil }
imageContext.draw(cgImage, in: CGRect(origin: CGPoint(x: 0,y :0), size: image.size))
pixels = UnsafeMutableBufferPointer<Pixel>(start: imageData, count: width * height)
}
public func toUIImage() -> UIImage? {
let bitsPerComponent = 8
let bytesPerPixel = 4
let bytesPerRow = width * bytesPerPixel
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.premultipliedLast.rawValue & CGBitmapInfo.alphaInfoMask.rawValue
let imageContext = CGContext(data: pixels.baseAddress, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo, releaseCallback: nil, releaseInfo: nil)
guard let cgImage = imageContext!.makeImage() else {return nil}
let image = UIImage(cgImage: cgImage)
return image
}
}
public func contrast(image: UIImage) -> RGBA {
let rgba = RGBA(image: image)!
var new_red: UInt8 = 0
var new_green = 0
var new_blue = 0
var new_alpha = 0
var error_red: UInt8 = 0
var error_green = 0
var error_blue = 0
var error_alpha = 0
var pixel_1_red: UInt8 = 0
var output_red: UInt8 = 0
let w1: Double = 7.00/16.00
let w2: Double = 3.00/16.00
let w3: Double = 5.00/16.00
let w4: Double = 1.00/16.00
for y in 0..<rgba.height-1{
for x in 0..<rgba.width-1{
var index = y * rgba.width + x
var index_1 = y*rgba.width + x + 1
var pixel = rgba.pixels[index]
var pixel_1 = rgba.pixels[index_1]
pixel_1_red = pixel_1.red
if(pixel.red < 128){new_red = 0} else {new_red = 255}
error_red = new_red - pixel.red
var double_error_red = Double(error_red)*w1
var int_error_red = UInt8(double_error_red)
output_red = pixel_1_red + int_error_red
pixel_1.red = output_red
rgba.pixels[index_1] = pixel_1
}
}
return rgba
}
let image = UIImage(named: "newlowpassfilter.jpg")!
let rgba = contrast(image: image)
let newImage = rgba.toUIImage()
image
newImage
Maybe integer overflow is your issue.
With testing with one of my sample jpg-image, this line caused overflow:
output_red = pixel_1_red + int_error_red
You may need to change the line to something like this:
let temp_red = Int(pixel_1_red) + Int(int_error_red)
output_red = temp_red > 255 ? 255 : UInt8(temp_red)
In Swift, + operator detects overflow and causes app crash, and overflow ignoring operator &+ does not suit for image processing. You may need to care about the result range of each operation in your code.

instaFilter Processor error - missing return in a function expected to return

I'm currently learning swift and I'm currently taking a class for it. We are told to write a code to apply filters to a sample picture to change a given intensity of a color on an image in its parameter and then return the modified image.
In the code that I have written, on the last couple lines, it states an error saying
missing return in a function expected to return 'UIImage'
my class code:
import UIKit
let image = UIImage(named: "sample")!
class brightnessFilter {
func increaseContrast(image: UIImage) -> UIImage{
var rgbaImage = RGBAImage(image: image)!
let avgRed = 118
let avgGreen = 98
let avgBlue = 83
for y in 0..<rgbaImage.height {
for x in 0..<rgbaImage.width {
let index = y * rgbaImage.width + x
var pixel = rgbaImage.pixels[index]
let redLum = Int(pixel.red) - avgRed
let greenLum = Int(pixel.green) - avgGreen
let blueLum = Int(pixel.blue) - avgBlue
pixel.red = UInt8(max(min(255, avgRed + 2 * redLum), 0))
pixel.blue = UInt8(max(min(255, avgBlue + 2 * blueLum), 0))
pixel.green = UInt8(max(min(255, avgGreen + 2 * greenLum), 0))
rgbaImage.pixels[index] = pixel
}
}
let newImage1 = rgbaImage.toUIImage()!
return (newImage1)
}
}
let test = brightnessFilter()
let processedImg = test.increaseContrast(image)
class redFilter {
func increaseContrast(image: UIImage) -> UIImage{
var rgbaImage = RGBAImage(image: image)!
let avgRed = 118
for y in 0..<rgbaImage.height {
for x in 0..<rgbaImage.width {
let index = y * rgbaImage.width + x
var pixel = rgbaImage.pixels[index]
let redDiff = Int(pixel.red) - avgRed
if (redDiff > 0) {
pixel.red = UInt8( max(0, min(255, avgRed + redDiff * 5)))
rgbaImage.pixels[index] = pixel
}
}
let newImage2 = rgbaImage.toUIImage()!
return (newImage2)
}
}
}
let test2 = redFilter()
RGBA class:
import UIKit
public struct Pixel {
public var value: UInt32
public var red: UInt8 {
get {
return UInt8(value & 0xFF)
}
set {
value = UInt32(newValue) | (value & 0xFFFFFF00)
}
}
public var green: UInt8 {
get {
return UInt8((value >> 8) & 0xFF)
}
set {
value = (UInt32(newValue) << 8) | (value & 0xFFFF00FF)
}
}
public var blue: UInt8 {
get {
return UInt8((value >> 16) & 0xFF)
}
set {
value = (UInt32(newValue) << 16) | (value & 0xFF00FFFF)
}
}
public var alpha: UInt8 {
get {
return UInt8((value >> 24) & 0xFF)
}
set {
value = (UInt32(newValue) << 24) | (value & 0x00FFFFFF)
}
}
}
When a method declaration is written with return value, you must make sure it is always return a value.
I believe your error is in the following method:
func increaseContrast(image: UIImage) -> UIImage {
var rgbaImage = RGBAImage(image: image)!
let avgRed = 118
let newImage2 = UIImage() //<-- Added
for y in 0..<rgbaImage.height {
for x in 0..<rgbaImage.width {
let index = y * rgbaImage.width + x
var pixel = rgbaImage.pixels[index]
let redDiff = Int(pixel.red) - avgRed
if (redDiff > 0) {
pixel.red = UInt8( max(0, min(255, avgRed + redDiff * 5)))
rgbaImage.pixels[index] = pixel
}
}
let newImage2 = rgbaImage.toUIImage()!
return (newImage2) //<---NOTICE
}// <---FIX
}
Notice: that the return statement is in the for loop, meaning the method might not always return a value.
Fix: Put the return statement under the curly brackets marked in the code.
Added: You must return a value, therefore, you must create one
Side note: Make sure to handle nil

How to read and log the raw pixels of image in swift iOS

I need to read pixel values of an image and iterate to print in swift output, I have written this so far and used a RGBAImage class to read out pixels. I'm getting lost from CGContextRef to Iteration. I tried to write from CGImage, getting pixel data from objective C language to swift since I wanted to work in swift.
func createRGBAPixel(inImage: CGImageRef) -> CGContextRef {
//Image width, height
let pixelWidth = CGImageGetWidth(inImage)
let pixelHeight = CGImageGetHeight(inImage)
//Declaring number of bytes
let bytesPerRow = Int(pixelWidth) * 4
let byteCount = bytesPerRow * Int(pixelHeight)
//RGB color space
let colorSpace = CGColorSpaceCreateDeviceRGB()
//Allocating image data
let mapData = malloc(byteCount)
let mapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
//Create bitmap context
let context = CGBitmapContextCreate(mapData, pixelWidth, pixelHeight, Int(8), Int(bytesPerRow), colorSpace, mapInfo.rawValue)
let pixelImage = CGBitmapContextCreate(pixels, pixelWidth, pixelHeight, bitsPerComponent, bytesPerRow, colorSpace, mapInfo)
let CGContextRef = pixelImage
let CGContextDrawImage(context, CGRectMake(0, 0, pixelWidth, pixelHeight), inImage)
//Iterating and logging
print("Logging pixel counts")
let pixels = calloc(pixelHeight * pixelWidth, sizeof(UInt32))
let myImage = CGImageRef: inImage
let myRGBA = RGBAImage(image: myImage)! //RGBAImage class to read pixels.
var number = 0
var currentPixel:Int32 = 0
currentPixel = pixels * UInt32
for number in 0..<pixelHeight {
for number in 0..<pixelWidth {
var color = color * currentPixel
print((pixel.red + pixel.green + pixel.blue) / 3.0)
currentPixel++
}
}
return context!
}
I created small class for this:
class ImagePixelReader {
enum Component:Int {
case r = 0
case g = 1
case b = 2
case alpha = 3
}
struct Color {
var r:UInt8
var g:UInt8
var b:UInt8
var a:UInt8
var uiColor:UIColor {
return UIColor(red:CGFloat(r)/255.0,green:CGFloat(g)/255.0,blue:CGFloat(b)/255.0,alpha:CGFloat(alpha)/255.0)
}
}
let image:UIImage
private var data:CFData
private let pointer:UnsafePointer<UInt8>
private let scale:Int
init?(image:UIImage){
self.image = image
guard let cfdata = self.image.cgImage?.dataProvider?.data,
let pointer = CFDataGetBytePtr(cfdata) else {
return nil
}
self.scale = Int(image.scale)
self.data = cfdata
self.pointer = pointer
}
func componentAt(_ component:Component,x:Int,y:Int)->UInt8{
assert(CGFloat(x) < image.size.width)
assert(CGFloat(y) < image.size.height)
let pixelPosition = (Int(image.size.width) * y * scale + x) * 4 * scale
return pointer[pixelPosition + component.rawValue]
}
func colorAt(x:Int,y:Int)->Color{
assert(CGFloat(x) < image.size.width)
assert(CGFloat(y) < image.size.height)
let pixelPosition = (Int(image.size.width) * y * scale + x) * 4 * scale
return Color(r: pointer[pixelPosition + Component.r.rawValue],
g: pointer[pixelPosition + Component.g.rawValue],
b: pointer[pixelPosition + Component.b.rawValue],
a: pointer[pixelPosition + Component.alpha.rawValue])
}
}
How to use:
if let reader = ImagePixelReader(image: yourImage) {
//get alpha or color
let alpha = reader.componentAt(.alpha, x: 10, y:10)
let color = reader.colorAt(x:10, y: 10).uiColor
//getting all the pixels you need
var values = ""
//iterate over all pixels
for x in 0 ..< Int(image.size.width){
for y in 0 ..< Int(image.size.height){
let color = reader.colorAt(x: x, y: y)
values += "[\(x):\(y):\(color)] "
}
//add new line for every new row
values += "\n"
}
print(values)
}

iOS Swift Flood fill algorithm

I created this extension for "bucket fill" (flood fill) of touch point:
extension UIImageView {
func bucketFill(startPoint: CGPoint, newColor: UIColor) {
var newRed, newGreen, newBlue, newAlpha: CUnsignedChar
let pixelsWide = CGImageGetWidth(self.image!.CGImage)
let pixelsHigh = CGImageGetHeight(self.image!.CGImage)
let rect = CGRect(x:0, y:0, width:Int(pixelsWide), height:Int(pixelsHigh))
let bitmapBytesPerRow = Int(pixelsWide) * 4
var context = self.image!.createARGBBitmapContext()
//Clear the context
CGContextClearRect(context, rect)
// Draw the image to the bitmap context. Once we draw, the memory
// allocated for the context for rendering will then contain the
// raw image data in the specified color space.
CGContextDrawImage(context, rect, self.image!.CGImage)
var data = CGBitmapContextGetData(context)
var dataType = UnsafeMutablePointer<UInt8>(data)
let newColorRef = CGColorGetComponents(newColor.CGColor)
if(CGColorGetNumberOfComponents(newColor.CGColor) == 2) {
newRed = CUnsignedChar(newColorRef[0] * 255) // CUnsignedChar
newGreen = CUnsignedChar(newColorRef[0] * 255)
newBlue = CUnsignedChar(newColorRef[0] * 255)
newAlpha = CUnsignedChar(newColorRef[1])
} else {
newRed = CUnsignedChar(newColorRef[0] * 255)
newGreen = CUnsignedChar(newColorRef[1] * 255)
newBlue = CUnsignedChar(newColorRef[2] * 255)
newAlpha = CUnsignedChar(newColorRef[3])
}
let newColorStr = ColorRGB(red: newRed, green: newGreen, blue: newBlue)
var stack = Stack()
let offset = 4*((Int(pixelsWide) * Int(startPoint.y)) + Int(startPoint.x))
//let alpha = dataType[offset]
let startRed: UInt8 = dataType[offset+1]
let startGreen: UInt8 = dataType[offset+2]
let startBlue: UInt8 = dataType[offset+3]
stack.push(startPoint)
while(!stack.isEmpty()) {
let point: CGPoint = stack.pop() as! CGPoint
let offset = 4*((Int(pixelsWide) * Int(point.y)) + Int(point.x))
let alpha = dataType[offset]
let red: UInt8 = dataType[offset+1]
let green: UInt8 = dataType[offset+2]
let blue: UInt8 = dataType[offset+3]
if (red == newRed && green == newGreen && blue == newBlue) {
continue
}
if (red.absoluteDifference(startRed) < 4 && green.absoluteDifference(startGreen) < 4 && blue.absoluteDifference(startBlue) < 4) {
dataType[offset] = 255
dataType[offset + 1] = newRed
dataType[offset + 2] = newGreen
dataType[offset + 3] = newBlue
if (point.x > 0) {
stack.push(CGPoint(x: point.x - 1, y: point.y))
}
if (point.x < CGFloat(pixelsWide)) {
stack.push(CGPoint(x: point.x + 1, y: point.y))
}
if (point.y > 0) {
stack.push(CGPoint(x: point.x, y: point.y - 1))
}
if (point.y < CGFloat(pixelsHigh)) {
stack.push(CGPoint(x: point.x, y: point.y + 1))
}
} else {
}
}
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
let finalContext = CGBitmapContextCreate(data, pixelsWide, pixelsHigh, CLong(8), CLong(bitmapBytesPerRow), colorSpace, bitmapInfo)
let imageRef = CGBitmapContextCreateImage(finalContext)
self.image = UIImage(CGImage: imageRef, scale: self.image!.scale,orientation: self.image!.imageOrientation)
}
}
Now I would like to improve performance. How can I make this algorithm work faster? UInt8.absoluteDifference extension is my attempt to include almost same colors to flood fill and it's working but this could be really improve and I know it but I don't know how.
extension UInt8 {
func absoluteDifference(subtrahend: UInt8) -> UInt8 {
if (self > subtrahend) {
return self - subtrahend;
} else {
return subtrahend - self;
}
}
}
My Stack class:
class Stack {
var count: Int = 0
var head: Node = Node()
init() {
}
func isEmpty() -> Bool {
return self.count == 0
}
func push(value: Any) {
if isEmpty() {
self.head = Node()
}
var node = Node(value: value)
node.next = self.head
self.head = node
self.count++
}
func pop() -> Any? {
if isEmpty() {
return nil
}
var node = self.head
self.head = node.next!
self.count--
return node.value
}
}
Thanks for help

How to use LUT png for CIColorCube filter?

I would like to use a lookup table png (example) as color cube data for the CIColorCube filter in Swift. All I tried (and found) so far are examples with a computed color cube as in this example.
How can I read a png as lookup data?
I now used this and this project to adapt their Objective-C implementation for Swift:
func colorCubeFilterFromLUT(imageName : NSString) -> CIFilter? {
let kDimension : UInt = 64
let lutImage = UIImage(named: imageName)!.CGImage
let lutWidth = CGImageGetWidth(lutImage!)
let lutHeight = CGImageGetHeight(lutImage!)
let rowCount = lutHeight / kDimension
let columnCount = lutWidth / kDimension
if ((lutWidth % kDimension != 0) || (lutHeight % kDimension != 0) || (rowCount * columnCount != kDimension)) {
NSLog("Invalid colorLUT %#", imageName);
return nil
}
let bitmap = self.createRGBABitmapFromImage(lutImage)
let size = Int(kDimension) * Int(kDimension) * Int(kDimension) * sizeof(Float) * 4
let data = UnsafeMutablePointer<Float>(malloc(UInt(size)))
var bitmapOffset : Int = 0
var z : UInt = 0
for (var row: UInt = 0; row < rowCount; row++)
{
for (var y: UInt = 0; y < kDimension; y++)
{
var tmp = z
for (var col: UInt = 0; col < columnCount; col++)
{
for (var x: UInt = 0; x < kDimension; x++) {
let alpha = Float(bitmap[Int(bitmapOffset)]) / 255.0
let red = Float(bitmap[Int(bitmapOffset+1)]) / 255.0
let green = Float(bitmap[Int(bitmapOffset+2)]) / 255.0
let blue = Float(bitmap[Int(bitmapOffset+3)]) / 255.0
var dataOffset = Int(z * kDimension * kDimension + y * kDimension + x) * 4
data[dataOffset] = red
data[dataOffset + 1] = green
data[dataOffset + 2] = blue
data[dataOffset + 3] = alpha
bitmapOffset += 4
}
z++
}
z = tmp
}
z += columnCount
}
let colorCubeData = NSData(bytesNoCopy: data, length: size, freeWhenDone: true)
// create CIColorCube Filter
var filter = CIFilter(name: "CIColorCube")
filter.setValue(colorCubeData, forKey: "inputCubeData")
filter.setValue(kDimension, forKey: "inputCubeDimension")
return filter
}
func createRGBABitmapFromImage(inImage: CGImage) -> UnsafeMutablePointer<Float> {
//Get image width, height
let pixelsWide = CGImageGetWidth(inImage)
let pixelsHigh = CGImageGetHeight(inImage)
// Declare the number of bytes per row. Each pixel in the bitmap in this
// example is represented by 4 bytes; 8 bits each of red, green, blue, and
// alpha.
let bitmapBytesPerRow = Int(pixelsWide) * 4
let bitmapByteCount = bitmapBytesPerRow * Int(pixelsHigh)
// Use the generic RGB color space.
let colorSpace = CGColorSpaceCreateDeviceRGB()
// Allocate memory for image data. This is the destination in memory
// where any drawing to the bitmap context will be rendered.
let bitmapData = malloc(CUnsignedLong(bitmapByteCount)) // bitmap
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
// Create the bitmap context. We want pre-multiplied RGBA, 8-bits
// per component. Regardless of what the source image format is
// (CMYK, Grayscale, and so on) it will be converted over to the format
// specified here by CGBitmapContextCreate.
let context = CGBitmapContextCreate(bitmapData, pixelsWide, pixelsHigh, 8, UInt(bitmapBytesPerRow), colorSpace, bitmapInfo)
let rect = CGRect(x:0, y:0, width:Int(pixelsWide), height:Int(pixelsHigh))
// Draw the image to the bitmap context. Once we draw, the memory
// allocated for the context for rendering will then contain the
// raw image data in the specified color space.
CGContextDrawImage(context, rect, inImage)
// Now we can get a pointer to the image data associated with the bitmap
// context.
// var data = CGBitmapContextGetData(context)
// var dataType = UnsafeMutablePointer<Float>(data)
// return dataType
var convertedBitmap = malloc(UInt(bitmapByteCount * sizeof(Float)))
vDSP_vfltu8(UnsafePointer<UInt8>(bitmapData), 1, UnsafeMutablePointer<Float>(convertedBitmap), 1, vDSP_Length(bitmapByteCount))
free(bitmapData)
return UnsafeMutablePointer<Float>(convertedBitmap)
}
Also see this answer.
Thought I would update this for Swift 3.0 also this works for JPG's and PNG's 3D Color LUTs
fileprivate func colorCubeFilterFromLUT(imageName : String) -> CIFilter? {
let size = 64
let lutImage = UIImage(named: imageName)!.cgImage
let lutWidth = lutImage!.width
let lutHeight = lutImage!.height
let rowCount = lutHeight / size
let columnCount = lutWidth / size
if ((lutWidth % size != 0) || (lutHeight % size != 0) || (rowCount * columnCount != size)) {
NSLog("Invalid colorLUT %#", imageName);
return nil
}
let bitmap = getBytesFromImage(image: UIImage(named: imageName))!
let floatSize = MemoryLayout<Float>.size
let cubeData = UnsafeMutablePointer<Float>.allocate(capacity: size * size * size * 4 * floatSize)
var z = 0
var bitmapOffset = 0
for _ in 0 ..< rowCount {
for y in 0 ..< size {
let tmp = z
for _ in 0 ..< columnCount {
for x in 0 ..< size {
let alpha = Float(bitmap[bitmapOffset]) / 255.0
let red = Float(bitmap[bitmapOffset+1]) / 255.0
let green = Float(bitmap[bitmapOffset+2]) / 255.0
let blue = Float(bitmap[bitmapOffset+3]) / 255.0
let dataOffset = (z * size * size + y * size + x) * 4
cubeData[dataOffset + 3] = alpha
cubeData[dataOffset + 2] = red
cubeData[dataOffset + 1] = green
cubeData[dataOffset + 0] = blue
bitmapOffset += 4
}
z += 1
}
z = tmp
}
z += columnCount
}
let colorCubeData = NSData(bytesNoCopy: cubeData, length: size * size * size * 4 * floatSize, freeWhenDone: true)
// create CIColorCube Filter
let filter = CIFilter(name: "CIColorCube")
filter?.setValue(colorCubeData, forKey: "inputCubeData")
filter?.setValue(size, forKey: "inputCubeDimension")
return filter
}
fileprivate func getBytesFromImage(image:UIImage?) -> [UInt8]?
{
var pixelValues: [UInt8]?
if let imageRef = image?.cgImage {
let width = Int(imageRef.width)
let height = Int(imageRef.height)
let bitsPerComponent = 8
let bytesPerRow = width * 4
let totalBytes = height * bytesPerRow
let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Little.rawValue
let colorSpace = CGColorSpaceCreateDeviceRGB()
var intensities = [UInt8](repeating: 0, count: totalBytes)
let contextRef = CGContext(data: &intensities, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo)
contextRef?.draw(imageRef, in: CGRect(x: 0.0, y: 0.0, width: CGFloat(width), height: CGFloat(height)))
pixelValues = intensities
}
return pixelValues!
}

Resources