EXC_BAD_INSTRUCTION Swift Dithering Function - iOS

I've been trying to create a dithering function in Swift, but I keep running into issues. The code loops just fine for the first 9,000 or so pixels of a random image I selected, but then it gives me a runtime error. I've looked everywhere and I can't seem to solve the issue. Please help.
file:///Users/jeffn/Desktop/MyPlayground34.playground/: error: Playground execution aborted: error: Execution was interrupted, reason: EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP, subcode=0x0).
The process has been left at the point where it was interrupted, use "thread return -x" to return to the state before expression evaluation.
import UIKit
struct Pixel {
var value: UInt32
var red: UInt8 {
get { return UInt8(value & 0xFF) }
set { value = UInt32(newValue) | (value & 0xFFFFFF00) }
}
var green: UInt8 {
get { return UInt8((value >> 8) & 0xFF) }
set { value = (UInt32(newValue) << 8) | (value & 0xFFFF00FF) }
}
var blue: UInt8 {
get { return UInt8((value >> 16) & 0xFF) }
set { value = (UInt32(newValue) << 16) | (value & 0xFF00FFFF) }
}
var alpha: UInt8 {
get { return UInt8((value >> 24) & 0xFF) }
set { value = (UInt32(newValue) << 24) | (value & 0x00FFFFFF) }
}}
public struct RGBA {
var pixels: UnsafeMutableBufferPointer<Pixel>
var width: Int
var height: Int
init?(image: UIImage) {
guard let cgImage = image.cgImage else { return nil }
width = Int(image.size.width)
height = Int(image.size.height)
let bitsPerComponent = 8
let bytesPerPixel = 4
let bytesPerRow = width * bytesPerPixel
let imageData = UnsafeMutablePointer<Pixel>.allocate(capacity: width * height)
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.premultipliedLast.rawValue & CGBitmapInfo.alphaInfoMask.rawValue
guard let imageContext = CGContext(data: imageData, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo) else { return nil }
imageContext.draw(cgImage, in: CGRect(origin: CGPoint(x: 0, y: 0), size: image.size))
pixels = UnsafeMutableBufferPointer<Pixel>(start: imageData, count: width * height)
}
public func toUIImage() -> UIImage? {
let bitsPerComponent = 8
let bytesPerPixel = 4
let bytesPerRow = width * bytesPerPixel
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.premultipliedLast.rawValue & CGBitmapInfo.alphaInfoMask.rawValue
let imageContext = CGContext(data: pixels.baseAddress, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo, releaseCallback: nil, releaseInfo: nil)
guard let cgImage = imageContext!.makeImage() else {return nil}
let image = UIImage(cgImage: cgImage)
return image
}
}
public func contrast(image: UIImage) -> RGBA {
let rgba = RGBA(image: image)!
var new_red: UInt8 = 0
var new_green = 0
var new_blue = 0
var new_alpha = 0
var error_red: UInt8 = 0
var error_green = 0
var error_blue = 0
var error_alpha = 0
var pixel_1_red: UInt8 = 0
var output_red: UInt8 = 0
let w1: Double = 7.00/16.00
let w2: Double = 3.00/16.00
let w3: Double = 5.00/16.00
let w4: Double = 1.00/16.00
for y in 0..<rgba.height-1{
for x in 0..<rgba.width-1{
var index = y * rgba.width + x
var index_1 = y*rgba.width + x + 1
var pixel = rgba.pixels[index]
var pixel_1 = rgba.pixels[index_1]
pixel_1_red = pixel_1.red
if(pixel.red < 128){new_red = 0} else {new_red = 255}
error_red = new_red - pixel.red
var double_error_red = Double(error_red)*w1
var int_error_red = UInt8(double_error_red)
output_red = pixel_1_red + int_error_red
pixel_1.red = output_red
rgba.pixels[index_1] = pixel_1
}
}
return rgba
}
let image = UIImage(named: "newlowpassfilter.jpg")!
let rgba = contrast(image: image)
let newImage = rgba.toUIImage()
image
newImage

Maybe integer overflow is your issue.
Testing with one of my sample JPEG images, this line caused an overflow:
output_red = pixel_1_red + int_error_red
You may need to change the line to something like this:
let temp_red = Int(pixel_1_red) + Int(int_error_red)
output_red = temp_red > 255 ? 255 : UInt8(temp_red)
In Swift, the + operator traps on overflow and crashes the app, and the overflow-ignoring operator &+ is not suitable for image processing. You need to keep track of the result range of each operation in your code.
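For reference, here is a minimal sketch (not the asker's full kernel) of the red channel of the inner loop with the arithmetic done in Int and clamped back to the 0...255 range before narrowing; it assumes the rgba, w1 and loop bounds from the contrast(image:) function above.
for y in 0..<rgba.height - 1 {
    for x in 0..<rgba.width - 1 {
        let index = y * rgba.width + x
        let index_1 = index + 1
        var pixel = rgba.pixels[index]
        var pixel_1 = rgba.pixels[index_1]
        let new_red = pixel.red < 128 ? 0 : 255           // quantize to black or white
        let error_red = Int(pixel.red) - new_red          // signed error, may be negative
        let diffused = Int(Double(error_red) * w1)        // w1 = 7/16 as in the question
        let output_red = Int(pixel_1.red) + diffused      // do the math in Int, not UInt8
        pixel.red = UInt8(new_red)
        pixel_1.red = UInt8(max(0, min(255, output_red))) // clamp before converting back
        rgba.pixels[index] = pixel
        rgba.pixels[index_1] = pixel_1
    }
}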

Related

Thread 1: EXC_BAD_ACCESS (code=EXC_I386_GPFLT)

In my Swift project, I have two classes that work together to hold the pixel values of an image so that I can modify the red, green, blue and alpha values. An UnsafeMutableBufferPointer holds lots of bytes that are made up of the Pixel class objects.
I can interact with the class that holds the UnsafeMutableBufferPointer<Pixel> property. I can access all of the properties on that object and that all works fine. The only problem I'm having with the UnsafeMutableBufferPointer<Pixel> is trying to loop through it with my Pixel object, and it keeps crashing with the Thread 1: EXC_BAD_ACCESS (code=EXC_I386_GPFLT) exception.
init!(image: UIImage)
{
_width = Int(image.size.width)
_height = Int(image.size.height)
guard let cgImage = image.cgImage else { return nil }
_width = Int(image.size.width)
_height = Int(image.size.height)
let bitsPerComponent = 8
let bytesPerPixel = 4
let bytesPerRow = _width * bytesPerPixel
let imageData = UnsafeMutablePointer<Pixel>.allocate(capacity: _width * _height)
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.premultipliedLast.rawValue & CGBitmapInfo.alphaInfoMask.rawValue
guard let imageContext = CGContext(data: imageData, width: _width, height: _height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo) else { return nil }
imageContext.draw(cgImage, in: CGRect(origin: CGPoint.zero, size: image.size))
_pixels = UnsafeMutableBufferPointer<Pixel>(start: imageData, count: _width * _height)
}
This function is the part that is crashing the program. The exact part that crashes is the for loop that iterates over rgba.pixels, which is the UnsafeMutableBufferPointer.
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any])
{
let image: UIImage = info[UIImagePickerControllerEditedImage] as! UIImage!
let rgba = RGBA(image: image)!
for pixel in rgba.pixels
{
print(pixel.red)
}
self.dismiss(animated: true, completion: nil);
}
This is the constructor where I create the UnsafeMutableBufferPointer<Pixel>. Is there an easier way to do this and still be able to get the RGBA values and change them easily?
The Pixel class is a UInt32 value that is split into four UInt8 values.
Am I using the wrong construct to hold those values and, if so, is there a safer or easier construct to use? Or am I doing something wrong when accessing the Pixel values?
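For reference, here is a minimal sketch of how one packed UInt32 splits into its four UInt8 components, using the same masks and shifts as the Pixel struct; the example value is made up.
let value: UInt32 = 0xFF3366CC            // hypothetical packed pixel, alpha in the high byte
let red   = UInt8(value & 0xFF)           // 0xCC
let green = UInt8((value >> 8) & 0xFF)    // 0x66
let blue  = UInt8((value >> 16) & 0xFF)   // 0x33
let alpha = UInt8((value >> 24) & 0xFF)   // 0xFF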
This is how I got the pixels of an image -
// Grab and set up variables for the original image
let inputCGImage = inputImage.CGImage
let inputWidth: Int = CGImageGetWidth(inputCGImage)
let inputHeight: Int = CGImageGetHeight(inputCGImage)
// Get the colorspace that will be used for image processing (RGB/HSV)
let colorSpace: CGColorSpaceRef = CGColorSpaceCreateDeviceRGB()!
// Hardcode memory variables
let bytesPerPixel = 4 // 32 bits = 4 bytes
let bitsPerComponent = 8 // 32 bits div. by 4 components (RGBA) = 8 bits per component
let inputBytesPerRow = bytesPerPixel * inputWidth
// Get a pointer pointing to an allocated array to hold all the pixel data of the image
let inputPixels = UnsafeMutablePointer<UInt32>(calloc(inputHeight * inputWidth, sizeof(UInt32)))
// Create a context to draw the original image in (aka put the pixel data into the above array)
let context: CGContextRef = CGBitmapContextCreate(inputPixels, inputWidth, inputHeight, bitsPerComponent, inputBytesPerRow, colorSpace, CGImageAlphaInfo.PremultipliedLast.rawValue | CGBitmapInfo.ByteOrder32Big.rawValue)!
CGContextDrawImage(context, CGRect(x: 0, y: 0, width: inputWidth, height: inputHeight), inputCGImage)
Keep in mind this is not Swift 3 syntax in case that's what you're using, but that's the basic algorithm. Now, to grab the individual color values of each pixel, you will have to implement these functions -
func Mask8(x: UInt32) -> UInt32
{
return x & 0xFF
}
func R(x: UInt32) -> UInt32
{
return Mask8(x)
}
func G(x: UInt32) -> UInt32
{
return Mask8(x >> 8)
}
func B(x: UInt32) -> UInt32
{
return Mask8(x >> 16)
}
func A(x: UInt32) -> UInt32
{
return Mask8(x >> 24)
}
To create a completely new color after processing the RGBA values, you use this function -
func RGBAMake(r: UInt32, g: UInt32, b: UInt32, a: UInt32) -> UInt32
{
return (Mask8(r) | Mask8(g) << 8 | Mask8(b) << 16 | Mask8(a) << 24)
}
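For example, a hedged one-liner (not from the original answer) that rebuilds a pixel keeping only its red and alpha channels, using the helpers above; it assumes color already holds one packed UInt32 pixel value:
// Assuming `color` holds one packed UInt32 pixel value:
let redOnly = RGBAMake(r: R(x: color), g: 0, b: 0, a: A(x: color))  // zero out green and blue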
To iterate through the pixels array, you do it like so -
var currentPixel = inputPixels
for _ in 0..<inputHeight
{
for i in 0..<inputWidth
{
let color: UInt32 = currentPixel.memory
if i < inputWidth - 1
{
print(NSString(format: "%3d", R(x: color)), terminator: " ")
}
else
{
print(NSString(format: "%3d", R(x: color)))
}
}
currentPixel += 1
}
}
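For anyone reading this in Swift 3 or later, here is a roughly equivalent sketch of the same iteration; it assumes inputPixels is a non-optional UnsafeMutablePointer<UInt32> to the bitmap data and reuses the R(x:) helper and inputWidth/inputHeight from above. .memory becomes .pointee, and the pointer still advances by one pixel per step.
var currentPixel = inputPixels
for _ in 0..<inputHeight {
    var row = ""
    for _ in 0..<inputWidth {
        let color: UInt32 = currentPixel.pointee          // read the packed pixel
        row += String(format: "%3d ", Int32(R(x: color))) // red component of this pixel
        currentPixel += 1                                 // advance to the next pixel
    }
    print(row)
}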

How to read and log the raw pixels of an image in Swift on iOS

I need to read the pixel values of an image and iterate over them to print them in the Swift output. I have written this so far and used an RGBAImage class to read out the pixels. I'm getting lost between the CGContextRef and the iteration. I tried to write it from a CGImage, porting the pixel-data code from Objective-C to Swift, since I wanted to work in Swift.
func createRGBAPixel(inImage: CGImageRef) -> CGContextRef {
//Image width, height
let pixelWidth = CGImageGetWidth(inImage)
let pixelHeight = CGImageGetHeight(inImage)
//Declaring number of bytes
let bytesPerRow = Int(pixelWidth) * 4
let byteCount = bytesPerRow * Int(pixelHeight)
//RGB color space
let colorSpace = CGColorSpaceCreateDeviceRGB()
//Allocating image data
let mapData = malloc(byteCount)
let mapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
//Create bitmap context
let context = CGBitmapContextCreate(mapData, pixelWidth, pixelHeight, Int(8), Int(bytesPerRow), colorSpace, mapInfo.rawValue)
let pixelImage = CGBitmapContextCreate(pixels, pixelWidth, pixelHeight, bitsPerComponent, bytesPerRow, colorSpace, mapInfo)
let CGContextRef = pixelImage
let CGContextDrawImage(context, CGRectMake(0, 0, pixelWidth, pixelHeight), inImage)
//Iterating and logging
print("Logging pixel counts")
let pixels = calloc(pixelHeight * pixelWidth, sizeof(UInt32))
let myImage = CGImageRef: inImage
let myRGBA = RGBAImage(image: myImage)! //RGBAImage class to read pixels.
var number = 0
var currentPixel:Int32 = 0
currentPixel = pixels * UInt32
for number in 0..<pixelHeight {
for number in 0..<pixelWidth {
var color = color * currentPixel
print((pixel.red + pixel.green + pixel.blue) / 3.0)
currentPixel++
}
}
return context!
}
I created a small class for this:
class ImagePixelReader {
enum Component:Int {
case r = 0
case g = 1
case b = 2
case alpha = 3
}
struct Color {
var r:UInt8
var g:UInt8
var b:UInt8
var a:UInt8
var uiColor:UIColor {
return UIColor(red:CGFloat(r)/255.0,green:CGFloat(g)/255.0,blue:CGFloat(b)/255.0,alpha:CGFloat(a)/255.0)
}
}
let image:UIImage
private var data:CFData
private let pointer:UnsafePointer<UInt8>
private let scale:Int
init?(image:UIImage){
self.image = image
guard let cfdata = self.image.cgImage?.dataProvider?.data,
let pointer = CFDataGetBytePtr(cfdata) else {
return nil
}
self.scale = Int(image.scale)
self.data = cfdata
self.pointer = pointer
}
func componentAt(_ component:Component,x:Int,y:Int)->UInt8{
assert(CGFloat(x) < image.size.width)
assert(CGFloat(y) < image.size.height)
let pixelPosition = (Int(image.size.width) * y * scale + x) * 4 * scale
return pointer[pixelPosition + component.rawValue]
}
func colorAt(x:Int,y:Int)->Color{
assert(CGFloat(x) < image.size.width)
assert(CGFloat(y) < image.size.height)
let pixelPosition = (Int(image.size.width) * y * scale + x) * 4 * scale
return Color(r: pointer[pixelPosition + Component.r.rawValue],
g: pointer[pixelPosition + Component.g.rawValue],
b: pointer[pixelPosition + Component.b.rawValue],
a: pointer[pixelPosition + Component.alpha.rawValue])
}
}
How to use:
if let reader = ImagePixelReader(image: yourImage) {
//get alpha or color
let alpha = reader.componentAt(.alpha, x: 10, y:10)
let color = reader.colorAt(x:10, y: 10).uiColor
//getting all the pixels you need
var values = ""
//iterate over all pixels
for x in 0 ..< Int(yourImage.size.width){
for y in 0 ..< Int(yourImage.size.height){
let color = reader.colorAt(x: x, y: y)
values += "[\(x):\(y):\(color)] "
}
//add new line for every new row
values += "\n"
}
print(values)
}

Using functions in Swift classes

I have been trying to write code to apply a filter to an image in Swift. I am trying out the following code in a playground. It keeps failing for some reason, and I believe it is because I messed up somewhere in the variable referencing for the functions. I have no idea if I am supposed to use inout in the initializer to make the values mutable or something like that. Please point out where I am making a mistake. The code gives no error whatsoever, but it does not produce the image output that I want.
//: Playground - noun: a place where people can play
import UIKit
class imageProcessor{
var avgRed = 0
var avgBlue = 0
var avgGreen = 0
var avgPixelValue = 0
var rgbaImage:RGBAImage? = nil
func averagePixelValue ( input inputImage: RGBAImage ) -> ( Int , Int , Int , Int ){
var totalRed = 0
var totalBlue = 0
var totalGreen = 0
for y in 0..<inputImage.height{
for x in 0..<inputImage.width{
let index = y * inputImage.height + x
let pixel = inputImage.pixels[index]
totalBlue += Int(pixel.blue)
totalRed += Int(pixel.red)
totalGreen += Int(pixel.green)
}
}
let totalPixels = inputImage.height * inputImage.width
let avgRed = totalRed/totalPixels
let avgBlue = totalBlue/totalPixels
let avgGreen = totalGreen/totalPixels
let avgValue = ( avgRed + avgGreen + avgBlue )/3
return ( avgRed , avgGreen , avgBlue , avgValue )
}
init ( inputImage: RGBAImage ) {
rgbaImage = inputImage
( avgRed , avgGreen , avgBlue , avgPixelValue ) = averagePixelValue(input: rgbaImage!)
}
func addColorTint (color:String , intensity: Int=1)->RGBAImage{
for i in 0..<self.rgbaImage!.height{
for j in 0..<self.rgbaImage!.width{
var pixel = self.rgbaImage!.pixels[i*self.rgbaImage!.height + j]
let avgPixel = (Int(pixel.red) + Int(pixel.green) + Int(pixel.blue))/3
if ( avgPixel > avgValue ){
switch color{
case "red" :
self.rgbaImage!.pixels[i*self.rgbaImage!.height + j].red = UInt8(max(0,min(255,Int(pixel.red) * intensity)))
case "blue":
self.rgbaImage!.pixels[i*self.rgbaImage!.height + j].blue = UInt8(max(0,min(255,Int(pixel.blue) * intensity)))
case "green":
self.rgbaImage!.pixels[i*self.rgbaImage!.height + j].green = UInt8(max(0,min(255,Int(pixel.green) * intensity)))
default:
print ("0")
}
}
}
}
return self.rgbaImage!
}
}
let image = UIImage(named: "sample")
var rgbaImage = RGBAImage(image: image!)
// Process the image!
var newInstance = imageProcessor(inputImage: rgbaImage!)
let newrgbaInstance = newInstance.addColorTint("red", intensity: 2)
let newImage = newrgbaInstance.toUIImage()
newImage
The code seems to be running fine, but it shows no output in the playground output panel (the right side of the window). Any suggestions on how I am supposed to make this work are highly appreciated. Also, it worked when I used it as a simple function, but not as a class.
The library/class I am using is included below:
import UIKit
public struct Pixel {
public var value: UInt32
public var red: UInt8 {
get {
return UInt8(value & 0xFF)
}
set {
value = UInt32(newValue) | (value & 0xFFFFFF00)
}
}
public var green: UInt8 {
get {
return UInt8((value >> 8) & 0xFF)
}
set {
value = (UInt32(newValue) << 8) | (value & 0xFFFF00FF)
}
}
public var blue: UInt8 {
get {
return UInt8((value >> 16) & 0xFF)
}
set {
value = (UInt32(newValue) << 16) | (value & 0xFF00FFFF)
}
}
public var alpha: UInt8 {
get {
return UInt8((value >> 24) & 0xFF)
}
set {
value = (UInt32(newValue) << 24) | (value & 0x00FFFFFF)
}
}
}
public struct RGBAImage {
public var pixels: UnsafeMutableBufferPointer<Pixel>
public var width: Int
public var height: Int
public init?(image: UIImage) {
guard let cgImage = image.CGImage else { return nil }
// Redraw image for correct pixel format
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.ByteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.PremultipliedLast.rawValue & CGBitmapInfo.AlphaInfoMask.rawValue
width = Int(image.size.width)
height = Int(image.size.height)
let bytesPerRow = width * 4
let imageData = UnsafeMutablePointer<Pixel>.alloc(width * height)
guard let imageContext = CGBitmapContextCreate(imageData, width, height, 8, bytesPerRow, colorSpace, bitmapInfo) else { return nil }
CGContextDrawImage(imageContext, CGRect(origin: CGPointZero, size: image.size), cgImage)
pixels = UnsafeMutableBufferPointer<Pixel>(start: imageData, count: width * height)
}
public func toUIImage() -> UIImage? {
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.ByteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.PremultipliedLast.rawValue & CGBitmapInfo.AlphaInfoMask.rawValue
let bytesPerRow = width * 4
let imageContext = CGBitmapContextCreateWithData(pixels.baseAddress, width, height, 8, bytesPerRow, colorSpace, bitmapInfo, nil, nil)
guard let cgImage = CGBitmapContextCreateImage(imageContext) else {return nil}
let image = UIImage(CGImage: cgImage)
return image
}
}
I found the error. I had been using an undefined variable, avgValue, which I renamed to avgPixelValue.
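In other words, the comparison inside addColorTint has to reference the stored property that init actually sets, roughly like this:
// avgPixelValue is the property set in init; avgValue only ever existed
// as a local inside averagePixelValue.
if ( avgPixel > avgPixelValue ){
    // ... apply the tint as before
}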

Colour correction of Photoshop LUT filter using CIFilter

I used Photoshop to create a LUT filter and iOS CIFilter to read the LUT image, but the filtered image created on iOS does not match the filtered image created by Photoshop.
How can I trace the issue?
This is the original image.
This is the image with the filter I created in Photoshop.
This is the image with the filter I created on the iPhone.
This is the LUT image I am using.
Please try this, it works for me:
public class LUTsHelper {
public static func applyLUTsFilter(lutImage: String, dimension: Int, colorSpace: CGColorSpace) -> CIFilter? {
guard let image = UIImage(named: lutImage) else {
return nil
}
guard let cgImage = image.cgImage else {
return nil
}
guard let bitmap = createBitmap(image: cgImage, colorSpace: colorSpace) else {
return nil
}
let width = cgImage.width
let height = cgImage.height
let rowNum = width / dimension
let columnNum = height / dimension
let dataSize = dimension * dimension * dimension * MemoryLayout<Float>.size * 4
var array = Array<Float>(repeating: 0, count: dataSize)
var bitmapOffest: Int = 0
var z: Int = 0
for _ in stride(from: 0, to: rowNum, by: 1) {
for y in stride(from: 0, to: dimension, by: 1) {
let tmp = z
for _ in stride(from: 0, to: columnNum, by: 1) {
for x in stride(from: 0, to: dimension, by: 1) {
let dataOffset = (z * dimension * dimension + y * dimension + x) * 4
let position = bitmap
.advanced(by: bitmapOffest)
array[dataOffset + 0] = Float(position
.advanced(by: 0)
.pointee) / 255
array[dataOffset + 1] = Float(position
.advanced(by: 1)
.pointee) / 255
array[dataOffset + 2] = Float(position
.advanced(by: 2)
.pointee) / 255
array[dataOffset + 3] = Float(position
.advanced(by: 3)
.pointee) / 255
bitmapOffest += 4
}
z += 1
}
z = tmp
}
z += columnNum
}
free(bitmap)
let data = Data.init(bytes: array, count: dataSize)
guard
let cubeFilter = CIFilter(name: "CIColorCubeWithColorSpace")
else {
return nil
}
cubeFilter.setValue(dimension, forKey: "inputCubeDimension")
cubeFilter.setValue(data, forKey: "inputCubeData")
cubeFilter.setValue(colorSpace, forKey: "inputColorSpace")
return cubeFilter
}
private static func createBitmap(image: CGImage, colorSpace: CGColorSpace) -> UnsafeMutablePointer<UInt8>? {
let width = image.width
let height = image.height
let bitsPerComponent = 8
let bytesPerRow = width * 4
let bitmapSize = bytesPerRow * height
guard let data = malloc(bitmapSize) else {
return nil
}
guard let context = CGContext(
data: data,
width: width,
height: height,
bitsPerComponent: bitsPerComponent,
bytesPerRow: bytesPerRow,
space: colorSpace,
bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue,
releaseCallback: nil,
releaseInfo: nil) else {
return nil
}
context.draw(image, in: CGRect(x: 0, y: 0, width: width, height: height))
return data.bindMemory(to: UInt8.self, capacity: bitmapSize)
}}
Now use this class:
let colorSpace: CGColorSpace = CGColorSpace.init(name: CGColorSpace.sRGB) ?? CGColorSpaceCreateDeviceRGB()
let lutFilter = LUTsHelper.applyLUTsFilter(lutImage: "demo.png", dimension: 64, colorSpace: colorSpace)
lutFilter?.setValue(outputImage, forKey: "inputImage")
let lutOutputImage = lutFilter?.outputImage
if let output = lutOutputImage {
outputImage = output
}
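To actually display the result, the filtered CIImage still has to be rendered; below is a minimal sketch using a plain CIContext. The context and the filteredUIImage name are assumptions added here, not part of the answer above.
let ciContext = CIContext()
if let result = lutOutputImage,
   let cgResult = ciContext.createCGImage(result, from: result.extent) {
    let filteredUIImage = UIImage(cgImage: cgResult)  // ready for a UIImageView, saving, etc.
    _ = filteredUIImage
}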

How to use a LUT PNG for the CIColorCube filter?

I would like to use a lookup table PNG (example) as color cube data for the CIColorCube filter in Swift. All I have tried (and found) so far are examples with a computed color cube, as in this example.
How can I read a PNG as lookup data?
I now used this and this project to adapt their Objective-C implementation for Swift:
func colorCubeFilterFromLUT(imageName : NSString) -> CIFilter? {
let kDimension : UInt = 64
let lutImage = UIImage(named: imageName)!.CGImage
let lutWidth = CGImageGetWidth(lutImage!)
let lutHeight = CGImageGetHeight(lutImage!)
let rowCount = lutHeight / kDimension
let columnCount = lutWidth / kDimension
if ((lutWidth % kDimension != 0) || (lutHeight % kDimension != 0) || (rowCount * columnCount != kDimension)) {
NSLog("Invalid colorLUT %#", imageName);
return nil
}
let bitmap = self.createRGBABitmapFromImage(lutImage)
let size = Int(kDimension) * Int(kDimension) * Int(kDimension) * sizeof(Float) * 4
let data = UnsafeMutablePointer<Float>(malloc(UInt(size)))
var bitmapOffset : Int = 0
var z : UInt = 0
for (var row: UInt = 0; row < rowCount; row++)
{
for (var y: UInt = 0; y < kDimension; y++)
{
var tmp = z
for (var col: UInt = 0; col < columnCount; col++)
{
for (var x: UInt = 0; x < kDimension; x++) {
let alpha = Float(bitmap[Int(bitmapOffset)]) / 255.0
let red = Float(bitmap[Int(bitmapOffset+1)]) / 255.0
let green = Float(bitmap[Int(bitmapOffset+2)]) / 255.0
let blue = Float(bitmap[Int(bitmapOffset+3)]) / 255.0
var dataOffset = Int(z * kDimension * kDimension + y * kDimension + x) * 4
data[dataOffset] = red
data[dataOffset + 1] = green
data[dataOffset + 2] = blue
data[dataOffset + 3] = alpha
bitmapOffset += 4
}
z++
}
z = tmp
}
z += columnCount
}
let colorCubeData = NSData(bytesNoCopy: data, length: size, freeWhenDone: true)
// create CIColorCube Filter
var filter = CIFilter(name: "CIColorCube")
filter.setValue(colorCubeData, forKey: "inputCubeData")
filter.setValue(kDimension, forKey: "inputCubeDimension")
return filter
}
func createRGBABitmapFromImage(inImage: CGImage) -> UnsafeMutablePointer<Float> {
//Get image width, height
let pixelsWide = CGImageGetWidth(inImage)
let pixelsHigh = CGImageGetHeight(inImage)
// Declare the number of bytes per row. Each pixel in the bitmap in this
// example is represented by 4 bytes; 8 bits each of red, green, blue, and
// alpha.
let bitmapBytesPerRow = Int(pixelsWide) * 4
let bitmapByteCount = bitmapBytesPerRow * Int(pixelsHigh)
// Use the generic RGB color space.
let colorSpace = CGColorSpaceCreateDeviceRGB()
// Allocate memory for image data. This is the destination in memory
// where any drawing to the bitmap context will be rendered.
let bitmapData = malloc(CUnsignedLong(bitmapByteCount)) // bitmap
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
// Create the bitmap context. We want pre-multiplied RGBA, 8-bits
// per component. Regardless of what the source image format is
// (CMYK, Grayscale, and so on) it will be converted over to the format
// specified here by CGBitmapContextCreate.
let context = CGBitmapContextCreate(bitmapData, pixelsWide, pixelsHigh, 8, UInt(bitmapBytesPerRow), colorSpace, bitmapInfo)
let rect = CGRect(x:0, y:0, width:Int(pixelsWide), height:Int(pixelsHigh))
// Draw the image to the bitmap context. Once we draw, the memory
// allocated for the context for rendering will then contain the
// raw image data in the specified color space.
CGContextDrawImage(context, rect, inImage)
// Now we can get a pointer to the image data associated with the bitmap
// context.
// var data = CGBitmapContextGetData(context)
// var dataType = UnsafeMutablePointer<Float>(data)
// return dataType
var convertedBitmap = malloc(UInt(bitmapByteCount * sizeof(Float)))
vDSP_vfltu8(UnsafePointer<UInt8>(bitmapData), 1, UnsafeMutablePointer<Float>(convertedBitmap), 1, vDSP_Length(bitmapByteCount))
free(bitmapData)
return UnsafeMutablePointer<Float>(convertedBitmap)
}
Also see this answer.
Thought I would update this for Swift 3.0; this also works for JPG and PNG 3D color LUTs:
fileprivate func colorCubeFilterFromLUT(imageName : String) -> CIFilter? {
let size = 64
let lutImage = UIImage(named: imageName)!.cgImage
let lutWidth = lutImage!.width
let lutHeight = lutImage!.height
let rowCount = lutHeight / size
let columnCount = lutWidth / size
if ((lutWidth % size != 0) || (lutHeight % size != 0) || (rowCount * columnCount != size)) {
NSLog("Invalid colorLUT %#", imageName);
return nil
}
let bitmap = getBytesFromImage(image: UIImage(named: imageName))!
let floatSize = MemoryLayout<Float>.size
let cubeData = UnsafeMutablePointer<Float>.allocate(capacity: size * size * size * 4 * floatSize)
var z = 0
var bitmapOffset = 0
for _ in 0 ..< rowCount {
for y in 0 ..< size {
let tmp = z
for _ in 0 ..< columnCount {
for x in 0 ..< size {
let alpha = Float(bitmap[bitmapOffset]) / 255.0
let red = Float(bitmap[bitmapOffset+1]) / 255.0
let green = Float(bitmap[bitmapOffset+2]) / 255.0
let blue = Float(bitmap[bitmapOffset+3]) / 255.0
let dataOffset = (z * size * size + y * size + x) * 4
cubeData[dataOffset + 3] = alpha
cubeData[dataOffset + 2] = red
cubeData[dataOffset + 1] = green
cubeData[dataOffset + 0] = blue
bitmapOffset += 4
}
z += 1
}
z = tmp
}
z += columnCount
}
let colorCubeData = NSData(bytesNoCopy: cubeData, length: size * size * size * 4 * floatSize, freeWhenDone: true)
// create CIColorCube Filter
let filter = CIFilter(name: "CIColorCube")
filter?.setValue(colorCubeData, forKey: "inputCubeData")
filter?.setValue(size, forKey: "inputCubeDimension")
return filter
}
fileprivate func getBytesFromImage(image:UIImage?) -> [UInt8]?
{
var pixelValues: [UInt8]?
if let imageRef = image?.cgImage {
let width = Int(imageRef.width)
let height = Int(imageRef.height)
let bitsPerComponent = 8
let bytesPerRow = width * 4
let totalBytes = height * bytesPerRow
let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Little.rawValue
let colorSpace = CGColorSpaceCreateDeviceRGB()
var intensities = [UInt8](repeating: 0, count: totalBytes)
let contextRef = CGContext(data: &intensities, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo)
contextRef?.draw(imageRef, in: CGRect(x: 0.0, y: 0.0, width: CGFloat(width), height: CGFloat(height)))
pixelValues = intensities
}
return pixelValues
}
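A minimal usage sketch for the Swift 3 version; the LUT file name "lut64.png" and the inputCIImage variable are placeholders, not part of the answer.
if let filter = colorCubeFilterFromLUT(imageName: "lut64.png") {
    filter.setValue(inputCIImage, forKey: kCIInputImageKey)  // inputCIImage: an existing CIImage
    if let output = filter.outputImage {
        // render `output` with a CIContext, as with any other CIFilter result
    }
}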
