Pixel Array to UIImage in Swift - iOS

I've been trying to figure out how to convert an array of RGB pixel data to a UIImage in Swift.
I'm keeping the RGB data per pixel in a simple struct:
public struct PixelData {
var a: Int
var r: Int
var g: Int
var b: Int
}
I've made my way to the following function, but the resulting image is incorrect:
func imageFromARGB32Bitmap(pixels:[PixelData], width: Int, height: Int)-> UIImage {
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo:CGBitmapInfo = CGBitmapInfo(CGImageAlphaInfo.PremultipliedFirst.rawValue)
let bitsPerComponent:Int = 8
let bitsPerPixel:Int = 32
assert(pixels.count == Int(width * height))
var data = pixels // Copy to mutable []
let providerRef = CGDataProviderCreateWithCFData(
NSData(bytes: &data, length: data.count * sizeof(PixelData))
)
let cgim = CGImageCreate(
width,
height,
bitsPerComponent,
bitsPerPixel,
width * Int(sizeof(PixelData)),
rgbColorSpace,
bitmapInfo,
providerRef,
nil,
true,
kCGRenderingIntentDefault
)
return UIImage(CGImage: cgim)!
}
Any tips or pointers on how to properly convert an RGB array to a UIImage?

Note: This is a solution for iOS creating a UIImage. For a solution for macOS and NSImage, see this answer.
Your only problem is that the data types in your PixelData structure need to be UInt8: with Int fields, each PixelData is 32 bytes on a 64-bit platform, so the buffer no longer matches the 32-bits-per-pixel, 8-bits-per-component layout you describe to CGImageCreate. I created a test image in a Playground with the following:
public struct PixelData {
var a: UInt8
var r: UInt8
var g: UInt8
var b: UInt8
}
var pixels = [PixelData]()
let red = PixelData(a: 255, r: 255, g: 0, b: 0)
let green = PixelData(a: 255, r: 0, g: 255, b: 0)
let blue = PixelData(a: 255, r: 0, g: 0, b: 255)
for _ in 1...300 {
pixels.append(red)
}
for _ in 1...300 {
pixels.append(green)
}
for _ in 1...300 {
pixels.append(blue)
}
let image = imageFromARGB32Bitmap(pixels: pixels, width: 30, height: 30)
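A quick way to see why the field type matters is to check the struct's size in the Playground (a small sanity check; the values shown assume a 64-bit platform):
// With UInt8 fields, PixelData is exactly the 4 bytes per pixel that the
// 32-bits-per-pixel CGImage expects; with Int fields it would be 32 bytes.
MemoryLayout<PixelData>.size    // 4
MemoryLayout<PixelData>.stride  // 4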
Update for Swift 4:
I updated imageFromARGB32Bitmap to work with Swift 4. The function now returns a UIImage? and guard is used to return nil if anything goes wrong.
func imageFromARGB32Bitmap(pixels: [PixelData], width: Int, height: Int) -> UIImage? {
guard width > 0 && height > 0 else { return nil }
guard pixels.count == width * height else { return nil }
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
let bitsPerComponent = 8
let bitsPerPixel = 32
var data = pixels // Copy to mutable []
guard let providerRef = CGDataProvider(data: NSData(bytes: &data,
length: data.count * MemoryLayout<PixelData>.size)
)
else { return nil }
guard let cgim = CGImage(
width: width,
height: height,
bitsPerComponent: bitsPerComponent,
bitsPerPixel: bitsPerPixel,
bytesPerRow: width * MemoryLayout<PixelData>.size,
space: rgbColorSpace,
bitmapInfo: bitmapInfo,
provider: providerRef,
decode: nil,
shouldInterpolate: true,
intent: .defaultIntent
)
else { return nil }
return UIImage(cgImage: cgim)
}
Making it a convenience initializer for UIImage:
This function works well as a convenience initializer for UIImage. Here is the implementation:
extension UIImage {
convenience init?(pixels: [PixelData], width: Int, height: Int) {
guard width > 0 && height > 0, pixels.count == width * height else { return nil }
var data = pixels
guard let providerRef = CGDataProvider(data: Data(bytes: &data, count: data.count * MemoryLayout<PixelData>.size) as CFData)
else { return nil }
guard let cgim = CGImage(
width: width,
height: height,
bitsPerComponent: 8,
bitsPerPixel: 32,
bytesPerRow: width * MemoryLayout<PixelData>.size,
space: CGColorSpaceCreateDeviceRGB(),
bitmapInfo: CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue),
provider: providerRef,
decode: nil,
shouldInterpolate: true,
intent: .defaultIntent)
else { return nil }
self.init(cgImage: cgim)
}
}
Here is an example of its usage:
// Generate a 500x500 image of randomly colored pixels
let height = 500
let width = 500
var pixels: [PixelData] = .init(repeating: .init(a: 0, r: 0, g: 0, b: 0), count: width * height)
for index in pixels.indices {
pixels[index].a = 255
pixels[index].r = .random(in: 0...255)
pixels[index].g = .random(in: 0...255)
pixels[index].b = .random(in: 0...255)
}
let image = UIImage(pixels: pixels, width: width, height: height)

Update for Swift 3
struct PixelData {
var a: UInt8 = 0
var r: UInt8 = 0
var g: UInt8 = 0
var b: UInt8 = 0
}
func imageFromBitmap(pixels: [PixelData], width: Int, height: Int) -> UIImage? {
assert(width > 0)
assert(height > 0)
let pixelDataSize = MemoryLayout<PixelData>.size
assert(pixelDataSize == 4)
assert(pixels.count == Int(width * height))
let data: Data = pixels.withUnsafeBufferPointer {
return Data(buffer: $0)
}
let cfdata = NSData(data: data) as CFData
let provider: CGDataProvider! = CGDataProvider(data: cfdata)
if provider == nil {
print("CGDataProvider is not supposed to be nil")
return nil
}
let cgimage: CGImage! = CGImage(
width: width,
height: height,
bitsPerComponent: 8,
bitsPerPixel: 32,
bytesPerRow: width * pixelDataSize,
space: CGColorSpaceCreateDeviceRGB(),
bitmapInfo: CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue),
provider: provider,
decode: nil,
shouldInterpolate: true,
intent: .defaultIntent
)
if cgimage == nil {
print("CGImage is not supposed to be nil")
return nil
}
return UIImage(cgImage: cgimage)
}

Related

TensorFlowLite.Tensor to UIImage

I am new to Swift, TFLite, and iOS. I have succeeded in converting and running my model. However, at the end I need to reconstruct an image. My TFLite model returns a TFLite Tensor of Float32 with 4-D shape (1, height, width, 3).
let outputTensor: Tensor
outputTensor = try myInterpreter.output(at: 0)
I am looking to make an RGB picture without alpha. In Python, it would look like this:
Image.fromarray((np.array(outputTensor.data) * 255).astype(np.uint8))
From my understanding, the best way would be to make a CVPixelBuffer, apply a transformation (for the ×255 scaling), and finally make the UIImage. I am deeply lost in the iOS docs; many possibilities exist. Does the community have a suggestion?
Based on the Google example, a UIImage extension can be written:
extension UIImage {
convenience init?(data: Data, size: CGSize) {
let width = Int(size.width)
let height = Int(size.height)
let floats = data.toArray(type: Float32.self)
let bufferCapacity = width * height * 4
let unsafePointer = UnsafeMutablePointer<UInt8>.allocate(capacity: bufferCapacity)
let unsafeBuffer = UnsafeMutableBufferPointer<UInt8>(
start: unsafePointer,
count: bufferCapacity)
defer {
unsafePointer.deallocate()
}
for x in 0..<width {
for y in 0..<height {
let floatIndex = (y * width + x) * 3
let index = (y * width + x) * 4
let red = UInt8(floats[floatIndex] * 255)
let green = UInt8(floats[floatIndex + 1] * 255)
let blue = UInt8(floats[floatIndex + 2] * 255)
unsafeBuffer[index] = red
unsafeBuffer[index + 1] = green
unsafeBuffer[index + 2] = blue
unsafeBuffer[index + 3] = 0
}
}
let outData = Data(buffer: unsafeBuffer)
// Construct image from output tensor data
let alphaInfo = CGImageAlphaInfo.noneSkipLast
let bitmapInfo = CGBitmapInfo(rawValue: alphaInfo.rawValue)
.union(.byteOrder32Big)
let colorSpace = CGColorSpaceCreateDeviceRGB()
guard
let imageDataProvider = CGDataProvider(data: outData as CFData),
let cgImage = CGImage(
width: width,
height: height,
bitsPerComponent: 8,
bitsPerPixel: 32,
bytesPerRow: MemoryLayout<UInt8>.size * 4 * Int(size.width),
space: colorSpace,
bitmapInfo: bitmapInfo,
provider: imageDataProvider,
decode: nil,
shouldInterpolate: false,
intent: .defaultIntent
)
else {
return nil
}
self.init(cgImage: cgImage)
}
}
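Note that data.toArray(type:) is not a standard Foundation API; it is a small Data helper that ships with the Google TFLite sample this answer is based on. If your project does not already have it, a minimal sketch of such an extension could look like this (it assumes the data length is a multiple of the element stride):
extension Data {
    // Reinterpret the raw bytes as an array of the given element type.
    func toArray<T>(type: T.Type) -> [T] where T: ExpressibleByIntegerLiteral {
        var array = [T](repeating: 0, count: count / MemoryLayout<T>.stride)
        _ = array.withUnsafeMutableBytes { self.copyBytes(to: $0) }
        return array
    }
}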
Then the image can easily be constructed from the TFLite inference output:
let outputTensor: Tensor
outputTensor = try decoder.output(at: 0)
image = UIImage(data: outputTensor.data, size: size) ?? UIImage()

EXC_BAD_ACCESS (code=1) when updating UIImage of UIImageView

I am new with iOS and CoreML. I have a very simple UI with two UIImageViews (one should be the input, and the second should be the output). When tapping the first one, the image should be processed by a neural network and the output should be displayed in the second one.
However, when I try to download the image from the MLMultiArray output object and create a UIImage from it, which I can then load into the second UIImageView, I get an EXC_BAD_ACCESS (code=1).
I have reduced the problem to not calling the neural network processing at all and just trying to create a new image from an MLMultiArray. The outcome was the same.
After that I tried generating a UIImage from an empty buffer. The image is created correctly, but if I attempt to update the UIImageView to use it, I get the same error.
If I try to update the second UIImageView to a different image (e.g. the input image) everything works fine.
I assume this is a memory-management issue with the UIImage object I am creating, but I am not able to figure out what I am doing wrong.
class ViewController: UIViewController {
@IBOutlet weak var out: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
}
@IBAction func imageTapped(_ sender: UITapGestureRecognizer) {
let imageView = sender.view as? UIImageView
if let imageToAnalyse = imageView?.image {
if let outImg = process(forImage: imageToAnalyse) {
out.image = outImg
}
}
}
func process (forImage inImage:UIImage) -> UIImage? {
let size = CGSize(width: 512, height: 512)
let mlOut = try? MLMultiArray(shape: [1, size.height, size.width] as [NSNumber], dataType: .float32)
let newImage = getSinglePlaneImage(inBuffer: mlOut!, width: Int(size.width), height: Int(size.height))
return newImage
}
func getSinglePlaneImage(inBuffer: MLMultiArray, width: Int, height: Int) -> UIImage
{
var newImage: UIImage
// let floatPtr = inBuffer.dataPointer.bindMemory(to: Float32.self, capacity: inBuffer.count)
// let floatBuffer = UnsafeBufferPointer(start: floatPtr, count: inBuffer.count)
// let pixelValues : [UInt8]? = Array(floatBuffer).map({UInt8( ImageProcessor.clamp( (($0) + 1.0) * 128.0, minValue: 0.0, maxValue: 255.0) ) })
//simulating pixels from MLMultiArray
let pixels : [UInt8]? = Array(repeating: 0, count: 512*512)
var imageRef: CGImage?
if var pixelValues = pixels {
let bitsPerComponent = 8
let bytesPerPixel = 1
let bitsPerPixel = bytesPerPixel * bitsPerComponent
let bytesPerRow = bytesPerPixel * width
let totalBytes = height * bytesPerRow
imageRef = withUnsafePointer(to: &pixelValues, {
ptr -> CGImage? in
var imageRef: CGImage?
let colorSpaceRef = CGColorSpaceCreateDeviceGray()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue).union(CGBitmapInfo())
let data = UnsafeRawPointer(ptr.pointee).assumingMemoryBound(to: UInt8.self)
let releaseData: CGDataProviderReleaseDataCallback = {
(info: UnsafeMutableRawPointer?, data: UnsafeRawPointer, size: Int) -> () in
}
if let providerRef = CGDataProvider(dataInfo: nil, data: data, size: totalBytes, releaseData: releaseData) {
imageRef = CGImage(width: width,
height: height,
bitsPerComponent: bitsPerComponent,
bitsPerPixel: bitsPerPixel,
bytesPerRow: bytesPerRow,
space: colorSpaceRef,
bitmapInfo: bitmapInfo,
provider: providerRef,
decode: nil,
shouldInterpolate: false,
intent: CGColorRenderingIntent.defaultIntent)
}
return imageRef
})
}
newImage = UIImage(cgImage: imageRef!)
return newImage
}
}
It seems your code would convert the 512x512 Float32 array into a 512x512 UInt8 array successfully, so I wrote this answer based on the uncommented version of your code. (Though the conversion is not efficient and has some room for improvement.)
UPDATE
The following description is not the right solution for the OP's issue; it is kept just for the record. Please skip to UPDATED CODE at the bottom of this answer.
OLD CODE (NOT the right solution)
First of all, the worst flaw in the code is the following two lines:
imageRef = withUnsafePointer(to: &pixelValues, {
let data = UnsafeRawPointer(ptr.pointee).assumingMemoryBound(to: UInt8.self)
The first line above passes a pointer to [UInt8]?. In Swift, [UInt8]? (aka Optional<Array<UInt8>>) is an 8-byte struct, not a contiguous region like a C array.
The second is more dangerous. ptr.pointee is [UInt8]?, but accessing a Swift Array through a pointer like this is not guaranteed to work. And passing an Array to UnsafeRawPointer.init(_:) may create a temporary region that is deallocated just after the call to the initializer.
As you know, accessing a dangling pointer sometimes happens to do no harm under limited conditions, but it may produce unexpected results at any time.
I would write something like this:
func getSinglePlaneImage(inBuffer: MLMultiArray, width: Int, height: Int) -> UIImage {
//simulating pixels from MLMultiArray
//...
let pixelValues: [UInt8] = Array(repeating: 0, count: 1*512*512)
let bitsPerComponent = 8
let bytesPerPixel = 1
let bitsPerPixel = bytesPerPixel * 8
let bytesPerRow = bytesPerPixel * width
let totalBytes = height * bytesPerRow
let imageRef = pixelValues.withUnsafeBytes({bytes -> CGImage? in
var imageRef: CGImage?
let colorSpaceRef = CGColorSpaceCreateDeviceGray()
let bitmapInfo: CGBitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue)
let data = bytes.baseAddress!.assumingMemoryBound(to: UInt8.self)
let releaseData: CGDataProviderReleaseDataCallback = {_,_,_ in }
if let providerRef = CGDataProvider(dataInfo: nil, data: data, size: totalBytes, releaseData: releaseData) {
imageRef = CGImage(width: width,
height: height,
bitsPerComponent: bitsPerComponent,
bitsPerPixel: bitsPerPixel,
bytesPerRow: bytesPerRow,
space: colorSpaceRef,
bitmapInfo: bitmapInfo,
provider: providerRef,
decode: nil,
shouldInterpolate: false,
intent: .defaultIntent)
}
return imageRef
})
let newImage = UIImage(cgImage: imageRef!)
return newImage
}
When you want a pointer to the first element of an Array, use withUnsafeBytes and use the pointer (in fact, an UnsafeRawBufferPointer) inside the closure argument.
One more thing: your pixels or pixelValues do not need to be Optional.
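In isolation, the safe pattern looks like this (a minimal sketch; the raw pointer is only valid inside the closure and must not escape it):
let samples: [UInt8] = [10, 20, 30, 40]
samples.withUnsafeBytes { raw in
    // raw is an UnsafeRawBufferPointer over the array's contiguous storage
    let base = raw.baseAddress!.assumingMemoryBound(to: UInt8.self)
    print(base.pointee)   // 10
}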
Alternatively, you can create a grayscale image with a Float32 for each pixel:
func getSinglePlaneImage(inBuffer: MLMultiArray, width: Int, height: Int) -> UIImage {
//simulating pixels from MLMultiArray
//...
let pixelValues: [Float32] = Array(repeating: 0, count: 1*512*512)
let bitsPerComponent = 32 //<-
let bytesPerPixel = 4 //<-
let bitsPerPixel = bytesPerPixel * 8
let bytesPerRow = bytesPerPixel * width
let totalBytes = height * bytesPerRow
let imageRef = pixelValues.withUnsafeBytes({bytes -> CGImage? in
var imageRef: CGImage?
let colorSpaceRef = CGColorSpaceCreateDeviceGray()
let bitmapInfo: CGBitmapInfo = [CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue),
.byteOrder32Little, .floatComponents] //<-
let data = bytes.baseAddress!.assumingMemoryBound(to: Float32.self)
let releaseData: CGDataProviderReleaseDataCallback = {_,_,_ in }
if let providerRef = CGDataProvider(dataInfo: nil, data: data, size: totalBytes, releaseData: releaseData) {
imageRef = CGImage(width: width,
height: height,
bitsPerComponent: bitsPerComponent,
bitsPerPixel: bitsPerPixel,
bytesPerRow: bytesPerRow,
space: colorSpaceRef,
bitmapInfo: bitmapInfo,
provider: providerRef,
decode: nil,
shouldInterpolate: false,
intent: CGColorRenderingIntent.defaultIntent)
}
return imageRef
})
let newImage = UIImage(cgImage: imageRef!)
return newImage
}
Both work as expected in my testing project, but if you find something wrong, please inform me.
UPDATED CODE (Hope this is the right solution)
I was missing the fact that CGDataProvider keeps the pointer when created with init(dataInfo:data:size:releaseData:), even after a CGImage is created. So it may be referenced after the closure passed to withUnsafeBytes has finished, when the pointer is no longer valid.
You had better use CGDataProvider.init(data:) in such cases.
func getSinglePlaneImage(inBuffer: MLMultiArray, width: Int, height: Int) -> UIImage {
var newImage: UIImage
//let floatPtr = inBuffer.dataPointer.bindMemory(to: Float32.self, capacity: inBuffer.count)
//let floatBuffer = UnsafeBufferPointer(start: floatPtr, count: inBuffer.count)
//let pixelValues: Data = Data((floatBuffer.lazy.map{
// UInt8(ImageProcessor.clamp((($0) + 1.0) * 128.0, minValue: 0.0, maxValue: 255.0))
//})
//simulating pixels from MLMultiArray
//...
let pixelValues = Data(count: 1*512*512) // <- ###
var imageRef: CGImage?
let bitsPerComponent = 8
let bytesPerPixel = 1
let bitsPerPixel = bytesPerPixel * bitsPerComponent
let bytesPerRow = bytesPerPixel * width
let colorSpaceRef = CGColorSpaceCreateDeviceGray()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue)
if let providerRef = CGDataProvider(data: pixelValues as CFData) { // <-###
imageRef = CGImage(width: width,
height: height,
bitsPerComponent: bitsPerComponent,
bitsPerPixel: bitsPerPixel,
bytesPerRow: bytesPerRow,
space: colorSpaceRef,
bitmapInfo: bitmapInfo,
provider: providerRef,
decode: nil,
shouldInterpolate: false,
intent: CGColorRenderingIntent.defaultIntent)
}
newImage = UIImage(cgImage: imageRef!)
return newImage
}
As far as I have tried, this does not crash even on an actual device with a number of repeated touches. Please try it. Thanks for your patience.

glReadPixels to UIImage(), Black Image

I am trying to take a screenshot of the Vuforia camera view. I am using this code. It works perfectly on iPhone 7 (iOS 11.3) and iPad Pro (iOS 11.2). But this code does not work on iPad 2 (iOS 9.3.5): the function returns a valid UIImage, but it is black.
static public func takeScreenshot() -> UIImage? {
let xCoord: Int = 0
let yCoord: Int = 0
let screen = UIScreen.main.bounds
let scale = UIScreen.main.scale
let width = screen.width * scale
let height = screen.height * scale
let dataLength: Int = Int(width) * Int(height) * 4
let pixels: UnsafeMutableRawPointer? = malloc(dataLength * MemoryLayout<GLubyte>.size)
glPixelStorei(GLenum(GL_PACK_ALIGNMENT), 4)
glReadPixels(GLint(xCoord), GLint(yCoord), GLsizei(width), GLsizei(height), GLenum(GL_RGBA), GLenum(GL_UNSIGNED_BYTE), pixels)
guard let pixelData: UnsafePointer = (UnsafeRawPointer(pixels)?.assumingMemoryBound(to: UInt8.self)) else { return nil }
let cfdata: CFData = CFDataCreate(kCFAllocatorDefault, pixelData, dataLength * MemoryLayout<GLubyte>.size)
let provider: CGDataProvider! = CGDataProvider(data: cfdata)
let colorspace = CGColorSpaceCreateDeviceRGB()
guard let iref = CGImage(width: Int(width),
height: Int(height),
bitsPerComponent: 8,
bitsPerPixel: 32,
bytesPerRow: Int(width)*4,
space: colorspace,
bitmapInfo: CGBitmapInfo.byteOrder32Big,
provider: provider,
decode: nil,
shouldInterpolate: false,
intent: CGColorRenderingIntent.defaultIntent) else { return nil }
UIGraphicsBeginImageContext(CGSize(width: CGFloat(width), height: CGFloat(height)))
if let cgcontext = UIGraphicsGetCurrentContext() {
cgcontext.setBlendMode(CGBlendMode.copy)
cgcontext.draw(iref, in: CGRect(x: CGFloat(0.0), y: CGFloat(0.0), width: CGFloat(width), height: CGFloat(height)))
let image: UIImage? = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return image
}
return nil
}
UPDATE: I resolved this problem; the function needs to run on the OpenGL thread.
OpenGL ES accepts only a very limited set of formats. There is an excellent website with OpenGL documentation: http://docs.gl
You are interested in http://docs.gl/es2/glReadPixels or http://docs.gl/es3/glReadPixels. The buffer format should be GL_RGBA or GL_BGRA.
Maybe a better approach would be https://stackoverflow.com/a/9704392/1351828.
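Combining that with the asker's own update (the capture only works once it runs on the OpenGL thread), a minimal sketch of that approach could look like the following, assuming a GLKView-based renderer; the names Renderer, captureRequested, onScreenshot and Screenshotter are illustrative, not from the original code. glReadPixels only sees valid data while the GL context is current on the calling thread and the frame is still in the framebuffer, so the capture happens inside the draw callback:
import GLKit
import UIKit

final class Renderer: NSObject, GLKViewDelegate {
    var captureRequested = false
    var onScreenshot: ((UIImage?) -> Void)?

    func glkView(_ view: GLKView, drawIn rect: CGRect) {
        // ... draw the camera frame and augmentations first ...

        if captureRequested {
            captureRequested = false
            let shot = Screenshotter.takeScreenshot()                // the function from the question
            DispatchQueue.main.async { self.onScreenshot?(shot) }    // hand the result back to UIKit
        }
    }
}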

EXC_BAD_INSTRUCTION Swift Dithering Function

I've been trying to create a dithering function in Swift, but I keep running into issues. I've noticed the code loops just fine for the first 9000 or so pixels of a random image I selected, but then it gives me a runtime error. I've looked everywhere and I can't seem to solve the issue. Please help.
file:///Users/jeffn/Desktop/MyPlayground34.playground/: error: Playground execution aborted: error: Execution was interrupted, reason: EXC_BAD_INSTRUCTION (code=EXC_I386_INVOP, subcode=0x0).
The process has been left at the point where it was interrupted, use "thread return -x" to return to the state before expression evaluation.
import UIKit
struct Pixel {
var value: UInt32
var red: UInt8 {
get { return UInt8(value & 0xFF) }
set { value = UInt32(newValue) | (value & 0xFFFFFF00) }
}
var green: UInt8 {
get { return UInt8((value >> 8) & 0xFF) }
set { value = (UInt32(newValue) << 8) | (value & 0xFFFF00FF) }
}
var blue: UInt8 {
get { return UInt8((value >> 16) & 0xFF) }
set { value = (UInt32(newValue) << 16) | (value & 0xFF00FFFF) }
}
var alpha: UInt8 {
get { return UInt8((value >> 24) & 0xFF) }
set { value = (UInt32(newValue) << 24) | (value & 0x00FFFFFF) }
}}
public struct RGBA {
var pixels: UnsafeMutableBufferPointer<Pixel>
var width: Int
var height: Int
init?(image: UIImage) {
guard let cgImage = image.cgImage else { return nil }
width = Int(image.size.width)
height = Int(image.size.height)
let bitsPerComponent = 8
let bytesPerPixel = 4
let bytesPerRow = width * bytesPerPixel
let imageData = UnsafeMutablePointer<Pixel>.allocate(capacity: width * height)
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.premultipliedLast.rawValue & CGBitmapInfo.alphaInfoMask.rawValue
guard let imageContext = CGContext(data: imageData, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo) else { return nil }
imageContext.draw(cgImage, in: CGRect(origin: CGPoint(x: 0,y :0), size: image.size))
pixels = UnsafeMutableBufferPointer<Pixel>(start: imageData, count: width * height)
}
public func toUIImage() -> UIImage? {
let bitsPerComponent = 8
let bytesPerPixel = 4
let bytesPerRow = width * bytesPerPixel
let colorSpace = CGColorSpaceCreateDeviceRGB()
var bitmapInfo: UInt32 = CGBitmapInfo.byteOrder32Big.rawValue
bitmapInfo |= CGImageAlphaInfo.premultipliedLast.rawValue & CGBitmapInfo.alphaInfoMask.rawValue
let imageContext = CGContext(data: pixels.baseAddress, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo, releaseCallback: nil, releaseInfo: nil)
guard let cgImage = imageContext!.makeImage() else {return nil}
let image = UIImage(cgImage: cgImage)
return image
}
}
public func contrast(image: UIImage) -> RGBA {
let rgba = RGBA(image: image)!
var new_red: UInt8 = 0
var new_green = 0
var new_blue = 0
var new_alpha = 0
var error_red: UInt8 = 0
var error_green = 0
var error_blue = 0
var error_alpha = 0
var pixel_1_red: UInt8 = 0
var output_red: UInt8 = 0
let w1: Double = 7.00/16.00
let w2: Double = 3.00/16.00
let w3: Double = 5.00/16.00
let w4: Double = 1.00/16.00
for y in 0..<rgba.height-1{
for x in 0..<rgba.width-1{
var index = y * rgba.width + x
var index_1 = y*rgba.width + x + 1
var pixel = rgba.pixels[index]
var pixel_1 = rgba.pixels[index_1]
pixel_1_red = pixel_1.red
if(pixel.red < 128){new_red = 0} else {new_red = 255}
error_red = new_red - pixel.red
var double_error_red = Double(error_red)*w1
var int_error_red = UInt8(double_error_red)
output_red = pixel_1_red + int_error_red
pixel_1.red = output_red
rgba.pixels[index_1] = pixel_1
}
}
return rgba
}
let image = UIImage(named: "newlowpassfilter.jpg")!
let rgba = contrast(image: image)
let newImage = rgba.toUIImage()
image
newImage
Maybe integer overflow is your issue.
Testing with one of my sample JPEG images, this line caused an overflow:
output_red = pixel_1_red + int_error_red
You may need to change the line to something like this:
let temp_red = Int(pixel_1_red) + Int(int_error_red)
output_red = temp_red > 255 ? 255 : UInt8(temp_red)
In Swift, the + operator detects overflow and crashes the app, and the overflow-ignoring operator &+ is not suitable for image processing. You may need to consider the result range of each operation in your code.
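Note that the error computation itself has the same range problem: error_red = new_red - pixel.red underflows whenever new_red is smaller than pixel.red. A sketch that keeps the intermediate values signed and clamps once at the end could look like this (clampToUInt8 is a hypothetical helper, not part of the original code):
func clampToUInt8(_ value: Int) -> UInt8 {
    return UInt8(min(max(value, 0), 255))
}

// Inside the dithering loop, keep the error term signed:
let newRed = pixel.red < 128 ? 0 : 255              // Int: the quantized value
let errorRed = newRed - Int(pixel.red)              // may be negative
let diffusedRed = Int(Double(errorRed) * w1)        // 7/16 of the error
pixel_1.red = clampToUInt8(Int(pixel_1.red) + diffusedRed)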

Colour correction of Photoshop LUT filter using CIFilter

I am using Photoshop to create a LUT filter and iOS CIFilter to read the LUT image, but the filtered image created on iOS does not correspond to the filtered image created in Photoshop.
How can I trace the issue?
This is the original image
This is the image with the filter I created in Photoshop
This is the image with the filter I created on iPhone
This is the LUT image I am using
Please try this; it works for me:
public class LUTsHelper {
public static func applyLUTsFilter(lutImage: String, dimension: Int, colorSpace: CGColorSpace) -> CIFilter? {
guard let image = UIImage(named: lutImage) else {
return nil
}
guard let cgImage = image.cgImage else {
return nil
}
guard let bitmap = createBitmap(image: cgImage, colorSpace: colorSpace) else {
return nil
}
let width = cgImage.width
let height = cgImage.height
let rowNum = width / dimension
let columnNum = height / dimension
let dataSize = dimension * dimension * dimension * MemoryLayout<Float>.size * 4
var array = Array<Float>(repeating: 0, count: dataSize)
var bitmapOffest: Int = 0
var z: Int = 0
for _ in stride(from: 0, to: rowNum, by: 1) {
for y in stride(from: 0, to: dimension, by: 1) {
let tmp = z
for _ in stride(from: 0, to: columnNum, by: 1) {
for x in stride(from: 0, to: dimension, by: 1) {
let dataOffset = (z * dimension * dimension + y * dimension + x) * 4
let position = bitmap
.advanced(by: bitmapOffest)
array[dataOffset + 0] = Float(position
.advanced(by: 0)
.pointee) / 255
array[dataOffset + 1] = Float(position
.advanced(by: 1)
.pointee) / 255
array[dataOffset + 2] = Float(position
.advanced(by: 2)
.pointee) / 255
array[dataOffset + 3] = Float(position
.advanced(by: 3)
.pointee) / 255
bitmapOffest += 4
}
z += 1
}
z = tmp
}
z += columnNum
}
free(bitmap)
let data = Data.init(bytes: array, count: dataSize)
guard
let cubeFilter = CIFilter(name: "CIColorCubeWithColorSpace")
else {
return nil
}
cubeFilter.setValue(dimension, forKey: "inputCubeDimension")
cubeFilter.setValue(data, forKey: "inputCubeData")
cubeFilter.setValue(colorSpace, forKey: "inputColorSpace")
return cubeFilter
}
private static func createBitmap(image: CGImage, colorSpace: CGColorSpace) -> UnsafeMutablePointer<UInt8>? {
let width = image.width
let height = image.height
let bitsPerComponent = 8
let bytesPerRow = width * 4
let bitmapSize = bytesPerRow * height
guard let data = malloc(bitmapSize) else {
return nil
}
guard let context = CGContext(
data: data,
width: width,
height: height,
bitsPerComponent: bitsPerComponent,
bytesPerRow: bytesPerRow,
space: colorSpace,
bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue,
releaseCallback: nil,
releaseInfo: nil) else {
return nil
}
context.draw(image, in: CGRect(x: 0, y: 0, width: width, height: height))
return data.bindMemory(to: UInt8.self, capacity: bitmapSize)
}}
Now use this class:
let colorSpace: CGColorSpace = CGColorSpace.init(name: CGColorSpace.sRGB) ?? CGColorSpaceCreateDeviceRGB()
let lutFilter = LUTsHelper.applyLUTsFilter(lutImage: "demo.png", dimension: 64, colorSpace: colorSpace)
lutFilter?.setValue(outputImage, forKey: "inputImage")
let lutOutputImage = lutFilter?.outputImage
if let output = lutOutputImage {
outputImage = output
}
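The snippet above leaves the filtered result in outputImage as a CIImage; to display or save it you still have to render it, for example through a CIContext (a minimal sketch, where imageView is a hypothetical UIImageView):
let context = CIContext()
if let cgResult = context.createCGImage(outputImage, from: outputImage.extent) {
    let filteredUIImage = UIImage(cgImage: cgResult)
    imageView.image = filteredUIImage   // display, or save the UIImage as needed
}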
