I wrote a simple algorithm that detects edges in UIImages. It works perfectly fine with images taken from the bundle (see the first image).
After I apply some image manipulations (filters, masks, crops, etc.) and pass the image to the same function, the result comes out messed up (image 2). I assume Core Graphics is changing something internally in the image. The question is: what?
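A quick way to see what actually changed is to log the bitmap layout of the bundle image and of the processed image before passing them in. A minimal sketch, using the same Core Graphics calls as the code below:
func logBitmapLayout(cgImage: CGImageRef) {
    // Alpha placement (premultiplied, first/last, none) often differs after filters, masks and crops
    println("alphaInfo: \(CGImageGetAlphaInfo(cgImage).rawValue)")
    println("bitmapInfo: \(CGImageGetBitmapInfo(cgImage).rawValue)")
    // The row layout can change too (e.g. padded bytesPerRow), which breaks plain width * 4 indexing
    println("bitsPerPixel: \(CGImageGetBitsPerPixel(cgImage)) bytesPerRow: \(CGImageGetBytesPerRow(cgImage))")
}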
That's how I start processing the image:
public struct PixelData {
var a:UInt8 = 255
var r:UInt8
var g:UInt8
var b:UInt8
}
func findEdges(cgImage:CGImageRef)->UIImage{
var pixelData = CGDataProviderCopyData(CGImageGetDataProvider(cgImage))
//var data = CFDataGetMutableBytePtr
var mdata: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
var data = UnsafeMutablePointer<UInt8>(mdata)
let height = CGImageGetHeight(cgImage)
let width = CGImageGetWidth(cgImage)
var start = CACurrentMediaTime()
//create an empty buffer
let emptyPixel = PixelData(a: 0, r: 0, g: 0, b: 0)
let blackPixel = PixelData(a: 255, r: 255, g: 255, b: 255)
var buffer = [PixelData](count: Int(width * height), repeatedValue: emptyPixel)
var booleanArray = [Bool](count: Int(width * height), repeatedValue: false)
for var y = 0; y < height-1; y++ {
for var x = 0; x < width; x++ {
//Current one
var currentPixelInfo: Int = ((Int(width) * Int(y)) + Int(x)) * 4
var currentAlpha = CGFloat(data[currentPixelInfo+3]) / CGFloat(255.0)
var downPixelInfo: Int = ((Int(width) * Int(y+1)) + Int(x)) * 4
var downAlpha = CGFloat(data[downPixelInfo+3]) / CGFloat(255.0)
if y == 0 && currentAlpha != 0{ // Top Edge
booleanArray[currentPixelInfo/4] = true
buffer[currentPixelInfo/4] = blackPixel
}
if y > 0 && y < height - 2{
//one up
var topPixelInfo: Int = ((Int(width) * Int(y - 1)) + Int(x )) * 4
var topAlpha = CGFloat(data[topPixelInfo+3]) / CGFloat(255.0)
if downAlpha == 0 && currentAlpha != 0 {//edge
booleanArray[currentPixelInfo/4] = true
buffer[currentPixelInfo/4] = blackPixel
}
if topAlpha == 0 && currentAlpha != 0 {//edge
booleanArray[currentPixelInfo/4] = true
buffer[currentPixelInfo/4] = blackPixel
}
}
if y == height - 2 && downAlpha != 0 {
booleanArray[downPixelInfo/4] = true
buffer[downPixelInfo/4] = blackPixel
}
}
}
for var y = 0; y < height-1; y++ {
for var x = 0; x < width-1; x++ {
//Current one
var currentPixelInfo: Int = ((Int(width) * Int(y)) + Int(x)) * 4
var currentAlpha = CGFloat(data[currentPixelInfo+3]) / CGFloat(255.0)
//Next
var nextPixelInfo: Int = ((Int(width) * Int(y)) + Int(x + 1)) * 4
var nextAlpha = CGFloat(data[nextPixelInfo+3]) / CGFloat(255.0)
//check horizontally
if x == 0 && currentAlpha != 0{ // Edge case
booleanArray[currentPixelInfo/4] = true
buffer[currentPixelInfo/4] = blackPixel
}
if x > 0 && x < width - 2{
//One before
var previousPixelInfo: Int = ((Int(width) * Int(y)) + Int(x - 1)) * 4
var previousAlpha = CGFloat(data[previousPixelInfo+3]) / CGFloat(255.0)
if nextAlpha == 0 && currentAlpha != 0 {//Living on the edge
booleanArray[currentPixelInfo/4] = true
buffer[currentPixelInfo/4] = blackPixel
}
if previousAlpha == 0 && currentAlpha != 0 {//Living on the edge
booleanArray[currentPixelInfo/4] = true
buffer[currentPixelInfo/4] = blackPixel
}
}
if x == width - 2 && nextAlpha != 0 {
booleanArray[nextPixelInfo/4] = true
buffer[nextPixelInfo/4] = blackPixel
}
}
}
var stop = CACurrentMediaTime()
let image = imageFromARGB32Bitmap(buffer, width: width, height: height)
println(stop - start)
return image!;
//self.imageView.image = image
}
func imageFromARGB32Bitmap(pixels:[PixelData], width:Int, height:Int)->UIImage? {
let bitsPerComponent:Int = 8
let bitsPerPixel:Int = 32
assert(pixels.count == Int(width * height))
var data = pixels // Copy to mutable []
let providerRef = CGDataProviderCreateWithCFData(
NSData(bytes: &data, length: data.count * sizeof(PixelData))
)
// let redPixel = PixelData(a: 255, r: 192, g: 0, b: 0)
let cgim = CGImageCreate(
width,
height,
bitsPerComponent,
bitsPerPixel,
width * Int(sizeof(PixelData)),
rgbColorSpace, // defined elsewhere in the original code, e.g. CGColorSpaceCreateDeviceRGB()
bitmapInfo, // defined elsewhere; see the note about PremultipliedFirst below
providerRef,
nil,
true,
kCGRenderingIntentDefault
)
return UIImage(CGImage: cgim)
}
Although I didn't figure out exactly what UIImage does to the raw pixel data, the function I wrote below fixes the problem. The key point is using the CGImageAlphaInfo.PremultipliedFirst value for the bitmap info, since my data structure expects the ARGB format.
func imageFromBitmapContext(image:CGImageRef, width:Int, height:Int)->UIImage?
{
let colorSpace:CGColorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(CGImageAlphaInfo.PremultipliedFirst.rawValue)
let bytesPerRow = 4 * width
let context = CGBitmapContextCreate(nil, Int(width), Int(height), 8, Int(bytesPerRow), colorSpace, bitmapInfo)
CGContextDrawImage(context, CGRectMake(0, 0, CGFloat(width), CGFloat(height)), image)
let image = CGBitmapContextCreateImage(context)
return UIImage(CGImage: image)
}
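With that helper in place the flow is roughly this (a sketch; processedImage and imageView are placeholder names for whatever your pipeline produces): redraw the manipulated image into the known ARGB layout first, and only then hand it to findEdges.
let cgImage = processedImage.CGImage
let width = CGImageGetWidth(cgImage)
let height = CGImageGetHeight(cgImage)
// Normalize to premultiplied-first ARGB before reading raw bytes
if let normalized = imageFromBitmapContext(cgImage, width: width, height: height) {
    imageView.image = findEdges(normalized.CGImage)
}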
Related
I have been trying to get the GLCameraRipple sample from Apple running in a Swift project. Unfortunately it relies heavily on C-style, thread-safe arrays that are only available in Objective-C.
I have been trying to use a bridging header so that the simulation can run in Objective-C code and the drawing can run in Swift code. That way the thread-safety issue would not matter.
I have taken the Objective-C code and translated it almost completely into Swift, with a few exceptions. I did cull some of the extra math, since the texture is the same size as the screen for my needs. If you want to check my translation, I have put it below.
Anyway, I have made a class that will run in any Xcode project with an OpenGL environment.
import Foundation
import GLKit
import OpenGLES
class WaterDrawer
{
static var sim = RippleModel()
static var shade = Shader("Shader2")
static func pt(pt: CGPoint)
{
sim.initiateRippleAtLocation(pt)
}
static func firstStart(width: Int, height: Int)
{
sim.initWithScreenWidth(width / 4, iheight: height / 4, accWidth: width, accHeight: height)
shade.begin()
buildMatrix(width, height: height)
bufferSetup()
}
static func draw()
{
glUseProgram(shade.progId)
let posLoc = GLuint(glGetAttribLocation(shade.progId, "pos"))
let texLoc = GLuint(glGetAttribLocation(shade.progId, "tc"))
glBindBuffer(GLenum(GL_ARRAY_BUFFER), texVBO);
glBufferData(GLenum(GL_ARRAY_BUFFER), GLsizeiptr(sim.getVertexSize()), sim.getTexCoords(), GLenum(GL_DYNAMIC_DRAW));
glVertexAttribPointer(texLoc, 2, GLenum(GL_FLOAT), GLboolean(GL_FALSE), 0, BUFFER_OFFSET(0))
glEnableVertexAttribArray(texLoc)
glBindBuffer(GLenum(GL_ARRAY_BUFFER), posVBO)
glVertexAttribPointer(posLoc, 2, GLenum(GL_FLOAT), GLboolean(GL_FALSE), 0, BUFFER_OFFSET(0))
glEnableVertexAttribArray(posLoc)
let uniOrtho = glGetUniformLocation(shade.progId, "matrix")
glUniformMatrix4fv(uniOrtho, 1, GLboolean(GL_FALSE), &orthographicMatrix)
glBindBuffer(GLenum(GL_ELEMENT_ARRAY_BUFFER), indVBO)
glDrawElements(GLenum(GL_TRIANGLE_STRIP), GLsizei(sim.getIndexCount()), GLenum(GL_UNSIGNED_SHORT), nil)
glBindBuffer(GLenum(GL_ARRAY_BUFFER), 0)
glBindBuffer(GLenum(GL_ELEMENT_ARRAY_BUFFER), 0)
glDisableVertexAttribArray(posLoc)
glDisableVertexAttribArray(texLoc)
}
static func update()
{
sim.runSimulation()
}
static var posVBO:GLuint = 0
static var texVBO:GLuint = 0
static var indVBO:GLuint = 0
static func bufferSetup()
{
Whirl.crashLog("Started passing in buffer data")
glGenBuffers(1, &indVBO);
glBindBuffer(GLenum(GL_ELEMENT_ARRAY_BUFFER), indVBO);
glBufferData(GLenum(GL_ELEMENT_ARRAY_BUFFER), GLsizeiptr(sim.getIndexSize()), sim.getIndices(), GLenum(GL_STATIC_DRAW));
glGenBuffers(1, &posVBO);
glBindBuffer(GLenum(GL_ARRAY_BUFFER), posVBO);
glBufferData(GLenum(GL_ARRAY_BUFFER), GLsizeiptr(sim.getVertexSize()), sim.getVertices(), GLenum(GL_STATIC_DRAW));
glGenBuffers(1, &texVBO);
glBindBuffer(GLenum(GL_ARRAY_BUFFER), texVBO);
glBufferData(GLenum(GL_ARRAY_BUFFER), GLsizeiptr(sim.getVertexSize()), sim.getTexCoords(), GLenum(GL_DYNAMIC_DRAW));
Whirl.crashLog("Finished passing in buffer Data")
}
static var orthographicMatrix:[GLfloat] = []
static func buildMatrix(width: Int, height: Int)
{
orthographicMatrix = glkitmatrixtoarray(GLKMatrix4MakeOrtho(0, GLfloat(width), 0, GLfloat(height), -100, 100))
//Storage.upScaleFactor
}
static func glkitmatrixtoarray(mat: GLKMatrix4) -> [GLfloat]
{
var buildme:[GLfloat] = []
buildme.append(mat.m.0)
buildme.append(mat.m.1)
buildme.append(mat.m.2)
buildme.append(mat.m.3)
buildme.append(mat.m.4)
buildme.append(mat.m.5)
buildme.append(mat.m.6)
buildme.append(mat.m.7)
buildme.append(mat.m.8)
buildme.append(mat.m.9)
buildme.append(mat.m.10)
buildme.append(mat.m.11)
buildme.append(mat.m.12)
buildme.append(mat.m.13)
buildme.append(mat.m.14)
buildme.append(mat.m.15)
return buildme
}
}
So in theory this code can use either the Swift implementation or the Objective-C implementation; I just need to switch the way the mesh is initialized.
The trouble is that when I use the Objective-C one, the screen is blank. I have checked, and the buffer data looks really strange in the frame capture.
Are you allowed to pass data from Objective-C code into a glBuffer?
Simulation.swift
import Foundation
import GLKit
import OpenGLES
class RippleModel
{
var screenWidth:UInt32 = 0
var screenHeight:UInt32 = 0
var poolWidth:UInt32 = 0
var poolHeight:UInt32 = 0
var screenWidthi:Int = 0
var screenHeighti:Int = 0
var poolWidthi:Int = 0
var poolHeighti:Int = 0
let touchRadius:Int = 5 //5 i think
var rippleVertices:[GLfloat] = []
var rippleTexCoords:[GLfloat] = []
var rippleIndices:[GLushort] = [] //NOTE: if you change this index type, the GL_UNSIGNED_SHORT in glDrawElements must change too
var rippleSource:[GLfloat] = []
var rippleDest:[GLfloat] = []
var rippleCoeff:[GLfloat] = []
var VertexSize:GLsizeiptr = 0
var IndicieSize:GLsizeiptr = 0
var IndicieCount:Int = 0
func calculateSizes()
{
VertexSize = rippleVertices.count * sizeof(GLfloat)
IndicieSize = rippleIndices.count * sizeof(GLushort)
IndicieCount = rippleIndices.count
Whirl.crashLog("Data sizes Vertex size \(VertexSize)\tIndicie Size \(IndicieSize) \tIndicie Count \(IndicieCount)")
}
func figureOutCoefficent()
{
for y in 0...(2 * touchRadius)
{
for x in 0...(2 * touchRadius)
{
let dx = x - touchRadius
let dy = y - touchRadius
let distance = sqrt(GLfloat(dx * dx + dy * dy))
let me = y * (touchRadius*2 + 1) + x
if (distance <= GLfloat(touchRadius))
{
let factor = distance / GLfloat(touchRadius)
rippleCoeff[me] = -(cos(factor*GLfloat(M_PI))+1.0) * 256.0;
}
else
{
rippleCoeff[me] = 0.0
}
}
}
}
init()
{
}
func initWithScreenWidth( iwidth: Int, iheight: Int, accWidth: Int, accHeight: Int)
{
screenWidth = UInt32(accWidth);screenWidthi = Int(screenWidth)
screenHeight = UInt32(accHeight);screenHeighti = Int(screenHeight)
poolWidth = UInt32(iwidth);poolWidthi = Int(poolWidth)
poolHeight = UInt32(iheight);poolHeighti = Int(poolHeight)
//WE DONT NEED TEX COORD MUMBO JUMBO IT IS FULL SCREEN ALREADY
Whirl.crashLog("Started allocation")
rippleCoeff = [GLfloat](count: Int( (touchRadius * 2 + 1) * (touchRadius*2 + 1) ), repeatedValue: 0)
figureOutCoefficent()
let simCount:Int = Int(poolWidth + 2) * Int(poolHeight + 2)
rippleSource = [GLfloat](count: simCount, repeatedValue: 0)
rippleDest = [GLfloat](count: simCount, repeatedValue: 0)
let locb:Int = Int(poolWidth * poolHeight * 2)
rippleVertices = [GLfloat](count: locb, repeatedValue: 0)
rippleTexCoords = [GLfloat](count: locb, repeatedValue: 0)
rippleIndices = [GLushort](count: Int(poolHeight - 1) * Int((poolWidth * 2) + 2), repeatedValue: 0)
Whirl.crashLog("Finished allocation")
initMesh()
calculateSizes()
}
func initMesh()
{
Whirl.crashLog("Started initting pos coords")
for i in 0..<poolHeight
{let ii = GLfloat(i)
for j in 0..<poolWidth
{let jj = GLfloat(j)
let cordb:Int = Int(i*poolWidth+j)*2
rippleVertices[cordb + 0] = (jj / GLfloat(poolWidth - 1)) * GLfloat(screenWidth)
rippleVertices[cordb + 1] = (ii / GLfloat(poolHeight - 1)) * GLfloat(screenHeight)
rippleTexCoords[cordb + 0] = ii / GLfloat(poolHeight - 1)
rippleTexCoords[cordb + 1] = (jj/GLfloat(poolWidth - 1))
}
}
Whirl.crashLog("Finished initting pos coords")
Whirl.crashLog("Started initting index coords")
var index = 0
for i in 0 ..< poolHeighti-1
{
for j in 0 ..< poolWidthi
{
if (i%2 == 0)
{
// emit extra index to create degenerate triangle
if (j == 0)
{
rippleIndices[index] = GLushort(i*poolWidthi+j);
index += 1;
}
rippleIndices[index] = GLushort(i*poolWidthi+j);
index += 1;
rippleIndices[index] = GLushort((i+1)*poolWidthi+j);
index += 1;
// emit extra index to create degenerate triangle
if (j == (poolWidthi-1))
{
rippleIndices[index] = GLushort((i+1)*poolWidthi+j);
index += 1;
}
}
else
{
// emit extra index to create degenerate triangle
if (j == 0)
{
rippleIndices[index] = GLushort((i+1)*poolWidthi+j);
index += 1;
}
rippleIndices[index] = GLushort((i+1)*poolWidthi+j);
index += 1;
rippleIndices[index] = GLushort(i * poolWidthi + j);
index += 1;
// emit extra index to create degenerate triangle
if (j == (poolWidthi-1))
{
rippleIndices[index] = GLushort(i * poolWidthi + j);
index += 1;
}
}
}
}
Whirl.crashLog("Finished initting coords")
}
var firstUpdate = true
func runSimulation()
{
if (firstUpdate)
{firstUpdate = false; Whirl.crashLog("First update")}
let queue = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)
//dispatch_apply(Int(poolHeight), queue, {(y: size_t) -> Void in
for y in 0..<poolHeighti {
let pw = self.poolWidthi
for x in 1..<(pw - 1)
{
let ai:Int = (y ) * (pw + 2) + x + 1
let bi:Int = (y + 2) * (pw + 2) + x + 1
let ci:Int = (y + 1) * (pw + 2) + x
let di:Int = (y + 1) * (pw + 2) + x + 2
let me:Int = (y + 1) * (pw + 2) + x + 1
let a = self.rippleSource[ai]
let b = self.rippleSource[bi]
let c = self.rippleSource[ci]
let d = self.rippleSource[di]
var result = (a + b + c + d) / 2.0 - self.rippleDest[me]
result -= result / 32.0
self.rippleDest[me] = result
}
}
//)
let hm1 = GLfloat(poolHeight - 1)
let wm1 = GLfloat(poolWidth - 1)
//dispatch_apply(poolHeighti, queue, {(y: size_t) -> Void in
for y in 0..<poolHeighti{
let yy = GLfloat(y)
let pw = self.poolWidthi
for x in 1..<(pw - 1)
{let xx = GLfloat(x)
let ai:Int = (y ) * (pw + 2) + x + 1
let bi:Int = (y + 2) * (pw + 2) + x + 1
let ci:Int = (y + 1) * (pw + 2) + x
let di:Int = (y + 1) * (pw + 2) + x + 2
let a = self.rippleDest[ai]
let b = self.rippleDest[bi]
let c = self.rippleDest[ci]
let d = self.rippleDest[di]
var s_offset = ((b - a) / 2048)
var t_offset = ((c - d) / 2048)
s_offset = (s_offset < -0.5) ? -0.5 : s_offset;
t_offset = (t_offset < -0.5) ? -0.5 : t_offset;
s_offset = (s_offset > 0.5) ? 0.5 : s_offset;
t_offset = (t_offset > 0.5) ? 0.5 : t_offset;
let s_tc = yy / hm1
let t_tc = xx / wm1
let me = (y * pw + x) * 2
self.rippleTexCoords[me + 0] = s_tc + s_offset
self.rippleTexCoords[me + 1] = t_tc + t_offset
}
}
//)
let pTmp = rippleDest
rippleDest = rippleSource
rippleSource = pTmp
}
var firstRipple:Bool = true
func initiateRippleAtLocation(location: CGPoint)
{
if (firstRipple)
{firstRipple = false; Whirl.crashLog("First ripple placement")}
let xIndex = Int((GLfloat(location.x) / GLfloat(screenWidth)) * GLfloat(poolWidthi))
let yIndex = Int((1.0 - (GLfloat(location.y) / GLfloat(screenHeighti))) * GLfloat(poolHeight))
let lowy = yIndex - touchRadius
let highy = yIndex + touchRadius
let lowx = xIndex - touchRadius
let highx = xIndex + touchRadius
//Whirl.crashLog("Ripple at (\(xIndex) , \(yIndex))\tX:(\(lowx) - \(highx))\tY:(\(lowy) - \(highy))")
for y in lowy...highy
{
for x in lowx...highx
{
if (x > 0 && x < (poolWidthi - 1) && y > 0 && y < poolHeighti)
{
let ind = (poolWidthi + 2) * (y + 1) + x + 1
let coef = (y-(yIndex-touchRadius))*(touchRadius*2+1)+x-(xIndex-touchRadius)
let past = rippleSource[ind]
let coe = rippleCoeff[coef]
if (coe < past)
{
rippleSource[ind] = coe
}
}
}
}
}
func rippleLine(location1: CGPoint, location2: CGPoint)
{
if (firstRipple)
{firstRipple = false; Whirl.crashLog("First ripple placement")}
let xIndex1 = Int((GLfloat(location1.x) / GLfloat(screenWidth)) * GLfloat(poolWidthi))
let yIndex1 = Int((1.0 - (GLfloat(location1.y) / GLfloat(screenHeighti))) * GLfloat(poolHeight))
let xIndex2 = Int((GLfloat(location2.x) / GLfloat(screenWidth)) * GLfloat(poolWidthi))
let yIndex2 = Int((1.0 - (GLfloat(location2.y) / GLfloat(screenHeighti))) * GLfloat(poolHeight))
let lowy1 = yIndex1 - touchRadius
let highy1 = yIndex1 + touchRadius
let lowx1 = xIndex1 - touchRadius
let highx1 = xIndex1 + touchRadius
let lowy2 = yIndex2 - touchRadius
let highy2 = yIndex2 + touchRadius
let lowx2 = xIndex2 - touchRadius
let highx2 = xIndex2 + touchRadius
let lowx = min(lowx1, lowx2)
let highx = max(highx1, highx2)
let lowy = min(lowy1, lowy2)
let highy = max(highy1, highy2)
for y in lowy...highy
{
for x in lowx...highx
{
if (x > 0 && x < (poolWidthi - 1) && y > 0 && y < poolHeighti)
{
let ind = (poolWidthi + 2) * (y + 1) + x + 1
let tar = ldist(CGPoint(x: xIndex1, y: yIndex1), p2: CGPoint(x: xIndex2, y: yIndex2), me: CGPoint(x: x, y: y))
let dx = x - Int(tar.x)
let dy = y - Int(tar.y)
let distq = (dx * dx + dy * dy)
if (distq < touchRadius * touchRadius)
{
let factor = sqrt(GLfloat(distq)) / GLfloat(touchRadius)
rippleSource[ind] = -(cos(factor*GLfloat(M_PI))+1.0) * 256.0;
}
//rippleSource[ind] = 1000
}
}
}
}
func ldist(p1: CGPoint, p2: CGPoint, me: CGPoint) -> CGPoint
{
let diffX = p2.x - p1.x
let diffY = p2.y - p1.y
var target = CGPoint()
if ((diffX == 0) && (diffY == 0))
{
//Degenerate segment: return p1 directly to avoid dividing by zero below
return CGPoint(x: round(p1.x), y: round(p1.y))
}
let t = ((me.x - p1.x) * diffX + (me.y - p1.y) * diffY) / (diffX * diffX + diffY * diffY)
if (t < 0)
{
target = p1
}
else if (t > 1)
{
target = p2
}
else
{
target = CGPoint(x: (p1.x + t * diffX), y: (p1.y + t * diffY))
}
let int = CGPoint(x: round(target.x), y: round(target.y))
return int
}
func getVertices() -> [GLfloat]
{
//Return the mesh positions
return rippleVertices
}
func getTexCoords() -> [GLfloat]
{
//Return the array of texture coords
return rippleTexCoords
}
func getIndices() -> [GLushort]
{
//Return the array of indices
return rippleIndices
}
func getVertexSize() -> GLsizeiptr
{
//Return the size of the mesh position array
return VertexSize
}
func getIndexSize() -> GLsizeiptr
{
//Return the byte size of the indices array
return IndicieSize
}
func getIndexCount() -> GLsizeiptr
{
//This goes in the draw call, count of indices
return IndicieCount
}
}
RippleModel.m (from Apple)
Are you allowed to pass data from Objective-C code into a glBuffer?
Why wouldn't you be allowed? Swift has a pointer API (UnsafePointer<T>, UnsafeMutablePointer<T>, etc.) exactly for this purpose. Obviously this is "unsafe" in the sense that the underlying memory the [Objective-]C pointer points to could change at any time without the Swift pointer knowing. It also has no information about the size of the memory block it points to.
Any C pointer or array can be bridged to Swift (probably as UnsafeMutablePointer<Void>, which you will need to cast to your OpenGL type).
You can avoid any risk of referencing invalid memory by dereferencing the pointer (if it is non-nil) and copying the value stored at the pointer into a variable in your Swift application.
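As a concrete sketch (assuming the bridged Objective-C RippleModel exposes getVertices returning a GLfloat * and getVertexSize returning the byte count, roughly like Apple's sample): the pointer arrives in Swift as an UnsafeMutablePointer<GLfloat> and can be handed straight to glBufferData, which copies the bytes immediately, so the Objective-C buffer only has to stay valid for the duration of the call.
// Hypothetical bridged calls; sim and posVBO follow the names used in the Swift class above
let vertexPtr: UnsafeMutablePointer<GLfloat> = sim.getVertices()  // bridged from GLfloat *
let vertexBytes = GLsizeiptr(sim.getVertexSize())                 // size in bytes, not element count
glBindBuffer(GLenum(GL_ARRAY_BUFFER), posVBO)
glBufferData(GLenum(GL_ARRAY_BUFFER), vertexBytes, vertexPtr, GLenum(GL_STATIC_DRAW))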
With OpenGL ES being a pure set of C functions, I don't think passing a pointer to Swift data types will work easily.
The following code will give you a hint of how to pass the index buffer.
var Indices: [GLubyte] = [
0, 1, 2,
2, 3, 0
]
var indexBuffer: GLuint = GLuint()
glGenBuffers(1, &indexBuffer)
glBindBuffer(GLenum(GL_ELEMENT_ARRAY_BUFFER), indexBuffer)
glBufferData(GLenum(GL_ELEMENT_ARRAY_BUFFER), GLsizeiptr(Indices.count * sizeof(GLubyte)), Indices, GLenum(GL_STATIC_DRAW))
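The matching draw call for that index buffer would then look something like this (note that the type passed to glDrawElements has to match GLubyte):
glBindBuffer(GLenum(GL_ELEMENT_ARRAY_BUFFER), indexBuffer)
glDrawElements(GLenum(GL_TRIANGLES), GLsizei(Indices.count), GLenum(GL_UNSIGNED_BYTE), nil)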
Reference: here is a link with working code: https://github.com/bradley/iOSSwiftOpenGL/blob/master/iOSSwiftOpenGL/OpenGLView.swift
I need to read the pixel values of an image and iterate over them, printing them to the Swift output. I have written the code below; I also tried an RGBAImage class to read out the pixels. I'm getting lost going from the CGContextRef to the iteration. I tried porting the Objective-C approach for getting pixel data to Swift, since I want to work in Swift.
func createRGBAPixel(inImage: CGImageRef) -> CGContextRef {
//Image width, height
let pixelWidth = CGImageGetWidth(inImage)
let pixelHeight = CGImageGetHeight(inImage)
//Number of bytes per row and in total
let bytesPerRow = pixelWidth * 4
let byteCount = bytesPerRow * pixelHeight
//RGB color space
let colorSpace = CGColorSpaceCreateDeviceRGB()
//Allocate the image data and create the bitmap context
let mapData = malloc(byteCount)
let mapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
let context = CGBitmapContextCreate(mapData, pixelWidth, pixelHeight, 8, bytesPerRow, colorSpace, mapInfo.rawValue)
//Draw the image into the context, so mapData now holds the raw ARGB bytes
CGContextDrawImage(context, CGRectMake(0, 0, CGFloat(pixelWidth), CGFloat(pixelHeight)), inImage)
//Iterating and logging
print("Logging pixel counts")
let pixels = UnsafeMutablePointer<UInt8>(CGBitmapContextGetData(context))
for y in 0..<pixelHeight {
for x in 0..<pixelWidth {
//ARGB layout: alpha, red, green, blue
let offset = (y * pixelWidth + x) * 4
let red = Float(pixels[offset + 1])
let green = Float(pixels[offset + 2])
let blue = Float(pixels[offset + 3])
print((red + green + blue) / 3.0)
}
}
return context!
}
I created a small class for this:
class ImagePixelReader {
enum Component:Int {
case r = 0
case g = 1
case b = 2
case alpha = 3
}
struct Color {
var r:UInt8
var g:UInt8
var b:UInt8
var a:UInt8
var uiColor:UIColor {
return UIColor(red:CGFloat(r)/255.0,green:CGFloat(g)/255.0,blue:CGFloat(b)/255.0,alpha:CGFloat(a)/255.0)
}
}
let image:UIImage
private var data:CFData
private let pointer:UnsafePointer<UInt8>
private let scale:Int
init?(image:UIImage){
self.image = image
guard let cfdata = self.image.cgImage?.dataProvider?.data,
let pointer = CFDataGetBytePtr(cfdata) else {
return nil
}
self.scale = Int(image.scale)
self.data = cfdata
self.pointer = pointer
}
func componentAt(_ component:Component,x:Int,y:Int)->UInt8{
assert(CGFloat(x) < image.size.width)
assert(CGFloat(y) < image.size.height)
let pixelPosition = (Int(image.size.width) * y * scale + x) * 4 * scale
return pointer[pixelPosition + component.rawValue]
}
func colorAt(x:Int,y:Int)->Color{
assert(CGFloat(x) < image.size.width)
assert(CGFloat(y) < image.size.height)
let pixelPosition = (Int(image.size.width) * y * scale + x) * 4 * scale
return Color(r: pointer[pixelPosition + Component.r.rawValue],
g: pointer[pixelPosition + Component.g.rawValue],
b: pointer[pixelPosition + Component.b.rawValue],
a: pointer[pixelPosition + Component.alpha.rawValue])
}
}
How to use:
if let reader = ImagePixelReader(image: yourImage) {
//get alpha or color
let alpha = reader.componentAt(.alpha, x: 10, y:10)
let color = reader.colorAt(x:10, y: 10).uiColor
//getting all the pixels you need
var values = ""
//iterate over all pixels
for x in 0 ..< Int(yourImage.size.width){
for y in 0 ..< Int(yourImage.size.height){
let color = reader.colorAt(x: x, y: y)
values += "[\(x):\(y):\(color)] "
}
//add new line for every new row
values += "\n"
}
print(values)
}
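And to get the same average-brightness output the loop in the question was aiming for, a sketch using the class above (yourImage is again a placeholder):
if let reader = ImagePixelReader(image: yourImage) {
    for y in 0 ..< Int(yourImage.size.height) {
        for x in 0 ..< Int(yourImage.size.width) {
            let color = reader.colorAt(x: x, y: y)
            // Average of the three channels, as in the original loop
            print((Float(color.r) + Float(color.g) + Float(color.b)) / 3.0)
        }
    }
}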
This is what my image looks like at first:
After pressing the "Apply the Filter" button it becomes:
and this is the ColorCube LUT I apply:
As you can see, the color cube is applied, but it barely has any visible effect.
This is my code:
func colorCubeFilterFromLUT(imageName : NSString) -> CIFilter? {
let kDimension : UInt = 64
let lutImage = UIImage(named: imageName as String)!.CGImage
let lutWidth: UInt = UInt(CGImageGetWidth(lutImage!))
let lutHeight: UInt = UInt(CGImageGetHeight(lutImage!))
let rowCount = lutHeight / kDimension
let columnCount = lutWidth / kDimension
if ((lutWidth % kDimension != 0) || (lutHeight % kDimension != 0) || (rowCount * columnCount != kDimension)) {
NSLog("Invalid colorLUT %#", imageName);
return nil
}
let bitmap = self.createRGBABitmapFromImage(lutImage!)
let size = Int(kDimension) * Int(kDimension) * Int(kDimension) * sizeof(Float) * 4
let data = UnsafeMutablePointer<Float>(malloc(Int(size)))
var bitmapOffset : Int = 0
var z : UInt = 0
for (var row: UInt = 0; row < rowCount; row++)
{
for (var y: UInt = 0; y < kDimension; y++)
{
var tmp = z
for (var col: UInt = 0; col < columnCount; col++)
{
for (var x: UInt = 0; x < kDimension; x++) {
let alpha = Float(bitmap[Int(bitmapOffset)]) / 255.0
let red = Float(bitmap[Int(bitmapOffset+1)]) / alpha
let green = Float(bitmap[Int(bitmapOffset+2)]) / alpha
let blue = Float(bitmap[Int(bitmapOffset+3)]) / alpha
var dataOffset = Int(z * kDimension * kDimension + y * kDimension + x) * 4
data[dataOffset] = red
data[dataOffset + 1] = green
data[dataOffset + 2] = blue
data[dataOffset + 3] = alpha
bitmapOffset += 4
}
z++
}
z = tmp
}
z += columnCount
}
let colorCubeData = NSData(bytesNoCopy: data, length: size, freeWhenDone: true)
// create CIColorCube Filter
var filter = CIFilter(name: "CIColorCube")
filter!.setValue(colorCubeData, forKey: "inputCubeData")
filter!.setValue(kDimension, forKey: "inputCubeDimension")
return filter
}
func createRGBABitmapFromImage(inImage: CGImage) -> UnsafeMutablePointer<Float> {
let pixelsWide = CGImageGetWidth(inImage)
let pixelsHigh = CGImageGetHeight(inImage)
let bitmapBytesPerRow = Int(pixelsWide) * 4
let bitmapByteCount = bitmapBytesPerRow * Int(pixelsHigh)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapData = malloc(Int(CUnsignedLong(bitmapByteCount))) // bitmap
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue | CGBitmapInfo.ByteOrder32Big.rawValue)
let context = CGBitmapContextCreate(bitmapData, 512, 512, 8, Int(bitmapBytesPerRow), colorSpace, bitmapInfo.rawValue)
let rect = CGRect(x:0, y:0, width:1000, height:1000)
CGContextDrawImage(context, rect, inImage)
var convertedBitmap = malloc(Int(bitmapByteCount * sizeof(Float)))
print("BBB \(convertedBitmap)")
vDSP_vfltu8(UnsafePointer<UInt8>(bitmapData), 1, UnsafeMutablePointer<Float>(convertedBitmap), 1, vDSP_Length(bitmapByteCount))
free(bitmapData)
return UnsafeMutablePointer<Float>(convertedBitmap)
}
I have been working on this since the morning and have gotten nowhere. I cannot figure out why this filter is not being applied to my image.
I created this extension for "bucket fill" (flood fill) starting from a touch point:
extension UIImageView {
func bucketFill(startPoint: CGPoint, newColor: UIColor) {
var newRed, newGreen, newBlue, newAlpha: CUnsignedChar
let pixelsWide = CGImageGetWidth(self.image!.CGImage)
let pixelsHigh = CGImageGetHeight(self.image!.CGImage)
let rect = CGRect(x:0, y:0, width:Int(pixelsWide), height:Int(pixelsHigh))
let bitmapBytesPerRow = Int(pixelsWide) * 4
var context = self.image!.createARGBBitmapContext()
//Clear the context
CGContextClearRect(context, rect)
// Draw the image to the bitmap context. Once we draw, the memory
// allocated for the context for rendering will then contain the
// raw image data in the specified color space.
CGContextDrawImage(context, rect, self.image!.CGImage)
var data = CGBitmapContextGetData(context)
var dataType = UnsafeMutablePointer<UInt8>(data)
let newColorRef = CGColorGetComponents(newColor.CGColor)
if(CGColorGetNumberOfComponents(newColor.CGColor) == 2) {
newRed = CUnsignedChar(newColorRef[0] * 255) // CUnsignedChar
newGreen = CUnsignedChar(newColorRef[0] * 255)
newBlue = CUnsignedChar(newColorRef[0] * 255)
newAlpha = CUnsignedChar(newColorRef[1])
} else {
newRed = CUnsignedChar(newColorRef[0] * 255)
newGreen = CUnsignedChar(newColorRef[1] * 255)
newBlue = CUnsignedChar(newColorRef[2] * 255)
newAlpha = CUnsignedChar(newColorRef[3])
}
let newColorStr = ColorRGB(red: newRed, green: newGreen, blue: newBlue)
var stack = Stack()
let offset = 4*((Int(pixelsWide) * Int(startPoint.y)) + Int(startPoint.x))
//let alpha = dataType[offset]
let startRed: UInt8 = dataType[offset+1]
let startGreen: UInt8 = dataType[offset+2]
let startBlue: UInt8 = dataType[offset+3]
stack.push(startPoint)
while(!stack.isEmpty()) {
let point: CGPoint = stack.pop() as! CGPoint
let offset = 4*((Int(pixelsWide) * Int(point.y)) + Int(point.x))
let alpha = dataType[offset]
let red: UInt8 = dataType[offset+1]
let green: UInt8 = dataType[offset+2]
let blue: UInt8 = dataType[offset+3]
if (red == newRed && green == newGreen && blue == newBlue) {
continue
}
if (red.absoluteDifference(startRed) < 4 && green.absoluteDifference(startGreen) < 4 && blue.absoluteDifference(startBlue) < 4) {
dataType[offset] = 255
dataType[offset + 1] = newRed
dataType[offset + 2] = newGreen
dataType[offset + 3] = newBlue
if (point.x > 0) {
stack.push(CGPoint(x: point.x - 1, y: point.y))
}
if (point.x < CGFloat(pixelsWide)) {
stack.push(CGPoint(x: point.x + 1, y: point.y))
}
if (point.y > 0) {
stack.push(CGPoint(x: point.x, y: point.y - 1))
}
if (point.y < CGFloat(pixelsHigh)) {
stack.push(CGPoint(x: point.x, y: point.y + 1))
}
} else {
}
}
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
let finalContext = CGBitmapContextCreate(data, pixelsWide, pixelsHigh, CLong(8), CLong(bitmapBytesPerRow), colorSpace, bitmapInfo)
let imageRef = CGBitmapContextCreateImage(finalContext)
self.image = UIImage(CGImage: imageRef, scale: self.image!.scale,orientation: self.image!.imageOrientation)
}
}
Now I would like to improve performance. How can I make this algorithm run faster? The UInt8.absoluteDifference extension is my attempt to include nearly identical colors in the flood fill; it works, but I know it could be improved a lot and I don't know how (one idea is sketched below, after the Stack class).
extension UInt8 {
func absoluteDifference(subtrahend: UInt8) -> UInt8 {
if (self > subtrahend) {
return self - subtrahend;
} else {
return subtrahend - self;
}
}
}
My Stack class:
class Stack {
var count: Int = 0
var head: Node = Node()
init() {
}
func isEmpty() -> Bool {
return self.count == 0
}
func push(value: Any) {
if isEmpty() {
self.head = Node()
}
var node = Node(value: value)
node.next = self.head
self.head = node
self.count++
}
func pop() -> Any? {
if isEmpty() {
return nil
}
var node = self.head
self.head = node.next!
self.count--
return node.value
}
}
Thanks for the help.
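One relatively cheap win is to drop the linked-list Stack of boxed Any values and the CGPoint arithmetic in the inner loop, and push plain pixel indices onto a Swift array used as a stack instead. A rough sketch of that idea (floodFillOffsets is a hypothetical helper, not a drop-in replacement; it reuses the absoluteDifference extension above and assumes the same ARGB byte layout as bucketFill):
func floodFillOffsets(data: UnsafeMutablePointer<UInt8>, pixelsWide: Int, pixelsHigh: Int,
                      startX: Int, startY: Int,
                      newRed: UInt8, newGreen: UInt8, newBlue: UInt8) {
    let startOffset = (startY * pixelsWide + startX) * 4
    let startRed = data[startOffset + 1]
    let startGreen = data[startOffset + 2]
    let startBlue = data[startOffset + 3]
    var stack = [Int]()                              // plain pixel indices: no boxing, no casts
    stack.reserveCapacity(pixelsWide * 4)
    stack.append(startY * pixelsWide + startX)
    while !stack.isEmpty {
        let index = stack.removeLast()
        let offset = index * 4
        let red = data[offset + 1]
        let green = data[offset + 2]
        let blue = data[offset + 3]
        if red == newRed && green == newGreen && blue == newBlue {
            continue                                 // already filled
        }
        if red.absoluteDifference(startRed) < 4 &&
            green.absoluteDifference(startGreen) < 4 &&
            blue.absoluteDifference(startBlue) < 4 {
            data[offset] = 255
            data[offset + 1] = newRed
            data[offset + 2] = newGreen
            data[offset + 3] = newBlue
            let x = index % pixelsWide
            let y = index / pixelsWide
            if x > 0              { stack.append(index - 1) }
            if x < pixelsWide - 1 { stack.append(index + 1) }
            if y > 0              { stack.append(index - pixelsWide) }
            if y < pixelsHigh - 1 { stack.append(index + pixelsWide) }
        }
    }
}
The array stack avoids a Node allocation and an as! cast per pixel, and staying in integer indices removes the CGFloat/Int conversions inside the loop.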
I would like to use a lookup table PNG (example) as color cube data for the CIColorCube filter in Swift. All I have tried (and found) so far are examples with a computed color cube, as in this example.
How can I read a PNG as lookup data?
I have now used this and this project to adapt their Objective-C implementation for Swift:
func colorCubeFilterFromLUT(imageName : NSString) -> CIFilter? {
let kDimension : UInt = 64
let lutImage = UIImage(named: imageName)!.CGImage
let lutWidth = CGImageGetWidth(lutImage!)
let lutHeight = CGImageGetHeight(lutImage!)
let rowCount = lutHeight / kDimension
let columnCount = lutWidth / kDimension
if ((lutWidth % kDimension != 0) || (lutHeight % kDimension != 0) || (rowCount * columnCount != kDimension)) {
NSLog("Invalid colorLUT %#", imageName);
return nil
}
let bitmap = self.createRGBABitmapFromImage(lutImage)
let size = Int(kDimension) * Int(kDimension) * Int(kDimension) * sizeof(Float) * 4
let data = UnsafeMutablePointer<Float>(malloc(UInt(size)))
var bitmapOffset : Int = 0
var z : UInt = 0
for (var row: UInt = 0; row < rowCount; row++)
{
for (var y: UInt = 0; y < kDimension; y++)
{
var tmp = z
for (var col: UInt = 0; col < columnCount; col++)
{
for (var x: UInt = 0; x < kDimension; x++) {
let alpha = Float(bitmap[Int(bitmapOffset)]) / 255.0
let red = Float(bitmap[Int(bitmapOffset+1)]) / 255.0
let green = Float(bitmap[Int(bitmapOffset+2)]) / 255.0
let blue = Float(bitmap[Int(bitmapOffset+3)]) / 255.0
var dataOffset = Int(z * kDimension * kDimension + y * kDimension + x) * 4
data[dataOffset] = red
data[dataOffset + 1] = green
data[dataOffset + 2] = blue
data[dataOffset + 3] = alpha
bitmapOffset += 4
}
z++
}
z = tmp
}
z += columnCount
}
let colorCubeData = NSData(bytesNoCopy: data, length: size, freeWhenDone: true)
// create CIColorCube Filter
var filter = CIFilter(name: "CIColorCube")
filter.setValue(colorCubeData, forKey: "inputCubeData")
filter.setValue(kDimension, forKey: "inputCubeDimension")
return filter
}
func createRGBABitmapFromImage(inImage: CGImage) -> UnsafeMutablePointer<Float> {
//Get image width, height
let pixelsWide = CGImageGetWidth(inImage)
let pixelsHigh = CGImageGetHeight(inImage)
// Declare the number of bytes per row. Each pixel in the bitmap in this
// example is represented by 4 bytes; 8 bits each of red, green, blue, and
// alpha.
let bitmapBytesPerRow = Int(pixelsWide) * 4
let bitmapByteCount = bitmapBytesPerRow * Int(pixelsHigh)
// Use the generic RGB color space.
let colorSpace = CGColorSpaceCreateDeviceRGB()
// Allocate memory for image data. This is the destination in memory
// where any drawing to the bitmap context will be rendered.
let bitmapData = malloc(CUnsignedLong(bitmapByteCount)) // bitmap
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
// Create the bitmap context. We want pre-multiplied RGBA, 8-bits
// per component. Regardless of what the source image format is
// (CMYK, Grayscale, and so on) it will be converted over to the format
// specified here by CGBitmapContextCreate.
let context = CGBitmapContextCreate(bitmapData, pixelsWide, pixelsHigh, 8, UInt(bitmapBytesPerRow), colorSpace, bitmapInfo)
let rect = CGRect(x:0, y:0, width:Int(pixelsWide), height:Int(pixelsHigh))
// Draw the image to the bitmap context. Once we draw, the memory
// allocated for the context for rendering will then contain the
// raw image data in the specified color space.
CGContextDrawImage(context, rect, inImage)
// Now we can get a pointer to the image data associated with the bitmap
// context.
// var data = CGBitmapContextGetData(context)
// var dataType = UnsafeMutablePointer<Float>(data)
// return dataType
var convertedBitmap = malloc(UInt(bitmapByteCount * sizeof(Float)))
vDSP_vfltu8(UnsafePointer<UInt8>(bitmapData), 1, UnsafeMutablePointer<Float>(convertedBitmap), 1, vDSP_Length(bitmapByteCount))
free(bitmapData)
return UnsafeMutablePointer<Float>(convertedBitmap)
}
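To actually apply the resulting filter (a sketch; "lut.png", "photo.jpg" and imageView are placeholder names):
if let filter = colorCubeFilterFromLUT("lut.png") {
    let input = CIImage(CGImage: UIImage(named: "photo.jpg")!.CGImage)
    filter.setValue(input, forKey: kCIInputImageKey)
    // UIImage(CIImage:) is the shortest route for display; rendering through a CIContext is usually preferable for repeated use
    imageView.image = UIImage(CIImage: filter.outputImage)
}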
Also see this answer.
Thought I would update this for Swift 3.0. This also works for JPG and PNG 3D color LUTs.
fileprivate func colorCubeFilterFromLUT(imageName : String) -> CIFilter? {
let size = 64
let lutImage = UIImage(named: imageName)!.cgImage
let lutWidth = lutImage!.width
let lutHeight = lutImage!.height
let rowCount = lutHeight / size
let columnCount = lutWidth / size
if ((lutWidth % size != 0) || (lutHeight % size != 0) || (rowCount * columnCount != size)) {
NSLog("Invalid colorLUT %#", imageName);
return nil
}
let bitmap = getBytesFromImage(image: UIImage(named: imageName))!
let floatSize = MemoryLayout<Float>.size
let cubeData = UnsafeMutablePointer<Float>.allocate(capacity: size * size * size * 4 * floatSize)
var z = 0
var bitmapOffset = 0
for _ in 0 ..< rowCount {
for y in 0 ..< size {
let tmp = z
for _ in 0 ..< columnCount {
for x in 0 ..< size {
let alpha = Float(bitmap[bitmapOffset]) / 255.0
let red = Float(bitmap[bitmapOffset+1]) / 255.0
let green = Float(bitmap[bitmapOffset+2]) / 255.0
let blue = Float(bitmap[bitmapOffset+3]) / 255.0
let dataOffset = (z * size * size + y * size + x) * 4
cubeData[dataOffset + 3] = alpha
cubeData[dataOffset + 2] = red
cubeData[dataOffset + 1] = green
cubeData[dataOffset + 0] = blue
bitmapOffset += 4
}
z += 1
}
z = tmp
}
z += columnCount
}
let colorCubeData = NSData(bytesNoCopy: cubeData, length: size * size * size * 4 * floatSize, freeWhenDone: true)
// create CIColorCube Filter
let filter = CIFilter(name: "CIColorCube")
filter?.setValue(colorCubeData, forKey: "inputCubeData")
filter?.setValue(size, forKey: "inputCubeDimension")
return filter
}
fileprivate func getBytesFromImage(image:UIImage?) -> [UInt8]?
{
var pixelValues: [UInt8]?
if let imageRef = image?.cgImage {
let width = Int(imageRef.width)
let height = Int(imageRef.height)
let bitsPerComponent = 8
let bytesPerRow = width * 4
let totalBytes = height * bytesPerRow
let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Little.rawValue
let colorSpace = CGColorSpaceCreateDeviceRGB()
var intensities = [UInt8](repeating: 0, count: totalBytes)
let contextRef = CGContext(data: &intensities, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo)
contextRef?.draw(imageRef, in: CGRect(x: 0.0, y: 0.0, width: CGFloat(width), height: CGFloat(height)))
pixelValues = intensities
}
return pixelValues
}