Generate UIImage for ColorPicker (CGDataProvider and CGImageSource) - ios

I am new to Swift and programming. I want to generate an image to be shown in a ColorPicker. I'm using SwiftColorPicker for that and have the following code for creating the image:
private func createImageFromData(_ width: Int, height: Int) {
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
    provider = CGDataProvider(data: mutableData)
    imageSource = CGImageSourceCreateWithDataProvider(provider, nil)
    let cgImage = CGImage(width: width, height: height, bitsPerComponent: 8, bitsPerPixel: 32,
                          bytesPerRow: width * 4, space: colorSpace, bitmapInfo: bitmapInfo,
                          provider: provider, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
    if let cgimg = cgImage {
        image = UIImage(cgImage: cgimg)
        PHPhotoLibrary.shared().performChanges({
            PHAssetChangeRequest.creationRequestForAsset(from: self.image!)
        }, completionHandler: { success, error in
            if success {
                print("succes")
            } else if let error = error {
                print("error")
                debugPrint(error as Any)
            } else {
                print("woooot?")
            }
        })
    } else {
        print("Where the hell is the image?")
    }
}
func changeSize(_ width: Int, height: Int) {
    self.width = width
    self.height = height
    let size: Int = width * height * 4
    CFDataSetLength(mutableData, size)
    createImageFromData(width, height: height)
}
init(width: Int, height: Int) {
    self.width = width
    self.height = height
    let size: Int = width * height * 4
    mutableData = CFDataCreateMutable(kCFAllocatorDefault, size)
    createImageFromData(width, height: height)
}
public func writeColorData(_ h: CGFloat, a: CGFloat) {
    let d = CFDataGetMutableBytePtr(self.mutableData)
    if width == 0 || height == 0 {
        return
    }
    var i: Int = 0
    let h360: CGFloat = ((h == 1 ? 0 : h) * 360) / 60.0
    let sector: Int = Int(floor(h360))
    let f: CGFloat = h360 - CGFloat(sector)
    let f1: CGFloat = 1.0 - f
    var p: CGFloat = 0.0
    var q: CGFloat = 0.0
    var t: CGFloat = 0.0
    let sd: CGFloat = 1.0 / CGFloat(width)
    let vd: CGFloat = 1 / CGFloat(height)
    var double_s: CGFloat = 0
    var pf: CGFloat = 0
    let v_range = 0..<height
    let s_range = 0..<width
    for v in v_range {
        pf = 255 * CGFloat(v) * vd
        for s in s_range {
            i = (v * width + s) * 4
            d?[i] = UInt8(255)
            if s == 0 {
                q = pf
                d?[i+1] = UInt8(q)
                d?[i+2] = UInt8(q)
                d?[i+3] = UInt8(q)
                continue
            }
            double_s = CGFloat(s) * sd
            p = pf * (1.0 - double_s)
            q = pf * (1.0 - double_s * f)
            t = pf * (1.0 - double_s * f1)
            switch sector {
            case 0:
                d?[i+1] = UInt8(pf)
                d?[i+2] = UInt8(t)
                d?[i+3] = UInt8(p)
            case 1:
                d?[i+1] = UInt8(q)
                d?[i+2] = UInt8(pf)
                d?[i+3] = UInt8(p)
            case 2:
                d?[i+1] = UInt8(p)
                d?[i+2] = UInt8(pf)
                d?[i+3] = UInt8(t)
            case 3:
                d?[i+1] = UInt8(p)
                d?[i+2] = UInt8(q)
                d?[i+3] = UInt8(pf)
            case 4:
                d?[i+1] = UInt8(t)
                d?[i+2] = UInt8(p)
                d?[i+3] = UInt8(pf)
            default:
                d?[i+1] = UInt8(pf)
                d?[i+2] = UInt8(p)
                d?[i+3] = UInt8(q)
            }
        }
    }
}
and:
// Write colors to data array
if self.data1Shown { self.pickerImage2!.writeColorData(self.h, a:self.a) }
else { self.pickerImage1!.writeColorData(self.h, a:self.a)}
with:
public var a: CGFloat = 1 {
    didSet {
        if a < 0 || a > 1 {
            a = max(0, min(1, a))
        }
    }
}
public var h: CGFloat = 0 { // [0,1]
    didSet {
        if h > 1 || h < 0 {
            h = max(0, min(1, h))
        }
        renderBitmap()
        setNeedsDisplay()
    }
}
It fails when I try to save it to the photo album (I only did that because I was not able to set the image, so I tried saving it to see whether the image is OK or there is an error), and voilà, a graceful error appears:
Error Domain=NSCocoaErrorDomain Code=-1 "(null)"
So I want to know whether someone knows what is going on and how I can fix this. Here is the GitHub link to the source (maybe it will help):
https://github.com/MrMatthias/SwiftColorPicker
Thanks!

I've made a helpful Swift extension to create a UIImage of a solid color.
Feel free to use it:
extension UIImage {
    static func image(of color: UIColor, size: CGSize) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(size, false, 0.0)
        color.set()
        UIRectFill(CGRect(origin: CGPoint.zero, size: size))
        let image = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return image!
    }
}
Then you can use it by:
let redImage = UIImage.image(of: .red, size: CGSize(width: 200, height: 200))
Hope that helps!
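For the case in the original question, where the pixels already live in a raw buffer, the CGImageSource step isn't needed; a CGImage can be built straight from the bytes. Below is a minimal sketch (a hypothetical helper, not part of SwiftColorPicker), assuming a premultiplied-first ARGB layout of exactly width * height * 4 bytes:

import UIKit

// A minimal sketch: build a UIImage from a raw ARGB byte buffer.
// Assumption: the buffer holds premultiplied-first ARGB data, width * height * 4 bytes.
func makeImage(fromARGBBytes pixelBytes: [UInt8], width: Int, height: Int) -> UIImage? {
    guard pixelBytes.count == width * height * 4 else { return nil }
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
    guard let provider = CGDataProvider(data: Data(pixelBytes) as CFData),
          let cgImage = CGImage(width: width,
                                height: height,
                                bitsPerComponent: 8,
                                bitsPerPixel: 32,
                                bytesPerRow: width * 4,
                                space: colorSpace,
                                bitmapInfo: bitmapInfo,
                                provider: provider,
                                decode: nil,
                                shouldInterpolate: true,
                                intent: .defaultIntent) else {
        return nil
    }
    return UIImage(cgImage: cgImage)
}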

Related

Why is my screen not rendering anything?

I have the following code. When I run it I get a blank screen. This block of code is inside sceneDidLoad(), so it does get executed, but nothing is displayed. Am I missing something?
let worldNode = SKNode()
let height = 100
let width = 100
let regions: [Float: String] = [-0.04: "sand", -0.08: "water", 0.9: "grass"]
let noiseSource = GKPerlinNoiseSource()
let noise: GKNoise = GKNoise(noiseSource: noiseSource)
let noiseMap: GKNoiseMap = GKNoiseMap(noise: noise)
let tileMapNode = SKTileMapNode()
tileMapNode.enableAutomapping = true
tileMapNode.numberOfRows = height
tileMapNode.numberOfColumns = width
worldNode.addChild(tileMapNode)
for y in 0 ... height {
    for x in 0 ... width {
        let currentHeight = noiseMap.value(atPosition: vector2(Int32(x), Int32(y)))
        for (key, value) in regions {
            if currentHeight <= key {
                //colourMap [y * mapChunkSize + x] = regions[i];
                let tileSize = CGSize(width: 32.0, height: 32.0)
                let tileTexture = SKTexture(imageNamed: value)
                let tileDef = SKTileDefinition(texture: tileTexture, size: tileSize)
                let tileGroup = SKTileGroup(tileDefinition: tileDef)
                tileMapNode.setTileGroup(tileGroup, forColumn: x, row: y)
                print("Tiling: \(value)")
                break
            }
        }
    }
}
self.addChild(worldNode)
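No accepted answer is shown here, but one likely culprit is that the tile map node is created with the parameterless SKTileMapNode() initializer, so it has no tile set and no usable tile size, and nothing gets drawn even though setTileGroup is called. A minimal sketch of the usual setup follows; it assumes textures named "sand", "water", and "grass" exist in the project and creates the tile groups once instead of inside the loop:

import SpriteKit
import GameplayKit

// A sketch (assumptions: asset textures named "sand", "water", "grass";
// thresholds taken from the question).
func buildTileMap(columns: Int, rows: Int) -> SKTileMapNode {
    let tileSize = CGSize(width: 32, height: 32)

    // Build one tile group per terrain type, once, and register them in a tile set.
    let groups = ["sand", "water", "grass"].map { name -> SKTileGroup in
        let definition = SKTileDefinition(texture: SKTexture(imageNamed: name), size: tileSize)
        return SKTileGroup(tileDefinition: definition)
    }
    let tileSet = SKTileSet(tileGroups: groups)

    // Create the map with an explicit tile set, column/row count, and tile size.
    let map = SKTileMapNode(tileSet: tileSet, columns: columns, rows: rows, tileSize: tileSize)

    // Sample Perlin noise and pick a group per tile.
    let noiseMap = GKNoiseMap(GKNoise(GKPerlinNoiseSource()),
                              size: vector_double2(1, 1),
                              origin: vector_double2(0, 0),
                              sampleCount: vector_int2(Int32(columns), Int32(rows)),
                              seamless: false)
    for row in 0..<rows {
        for column in 0..<columns {
            let value = noiseMap.value(at: vector_int2(Int32(column), Int32(row)))
            let index: Int
            if value <= -0.08 { index = 1 }       // water
            else if value <= -0.04 { index = 0 }  // sand
            else { index = 2 }                    // grass
            map.setTileGroup(groups[index], forColumn: column, row: row)
        }
    }
    return map
}

// In sceneDidLoad():
// worldNode.addChild(buildTileMap(columns: 100, rows: 100))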

How to read and log the raw pixels of image in swift iOS

I need to read the pixel values of an image and iterate over them, printing them to the Swift output. I have written the code below and used an RGBAImage class to read out pixels. I'm getting lost between the CGContextRef and the iteration. I tried to port the Objective-C code that gets pixel data from a CGImage, since I want to work in Swift.
func createRGBAPixel(inImage: CGImageRef) -> CGContextRef {
    // Image width, height
    let pixelWidth = CGImageGetWidth(inImage)
    let pixelHeight = CGImageGetHeight(inImage)
    // Declaring number of bytes
    let bytesPerRow = Int(pixelWidth) * 4
    let byteCount = bytesPerRow * Int(pixelHeight)
    // RGB color space
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    // Allocating image data
    let mapData = malloc(byteCount)
    let mapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
    // Create bitmap context
    let context = CGBitmapContextCreate(mapData, pixelWidth, pixelHeight, Int(8), Int(bytesPerRow), colorSpace, mapInfo.rawValue)
    let pixelImage = CGBitmapContextCreate(pixels, pixelWidth, pixelHeight, bitsPerComponent, bytesPerRow, colorSpace, mapInfo)
    let CGContextRef = pixelImage
    let CGContextDrawImage(context, CGRectMake(0, 0, pixelWidth, pixelHeight), inImage)
    // Iterating and logging
    print("Logging pixel counts")
    let pixels = calloc(pixelHeight * pixelWidth, sizeof(UInt32))
    let myImage = CGImageRef: inImage
    let myRGBA = RGBAImage(image: myImage)! // RGBAImage class to read pixels.
    var number = 0
    var currentPixel: Int32 = 0
    currentPixel = pixels * UInt32
    for number in 0..<pixelHeight {
        for number in 0..<pixelWidth {
            var color = color * currentPixel
            print((pixel.red + pixel.green + pixel.blue) / 3.0)
            currentPixel++
        }
    }
    return context!
}
I created a small class for this:
class ImagePixelReader {
    enum Component: Int {
        case r = 0
        case g = 1
        case b = 2
        case alpha = 3
    }
    struct Color {
        var r: UInt8
        var g: UInt8
        var b: UInt8
        var a: UInt8
        var uiColor: UIColor {
            return UIColor(red: CGFloat(r)/255.0, green: CGFloat(g)/255.0, blue: CGFloat(b)/255.0, alpha: CGFloat(a)/255.0)
        }
    }
    let image: UIImage
    private var data: CFData
    private let pointer: UnsafePointer<UInt8>
    private let scale: Int
    init?(image: UIImage) {
        self.image = image
        guard let cfdata = self.image.cgImage?.dataProvider?.data,
            let pointer = CFDataGetBytePtr(cfdata) else {
                return nil
        }
        self.scale = Int(image.scale)
        self.data = cfdata
        self.pointer = pointer
    }
    func componentAt(_ component: Component, x: Int, y: Int) -> UInt8 {
        assert(CGFloat(x) < image.size.width)
        assert(CGFloat(y) < image.size.height)
        let pixelPosition = (Int(image.size.width) * y * scale + x) * 4 * scale
        return pointer[pixelPosition + component.rawValue]
    }
    func colorAt(x: Int, y: Int) -> Color {
        assert(CGFloat(x) < image.size.width)
        assert(CGFloat(y) < image.size.height)
        let pixelPosition = (Int(image.size.width) * y * scale + x) * 4 * scale
        return Color(r: pointer[pixelPosition + Component.r.rawValue],
                     g: pointer[pixelPosition + Component.g.rawValue],
                     b: pointer[pixelPosition + Component.b.rawValue],
                     a: pointer[pixelPosition + Component.alpha.rawValue])
    }
}
How to use:
if let reader = ImagePixelReader(image: yourImage) {
    // get alpha or color
    let alpha = reader.componentAt(.alpha, x: 10, y: 10)
    let color = reader.colorAt(x: 10, y: 10).uiColor
    // getting all the pixels you need
    var values = ""
    // iterate over all pixels
    for x in 0 ..< Int(yourImage.size.width) {
        for y in 0 ..< Int(yourImage.size.height) {
            let color = reader.colorAt(x: x, y: y)
            values += "[\(x):\(y):\(color)] "
        }
        // add new line for every new row
        values += "\n"
    }
    print(values)
}
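One caveat with the reader above (an observation, not part of the original answer): it assumes rows are packed as exactly width * 4 bytes with 4 bytes per pixel. Core Graphics may pad rows for alignment, so a more defensive lookup reads bytesPerRow and bitsPerPixel from the CGImage itself, roughly like this:

import UIKit

// A defensive sketch (hypothetical helper): derive the byte offset from the
// CGImage's own bytesPerRow and bitsPerPixel instead of assuming width * 4.
func componentAt(in image: UIImage, x: Int, y: Int, componentIndex: Int) -> UInt8? {
    guard let cgImage = image.cgImage,
          let data = cgImage.dataProvider?.data,
          let pointer = CFDataGetBytePtr(data) else { return nil }
    let bytesPerPixel = cgImage.bitsPerPixel / 8
    let offset = y * cgImage.bytesPerRow + x * bytesPerPixel + componentIndex
    guard offset >= 0, offset < CFDataGetLength(data) else { return nil }
    return pointer[offset]
}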

Core Graphics changes UIImage format

I wrote a simple algorithm that detects the edges in UIImages. It works perfectly fine with images taken from the bundle (see the first image).
After I do some image manipulation (applying filters, masks, cropping, etc.) and pass the image to the same function, it comes out messed up (image 2). I assume Core Graphics is changing something internally in the image. The question is: what?
That's how I start processing the image:
public struct PixelData {
    var a: UInt8 = 255
    var r: UInt8
    var g: UInt8
    var b: UInt8
}
func findEdges(cgImage: CGImageRef) -> UIImage {
    var pixelData = CGDataProviderCopyData(CGImageGetDataProvider(cgImage))
    //var data = CFDataGetMutableBytePtr
    var mdata: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
    var data = UnsafeMutablePointer<UInt8>(mdata)
    let height = CGImageGetHeight(cgImage)
    let width = CGImageGetWidth(cgImage)
    var start = CACurrentMediaTime()
    // create an empty buffer
    let emptyPixel = PixelData(a: 0, r: 0, g: 0, b: 0)
    let blackPixel = PixelData(a: 255, r: 255, g: 255, b: 255)
    var buffer = [PixelData](count: Int(width * height), repeatedValue: emptyPixel)
    var booleanArray = [Bool](count: Int(width * height), repeatedValue: false)
    for var y = 0; y < height - 1; y++ {
        for var x = 0; x < width; x++ {
            // Current one
            var currentPixelInfo: Int = ((Int(width) * Int(y)) + Int(x)) * 4
            var currentAlpha = CGFloat(data[currentPixelInfo + 3]) / CGFloat(255.0)
            var downPixelInfo: Int = ((Int(width) * Int(y + 1)) + Int(x)) * 4
            var downAlpha = CGFloat(data[downPixelInfo + 3]) / CGFloat(255.0)
            if y == 0 && currentAlpha != 0 { // Top Edge
                booleanArray[currentPixelInfo / 4] = true
                buffer[currentPixelInfo / 4] = blackPixel
            }
            if y > 0 && y < height - 2 {
                // one up
                var topPixelInfo: Int = ((Int(width) * Int(y - 1)) + Int(x)) * 4
                var topAlpha = CGFloat(data[topPixelInfo + 3]) / CGFloat(255.0)
                if downAlpha == 0 && currentAlpha != 0 { // edge
                    booleanArray[currentPixelInfo / 4] = true
                    buffer[currentPixelInfo / 4] = blackPixel
                }
                if topAlpha == 0 && currentAlpha != 0 { // edge
                    booleanArray[currentPixelInfo / 4] = true
                    buffer[currentPixelInfo / 4] = blackPixel
                }
            }
            if y == height - 2 && downAlpha != 0 {
                booleanArray[downPixelInfo / 4] = true
                buffer[downPixelInfo / 4] = blackPixel
            }
        }
    }
    for var y = 0; y < height - 1; y++ {
        for var x = 0; x < width - 1; x++ {
            // Current one
            var currentPixelInfo: Int = ((Int(width) * Int(y)) + Int(x)) * 4
            var currentAlpha = CGFloat(data[currentPixelInfo + 3]) / CGFloat(255.0)
            // Next
            var nextPixelInfo: Int = ((Int(width) * Int(y)) + Int(x + 1)) * 4
            var nextAlpha = CGFloat(data[nextPixelInfo + 3]) / CGFloat(255.0)
            // check horizontally
            if x == 0 && currentAlpha != 0 { // Edge case
                booleanArray[currentPixelInfo / 4] = true
                buffer[currentPixelInfo / 4] = blackPixel
            }
            if x > 0 && x < width - 2 {
                // One before
                var previousPixelInfo: Int = ((Int(width) * Int(y)) + Int(x - 1)) * 4
                var previousAlpha = CGFloat(data[previousPixelInfo + 3]) / CGFloat(255.0)
                if nextAlpha == 0 && currentAlpha != 0 { // Living on the edge
                    booleanArray[currentPixelInfo / 4] = true
                    buffer[currentPixelInfo / 4] = blackPixel
                }
                if previousAlpha == 0 && currentAlpha != 0 { // Living on the edge
                    booleanArray[currentPixelInfo / 4] = true
                    buffer[currentPixelInfo / 4] = blackPixel
                }
            }
            if x == width - 2 && nextAlpha != 0 {
                booleanArray[nextPixelInfo / 4] = true
                buffer[nextPixelInfo / 4] = blackPixel
            }
        }
    }
    var stop = CACurrentMediaTime()
    let image = imageFromARGB32Bitmap(buffer, width: width, height: height)
    println(stop - start)
    return image!
    //self.imageView.image = image
}
func imageFromARGB32Bitmap(pixels: [PixelData], width: Int, height: Int) -> UIImage? {
    let bitsPerComponent: Int = 8
    let bitsPerPixel: Int = 32
    assert(pixels.count == Int(width * height))
    var data = pixels // Copy to mutable []
    let providerRef = CGDataProviderCreateWithCFData(
        NSData(bytes: &data, length: data.count * sizeof(PixelData))
    )
    // let redPixel = PixelData(a: 255, r: 192, g: 0, b: 0)
    let cgim = CGImageCreate(
        width,
        height,
        bitsPerComponent,
        bitsPerPixel,
        width * Int(sizeof(PixelData)),
        rgbColorSpace,
        bitmapInfo,
        providerRef,
        nil,
        true,
        kCGRenderingIntentDefault
    )
    return UIImage(CGImage: cgim)
}
Although I didn't figure out what exactly UIImage does to the raw pixel data, the function I wrote fixes the problem. The key point here is using the CGImageAlphaInfo.PremultipliedFirst value for the bitmap info, since my data structure expects ARGB format.
func imageFromBitmapContext(image: CGImageRef, width: Int, height: Int) -> UIImage? {
    let colorSpace: CGColorSpace = CGColorSpaceCreateDeviceRGB()
    let bitmapInfo = CGBitmapInfo(CGImageAlphaInfo.PremultipliedFirst.rawValue)
    let bytesPerRow = 4 * width
    let context = CGBitmapContextCreate(nil, Int(width), Int(height), 8, Int(bytesPerRow), colorSpace, bitmapInfo)
    CGContextDrawImage(context, CGRectMake(0, 0, CGFloat(width), CGFloat(height)), image)
    let image = CGBitmapContextCreateImage(context)
    return UIImage(CGImage: image)
}
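For readers on current Swift, roughly the same redraw-into-a-known-format idea (a sketch, not the original code) looks like this: draw the CGImage into a bitmap context whose byte layout you control, then create the image from that context.

import UIKit

// A modern-Swift sketch of the same normalization step: redraw into an
// 8-bit-per-component, premultiplied-first ARGB context so the pixel layout
// matches what the edge-detection code expects.
func normalizedImage(from cgImage: CGImage) -> UIImage? {
    let width = cgImage.width
    let height = cgImage.height
    guard let context = CGContext(data: nil,
                                  width: width,
                                  height: height,
                                  bitsPerComponent: 8,
                                  bytesPerRow: 4 * width,
                                  space: CGColorSpaceCreateDeviceRGB(),
                                  bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue) else {
        return nil
    }
    context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
    guard let normalized = context.makeImage() else { return nil }
    return UIImage(cgImage: normalized)
}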

Colour correction of Photoshop LUT filter using CIFilter

I used Photoshop to create a LUT filter and an iOS CIFilter to read the LUT image, but the filtered image created on iOS does not match the filtered image created by Photoshop.
How can I trace the issue?
This is the original image
This is the image with filter I created from photoshop
This is the image with filter I created from iPhone
This is the LUT image I am using
Please try this, it works for me:
public class LUTsHelper {
    public static func applyLUTsFilter(lutImage: String, dimension: Int, colorSpace: CGColorSpace) -> CIFilter? {
        guard let image = UIImage(named: lutImage) else {
            return nil
        }
        guard let cgImage = image.cgImage else {
            return nil
        }
        guard let bitmap = createBitmap(image: cgImage, colorSpace: colorSpace) else {
            return nil
        }
        let width = cgImage.width
        let height = cgImage.height
        let rowNum = width / dimension
        let columnNum = height / dimension
        let dataSize = dimension * dimension * dimension * MemoryLayout<Float>.size * 4
        var array = Array<Float>(repeating: 0, count: dataSize)
        var bitmapOffest: Int = 0
        var z: Int = 0
        for _ in stride(from: 0, to: rowNum, by: 1) {
            for y in stride(from: 0, to: dimension, by: 1) {
                let tmp = z
                for _ in stride(from: 0, to: columnNum, by: 1) {
                    for x in stride(from: 0, to: dimension, by: 1) {
                        let dataOffset = (z * dimension * dimension + y * dimension + x) * 4
                        let position = bitmap.advanced(by: bitmapOffest)
                        array[dataOffset + 0] = Float(position.advanced(by: 0).pointee) / 255
                        array[dataOffset + 1] = Float(position.advanced(by: 1).pointee) / 255
                        array[dataOffset + 2] = Float(position.advanced(by: 2).pointee) / 255
                        array[dataOffset + 3] = Float(position.advanced(by: 3).pointee) / 255
                        bitmapOffest += 4
                    }
                    z += 1
                }
                z = tmp
            }
            z += columnNum
        }
        free(bitmap)
        let data = Data(bytes: array, count: dataSize)
        guard let cubeFilter = CIFilter(name: "CIColorCubeWithColorSpace") else {
            return nil
        }
        cubeFilter.setValue(dimension, forKey: "inputCubeDimension")
        cubeFilter.setValue(data, forKey: "inputCubeData")
        cubeFilter.setValue(colorSpace, forKey: "inputColorSpace")
        return cubeFilter
    }

    private static func createBitmap(image: CGImage, colorSpace: CGColorSpace) -> UnsafeMutablePointer<UInt8>? {
        let width = image.width
        let height = image.height
        let bitsPerComponent = 8
        let bytesPerRow = width * 4
        let bitmapSize = bytesPerRow * height
        guard let data = malloc(bitmapSize) else {
            return nil
        }
        guard let context = CGContext(
            data: data,
            width: width,
            height: height,
            bitsPerComponent: bitsPerComponent,
            bytesPerRow: bytesPerRow,
            space: colorSpace,
            bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue,
            releaseCallback: nil,
            releaseInfo: nil) else {
                return nil
        }
        context.draw(image, in: CGRect(x: 0, y: 0, width: width, height: height))
        return data.bindMemory(to: UInt8.self, capacity: bitmapSize)
    }
}
Now use the class:
let colorSpace: CGColorSpace = CGColorSpace(name: CGColorSpace.sRGB) ?? CGColorSpaceCreateDeviceRGB()
let lutFilter = LUTsHelper.applyLUTsFilter(lutImage: "demo.png", dimension: 64, colorSpace: colorSpace)
lutFilter?.setValue(outputImage, forKey: "inputImage")
let lutOutputImage = lutFilter?.outputImage
if let output = lutOutputImage {
    outputImage = output
}
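If outputImage here is a CIImage, it still has to be rendered before it can be shown in a view; a minimal sketch of that last step (assuming the variable names from the snippet above) is:

import CoreImage
import UIKit

// Render the filtered CIImage into a displayable UIImage (sketch;
// `outputImage` is assumed to be the CIImage produced above).
let ciContext = CIContext(options: nil)
if let cgResult = ciContext.createCGImage(outputImage, from: outputImage.extent) {
    let filteredImage = UIImage(cgImage: cgResult)
    // e.g. imageView.image = filteredImage
}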

iOS Swift Flood fill algorithm

I created this extension for a "bucket fill" (flood fill) from a touch point:
extension UIImageView {
    func bucketFill(startPoint: CGPoint, newColor: UIColor) {
        var newRed, newGreen, newBlue, newAlpha: CUnsignedChar
        let pixelsWide = CGImageGetWidth(self.image!.CGImage)
        let pixelsHigh = CGImageGetHeight(self.image!.CGImage)
        let rect = CGRect(x: 0, y: 0, width: Int(pixelsWide), height: Int(pixelsHigh))
        let bitmapBytesPerRow = Int(pixelsWide) * 4
        var context = self.image!.createARGBBitmapContext()
        // Clear the context
        CGContextClearRect(context, rect)
        // Draw the image to the bitmap context. Once we draw, the memory
        // allocated for the context for rendering will then contain the
        // raw image data in the specified color space.
        CGContextDrawImage(context, rect, self.image!.CGImage)
        var data = CGBitmapContextGetData(context)
        var dataType = UnsafeMutablePointer<UInt8>(data)
        let newColorRef = CGColorGetComponents(newColor.CGColor)
        if CGColorGetNumberOfComponents(newColor.CGColor) == 2 {
            newRed = CUnsignedChar(newColorRef[0] * 255)
            newGreen = CUnsignedChar(newColorRef[0] * 255)
            newBlue = CUnsignedChar(newColorRef[0] * 255)
            newAlpha = CUnsignedChar(newColorRef[1])
        } else {
            newRed = CUnsignedChar(newColorRef[0] * 255)
            newGreen = CUnsignedChar(newColorRef[1] * 255)
            newBlue = CUnsignedChar(newColorRef[2] * 255)
            newAlpha = CUnsignedChar(newColorRef[3])
        }
        let newColorStr = ColorRGB(red: newRed, green: newGreen, blue: newBlue)
        var stack = Stack()
        let offset = 4 * ((Int(pixelsWide) * Int(startPoint.y)) + Int(startPoint.x))
        //let alpha = dataType[offset]
        let startRed: UInt8 = dataType[offset + 1]
        let startGreen: UInt8 = dataType[offset + 2]
        let startBlue: UInt8 = dataType[offset + 3]
        stack.push(startPoint)
        while !stack.isEmpty() {
            let point: CGPoint = stack.pop() as! CGPoint
            let offset = 4 * ((Int(pixelsWide) * Int(point.y)) + Int(point.x))
            let alpha = dataType[offset]
            let red: UInt8 = dataType[offset + 1]
            let green: UInt8 = dataType[offset + 2]
            let blue: UInt8 = dataType[offset + 3]
            if red == newRed && green == newGreen && blue == newBlue {
                continue
            }
            if red.absoluteDifference(startRed) < 4 && green.absoluteDifference(startGreen) < 4 && blue.absoluteDifference(startBlue) < 4 {
                dataType[offset] = 255
                dataType[offset + 1] = newRed
                dataType[offset + 2] = newGreen
                dataType[offset + 3] = newBlue
                if point.x > 0 {
                    stack.push(CGPoint(x: point.x - 1, y: point.y))
                }
                if point.x < CGFloat(pixelsWide) {
                    stack.push(CGPoint(x: point.x + 1, y: point.y))
                }
                if point.y > 0 {
                    stack.push(CGPoint(x: point.x, y: point.y - 1))
                }
                if point.y < CGFloat(pixelsHigh) {
                    stack.push(CGPoint(x: point.x, y: point.y + 1))
                }
            } else {
            }
        }
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.PremultipliedFirst.rawValue)
        let finalContext = CGBitmapContextCreate(data, pixelsWide, pixelsHigh, CLong(8), CLong(bitmapBytesPerRow), colorSpace, bitmapInfo)
        let imageRef = CGBitmapContextCreateImage(finalContext)
        self.image = UIImage(CGImage: imageRef, scale: self.image!.scale, orientation: self.image!.imageOrientation)
    }
}
Now I would like to improve performance. How can I make this algorithm run faster? The UInt8.absoluteDifference extension is my attempt to include nearly identical colors in the flood fill. It works, but it could really be improved; I know that, but I don't know how.
extension UInt8 {
    func absoluteDifference(subtrahend: UInt8) -> UInt8 {
        if self > subtrahend {
            return self - subtrahend
        } else {
            return subtrahend - self
        }
    }
}
My Stack class:
class Stack {
    var count: Int = 0
    var head: Node = Node()

    init() {
    }

    func isEmpty() -> Bool {
        return self.count == 0
    }

    func push(value: Any) {
        if isEmpty() {
            self.head = Node()
        }
        var node = Node(value: value)
        node.next = self.head
        self.head = node
        self.count++
    }

    func pop() -> Any? {
        if isEmpty() {
            return nil
        }
        var node = self.head
        self.head = node.next!
        self.count--
        return node.value
    }
}
Thanks for the help!
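One straightforward improvement (a suggestion, not from the original post) is to drop the Node-based Stack and the CGPoint boxing through Any: a plain Swift array of integer coordinates gives amortized O(1) push/pop without per-node allocations or casts, and integer coordinates also avoid the repeated CGFloat conversions in the inner loop. A sketch in current Swift:

// An array-backed stack for the flood fill (sketch; assumes the rest of
// bucketFill is adapted to integer coordinates).
struct PointStack {
    private var storage: [(x: Int, y: Int)] = []
    var isEmpty: Bool { return storage.isEmpty }
    mutating func push(_ x: Int, _ y: Int) { storage.append((x, y)) }
    mutating func pop() -> (x: Int, y: Int)? { return storage.popLast() }
}

// Usage inside bucketFill, replacing the Node-based Stack:
// var stack = PointStack()
// stack.push(Int(startPoint.x), Int(startPoint.y))
// while let point = stack.pop() {
//     // same pixel test and neighbor pushes as before, using point.x / point.y
// }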
