Swift: converting between UInt and Int (iOS)

I have the following methods
var photos: [MWPhoto] = [MWPhoto]()

func numberOfPhotosInPhotoBrowser(photoBrowser: MWPhotoBrowser!) -> UInt {
    return self.photos.count
}

func photoBrowser(photoBrowser: MWPhotoBrowser!, photoAtIndex index: UInt) -> MWPhotoProtocol! {
    return self.photos[index]
}
However, for the first method I get "Int is not convertible to UInt" (since self.photos.count returns an Int), and for the second "UInt is not convertible to Int" (since an array subscript only accepts an Int for its index).
How can I correctly convert between UInt and Int?

In the first one, the return type is UInt, but you return an Int, since count returns an Int.
Basically, UInt has initializers that take various value types as arguments, such as Int, CGFloat, Double, or even String, and return a new UInt.
UInt(8)          // result is 8 as a UInt
UInt(20.12)      // result is 20 as a UInt (the fractional part is truncated)
UInt(Double(10)) // result is 10 as a UInt
UInt("10")       // result is 10 as a UInt?; note this is a failable initializer, so it can be a value or nil
So the fix for the first method is:
func numberOfPhotosInPhotoBrowser(photoBrowser: MWPhotoBrowser!) -> UInt {
    return UInt(self.photos.count)
}
For the second one, the array subscript expects an Int but you are passing a UInt, so create a new Int from the UInt:
func photoBrowser(photoBrowser: MWPhotoBrowser!, photoAtIndex index: UInt) -> MWPhotoProtocol! {
    return self.photos[Int(index)]
}

// initializing Int
var someInt: Int = 8
someInt
// Converting Int to UInt
var someIntToUInt: UInt = UInt(someInt)
someIntToUInt
// initializing UInt
var someUInt: UInt = 10
someUInt
// Converting UInt to Int
var someUIntToInt: Int = Int(someUInt)
someUIntToInt
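These plain initializers trap at runtime when the value doesn't fit the target type, which is exactly the case the bitPattern: initializers below are for. A small illustration (the crashing lines are commented out):

let negative = -1
// UInt(negative)  // traps at runtime: a negative value is not representable as UInt
let huge = UInt.max
// Int(huge)       // traps at runtime: the value overflows Int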

If you want an unsigned integer from a negative value, use UInt(bitPattern:):
let intVal = -1
let uintVal = UInt(bitPattern: intVal) // uintVal == 0xffffffffffffffff (on a 64-bit platform)

I got so frustrated with Swift's cryptic conversion labels bitPattern: and truncatingBitPattern:, and with my inability to remember which one to use when, that I created the following class containing a large number of conversion methods.
I'm not necessarily recommending that you include this in your program. I'm sure many people will say that Swift is trying to protect us from ourselves and that sabotaging that effort is dumb. So maybe you should just keep this file somewhere as a kind of cheat sheet so you can quickly determine how to do a conversion, and copy the parameters into your program when needed.
Incidentally, JDI stands for "just do it".
/// Class containing a large number of static methods to convert an Int to a UInt or vice-versa, and
/// also to perform conversions between different bit sizes, for example UInt32 to UInt8.
///
/// Many of these "conversions" are trivial, and are only included for the sake of completeness.
///
/// A few of the conversions involving Int and UInt can give different results when run on 32-bit
/// and 64-bit systems. All of the conversions where the bit sizes of both the source and the target
/// are specified will always give the same result, independent of platform.
public class JDI {
    // MARK: - To signed Int

    // To Int8
    public static func ToInt8(_ x: Int8) -> Int8 {
        return x
    }
    public static func ToInt8(_ x: Int32) -> Int8 {
        return Int8(truncatingBitPattern: x)
    }
    public static func ToInt8(_ x: Int64) -> Int8 {
        return Int8(truncatingBitPattern: x)
    }
    public static func ToInt8(_ x: Int) -> Int8 {
        return Int8(truncatingBitPattern: x)
    }
    public static func ToInt8(_ x: UInt8) -> Int8 {
        return Int8(bitPattern: x)
    }
    public static func ToInt8(_ x: UInt32) -> Int8 {
        return Int8(truncatingBitPattern: x)
    }
    public static func ToInt8(_ x: UInt64) -> Int8 {
        return Int8(truncatingBitPattern: x)
    }
    public static func ToInt8(_ x: UInt) -> Int8 {
        return Int8(truncatingBitPattern: x)
    }

    // To Int32
    public static func ToInt32(_ x: Int8) -> Int32 {
        return Int32(x)
    }
    public static func ToInt32(_ x: Int32) -> Int32 {
        return x
    }
    public static func ToInt32(_ x: Int64) -> Int32 {
        return Int32(truncatingBitPattern: x)
    }
    public static func ToInt32(_ x: Int) -> Int32 {
        return Int32(truncatingBitPattern: x)
    }
    public static func ToInt32(_ x: UInt8) -> Int32 {
        return Int32(x)
    }
    public static func ToInt32(_ x: UInt32) -> Int32 {
        return Int32(bitPattern: x)
    }
    public static func ToInt32(_ x: UInt64) -> Int32 {
        return Int32(truncatingBitPattern: x)
    }
    public static func ToInt32(_ x: UInt) -> Int32 {
        return Int32(truncatingBitPattern: x)
    }

    // To Int64
    public static func ToInt64(_ x: Int8) -> Int64 {
        return Int64(x)
    }
    public static func ToInt64(_ x: Int32) -> Int64 {
        return Int64(x)
    }
    public static func ToInt64(_ x: Int64) -> Int64 {
        return x
    }
    public static func ToInt64(_ x: Int) -> Int64 {
        return Int64(x)
    }
    public static func ToInt64(_ x: UInt8) -> Int64 {
        return Int64(x)
    }
    public static func ToInt64(_ x: UInt32) -> Int64 {
        return Int64(x)
    }
    public static func ToInt64(_ x: UInt64) -> Int64 {
        return Int64(bitPattern: x)
    }
    public static func ToInt64(_ x: UInt) -> Int64 {
        return Int64(bitPattern: UInt64(x)) // Does not extend high bit of 32-bit input
    }

    // To Int
    public static func ToInt(_ x: Int8) -> Int {
        return Int(x)
    }
    public static func ToInt(_ x: Int32) -> Int {
        return Int(x)
    }
    public static func ToInt(_ x: Int64) -> Int {
        return Int(truncatingBitPattern: x)
    }
    public static func ToInt(_ x: Int) -> Int {
        return x
    }
    public static func ToInt(_ x: UInt8) -> Int {
        return Int(x)
    }
    public static func ToInt(_ x: UInt32) -> Int {
        if MemoryLayout<Int>.size == MemoryLayout<Int32>.size {
            return Int(Int32(bitPattern: x)) // For 32-bit systems, non-authorized interpretation
        }
        return Int(x)
    }
    public static func ToInt(_ x: UInt64) -> Int {
        return Int(truncatingBitPattern: x)
    }
    public static func ToInt(_ x: UInt) -> Int {
        return Int(bitPattern: x)
    }

    // MARK: - To unsigned Int

    // To UInt8
    public static func ToUInt8(_ x: Int8) -> UInt8 {
        return UInt8(bitPattern: x)
    }
    public static func ToUInt8(_ x: Int32) -> UInt8 {
        return UInt8(truncatingBitPattern: x)
    }
    public static func ToUInt8(_ x: Int64) -> UInt8 {
        return UInt8(truncatingBitPattern: x)
    }
    public static func ToUInt8(_ x: Int) -> UInt8 {
        return UInt8(truncatingBitPattern: x)
    }
    public static func ToUInt8(_ x: UInt8) -> UInt8 {
        return x
    }
    public static func ToUInt8(_ x: UInt32) -> UInt8 {
        return UInt8(truncatingBitPattern: x)
    }
    public static func ToUInt8(_ x: UInt64) -> UInt8 {
        return UInt8(truncatingBitPattern: x)
    }
    public static func ToUInt8(_ x: UInt) -> UInt8 {
        return UInt8(truncatingBitPattern: x)
    }

    // To UInt32
    public static func ToUInt32(_ x: Int8) -> UInt32 {
        return UInt32(bitPattern: Int32(x)) // Extend sign bit, assume minus input significant
    }
    public static func ToUInt32(_ x: Int32) -> UInt32 {
        return UInt32(bitPattern: x)
    }
    public static func ToUInt32(_ x: Int64) -> UInt32 {
        return UInt32(truncatingBitPattern: x)
    }
    public static func ToUInt32(_ x: Int) -> UInt32 {
        return UInt32(truncatingBitPattern: x)
    }
    public static func ToUInt32(_ x: UInt8) -> UInt32 {
        return UInt32(x)
    }
    public static func ToUInt32(_ x: UInt32) -> UInt32 {
        return x
    }
    public static func ToUInt32(_ x: UInt64) -> UInt32 {
        return UInt32(truncatingBitPattern: x)
    }
    public static func ToUInt32(_ x: UInt) -> UInt32 {
        return UInt32(truncatingBitPattern: x)
    }

    // To UInt64
    public static func ToUInt64(_ x: Int8) -> UInt64 {
        return UInt64(bitPattern: Int64(x)) // Extend sign bit, assume minus input significant
    }
    public static func ToUInt64(_ x: Int32) -> UInt64 {
        return UInt64(bitPattern: Int64(x)) // Extend sign bit, assume minus input significant
    }
    public static func ToUInt64(_ x: Int64) -> UInt64 {
        return UInt64(bitPattern: x)
    }
    public static func ToUInt64(_ x: Int) -> UInt64 {
        return UInt64(bitPattern: Int64(x)) // Extend sign bit if necessary, assume minus input significant
    }
    public static func ToUInt64(_ x: UInt8) -> UInt64 {
        return UInt64(x)
    }
    public static func ToUInt64(_ x: UInt32) -> UInt64 {
        return UInt64(x)
    }
    public static func ToUInt64(_ x: UInt64) -> UInt64 {
        return x
    }
    public static func ToUInt64(_ x: UInt) -> UInt64 {
        return UInt64(x) // Does not extend high bit of 32-bit input
    }

    // To UInt
    public static func ToUInt(_ x: Int8) -> UInt {
        return UInt(bitPattern: Int(x)) // Extend sign bit, assume minus input significant
    }
    public static func ToUInt(_ x: Int32) -> UInt {
        return UInt(truncatingBitPattern: Int64(x)) // Extend sign bit, assume minus input significant
    }
    public static func ToUInt(_ x: Int64) -> UInt {
        return UInt(truncatingBitPattern: x)
    }
    public static func ToUInt(_ x: Int) -> UInt {
        return UInt(bitPattern: x)
    }
    public static func ToUInt(_ x: UInt8) -> UInt {
        return UInt(x)
    }
    public static func ToUInt(_ x: UInt32) -> UInt {
        return UInt(x)
    }
    public static func ToUInt(_ x: UInt64) -> UInt {
        return UInt(truncatingBitPattern: x)
    }
    public static func ToUInt(_ x: UInt) -> UInt {
        return x
    }
}
Here's some test code:
public func doTest() {
    // To Int8
    assert(JDI.ToInt8(42 as Int8) == 42)
    assert(JDI.ToInt8(-13 as Int8) == -13)
    assert(JDI.ToInt8(42 as Int32) == 42)
    assert(JDI.ToInt8(257 as Int32) == 1)
    assert(JDI.ToInt8(42 as Int64) == 42)
    assert(JDI.ToInt8(257 as Int64) == 1)
    assert(JDI.ToInt8(42 as Int) == 42)
    assert(JDI.ToInt8(257 as Int) == 1)
    assert(JDI.ToInt8(42 as UInt8) == 42)
    assert(JDI.ToInt8(0xf3 as UInt8) == -13)
    assert(JDI.ToInt8(42 as UInt32) == 42)
    assert(JDI.ToInt8(0xfffffff3 as UInt32) == -13)
    assert(JDI.ToInt8(42 as UInt64) == 42)
    assert(JDI.ToInt8(UInt64.max - 12) == -13)
    assert(JDI.ToInt8(42 as UInt) == 42)
    assert(JDI.ToInt8(UInt.max - 12) == -13)

    // To Int32
    assert(JDI.ToInt32(42 as Int8) == 42)
    assert(JDI.ToInt32(-13 as Int8) == -13)
    assert(JDI.ToInt32(42 as Int32) == 42)
    assert(JDI.ToInt32(-13 as Int32) == -13)
    assert(JDI.ToInt32(42 as Int64) == 42)
    assert(JDI.ToInt32(Int64(Int32.min) - 1) == Int32.max)
    assert(JDI.ToInt32(42 as Int) == 42)
    assert(JDI.ToInt32(-13 as Int) == -13)
    assert(JDI.ToInt32(42 as UInt8) == 42)
    assert(JDI.ToInt32(0xf3 as UInt8) == 243)
    assert(JDI.ToInt32(42 as UInt32) == 42)
    assert(JDI.ToInt32(0xfffffff3 as UInt32) == -13)
    assert(JDI.ToInt32(42 as UInt64) == 42)
    assert(JDI.ToInt32(UInt64.max - 12) == -13)
    assert(JDI.ToInt32(42 as UInt) == 42)
    assert(JDI.ToInt32(UInt.max - 12) == -13)

    // To Int64
    assert(JDI.ToInt64(42 as Int8) == 42)
    assert(JDI.ToInt64(-13 as Int8) == -13)
    assert(JDI.ToInt64(42 as Int32) == 42)
    assert(JDI.ToInt64(-13 as Int32) == -13)
    assert(JDI.ToInt64(42 as Int64) == 42)
    assert(JDI.ToInt64(-13 as Int64) == -13)
    assert(JDI.ToInt64(42 as Int) == 42)
    assert(JDI.ToInt64(-13 as Int) == -13)
    assert(JDI.ToInt64(42 as UInt8) == 42)
    assert(JDI.ToInt64(0xf3 as UInt8) == 243)
    assert(JDI.ToInt64(42 as UInt32) == 42)
    assert(JDI.ToInt64(0xfffffff3 as UInt32) == 4294967283)
    assert(JDI.ToInt64(42 as UInt64) == 42)
    assert(JDI.ToInt64(UInt64.max - 12) == -13)
    assert(JDI.ToInt64(42 as UInt) == 42)
    #if (arch(i386) || arch(arm))
    assert(JDI.ToInt64(UInt.max - 12) == 4294967283) // For 32-bit systems
    #else
    assert(JDI.ToInt64(UInt.max - 12) == -13) // For 64-bit systems
    #endif

    // To Int
    assert(JDI.ToInt(42 as Int8) == 42)
    assert(JDI.ToInt(-13 as Int8) == -13)
    assert(JDI.ToInt(42 as Int32) == 42)
    assert(JDI.ToInt(-13 as Int32) == -13)
    assert(JDI.ToInt(42 as Int64) == 42)
    assert(JDI.ToInt(-13 as Int64) == -13)
    assert(JDI.ToInt(42 as Int) == 42)
    assert(JDI.ToInt(-13 as Int) == -13)
    assert(JDI.ToInt(42 as UInt8) == 42)
    assert(JDI.ToInt(0xf3 as UInt8) == 243)
    assert(JDI.ToInt(42 as UInt32) == 42)
    #if (arch(i386) || arch(arm))
    assert(JDI.ToInt(0xfffffff3 as UInt32) == -13) // For 32-bit systems
    #else
    assert(JDI.ToInt(0xfffffff3 as UInt32) == 4294967283) // For 64-bit systems
    #endif
    assert(JDI.ToInt(42 as UInt64) == 42)
    assert(JDI.ToInt(UInt64.max - 12) == -13)
    assert(JDI.ToInt(42 as UInt) == 42)
    assert(JDI.ToInt(UInt.max - 12) == -13)

    // To UInt8
    assert(JDI.ToUInt8(42 as Int8) == 42)
    assert(JDI.ToUInt8(-13 as Int8) == 0xf3)
    assert(JDI.ToUInt8(42 as Int32) == 42)
    assert(JDI.ToUInt8(-13 as Int32) == 0xf3)
    assert(JDI.ToUInt8(42 as Int64) == 42)
    assert(JDI.ToUInt8(-13 as Int64) == 0xf3)
    assert(JDI.ToUInt8(Int64.max - 12) == 0xf3)
    assert(JDI.ToUInt8(42 as Int) == 42)
    assert(JDI.ToUInt8(-13 as Int) == 0xf3)
    assert(JDI.ToUInt8(Int.max - 12) == 0xf3)
    assert(JDI.ToUInt8(42 as UInt8) == 42)
    assert(JDI.ToUInt8(0xf3 as UInt8) == 0xf3)
    assert(JDI.ToUInt8(42 as UInt32) == 42)
    assert(JDI.ToUInt8(0xfffffff3 as UInt32) == 0xf3)
    assert(JDI.ToUInt8(42 as UInt64) == 42)
    assert(JDI.ToUInt8(UInt64.max - 12) == 0xf3)
    assert(JDI.ToUInt8(42 as UInt) == 42)
    assert(JDI.ToUInt8(UInt.max - 12) == 0xf3)

    // To UInt32
    assert(JDI.ToUInt32(42 as Int8) == 42)
    assert(JDI.ToUInt32(-13 as Int8) == 0xfffffff3)
    assert(JDI.ToUInt32(42 as Int32) == 42)
    assert(JDI.ToUInt32(-13 as Int32) == 0xfffffff3)
    assert(JDI.ToUInt32(42 as Int64) == 42)
    assert(JDI.ToUInt32(-13 as Int64) == 0xfffffff3)
    assert(JDI.ToUInt32(Int64.max - 12) == 0xfffffff3)
    assert(JDI.ToUInt32(42 as Int) == 42)
    assert(JDI.ToUInt32(-13 as Int) == 0xfffffff3)
    #if (arch(i386) || arch(arm))
    assert(JDI.ToUInt32(Int.max - 12) == 0x7ffffff3) // For 32-bit systems
    #else
    assert(JDI.ToUInt32(Int.max - 12) == 0xfffffff3) // For 64-bit systems
    #endif
    assert(JDI.ToUInt32(42 as UInt8) == 42)
    assert(JDI.ToUInt32(0xf3 as UInt8) == 0xf3)
    assert(JDI.ToUInt32(42 as UInt32) == 42)
    assert(JDI.ToUInt32(0xfffffff3 as UInt32) == 0xfffffff3)
    assert(JDI.ToUInt32(42 as UInt64) == 42)
    assert(JDI.ToUInt32(UInt64.max - 12) == 0xfffffff3)
    assert(JDI.ToUInt32(42 as UInt) == 42)
    assert(JDI.ToUInt32(UInt.max - 12) == 0xfffffff3)

    // To UInt64
    assert(JDI.ToUInt64(42 as Int8) == 42)
    assert(JDI.ToUInt64(-13 as Int8) == 0xfffffffffffffff3)
    assert(JDI.ToUInt64(42 as Int32) == 42)
    assert(JDI.ToUInt64(-13 as Int32) == 0xfffffffffffffff3)
    assert(JDI.ToUInt64(42 as Int64) == 42)
    assert(JDI.ToUInt64(-13 as Int64) == 0xfffffffffffffff3)
    assert(JDI.ToUInt64(Int64.max - 12) == (UInt64.max >> 1) - 12)
    assert(JDI.ToUInt64(42 as Int) == 42)
    assert(JDI.ToUInt64(-13 as Int) == 0xfffffffffffffff3)
    #if (arch(i386) || arch(arm))
    assert(JDI.ToUInt64(Int.max - 12) == 0x7ffffff3) // For 32-bit systems
    #else
    assert(JDI.ToUInt64(Int.max - 12) == 0x7ffffffffffffff3) // For 64-bit systems
    #endif
    assert(JDI.ToUInt64(42 as UInt8) == 42)
    assert(JDI.ToUInt64(0xf3 as UInt8) == 0xf3)
    assert(JDI.ToUInt64(42 as UInt32) == 42)
    assert(JDI.ToUInt64(0xfffffff3 as UInt32) == 0xfffffff3)
    assert(JDI.ToUInt64(42 as UInt64) == 42)
    assert(JDI.ToUInt64(UInt64.max - 12) == 0xfffffffffffffff3)
    assert(JDI.ToUInt64(42 as UInt) == 42)
    #if (arch(i386) || arch(arm))
    assert(JDI.ToUInt64(UInt.max - 12) == 0xfffffff3) // For 32-bit systems
    #else
    assert(JDI.ToUInt64(UInt.max - 12) == 0xfffffffffffffff3) // For 64-bit systems
    #endif

    // To UInt
    assert(JDI.ToUInt(42 as Int8) == 42)
    #if (arch(i386) || arch(arm))
    assert(JDI.ToUInt(-13 as Int8) == 0xfffffff3) // For 32-bit systems
    #else
    assert(JDI.ToUInt(-13 as Int8) == 0xfffffffffffffff3) // For 64-bit systems
    #endif
    assert(JDI.ToUInt(42 as Int32) == 42)
    #if (arch(i386) || arch(arm))
    assert(JDI.ToUInt(-13 as Int32) == 0xfffffff3) // For 32-bit systems
    #else
    assert(JDI.ToUInt(-13 as Int32) == 0xfffffffffffffff3) // For 64-bit systems
    #endif
    assert(JDI.ToUInt(42 as Int64) == 42)
    #if (arch(i386) || arch(arm))
    assert(JDI.ToUInt(-13 as Int64) == 0xfffffff3) // For 32-bit systems
    assert(JDI.ToUInt(Int64.max - 12) == 0xfffffff3)
    #else
    assert(JDI.ToUInt(-13 as Int64) == 0xfffffffffffffff3) // For 64-bit systems
    assert(JDI.ToUInt(Int64.max - 12) == 0x7ffffffffffffff3)
    #endif
    assert(JDI.ToUInt(42 as Int) == 42)
    #if (arch(i386) || arch(arm))
    assert(JDI.ToUInt(Int.max - 12) == 0x7ffffff3) // For 32-bit systems
    #else
    assert(JDI.ToUInt(Int.max - 12) == 0x7ffffffffffffff3) // For 64-bit systems
    #endif
    assert(JDI.ToUInt(42 as UInt8) == 42)
    assert(JDI.ToUInt(0xf3 as UInt8) == 0xf3)
    assert(JDI.ToUInt(42 as UInt32) == 42)
    assert(JDI.ToUInt(0xfffffff3 as UInt32) == 0xfffffff3)
    assert(JDI.ToUInt(42 as UInt64) == 42)
    #if (arch(i386) || arch(arm))
    assert(JDI.ToUInt(UInt64.max - 12) == 0xfffffff3) // For 32-bit systems
    #else
    assert(JDI.ToUInt(UInt64.max - 12) == 0xfffffffffffffff3) // For 64-bit systems
    #endif
    assert(JDI.ToUInt(42 as UInt) == 42)
    #if (arch(i386) || arch(arm))
    assert(JDI.ToUInt(UInt.max - 12) == 0xfffffff3) // For 32-bit systems
    #else
    assert(JDI.ToUInt(UInt.max - 12) == 0xfffffffffffffff3) // For 64-bit systems
    #endif

    print("\nTesting JDI complete.\n")
}
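A side note for readers on Swift 4 or later: init(truncatingBitPattern:) no longer exists. The Swift 4 integer overhaul (SE-0104) renamed it to init(truncatingIfNeeded:), kept init(bitPattern:) for same-width conversions, and added init(clamping:) plus the failable init(exactly:). Rough equivalents, as a sketch rather than part of the cheat sheet above:

let i = 300
Int8(truncatingIfNeeded: i)      // 44: keeps the low 8 bits (Swift 4+ spelling of truncatingBitPattern:)
UInt8(clamping: -5)              // 0: clamps out-of-range values instead of trapping
Int8(exactly: 300)               // nil: fails instead of trapping
UInt32(bitPattern: -13 as Int32) // 0xfffffff3: same-width reinterpretation, unchanged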

Add this anywhere outside of a class:

extension UInt {
    /// SwiftExtensionKit
    var toInt: Int { return Int(self) }
}

Then just call it on the UInt index:

self.photos[index.toInt]

Related

How to extend arithmetic operators to support optional values as well - Swift

In the below example,
let value1: Int? = 23
let value2: Int = 20
let answer = value1 + value2 // Compiler warning that + operator cannot be applied to Int? and Int
So I would have to change the code to
if let value1 = value1 {
    let answer = value1 + value2
}
How can I create an extension for + that supports optional values as well? In that case it should give nil as output.
And what if the operands have different types?
let value1: Int? = 2
let answer = value1 + 3.0
You just have to find the right protocol type to constrain the generic types, really. After that the implementation is trivial:
// plus and minus are supported by AdditiveArithmetic
func + <T: AdditiveArithmetic>(lhs: T?, rhs: T?) -> T? {
    return lhs.flatMap { x in rhs.map { y in x + y } }
    /* the above is just a more "functional" way of writing
    if let x = lhs, let y = rhs {
        return x + y
    } else {
        return nil
    }
    */
}
func - <T: AdditiveArithmetic>(lhs: T?, rhs: T?) -> T? {
    return lhs.flatMap { x in rhs.map { y in x - y } }
}
// times is supported by Numeric
func * <T: Numeric>(lhs: T?, rhs: T?) -> T? {
    return lhs.flatMap { x in rhs.map { y in x * y } }
}
// divide is not supported by a single protocol AFAIK
func / <T: BinaryInteger>(lhs: T?, rhs: T?) -> T? {
    return lhs.flatMap { x in rhs.map { y in x / y } }
}
func / <T: FloatingPoint>(lhs: T?, rhs: T?) -> T? {
    return lhs.flatMap { x in rhs.map { y in x / y } }
}
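With those operators defined, the example from the question compiles; a quick check using the question's values:

let value1: Int? = 23
let value2: Int = 20
value1 + value2        // Optional(43)
(nil as Int?) + value2 // nil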
To make value1 + 3.0 work, you'd have to do something like this:
func + <T: BinaryInteger, U: FloatingPoint>(lhs: T?, rhs: U?) -> U? {
    return lhs.flatMap { x in rhs.map { y in U(x) + y } }
}
But it's usually not a good idea to go against the restrictions put in place. I don't recommend this.
Only pasting the solution for addition, but the other operators work analogously (mind subtraction and division, though, since they are not commutative).
Three reasonable solutions
Global function(s)
Extend existential* (conforming to AdditiveArithmetic) to some new protocol e.g. AdditiveArithmeticOptional
Extend Optional
* Note: strictly speaking, an "existential" is a protocol used as a type; a concrete type such as a struct is not an existential. What is extended below are the concrete integer types, via a new protocol.
1 Global function(s)
See Sweeper's answer above.
Note: a global function is a function not implemented on a type (protocol or existential). Swift's zip function is an example.
2 Extend existentials to new protocol
public protocol AdditiveArithmeticOptional: AdditiveArithmetic {
    static func + (lhs: Self, rhs: Self?) -> Self
}

public extension AdditiveArithmeticOptional {
    static func + (lhs: Self, rhs: Self?) -> Self {
        guard let value = rhs else { return lhs }
        return value + lhs
    }
    static func + (lhs: Self?, rhs: Self) -> Self {
        rhs + lhs
    }
}
extension Int8: AdditiveArithmeticOptional {}
extension Int16: AdditiveArithmeticOptional {}
extension Int32: AdditiveArithmeticOptional {}
extension Int64: AdditiveArithmeticOptional {}
extension Int: AdditiveArithmeticOptional {} // same as `Int64` on 64 bit system, same as `Int32` on 32 bit system
extension UInt8: AdditiveArithmeticOptional {}
extension UInt16: AdditiveArithmeticOptional {}
extension UInt32: AdditiveArithmeticOptional {}
extension UInt64: AdditiveArithmeticOptional {}
extension UInt: AdditiveArithmeticOptional {} // same as `UInt64` on 64 bit system, same as `UInt32` on 32 bit system
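Note the semantics of this variant: a nil operand is treated as "nothing to add" rather than propagating nil, for example:

let stored: Int? = nil
5 + stored      // 5: the nil operand is ignored
5 + Optional(3) // 8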
3 Extend Optional
extension Optional where Wrapped: AdditiveArithmetic {
    static func + <I>(optional: Self, increment: I) -> I where I: AdditiveArithmetic & ExpressibleByIntegerLiteral, I.IntegerLiteralType == Wrapped {
        guard let value = optional else { return increment }
        let base = I.init(integerLiteral: value)
        return base + increment
    }
    static func + <I>(increment: I, optional: Self) -> I where I: AdditiveArithmetic & ExpressibleByIntegerLiteral, I.IntegerLiteralType == Wrapped {
        optional + increment
    }
}
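Usage is the same from the Optional side, and again nil acts as a no-op instead of producing nil:

let maybe: Int? = 5
maybe + 3         // 8
(nil as Int?) + 3 // 3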
You could define your own operator functions if you have multiple scenarios that require arithmetic operations between optionals, as follows:
func + (lhs: Int?, rhs: Int?) -> Int {
    (lhs ?? 0) + (rhs ?? 0)
}
func + (lhs: Int?, rhs: Int) -> Int {
    (lhs ?? 0) + rhs
}
func + (lhs: Int, rhs: Int?) -> Int {
    lhs + (rhs ?? 0)
}
Note: add the return keyword if you are using a Swift version earlier than 5.1 (implicit returns for single-expression functions arrived in Swift 5.1).
Update: upon further investigation, and with inspiration from Sweeper's answer, the following solution seems more elegant:
func + <T: AdditiveArithmetic>(lhs: T?, rhs: T?) -> T {
    (lhs ?? .zero) + (rhs ?? .zero)
}
or, if you need nil when either operand is missing:
func + <T: AdditiveArithmetic>(lhs: T?, rhs: T?) -> T? {
    lhs.flatMap { lhs in rhs.flatMap { lhs + $0 }}
}
It doesn't make much sense to add optionals: a nil value usually means something went wrong along the way and a value could not be retrieved. It's better to be explicit and write the if-let statements; that way you can handle the cases where you end up with nil values.
But if you want to ignore the nils, you can use the nil-coalescing operator:
let value1: Int? = 23
let value2: Int = 20
let answer = (value1 ?? 0) + value2
You are still explicit about the unwrap, but you also have a fallback route in case you want to ignore the nil value. And, maybe even more important, the code communicates its intent clearly. If someone later stumbles upon your code, it will be clear what it does and how it recovers from unexpected situations (nil is an unexpected value with regard to addition).

Java ByteBuffer equivalent in Swift 4

I'm trying to convert the Java (Android) code below to Swift.
I'm stuck at the part with the ByteBuffer.
private MqttMessage createMqttMessage(boolean encrypted, byte[] message) {
    /* add a zero-byte at the end just to be sure it's terminated although the payload is zero-padded */
    int padding = 16 - ((message.length) % 16);
    /* a zero-byte at the end of the JSON is required for determining the string's end */
    padding = padding == 0 ? 16 : padding;
    byte[] payload = new byte[message.length + padding];
    System.arraycopy(message, 0, payload, 0, message.length);
    /* FRAME_HEADER + JSON-payload + zero-byte + FRAME_TAIL_SIZE */
    int bufferSize = Constants.FRAME_HEADER_SIZE + payload.length + 1 + Constants.FRAME_TAIL_SIZE;
    ByteBuffer frame = ByteBuffer.allocate(bufferSize);
    /* The CC3220 is an ARM device, thus little-endian */
    frame.order(ByteOrder.LITTLE_ENDIAN);
    // message flags (uint8_t)
    byte flags = (byte) (encrypted ? Constants.FRAME_FLAG_ENCRYPTED : 0x0);
    // always use authentication
    flags |= Constants.FRAME_FLAG_AUTHENTICATED;
    frame.put(flags);
    // User token (uint32_t)
    frame.putInt((int) this.mUserToken);
What's the equivalent of a ByteBuffer in Swift?
This is what I have so far:
private func createMqttMessage(encrypted: Bool, message: [Int8]) {
    var padding: Int = 16 - ((message.count) % 16)
    padding = padding == 0 ? 16 : padding
    var payload: [Int8]
    payload = Array(message[0..<0 + message.count])
    let bufferSize = Constants.FRAME_HEADER_SIZE + payload.count + 1 + Constants.FRAME_TAIL_SIZE
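(One thing the Swift draft above drops is the zero padding the Java version appends; a minimal sketch of that step, keeping the draft's [Int8] message type:)

// copy the message and append `padding` zero bytes, mirroring System.arraycopy plus the zeroed tail in Java
var payload = message + [Int8](repeating: 0, count: padding)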
You can use this for ByteBuffer; it behaves like the Java one. Some functions might be missing, but they should be easy to add.
import Foundation

public class ByteBuffer {
    public init(size: Int) {
        array.reserveCapacity(size)
    }

    public func allocate(_ size: Int) {
        array = [UInt8]()
        array.reserveCapacity(size)
        currentIndex = 0
    }

    public func nativeByteOrder() -> Endianness {
        return hostEndianness
    }

    public func currentByteOrder() -> Endianness {
        return currentEndianness
    }

    @discardableResult
    public func order(_ endianness: Endianness) -> ByteBuffer {
        currentEndianness = endianness
        return self
    }

    @discardableResult
    public func put(_ value: UInt8) -> ByteBuffer {
        array.append(value)
        return self
    }

    @discardableResult
    public func put(_ value: Int32) -> ByteBuffer {
        if currentEndianness == .little {
            array.append(contentsOf: to(value.littleEndian))
            return self
        }
        array.append(contentsOf: to(value.bigEndian))
        return self
    }

    @discardableResult
    public func put(_ value: Int64) -> ByteBuffer {
        if currentEndianness == .little {
            array.append(contentsOf: to(value.littleEndian))
            return self
        }
        array.append(contentsOf: to(value.bigEndian))
        return self
    }

    @discardableResult
    public func put(_ value: Int) -> ByteBuffer {
        if currentEndianness == .little {
            array.append(contentsOf: to(value.littleEndian))
            return self
        }
        array.append(contentsOf: to(value.bigEndian))
        return self
    }

    @discardableResult
    public func put(_ value: Float) -> ByteBuffer {
        if currentEndianness == .little {
            array.append(contentsOf: to(value.bitPattern.littleEndian))
            return self
        }
        array.append(contentsOf: to(value.bitPattern.bigEndian))
        return self
    }

    @discardableResult
    public func put(_ value: Double) -> ByteBuffer {
        if currentEndianness == .little {
            array.append(contentsOf: to(value.bitPattern.littleEndian))
            return self
        }
        array.append(contentsOf: to(value.bitPattern.bigEndian))
        return self
    }

    public func get() -> UInt8 {
        let result = array[currentIndex]
        currentIndex += 1
        return result
    }

    public func get(_ index: Int) -> UInt8 {
        return array[index]
    }

    public func getInt32() -> Int32 {
        let result = from(Array(array[currentIndex..<currentIndex + MemoryLayout<Int32>.size]), Int32.self)
        currentIndex += MemoryLayout<Int32>.size
        return currentEndianness == .little ? result.littleEndian : result.bigEndian
    }

    public func getInt32(_ index: Int) -> Int32 {
        let result = from(Array(array[index..<index + MemoryLayout<Int32>.size]), Int32.self)
        return currentEndianness == .little ? result.littleEndian : result.bigEndian
    }

    public func getInt64() -> Int64 {
        let result = from(Array(array[currentIndex..<currentIndex + MemoryLayout<Int64>.size]), Int64.self)
        currentIndex += MemoryLayout<Int64>.size
        return currentEndianness == .little ? result.littleEndian : result.bigEndian
    }

    public func getInt64(_ index: Int) -> Int64 {
        let result = from(Array(array[index..<index + MemoryLayout<Int64>.size]), Int64.self)
        return currentEndianness == .little ? result.littleEndian : result.bigEndian
    }

    public func getInt() -> Int {
        let result = from(Array(array[currentIndex..<currentIndex + MemoryLayout<Int>.size]), Int.self)
        currentIndex += MemoryLayout<Int>.size
        return currentEndianness == .little ? result.littleEndian : result.bigEndian
    }

    public func getInt(_ index: Int) -> Int {
        let result = from(Array(array[index..<index + MemoryLayout<Int>.size]), Int.self)
        return currentEndianness == .little ? result.littleEndian : result.bigEndian
    }

    public func getFloat() -> Float {
        let result = from(Array(array[currentIndex..<currentIndex + MemoryLayout<UInt32>.size]), UInt32.self)
        currentIndex += MemoryLayout<UInt32>.size
        return currentEndianness == .little ? Float(bitPattern: result.littleEndian) : Float(bitPattern: result.bigEndian)
    }

    public func getFloat(_ index: Int) -> Float {
        let result = from(Array(array[index..<index + MemoryLayout<UInt32>.size]), UInt32.self)
        return currentEndianness == .little ? Float(bitPattern: result.littleEndian) : Float(bitPattern: result.bigEndian)
    }

    public func getDouble() -> Double {
        let result = from(Array(array[currentIndex..<currentIndex + MemoryLayout<UInt64>.size]), UInt64.self)
        currentIndex += MemoryLayout<UInt64>.size
        return currentEndianness == .little ? Double(bitPattern: result.littleEndian) : Double(bitPattern: result.bigEndian)
    }

    public func getDouble(_ index: Int) -> Double {
        let result = from(Array(array[index..<index + MemoryLayout<UInt64>.size]), UInt64.self)
        return currentEndianness == .little ? Double(bitPattern: result.littleEndian) : Double(bitPattern: result.bigEndian)
    }

    public enum Endianness {
        case little
        case big
    }

    private func to<T>(_ value: T) -> [UInt8] {
        var value = value
        return withUnsafeBytes(of: &value, Array.init)
    }

    private func from<T>(_ value: [UInt8], _: T.Type) -> T {
        return value.withUnsafeBytes {
            $0.load(fromByteOffset: 0, as: T.self)
        }
    }

    private var array = [UInt8]()
    private var currentIndex: Int = 0
    private var currentEndianness: Endianness = .big
    private let hostEndianness: Endianness = OSHostByteOrder() == OSLittleEndian ? .little : .big
}
let buffer = ByteBuffer(size: 100)
buffer.order(.big)
buffer.put(1.019001)
buffer.order(.little)
buffer.put(1005)
buffer.order(.big)
buffer.put(1005)
buffer.order(.big)
print(buffer.getDouble())
buffer.order(.little)
print(buffer.getInt())
buffer.order(.big)
print(buffer.getInt())
You can use the Data struct as a buffer:
var frame = Data(capacity: bufferSize)
but you don't actually need to specify the capacity, and you can wait to initialize frame until you actually have data to initialize it with. Depending on what type mUserToken will be, you can use an appropriate Data initializer:
init(bytes: UnsafeRawPointer, count: Int)
init(bytes: Array<UInt8>)
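For example, the flags-and-token part of the frame could look roughly like this with Data (a sketch only; the Constants values and mUserToken are carried over from the question, and mUserToken is assumed to fit in a UInt32):

var frame = Data()
// message flags (uint8_t)
var flags: UInt8 = encrypted ? Constants.FRAME_FLAG_ENCRYPTED : 0x0
flags |= Constants.FRAME_FLAG_AUTHENTICATED
frame.append(flags)
// user token (uint32_t), little-endian for the ARM device
var token = UInt32(mUserToken).littleEndian
withUnsafeBytes(of: &token) { frame.append(contentsOf: $0) }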

Cannot convert value of type 'Int' to expected argument type 'UnsafeMutablePointer<Int32>!'

Following this answer, I'm getting this error in Swift 4.1: Cannot convert value of type 'Int' to expected argument type 'UnsafeMutablePointer<Int32>!'
var notify_token: Int
notify_register_dispatch("com.apple.springboard.lockstate", notify_token, DispatchQueue.main, { (_ token: Int) -> Void in
    var state: UInt64 = UINT64_MAX
    notify_get_state(token, state)
    if state == 0 {
        print("unlock device")
    } else {
        print("lock device")
    }
})
How to resolve this?
Try something like this. notify_register_dispatch fills in the token through an UnsafeMutablePointer<Int32>, so declare the token as an Int32 variable and pass it with &; notify_get_state likewise takes the state by pointer:
var notify_token: Int32 = 0
notify_register_dispatch("com.apple.springboard.lockstate", &notify_token, DispatchQueue.main, { (token: Int32) -> Void in
    var state: UInt64 = UINT64_MAX
    notify_get_state(token, &state)
    if state == 0 {
        print("unlock device")
    } else {
        print("lock device")
    }
})

UIContentSizeCategory does not conform to Comparable?

It says in the type signature in UIKit that UIContentSizeCategory conforms to the Comparable protocol.
The type signature is:
public struct UIContentSizeCategory : RawRepresentable, Equatable, Hashable, Comparable {
    public init(rawValue: String)
}
So how come I get this nasty stack trace when I try to compare them?
po UIContentSizeCategory.small < UIContentSizeCategory.medium
error: warning: <EXPR>:12:9: warning: initialization of variable '$__lldb_error_result' was never used; consider replacing with assignment to '_' or removing it
var $__lldb_error_result = __lldb_tmp_error
~~~~^~~~~~~~~~~~~~~~~~~~
_
error: type 'UIContentSizeCategory' does not conform to protocol 'Comparable'
Swift.Comparable:144:24: note: multiple matching functions named '<=' with type '(UIContentSizeCategory, UIContentSizeCategory) -> Bool'
public static func <=(lhs: Self, rhs: Self) -> Bool
^
Swift.<=:10:13: note: candidate exactly matches
public func <=<T>(lhs: T, rhs: T) -> Bool where T : Comparable
^
Swift.<=:1:13: note: candidate exactly matches
public func <=<T>(lhs: T, rhs: T) -> Bool where T : _SwiftNewtypeWrapper, T.RawValue : Comparable
^
Swift.Comparable:151:24: note: multiple matching functions named '>=' with type '(UIContentSizeCategory, UIContentSizeCategory) -> Bool'
public static func >=(lhs: Self, rhs: Self) -> Bool
^
Swift.>=:12:13: note: candidate exactly matches
public func >=<T>(lhs: T, rhs: T) -> Bool where T : Comparable
^
Swift.>=:1:13: note: candidate exactly matches
public func >=<T>(lhs: T, rhs: T) -> Bool where T : _SwiftNewtypeWrapper, T.RawValue : Comparable
^
Swift.Comparable:158:24: note: multiple matching functions named '>' with type '(UIContentSizeCategory, UIContentSizeCategory) -> Bool'
public static func >(lhs: Self, rhs: Self) -> Bool
^
Swift.>:10:13: note: candidate exactly matches
public func ><T>(lhs: T, rhs: T) -> Bool where T : Comparable
^
Swift.>:1:13: note: candidate exactly matches
public func ><T>(lhs: T, rhs: T) -> Bool where T : _SwiftNewtypeWrapper, T.RawValue : Comparable
^
When I try to write my own extension to make UIContentSizeCategory conform to Comparable I get an error that it already conforms.
The goal here is to be able to check if a size is below a certain threshold and clip some text if it is. How do I fix this?
So, although the docs and signature claim conformance, it does not conform.
First time around I tried this:
extension UIContentSizeCategory: Comparable {
    static func <(lhs: UIContentSizeCategory, rhs: UIContentSizeCategory) -> Bool {
        return true
    }
    static func >(lhs: UIContentSizeCategory, rhs: UIContentSizeCategory) -> Bool {
        return true
    }
}
It threw an error because of the redundant conformance to Comparable.
When I tried it without:
extension UIContentSizeCategory {
    static func <(lhs: UIContentSizeCategory, rhs: UIContentSizeCategory) -> Bool {
        return true
    }
    static func >(lhs: UIContentSizeCategory, rhs: UIContentSizeCategory) -> Bool {
        return true
    }
}
Magic! Works!
Full code (just in case you were wondering):
extension UIContentSizeCategory {
    static var orderedSizes: [UIContentSizeCategory] {
        return [.extraSmall,
                .small,
                .accessibilityMedium,
                .medium,
                .accessibilityLarge,
                .large,
                .accessibilityExtraLarge,
                .extraLarge,
                .accessibilityExtraExtraLarge,
                .extraExtraLarge,
                .extraExtraExtraLarge,
                .accessibilityExtraExtraExtraLarge]
    }
    static func < (lhs: UIContentSizeCategory, rhs: UIContentSizeCategory) -> Bool {
        var sizes = orderedSizes
        while sizes.contains(lhs) {
            sizes.removeFirst()
        }
        return sizes.contains(rhs)
    }
    static func > (lhs: UIContentSizeCategory, rhs: UIContentSizeCategory) -> Bool {
        var sizes = orderedSizes
        while sizes.contains(lhs) {
            sizes.removeLast()
        }
        return sizes.contains(rhs)
    }
    static func <= (lhs: UIContentSizeCategory, rhs: UIContentSizeCategory) -> Bool {
        guard lhs != rhs else { return true }
        return lhs < rhs
    }
    static func >= (lhs: UIContentSizeCategory, rhs: UIContentSizeCategory) -> Bool {
        guard lhs != rhs else { return true }
        return lhs > rhs
    }
}
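With the extension in place, the threshold check from the question reads naturally, for example:

let current = UIApplication.shared.preferredContentSizeCategory
if current >= UIContentSizeCategory.extraLarge {
    // clip the text
}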
