I want to calculate a checksum. The expected CRC value is 0xE1, but when I run this code it returns 0. I cannot find a solution for this. Please help me solve it.
Below is the code I tried.
My ViewController:
class ViewController: UIViewController {
    var checkSum: UInt8 = 0

    override func viewDidLoad() {
        super.viewDidLoad()
        let bytes = [0x01, 0x0B, 0x00, 0x0B, 0x03, 0x07, 0x12, 0x0E, 0x0C, 0x10, 0x09, 0x12, 0x0C, 0x19, 0x09, 0xFF, 0x14]
        for item in bytes {
            print(calculateCheckSum(crc: checkSum, byteValue: UInt8(item)))
        }
    }

    func calculateCheckSum(crc: UInt8, byteValue: UInt8) -> UInt8 {
        let generator = 0x1D
        checkSum = crc ^ byteValue
        for _ in 1...8 {
            if (crc & 0x80 != 0) {
                checkSum = (crc << 1) ^ UInt8(Int8(generator))
            } else {
                checkSum <<= UInt8(1)
            }
        }
        return crc
    }
}
Here is the function rewritten to avoid incorrectly mutating the checkSum instance variable:
func calculateCheckSum(crc: UInt8, byteValue: UInt8) -> UInt8 {
    let generator: UInt8 = 0x1D
    // A new variable has to be declared inside this function.
    var newCrc = crc ^ byteValue
    for _ in 1...8 {
        if newCrc & 0x80 != 0 {
            newCrc = (newCrc << 1) ^ generator
        } else {
            newCrc <<= 1
        }
    }
    return newCrc
}
Also, it seems you are not using the result of the method correctly; feed each return value back in as the crc of the next call:
var checkSum: UInt8 = 0
let bytes = [...]
for item in bytes {
    checkSum = calculateCheckSum(crc: checkSum, byteValue: UInt8(item))
}
print(checkSum)
With both fixes applied, the byte sequence from your question yields the expected checksum 0xE1.
I just made an extension from this GitHub link, and it works for getting a CRC32 checksum, in case anyone needs it.
extension Data {
    public func checksum() -> UInt32 {
        let table: [UInt32] = {
            (0...255).map { i -> UInt32 in
                (0..<8).reduce(UInt32(i), { c, _ in
                    (c % 2 == 0) ? (c >> 1) : (0xEDB88320 ^ (c >> 1))
                })
            }
        }()
        // Data is a Collection of UInt8, so we can reduce over self directly.
        return ~(self.reduce(~UInt32(0), { crc, byte in
            (crc >> 8) ^ table[(Int(crc) ^ Int(byte)) & 0xFF]
        }))
    }
}
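As a quick sanity check, the standard CRC-32 check value for the ASCII string "123456789" is 0xCBF43926, and this extension reproduces it:

let data = "123456789".data(using: .utf8)!
print(String(format: "%08X", data.checksum())) // CBF43926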
Related
How can I convert my string array to a [UInt8] array? Please give me a solution for this conversion.
I want an array of the following form:
let int8Array: [UInt8] = [0x55, 0x55, 0xff, 0x01, 0x0B, 0x00, 0x0B, 0x03, 0x07, 0x12, 0x0E, 0x0C, 0x10, 0x09, 0x12, 0x0C, 0x19, 0x09, 0xFF, 0x14]
Below is my ViewController:
class ViewController: UIViewController {
    var checkSum: UInt8 = 0

    override func viewDidLoad() {
        super.viewDidLoad()
        let stringArray: [String] = ["0x55", "0x55", "0xff", "0x01", "0x0B", "0x38", "0x18", "0x31", "0x10", "0x18", "0x0E", "0x16", "0x31", "0x10", "0x18", "0x16", "0x30", "0x11", "0x18", "0x20", "0xE1"]
        var int8Array: [UInt8] = stringArray.map { UInt8($0.dropFirst(2), radix: 16)! }
        int8Array.removeFirst()
        int8Array.removeFirst()
        int8Array.removeFirst()
        print(int8Array)
        for item in int8Array {
            checkSum = calculateCheckSum(crc: checkSum, byteValue: UInt8(item))
        }
        print(checkSum)
    }

    func calculateCheckSum(crc: UInt8, byteValue: UInt8) -> UInt8 {
        let generator: UInt8 = 0x1D
        var newCrc = crc ^ byteValue
        for _ in 1...8 {
            if (newCrc & 0x80 != 0) {
                newCrc = (newCrc << 1) ^ generator
            } else {
                newCrc <<= 1
            }
        }
        return newCrc
    }
}
If it is an option, you could switch it around: specify the UInt8 array and derive the String array from that.
let int8Array: [UInt8] = [0x55, 0x55, 0xa5, 0x3f]
var stringArray: [String] {
    return int8Array.map { String(format: "0x%02X", $0) }
}
print(stringArray)
// ["0x55", "0x55", "0xA5", "0x3F"]
Just map the strings; you have to remove the 0x prefix to make the UInt8(_:radix:) initializer work:
let uint8Array = stringArray.map{ UInt8($0.dropFirst(2), radix: 16)! }
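If a malformed string should not crash, a compactMap variant simply drops anything that fails to parse instead of force-unwrapping (a small sketch, using the same stringArray as above):

let uint8Array = stringArray.compactMap { UInt8($0.dropFirst(2), radix: 16) }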
First call map on your string array, converting each string to [UInt8] (which gives [[UInt8]] overall), then call flatMap on the result to flatten it into a single [UInt8]. Then you can call forEach on it to calculate your checksum or whatever you need:
[String].init().map({
    [UInt8]($0.utf8)
}).flatMap({ $0 }).forEach({
    print($0) // Print each byte or convert to hex or w/e..
})
I am using Swift and want to have a number of repeatable random sequences throughout my game.
Ideally I would have some sort of shared class that worked sort of like this (this is sort of pseudo-Swift code):
class RandomNumberUtility {
    static var sharedInstance = RandomNumberUtility()
    var random1 : Random()
    var random2 : Random()
    func seedRandom1(seed : Int) {
        random1 = Random(seed)
    }
    func seedRandom2(seed : Int) {
        random2 = Random(seed)
    }
    func getRandom1() -> Int {
        return random1.next(1,10)
    }
    func getRandom2() -> Int {
        return random2.next(1,100)
    }
}
Then, to begin the series, anywhere in my program I could go like this:
RandomNumberUtility.sharedInstance.seedRandom1(7)
RandomNumberUtility.sharedInstance.seedRandom2(12)
And then I would know that (for example) the first 4 times I called
RandomNumberUtility.sharedInstance.getRandom1()
I would always get the same values (for example: 6, 1, 2, 6)
This would continue until at some point I seeded the number again, and then I would either get the exact same series back (if I used the same seed), or a different series (if I used a different seed).
And I want to have multiple series of numbers (random1 & random2) at the same time.
I am not sure how to begin to turn this into an actual Swift class.
Here is a possible implementation. It uses the jrand48 pseudo random number generator,
which produces 32-bit numbers.
This PRNG is not as good as arc4random(), but has the advantage
that all its state is stored in a user-supplied array, so that multiple
instances can run independently.
import Foundation

struct RandomNumberGenerator {
    // 48-bit internal state for jrand48()
    private var state: [UInt16] = [0, 0, 0]

    // Return a pseudo-random number in the range 0 ... upperBound-1:
    mutating func next(_ upperBound: UInt32) -> UInt32 {
        // Implementation avoiding the "modulo bias" problem,
        // taken from: http://stackoverflow.com/a/10989061/1187415,
        // Swift translation here: http://stackoverflow.com/a/26550169/1187415
        let range = UInt32.max - UInt32.max % upperBound
        var rnd: UInt32
        repeat {
            rnd = UInt32(truncatingIfNeeded: jrand48(&state))
        } while rnd >= range
        return rnd % upperBound
    }

    mutating func seed(_ newSeed: Int) {
        state[0] = UInt16(truncatingIfNeeded: newSeed)
        state[1] = UInt16(truncatingIfNeeded: newSeed >> 16)
        state[2] = UInt16(truncatingIfNeeded: newSeed >> 32)
    }
}
Example:
var rnd1 = RandomNumberGenerator()
rnd1.seed(7)
var rnd2 = RandomNumberGenerator()
rnd2.seed(12)

print(rnd1.next(10)) // 2
print(rnd1.next(10)) // 8
print(rnd1.next(10)) // 1

print(rnd2.next(10)) // 6
print(rnd2.next(10)) // 0
print(rnd2.next(10)) // 5
If rnd1 is seeded with the same value as above then it
produces the same numbers again:
rnd1.seed(7)
print(rnd1.next(10)) // 2
print(rnd1.next(10)) // 8
print(rnd1.next(10)) // 1
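To get the interface sketched in the question, one could wrap two of these generators in a singleton. A minimal sketch along those lines (the 1...10 and 1...100 ranges come from the question's pseudo-code; next(n) returns 0 ... n-1, so the results are shifted by 1):

class RandomNumberUtility {
    static let sharedInstance = RandomNumberUtility()
    private var random1 = RandomNumberGenerator()
    private var random2 = RandomNumberGenerator()

    func seedRandom1(_ seed: Int) { random1.seed(seed) }
    func seedRandom2(_ seed: Int) { random2.seed(seed) }

    func getRandom1() -> Int { return Int(random1.next(10)) + 1 }   // 1 ... 10
    func getRandom2() -> Int { return Int(random2.next(100)) + 1 }  // 1 ... 100
}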
What you need is a singleton that generates pseudo-random numbers; make sure all the code that needs a random number calls it via this class. The trick is to reset the seed for each run of your code. Here is a simple RandomNumberGenerator class that will do the trick for you (it's optimized for speed, which is a good thing when writing games):
import Foundation

// This random number generator comes from: Klimov, A. and Shamir, A.,
// "A New Class of Invertible Mappings", Cryptographic Hardware and Embedded
// Systems 2002, http://dl.acm.org/citation.cfm?id=752741
//
// Very fast, very simple, and passes Diehard and other good statistical
// tests as strongly as cryptographically-secure random number generators (but
// is not itself cryptographically-secure).

class RandomNumberGenerator {
    static let sharedInstance = RandomNumberGenerator()

    private init(seed: UInt64 = 12347) {
        self.seed = seed
    }

    func nextInt() -> Int {
        return next(32)
    }

    private func isPowerOfTwo(_ x: Int) -> Bool { return x != 0 && ((x & (x - 1)) == 0) }

    func nextInt(max: Int) -> Int {
        assert(max > 0)
        // Fast path if max is a power of 2.
        if isPowerOfTwo(max) {
            return Int((Int64(max) * Int64(next(31))) >> 31)
        }
        while true {
            let rnd = next(31)
            let val = rnd % max
            // Reject values from the biased tail of the range.
            if rnd - val + (max - 1) >= 0 {
                return val
            }
        }
    }

    func nextBool() -> Bool {
        return next(1) != 0
    }

    func nextDouble() -> Double {
        return Double((Int64(next(26)) << 27) + Int64(next(27))) /
            Double(Int64(1) << 53)
    }

    func nextInt64() -> Int64 {
        let lo = UInt64(next(32))
        let hi = UInt64(next(32))
        return Int64(bitPattern: lo | (hi << 32))
    }

    func nextBytes(_ buffer: inout [UInt8]) {
        for n in 0..<buffer.count {
            buffer[n] = UInt8(next(8))
        }
    }

    var seed: UInt64 {
        get {
            return _seed
        }
        set(seed) {
            _initialSeed = seed
            _seed = seed
        }
    }

    var initialSeed: UInt64 {
        return _initialSeed!
    }

    private func randomNumber() -> UInt32 {
        _seed = _seed &+ ((_seed &* _seed) | 5)
        return UInt32(_seed >> 32)
    }

    private func next(_ bits: Int) -> Int {
        assert(bits > 0 && bits <= 32)
        return Int(randomNumber() >> UInt32(32 - bits))
    }

    private var _initialSeed: UInt64?
    private var _seed: UInt64 = 0
}
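A brief usage sketch (the seed value here is arbitrary; setting the same seed again replays the same sequence):

let rng = RandomNumberGenerator.sharedInstance
rng.seed = 42
let roll = rng.nextInt(max: 6) + 1           // deterministic "die roll" in 1...6
var buffer = [UInt8](repeating: 0, count: 4)
rng.nextBytes(&buffer)                       // four reproducible pseudo-random bytes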
I wrote these methods in Objective-C. They just checksum and XOR some NSData:
- (void)XOR:(NSMutableData *)inputData withKey:(NSData *)key
{
    unsigned char *inputByteData = (unsigned char *)[inputData mutableBytes];
    unsigned char *keyByteData = (unsigned char *)[key bytes];
    for (int i = 0; i < [inputData length]; i++)
    {
        inputByteData[i] = inputByteData[i] ^ keyByteData[i % [key length]];
    }
}

- (Byte)checkSum:(NSMutableData *)data withLength:(Byte)dataLength
{
    Byte *dataByte = (Byte *)malloc(dataLength);
    memcpy(dataByte, [data bytes], dataLength);
    Byte result = 0;
    int count = 0;
    while (dataLength > 0) {
        result += dataByte[count];
        dataLength--;
        count++;
    };
    result = result & 0xff;
    return result & 0xff;
}
However, I'm not familiar with bitwise operators, especially in Swift with all these UnsafeMutablePointer<Void>... things.
Can anybody help me convert this? (Basically, I need the checksum and XOR functions.)
One more thing: should they be put in an NSData/NSMutableData extension?
Thank you.
UnsafeBufferPointer/UnsafeMutableBufferPointer might be what you need. I've tried translating your code into Swift below (but the code is not tested well).
func XOR(inputData: NSMutableData, withKey key: NSData) {
    let b = UnsafeMutableBufferPointer<UInt8>(start:
        UnsafeMutablePointer(inputData.mutableBytes), count: inputData.length)
    let k = UnsafeBufferPointer<UInt8>(start:
        UnsafePointer(key.bytes), count: key.length)
    for i in 0..<inputData.length {
        b[i] ^= k[i % key.length]
    }
}

func checkSum(data: NSData) -> Int {
    let b = UnsafeBufferPointer<UInt8>(start:
        UnsafePointer(data.bytes), count: data.length)
    var sum = 0
    for i in 0..<data.length {
        sum += Int(b[i])
    }
    return sum & 0xff
}
Swift 3 update:
public extension Data {
    public mutating func xor(key: Data) {
        for i in 0..<self.count {
            self[i] ^= key[i % key.count]
        }
    }

    public func checkSum() -> Int {
        return self.map { Int($0) }.reduce(0, +) & 0xff
    }
}
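For example (made-up byte values, just to show the calls; the key repeats cyclically):

var message = Data([0x01, 0x02, 0x03, 0x04])
let key = Data([0x0F, 0xF0])
message.xor(key: key)      // -> 0x0E 0xF2 0x0C 0xF4
print(message.checkSum())  // (0x0E + 0xF2 + 0x0C + 0xF4) & 0xff == 0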
You can also create a non-mutating variant, xored(key: Data) -> Data.
Then you can chain the operations, e.g. data.xored(key: key).checkSum().
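A minimal sketch of that non-mutating helper, reusing the extension above:

extension Data {
    func xored(key: Data) -> Data {
        var copy = self
        copy.xor(key: key)  // reuse the mutating version
        return copy
    }
}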
Swift supports operator overloading, so you can easily write let xorData = data1 ^ data2. I have written an extension that also handles operands of unequal size.
extension Data {
    static func ^ (left: Data, right: Data) -> Data {
        if left.count != right.count {
            NSLog("Warning! XOR operands are not equal. left = \(left), right = \(right)")
        }
        var smaller: Data, bigger: Data
        if left.count <= right.count {
            smaller = left
            bigger = right
        } else {
            smaller = right
            bigger = left
        }
        let bs: [UInt8] = Array(smaller)
        let bb: [UInt8] = Array(bigger)
        var br = [UInt8]()
        // XOR the overlapping prefix ...
        for i in 0..<bs.count {
            br.append(bs[i] ^ bb[i])
        }
        // ... and keep the tail of the longer operand unchanged.
        for j in bs.count..<bb.count {
            br.append(bb[j])
        }
        return Data(br)
    }
}
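For instance (hypothetical byte values; note how the tail of the longer operand passes through unchanged):

let a = Data([0xFF, 0x0F])
let b = Data([0x0F, 0xF0, 0xAA])
print(Array(a ^ b))  // [240, 255, 170], i.e. 0xF0, 0xFF, 0xAA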
Updated for Swift 3:
func xor(data: Data, with key: Data) -> Data {
    var xorData = data
    let dataLength = data.count
    let keyLength = key.count
    xorData.withUnsafeMutableBytes { (start: UnsafeMutablePointer<UInt8>) -> Void in
        key.withUnsafeBytes { (keyStart: UnsafePointer<UInt8>) -> Void in
            let b = UnsafeMutableBufferPointer<UInt8>(start: start, count: dataLength)
            let k = UnsafeBufferPointer<UInt8>(start: keyStart, count: keyLength)
            for i in 0..<dataLength {
                b[i] ^= k[i % keyLength] // wrap around the key length, not the data length
            }
        }
    }
    return xorData
}
I have the following code in Objective-C:
- (double)readDouble
{
    double value = 0.0;
    if ([self read:(uint8_t *)&value maxLength:8] != 8)
    {
        NSLog(@"***** Couldn't read double");
    }
    return value;
}
It works. But I don't know how to convert it to Swift. Here is my code:
public func readDouble() -> Double {
    var value: Double = 0.0
    var num = self.read((uint8_t *)&value, maxLength: 8) // got compiling error here!
    if num != 8 {
    }
}
The error message is:
Cannot invoke '&' with an argument list of type '($T4, maxLength:
IntegerLiteralConvertible)'
Can anybody help? Thanks
The testing data I'm using (1.255):
14 AE 47 E1 7A 14 F4 3F
UPDATE:
A simple C solution, but how to do this in Swift?
double d = 0;
unsigned char buf[sizeof d] = {0};
memcpy(&d, buf, sizeof d);
This should work:
let num = withUnsafeMutablePointer(&value) {
    self.read(UnsafeMutablePointer($0), maxLength: sizeofValue(value))
}
Explanation: withUnsafeMutablePointer() calls the closure (block) with the only argument
($0 in shorthand notation) set to the address of value.
$0 has the type UnsafeMutablePointer<Double> and read() expects an
UnsafeMutablePointer<UInt8> as the first argument, therefore another conversion
is necessary. The return value of the closure is then assigned to num.
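For readers on newer Swift versions, here is a sketch of the same idea using withUnsafeMutableBytes(of:), assuming the method lives in an InputStream extension as the question implies:

import Foundation

extension InputStream {
    func readDouble() -> Double {
        var value = 0.0
        // Expose the raw bytes of `value` and read directly into them.
        let n = withUnsafeMutableBytes(of: &value) { buffer in
            self.read(buffer.bindMemory(to: UInt8.self).baseAddress!, maxLength: buffer.count)
        }
        if n != MemoryLayout<Double>.size {
            print("***** Couldn't read double")
        }
        return value
    }
}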
The method above did not work for me using Swift 2, but I discovered a much simpler way to do this conversion (and vice versa):
func binarytotype <T> (value: [UInt8], _: T.Type) -> T
{
    return value.withUnsafeBufferPointer
    {
        return UnsafePointer<T>($0.baseAddress).memory
    }
}

func typetobinary <T> (var value: T) -> [UInt8]
{
    return withUnsafePointer(&value)
    {
        Array(UnsafeBufferPointer(start: UnsafePointer<UInt8>($0), count: sizeof(T)))
    }
}

let a: Double = 0.25
let b: [UInt8] = typetobinary(a)     // -> [0, 0, 0, 0, 0, 0, 208, 63]
let c = binarytotype(b, Double.self) // -> 0.25
I have tested it with Xcode 7.2 in the playground.
Here is the updated version for Swift 3 beta 6, which is different; thanks to Martin.
func binarytotype <T> (_ value: [UInt8], _ : T.Type) -> T
{
    return value.withUnsafeBufferPointer
    {
        UnsafeRawPointer($0.baseAddress!).load(as: T.self)
    }
}

func typetobinary <T> (_ value: T) -> [UInt8]
{
    var v = value
    let size = MemoryLayout<T>.size
    return withUnsafePointer(to: &v)
    {
        $0.withMemoryRebound(to: UInt8.self, capacity: size)
        {
            Array(UnsafeBufferPointer(start: $0, count: size))
        }
    }
}

let dd: Double = 1.23456             // -> 1.23456
let d = typetobinary(dd)             // -> [56, 50, 143, 252, 193, 192, 243, 63]
let i = binarytotype(d, Double.self) // -> 1.23456
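The same pair of functions round-trips any fixed-size value type. For example, on a little-endian platform (all current Apple hardware):

let x: UInt32 = 0xDEADBEEF
let bytes = typetobinary(x)              // -> [239, 190, 173, 222]
let y = binarytotype(bytes, UInt32.self) // -> 0xDEADBEEF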
I found some code to encode a Base10 integer into a string with a custom base alphabet:
func stringToCustomBase(encode: Int, alphabet: String) -> String {
    var base = alphabet.count, int = encode, result = ""
    repeat {
        let index = alphabet.index(alphabet.startIndex, offsetBy: (int % base))
        result = [alphabet[index]] + result
        int /= base
    } while (int > 0)
    return result
}
... calling it with these lines:
let encoded = stringToCustomBase(encode: 9291, alphabet: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
print(encoded)
The encoding above works pretty well. But what about decoding the encoded string?
Since I have no idea how to decode the encoded string (in this case Base62, because alphabet.count == 62) back to a human-readable Base10 value, any help would be super appreciated.
PS: A full code solution is not required; pseudo-code or just a few lines of code would also help.
This is what I've tried so far:
func reVal(num: Int) -> Character {
    if (num >= 0 && num <= 9) {
        return Character("\(num)")
    }
    return Character("\(num - 10)A");
}

func convertBack() {
    var index = 0;
    let encoded = "w2RDn3"
    var decoded = [Character]()
    var inputNum = encoded.count
    repeat {
        index += 1
        decoded[index] = reVal(num: inputNum % 62)
        //encoded[index] = reVal(inputNum % 62);
        inputNum /= 62;
    } while (inputNum > 0)
    print(decoded);
}
Based on the original algorithm, you need to iterate through each character of the encoded string, find the location of that character within the alphabet, and accumulate the result.
Here are both methods and some test code:
func stringToCustomBase(encode: Int, alphabet: String) -> String {
    var base = alphabet.count, string = encode, result = ""
    repeat {
        let index = alphabet.index(alphabet.startIndex, offsetBy: (string % base))
        result = [alphabet[index]] + result
        string /= base
    } while (string > 0)
    return result
}

func customBaseToInt(encoded: String, alphabet: String) -> Int? {
    let base = alphabet.count
    var result = 0
    for ch in encoded {
        if let index = alphabet.index(of: ch) {
            let mult = result.multipliedReportingOverflow(by: base)
            if (mult.overflow) {
                return nil
            } else {
                let add = mult.partialValue.addingReportingOverflow(alphabet.distance(from: alphabet.startIndex, to: index))
                if (add.overflow) {
                    return nil
                } else {
                    result = add.partialValue
                }
            }
        } else {
            return nil
        }
    }
    return result
}

let startNum = 234567
let alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
let codedNum = stringToCustomBase(encode: startNum, alphabet: alphabet)
let origNun = customBaseToInt(encoded: codedNum, alphabet: alphabet)
I made the customBaseToInt method return an optional result in case there are characters in the encoded value that are not in the provided alphabet.
You can achieve this via reduce:
enum RadixDecodingError: Error {
    case invalidCharacter
    case overflowed
}

func customRadixToInt(str: String, alphabet: String) throws -> Int {
    return try str.reduce(into: 0) {
        guard let digitIndex = alphabet.index(of: $1) else {
            throw RadixDecodingError.invalidCharacter
        }
        let multiplied = $0.multipliedReportingOverflow(by: alphabet.count)
        guard !multiplied.overflow else {
            throw RadixDecodingError.overflowed
        }
        let added = multiplied.partialValue.addingReportingOverflow(alphabet.distance(from: alphabet.startIndex, to: digitIndex))
        guard !added.overflow else {
            throw RadixDecodingError.overflowed
        }
        $0 = added.partialValue
    }
}
I used the exception-throwing mechanism so that the caller can distinguish between invalid characters and overflow errors.
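For example, decoding the value from the previous answer's test (234567 with that 36-character alphabet encodes to "FA91"; traced by hand, so worth verifying):

let alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
do {
    let value = try customRadixToInt(str: "FA91", alphabet: alphabet)
    print(value)  // 234567
} catch {
    print("Decoding failed: \(error)")
}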