Crash in specialized String.imageSize() - iOS

I have a crash in this extension method of String:
func imageSize() -> CGSize {
    // self = "https://s3-eu-west-1.amazonaws.com/mimg.haraj.com.sa/userfiles30/2018-8-6/524x334-1_-E7VSb5T20mOouX.jpg"
    var width = 0
    var height = 0
    let split0 = self.split(separator: "/")
    if split0.count > 0 {
        let split1 = split0.last?.split(separator: "-")
        if (split1?.count)! > 0 {
            let split2 = split1?.first?.decomposedStringWithCanonicalMapping.split(separator: "x")
            width = (split2?.first?.decomposedStringWithCanonicalMapping.toInt())!
            if (split2?.count)! > 1 {
                // let split2 = split1![1].decomposedStringWithCanonicalMapping.split(separator: "-")
                height = (split2?.last?.decomposedStringWithCanonicalMapping.toInt())!
            }
        }
    }
    return CGSize(width: width, height: height)
}
The crash is reported on the line return CGSize(width: width, height: height).
I have created an NSString version like this so the same method can be used:
@objc extension NSString {
    func imageSize1() -> CGSize {
        return (self as String).imageSize()
    }
}
This is then called from obj-c code:
CGSize imageSize = [url imageSize1];
Examples of url are:
https://s3-eu-west-1.amazonaws.com/mimg.haraj.com.sa/userfiles30/2019-02-07/675x900-1_-CdC62Y2hcV7208.jpg
https://s3-eu-west-1.amazonaws.com/mimg.haraj.com.sa/userfiles30/2019-02-07/675x900-1_-697e3no8ec2E1I.jpg
https://s3-eu-west-1.amazonaws.com/mimg.haraj.com.sa/userfiles30/2019-02-07/675x900-1_-8Af5D20wh9b62z.jpg
This imageSize() method parses the image size from the URL. The URLs above contain the size 675x900, i.e. width x height.
In rare cases we encounter a URL that has no size information and is not in the format above, so if the size is not found, CGSize(width: 0, height: 0) is returned.
I have tested this method on all expected scenarios.
But for some reason the method is causing crashes. Maybe I missed or messed something up.
Here is the link to the Crashlytics issue.
Any help would be appreciated.

Try not to use force unwrapping (!).
let exampleString1 = "https://s3-eu-west-1.amazonaws.com/mimg.haraj.com.sa/userfiles30/2018-8-6/524x334-1_-E7VSb5T20mOouX.jpg"
let exampleString2 = "https://s3-eu-west-1.amazonaws.com/mimg.haraj.com.sa/userfiles30/2019-02-07/675x900-1_-697e3no8ec2E1I.jpg"
let exampleString3 = "https://s3-eu-west-1.amazonaws.com/mimg.haraj.com.sa/userfiles30/2019-02-07/675x900-1_-CdC62Y2hcV7208.jpg"
extension String {
    func imageSize() -> CGSize? {
        // last url component
        guard let imageName = self.split(separator: "/").last else { return nil }
        guard let imageSizeString = imageName.split(separator: "-").first else { return nil }
        let sizes = imageSizeString.split(separator: "x")
        guard let first = sizes.first,
              let last = sizes.last,
              let width = Int(String(first)),
              let height = Int(String(last))
        else { return nil }
        return CGSize(width: width, height: height)
    }
}
exampleString1.imageSize() // Optional((524.0, 334.0))
exampleString2.imageSize() // Optional((675.0, 900.0))
exampleString3.imageSize() // Optional((675.0, 900.0))
Also try to use guard let and return nil if something goes wrong. For example, the URL scheme could change.
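Since the original code is called from Objective-C through an NSString wrapper, which cannot return an optional CGSize, one option (a sketch, assuming you adopt the optional-returning imageSize() above) is to fall back to .zero in the bridge:
@objc extension NSString {
    func imageSize1() -> CGSize {
        // Returns CGSize.zero when the size can't be parsed from the URL.
        return (self as String).imageSize() ?? .zero
    }
}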

The crash is most likely due to force unwrapping optionals. There are several cases in your code where you're using it, which will lead to a runtime error if the file name in your URL has a different format than you expect. Try
func imageSize() -> CGSize {
    // self = "https://s3-eu-west-1.amazonaws.com/mimg.haraj.com.sa/userfiles30/2018-8-6/524x334-1_-E7VSb5T20mOouX.jpg"
    var width = 0
    var height = 0
    let split0 = self.split(separator: "/")
    if let split1 = split0.last?.split(separator: "-") {
        if let split2 = split1.first?.decomposedStringWithCanonicalMapping.split(separator: "x") {
            width = (split2.first?.decomposedStringWithCanonicalMapping.toInt()) ?? 0
            if split2.count > 1 {
                height = (split2.last?.decomposedStringWithCanonicalMapping.toInt()) ?? 0
            }
        }
    }
    return CGSize(width: width, height: height)
}
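The snippets that call toInt() rely on a helper that isn't shown in the question; if your project doesn't have one, a minimal assumed implementation returning an optional Int could be:
extension String {
    // Hypothetical helper assumed by the snippets above; returns nil for non-numeric strings.
    func toInt() -> Int? {
        return Int(self)
    }
}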

Related

TensorFlow Interpreter throwing "data count" error - iOS

I am using TensorFlowLiteSwift, and the model I'm working with is responsible for flattening an image when the image is cropped in a trapezoidal shape.
TensorFlow does not provide much documentation, so I have been trying to implement things from their example projects.
But here is the catch: it throws an error saying "Provided data count must match the required count", and the required count is 4. I backtracked the byteCount in Interpreter.swift but could not find the actual setter.
So, is the .tflite model responsible for the "required count"? And if not, how does this get set?
Here is a chunk of code I think would help understanding my problem:
/// Performs image preprocessing, invokes the `Interpreter`, and processes the inference results.
func runModel(on item: ImageProcessInfo) -> UIImage? {
    let rgbData = item.resizedImage.scaledData(with: CGSize(width: 1000, height: 900),
                                               byteCount: inputWidth * inputHeight * batchSize,
                                               isQuantized: false)
    var corner = item.corners.map { $0.map { p -> (Float, Float) in
        return (Float(p.x), Float(p.y))
    } }
    var item = item
    guard let height = NSMutableData(capacity: 0) else { return nil }
    height.append(&item.originalHeight, length: 4)
    guard let width = NSMutableData(capacity: 0) else { return nil }
    width.append(&item.originalWidth, length: 4)
    guard let corners = NSMutableData(capacity: 0) else { return nil }
    corners.append(&corner, length: 4)
    do {
        try interpreter.copy(rgbData!, toInputAt: 0)
        try interpreter.copy(height as Data, toInputAt: 1)
        try interpreter.copy(width as Data, toInputAt: 2)
        try interpreter.copy(corners as Data, toInputAt: 3)
        try interpreter.invoke()
        let outputTensor1 = try self.interpreter.output(at: 0)
        guard let cgImage = postprocessImageData(data: outputTensor1.data, size: CGSize(width: 1000, height: 900)) else {
            return nil
        }
        let outputImage = UIImage(cgImage: cgImage)
        return outputImage
    } catch {
        dump(error)
        return nil
    }
}
extension UIImage {
    func scaledData(with size: CGSize, byteCount: Int, isQuantized: Bool) -> Data? {
        guard let cgImage = self.cgImage, cgImage.width > 0, cgImage.height > 0 else { return nil }
        guard let imageData = imageData(from: cgImage, with: size) else { return nil }
        var scaledBytes = [UInt8](repeating: 0, count: byteCount)
        var index = 0
        for component in imageData.enumerated() {
            let offset = component.offset
            let isAlphaComponent = (offset % 4) == 3
            guard !isAlphaComponent else { continue }
            scaledBytes[index] = component.element
            index += 1
        }
        if isQuantized { return Data(scaledBytes) }
        let scaledFloats = scaledBytes.map { (Float32($0) - 127.5) / 127.5 }
        return Data(copyingBufferOf: scaledFloats)
    }

    private func imageData(from cgImage: CGImage, with size: CGSize) -> Data? {
        let bitmapInfo = CGBitmapInfo(
            rawValue: CGBitmapInfo.byteOrder32Big.rawValue | CGImageAlphaInfo.premultipliedLast.rawValue
        )
        let width = Int(size.width)
        let scaledBytesPerRow = (cgImage.bytesPerRow / cgImage.width) * width
        guard let context = CGContext(
            data: nil,
            width: width,
            height: Int(size.height),
            bitsPerComponent: cgImage.bitsPerComponent,
            bytesPerRow: scaledBytesPerRow,
            space: CGColorSpaceCreateDeviceRGB(),
            bitmapInfo: bitmapInfo.rawValue)
        else {
            return nil
        }
        context.draw(cgImage, in: CGRect(origin: .zero, size: size))
        return context.makeImage()?.dataProvider?.data as Data?
    }
}
@discardableResult
public func copy(_ data: Data, toInputAt index: Int) throws -> Tensor {
    let maxIndex = inputTensorCount - 1
    guard case 0...maxIndex = index else {
        throw InterpreterError.invalidTensorIndex(index: index, maxIndex: maxIndex)
    }
    guard let cTensor = TfLiteInterpreterGetInputTensor(cInterpreter, Int32(index)) else {
        throw InterpreterError.allocateTensorsRequired
    }
    /* Error here */
    let byteCount = TfLiteTensorByteSize(cTensor)
    guard data.count == byteCount else {
        throw InterpreterError.invalidTensorDataCount(provided: data.count, required: byteCount)
    }
    #if swift(>=5.0)
    let status = data.withUnsafeBytes {
        TfLiteTensorCopyFromBuffer(cTensor, $0.baseAddress, data.count)
    }
    #else
    let status = data.withUnsafeBytes { TfLiteTensorCopyFromBuffer(cTensor, $0, data.count) }
    #endif // swift(>=5.0)
    guard status == kTfLiteOk else { throw InterpreterError.failedToCopyDataToInputTensor }
    return try input(at: index)
}
What are the input shapes? Can you identify which one is complaining about the size?
At first glance, corners.append(&corner, length: 4) seems weird - does corners contain only 1 Float (byte size 4)?
The byteCount for a tensor is filled by the underlying C API; it simply returns tensor->bytes for the underlying TfLiteTensor struct, which is filled in the model-loading stage.
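If the tensor at index 3 really expects the corner points rather than a single value, the data has to be flattened into a contiguous Float32 buffer whose byte count matches TfLiteTensorByteSize; appending a Swift array with length: 4 only copies 4 bytes of the array's storage. A sketch of that serialization (assuming the tensors expect Float32 values; the helper names are illustrative, not from the question):
import Foundation

// Flattens nested (Float, Float) corner tuples into a contiguous Float32 buffer.
// The resulting Data's count must equal the tensor's byte size (numberOfFloats * 4).
func cornerData(from corners: [[(Float, Float)]]) -> Data {
    let floats: [Float32] = corners.flatMap { polygon in
        polygon.flatMap { [$0.0, $0.1] }
    }
    return floats.withUnsafeBufferPointer { Data(buffer: $0) }
}

// Scalars such as originalWidth / originalHeight can be serialized the same way.
func scalarData(_ value: Float32) -> Data {
    return withUnsafeBytes(of: value) { Data($0) }
}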

Emoji skin-tone detection

Following this post, I tried to update the code from Swift 2.0 to Swift 5.0 to check which emojis have skin-tone variants available and which variations are already applied.
My updated code in detail:
extension String {
    var emojiSkinToneModifiers: [String] {
        return [ "🏻", "🏼", "🏽", "🏾", "🏿" ]
    }

    var emojiVisibleLength: Int {
        var count = 0
        enumerateSubstrings(in: startIndex..<endIndex, options: .byComposedCharacterSequences) { (_, _, _, _) in
            count = count + 1
        }
        return count
    }

    var emojiUnmodified: String {
        if self.count == 0 {
            return ""
        }
        let range = String(self[..<self.index(self.startIndex, offsetBy: 1)])
        return range
    }

    var canHaveSkinToneModifier: Bool {
        if self.count == 0 {
            return false
        }
        let modified = self.emojiUnmodified + self.emojiSkinToneModifiers[0]
        return modified.emojiVisibleLength == 1
    }
}
And use this with an array:
let emojis = [ "👍", "👍🏿", "🐸" ]
for emoji in emojis {
    if emoji.canHaveSkinToneModifier {
        let unmodified = emoji.emojiUnmodified
        print(emoji)
        for modifier in emoji.emojiSkinToneModifiers {
            print(unmodified + modifier)
        }
    } else {
        print(emoji)
    }
}
The output:
👍👍🏻👍🏼👍🏽👍🏾👍🏿 👍🏿👍🏿🏻👍🏿🏼👍🏿🏽👍🏿🏾👍🏿🏿 🐸🐸🏻🐸🏼🐸🏽🐸🏾🐸🏿
It assigns variations to emojis that do not have them, or that already have one applied, instead of: 👍👍🏻👍🏼👍🏽👍🏾👍🏿 👍🏿 🐸
I suppose enumerateSubstrings(in:) is being used incorrectly, and self.characters.count became self.count, which makes counting one (composed) emoji easier and more correct than before Swift 4, but maybe that is not useful in this case. What am I doing wrong?
Thanks
A "hack" would be to compare the visual representation of a correct emoji (like "🐸") and a wanna-be emoji (like "🐸🏽").
I've modified your code here and there to make it work:
extension String {
    static let emojiSkinToneModifiers: [String] = ["🏻", "🏼", "🏽", "🏾", "🏿"]

    var emojiVisibleLength: Int {
        var count = 0
        let nsstr = self as NSString
        let range = NSRange(location: 0, length: nsstr.length)
        nsstr.enumerateSubstrings(in: range,
                                  options: .byComposedCharacterSequences)
        { (_, _, _, _) in
            count = count + 1
        }
        return count
    }

    var emojiUnmodified: String {
        if isEmpty {
            return self
        }
        let string = String(self.unicodeScalars.first!)
        return string
    }

    private static let emojiReferenceSize: CGSize = {
        let size = CGSize(width : CGFloat.greatestFiniteMagnitude,
                          height: CGFloat.greatestFiniteMagnitude)
        let rect = ("👍" as NSString).boundingRect(with: size,
                                                   options: .usesLineFragmentOrigin,
                                                   context: nil)
        return rect.size
    }()

    var canHaveSkinToneModifier: Bool {
        if isEmpty {
            return false
        }
        let modified = self.emojiUnmodified + String.emojiSkinToneModifiers[0]
        let size = (modified as NSString)
            .boundingRect(with: CGSize(width : CGFloat.greatestFiniteMagnitude,
                                       height: .greatestFiniteMagnitude),
                          options: .usesLineFragmentOrigin,
                          context: nil).size
        return size == String.emojiReferenceSize
    }
}
Let's try it out:
let emojis = [ "👍", "👍🏿", "🐸" ]
for emoji in emojis {
    if emoji.canHaveSkinToneModifier {
        let unmodified = emoji.emojiUnmodified
        print(unmodified)
        for modifier in String.emojiSkinToneModifiers {
            print(unmodified + modifier)
        }
    } else {
        print(emoji)
    }
    print("\n")
}
And voila!
👍
👍🏻
👍🏼
👍🏽
👍🏾
👍🏿
👍
👍🏻
👍🏼
👍🏽
👍🏾
👍🏿
🐸
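On Swift 5 you can also avoid measuring rendered text entirely and ask Unicode directly whether the base scalar accepts a Fitzpatrick modifier. This is a sketch of an alternative (not the answer above) using the standard library's Unicode.Scalar.Properties; availability may depend on your deployment target:
extension Character {
    // True if the character's base scalar can take a skin-tone modifier (e.g. 👍).
    var canHaveSkinTone: Bool {
        guard let scalar = unicodeScalars.first else { return false }
        return scalar.properties.isEmojiModifierBase
    }

    // True if a skin-tone modifier is already applied (e.g. 👍🏿).
    var hasSkinTone: Bool {
        return unicodeScalars.contains { $0.properties.isEmojiModifier }
    }
}

// "👍".first?.canHaveSkinTone == true, "🐸".first?.canHaveSkinTone == false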

How to solve Command failed due to signal: Segmentation fault: 11

I was searching for this issue here and found out that it is not a common one; everyone had a different approach to solving it. I'm using Firebase and the Gifu framework. For the latter I actually copied all the files into my project, but that didn't help either.
Here is my source code:
import FirebaseDatabase
import FirebaseAuth
import Firebase
import UIKit
import Gifu

class ViewController: UIViewController {
    @IBOutlet weak var userImage: AnimatableImageView!
    var displayedUserId = ""
    var AcceptedOrRejected = ""

    override func viewDidLoad() {
        super.viewDidLoad()
        let urlArray = ["http://i.imgur.com/VAWlQ0S.gif", "http://i.imgur.com/JDzGqvE.gif", "http://67.media.tumblr.com/4cd2a04b60bb867bb4746d682aa60020/tumblr_mjs2dvWX6x1rvn6njo1_400.gif", "https://media.giphy.com/media/TlK63ELk5OPDzpb6Tao/giphy.gif", "http://i3.photobucket.com/albums/y90/spicestas/GeriHalliwell-Calling-new1.gif", "http://media.tumblr.com/tumblr_lnb9aozmM71qbxrlp.gif"]
        var counter = 1
        for url in urlArray {
            let nsUrl = NSURL(string: url)
            let girls = ProfileClass()
            girls.profilePhotoUrl = url
            girls.profileGender = "female"
            girls.profileName = "girlsname\(counter)"
            girls.profileSurname = "girlsurname\(counter)"
            girls.interest = "men"
            girls.uid = "\(randomStringWithLength(45))"
            counter++
            girls.SaveUser()
        }
        //----setting variables
        let label = UILabel(frame: CGRectMake(self.view.bounds.width / 2 - 100, self.view.bounds.height / 2 - 50, 300, 100))
        //label.text = "Drag me!"
        //label.textAlignment = NSTextAlignment.Center
        self.view.addSubview(label)
        let gesture = UIPanGestureRecognizer(target: self, action: Selector("wasDragged:"))
        userImage.addGestureRecognizer(gesture)
        userImage.userInteractionEnabled = true
        //----getting access to database
        let thisUserRef = URL_BASE.childByAppendingPath("profile")
        thisUserRef.queryOrderedByChild("Interest").queryEqualToValue("men").observeEventType(.Value, withBlock: {
            snapshot in
            for child in snapshot.children {
                self.displayedUserId = (child.value["uid"] as? String)!
                let imageURL = child.value["photo"] as? String
                let imURL = NSURL(string: imageURL!)
                //print(imageURL)
                if self.AcceptedOrRejected != "" {
                    let AcceptedArray = child.value[AcceptedOrRejected] as? Array
                    AcceptedArray.append(displayedUserId)
                }
                if let picData = NSData(contentsOfURL: imURL!) {
                    self.userImage.animateWithImageData(picData)
                    //self.userImage.image = UIImage(data: picData)
                }
            }
        })
    }

    //-----Dragging function-----------
    func wasDragged(gesture: UIPanGestureRecognizer) {
        //set traslations
        let translation = gesture.translationInView(self.view)
        let label = gesture.view
        //set center position
        label!.center = CGPoint(x: self.view.bounds.width / 2 + translation.x, y: self.view.bounds.height / 2 - translation.y )
        let xfromCenter = (label?.center.x)! - self.view.bounds.width / 2
        let scale = min(100 / abs(xfromCenter), 1)
        var rotation = CGAffineTransformMakeRotation(xfromCenter / 200)
        var strech = CGAffineTransformScale(rotation, scale, scale)
        label?.transform = strech
        if gesture.state == UIGestureRecognizerState.Ended {
            if label?.center.x < 100 {
                self.AcceptedOrRejected = "Accepted"
            } else if label?.center.x > self.view.bounds.width / 100 {
                self.AcceptedOrRejected = "Rejected"
            }
            rotation = CGAffineTransformMakeRotation(0)
            strech = CGAffineTransformScale(rotation, 1, 1)
            label?.transform = strech
            label?.center = CGPoint(x: self.view.bounds.width / 2 , y: self.view.bounds.height / 2 )
        }
    }

    //---Function for generating randomn userid
    func randomStringWithLength (len : Int) -> NSString {
        let letters : NSString = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
        let randomString : NSMutableString = NSMutableString(capacity: len)
        for (var i=0; i < len; i += 1){
            let length = UInt32 (letters.length)
            let rand = arc4random_uniform(length)
            randomString.appendFormat("%C", letters.characterAtIndex(Int(rand)))
        }
        return randomString
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }

    override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?) {
        if segue.identifier == "LogOutSegue" {
            try! FIRAuth.auth()!.signOut()
        }
    }
}
And I get this error
1. While emitting SIL for 'viewDidLoad' at /Users/zkid18/Desktop/wrk/Bloom/Bloom/ViewController.swift:23:14
2. While silgen closureexpr SIL function #_TFFC5Bloom14ViewController11viewDidLoadFT_T_U_FCSo15FIRDataSnapshotT_ for expression at [/Users/zkid18/Desktop/wrk/Bloom/Bloom/ViewController.swift:68:114 - line:107:9] RangeText="{
snapshot in
for child in snapshot.children{
self.displayedUserId = (child.value["uid"] as? String)!
let imageURL = child.value["photo"] as? String
let imURL = NSURL(string: imageURL!)
//print(imageURL)
if self.AcceptedOrRejected != "" {
let AcceptedArray = child.value[AcceptedOrRejected] as? Array
AcceptedArray.append(displayedUserId)
}
if let picData = NSData(contentsOfURL: imURL!) {
self.userImage.animateWithImageData(picData)
//self.userImage.image = UIImage(data: picData)
}
}
}"
I don't really know what to do with that
I just attempted a conversion to Swift 3 to get a jump on fixing issues in my code. I had a similar error, and I had just integrated Firebase throughout my project. I found that removing the pod and framework from the app and commenting out all Firebase code fixed this compile issue.
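Since the error output points at the observeEventType closure, another thing worth trying (an assumption based on that trace, not a confirmed fix) is to give the compiler concrete types inside the closure instead of subscripting AnyObject values; "while emitting SIL" crashes often disappear once the closure type-checks cleanly. A sketch in the question's Swift 2 / Firebase 3 style:
thisUserRef.queryOrderedByChild("Interest").queryEqualToValue("men").observeEventType(.Value, withBlock: { snapshot in
    for child in snapshot.children {
        // Cast each child and its value to concrete types before using them.
        guard let snap = child as? FIRDataSnapshot else { continue }
        guard let value = snap.value as? [String: AnyObject] else { continue }
        guard let uid = value["uid"] as? String else { continue }
        guard let photo = value["photo"] as? String else { continue }
        guard let imURL = NSURL(string: photo) else { continue }
        self.displayedUserId = uid
        if let picData = NSData(contentsOfURL: imURL) {
            self.userImage.animateWithImageData(picData)
        }
    }
})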

How to get image file size in Swift?

I am using the
UIImagePickerControllerDelegate,
UINavigationControllerDelegate, and
UIPopoverControllerDelegate
delegates for choosing an image from my gallery or camera. How can I get the image file size after choosing an image?
I want to use this:
let filePath = "your path here"
var fileSize: UInt64 = 0
do {
    let attr: NSDictionary? = try NSFileManager.defaultManager().attributesOfItemAtPath(filePath)
    if let _attr = attr {
        fileSize = _attr.fileSize()
        print(fileSize)
    }
} catch {
}
but this needs a path. How can I get the size without a path, just from the image itself?
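If you only have the picked UIImage, you can measure the size of its encoded data instead of a file on disk; on iOS 11+ the picker also hands you the on-disk URL. A sketch of both (modern Swift; the delegate signature below is the current one, not from the question):
// Inside your UIImagePickerControllerDelegate:
func imagePickerController(_ picker: UIImagePickerController,
                           didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
    // Option 1: size of the encoded image data (what you would upload), no path needed.
    if let image = info[.originalImage] as? UIImage,
       let data = image.jpegData(compressionQuality: 1.0) {
        print("JPEG data size: \(data.count) bytes")
    }
    // Option 2 (iOS 11+): the picker provides a file URL, so file attributes work.
    if let url = info[.imageURL] as? URL {
        let values = try? url.resourceValues(forKeys: [.fileSizeKey])
        print("File size on disk: \(values?.fileSize ?? 0) bytes")
    }
    picker.dismiss(animated: true)
}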
Please check Google for "1 kB to bytes"; it is 1000.
https://www.google.com/search?q=1+kb+%3D+how+many+bytes&oq=1+kb+%3D+how+many+bytes&aqs=chrome..69i57.8999j0j1&sourceid=chrome&ie=UTF-8
To get the proper size, I've covered multiple scenarios: adding the image to the app bundle, and adding it to Photos in the simulator.
The image I took from my Mac was 299.0 KB.
Scenario 1: Adding image to Application Bundle
When you add an image to your Xcode project, its size remains the same in the project directory. But when you read it from its path, the size is reduced to 257.0 KB, which is the actual size of the image used on the device or simulator.
guard let aStrUrl = Bundle.main.path(forResource: "1", ofType: "png") else { return }
let aUrl = URL(fileURLWithPath: aStrUrl)
print("Img size = \((Double(aUrl.fileSize) / 1000.00).rounded()) KB")
extension URL {
    var attributes: [FileAttributeKey : Any]? {
        do {
            return try FileManager.default.attributesOfItem(atPath: path)
        } catch let error as NSError {
            print("FileAttribute error: \(error)")
        }
        return nil
    }

    var fileSize: UInt64 {
        return attributes?[.size] as? UInt64 ?? UInt64(0)
    }

    var fileSizeString: String {
        return ByteCountFormatter.string(fromByteCount: Int64(fileSize), countStyle: .file)
    }

    var creationDate: Date? {
        return attributes?[.creationDate] as? Date
    }
}
Scenario 2: Adding image to Photos in Simulator
When you add an image to Photos in the simulator or on a device, the size of the image increases from 299.0 KB to 393.0 KB, which is the actual size of the image stored in the device or simulator's documents directory.
Swift 4 and earlier
var image = info[UIImagePickerControllerOriginalImage] as! UIImage
var imgData: NSData = NSData(data: UIImageJPEGRepresentation(image, 1)!)
// var imgData: NSData = NSData(data: UIImagePNGRepresentation(image)!)
// you can also replace UIImageJPEGRepresentation with UIImagePNGRepresentation.
var imageSize: Int = imgData.length
print("size of image in KB: %f ", Double(imageSize) / 1000.0)
Swift 5
let image = info[UIImagePickerController.InfoKey.originalImage] as! UIImage
let imgData = NSData(data: image.jpegData(compressionQuality: 1)!)
let imageSize: Int = imgData.length
print("actual size of image in KB: %f ", Double(imageSize) / 1000.0)
Adding .rounded() will give you 393.0 KB; without it you will get 393.442 KB. Please check the image size manually once using the above code, as the size of the image may vary on different devices and Macs. I've checked it only on a Mac mini and the iPhone XS simulator.
extension UIImage {
    public enum DataUnits: String {
        case byte, kilobyte, megabyte, gigabyte
    }

    func getSizeIn(_ type: DataUnits) -> String {
        guard let data = self.pngData() else {
            return ""
        }
        var size: Double = 0.0
        switch type {
        case .byte:
            size = Double(data.count)
        case .kilobyte:
            size = Double(data.count) / 1024
        case .megabyte:
            size = Double(data.count) / 1024 / 1024
        case .gigabyte:
            size = Double(data.count) / 1024 / 1024 / 1024
        }
        return String(format: "%.2f", size)
    }
}
Usage example : print("Image size \(yourImage.getSizeIn(.megabyte)) mb")
Swift 3/4:
if let imageData = UIImagePNGRepresentation(image) {
    let bytes = imageData.count
    let kB = Double(bytes) / 1000.0 // Note the difference
    let KB = Double(bytes) / 1024.0 // Note the difference
}
Please note the difference between kB and KB. I'm answering here because in my case we had an issue where we treated a kilobyte as 1024 bytes but the server side treated it as 1000 bytes, which caused a problem. Link to learn more.
PS. Almost sure you'll go with kB (1000).
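To make that distinction explicit in code, ByteCountFormatter can format with either convention; a small sketch (the byte count is just an illustrative value):
import Foundation

let byteCount: Int64 = 300_000

let decimalFormatter = ByteCountFormatter()
decimalFormatter.countStyle = .decimal   // 1 kB = 1000 bytes
print(decimalFormatter.string(fromByteCount: byteCount)) // "300 KB"

let binaryFormatter = ByteCountFormatter()
binaryFormatter.countStyle = .binary     // 1 KB = 1024 bytes
print(binaryFormatter.string(fromByteCount: byteCount))  // "293 KB"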
Details
Xcode 10.2.1 (10E1001), Swift 5
Solution
extension String {
    func getNumbers() -> [NSNumber] {
        let formatter = NumberFormatter()
        formatter.numberStyle = .decimal
        let charset = CharacterSet.init(charactersIn: " ,.")
        return matches(for: "[+-]?([0-9]+([., ][0-9]*)*|[.][0-9]+)").compactMap { string in
            return formatter.number(from: string.trimmingCharacters(in: charset))
        }
    }

    // https://stackoverflow.com/a/54900097/4488252
    func matches(for regex: String) -> [String] {
        guard let regex = try? NSRegularExpression(pattern: regex, options: [.caseInsensitive]) else { return [] }
        let matches = regex.matches(in: self, options: [], range: NSMakeRange(0, self.count))
        return matches.compactMap { match in
            guard let range = Range(match.range, in: self) else { return nil }
            return String(self[range])
        }
    }
}
extension UIImage {
    func getFileSizeInfo(allowedUnits: ByteCountFormatter.Units = .useMB,
                         countStyle: ByteCountFormatter.CountStyle = .file) -> String? {
        // https://developer.apple.com/documentation/foundation/bytecountformatter
        let formatter = ByteCountFormatter()
        formatter.allowedUnits = allowedUnits
        formatter.countStyle = countStyle
        return getSizeInfo(formatter: formatter)
    }

    func getFileSize(allowedUnits: ByteCountFormatter.Units = .useMB,
                     countStyle: ByteCountFormatter.CountStyle = .memory) -> Double? {
        guard let num = getFileSizeInfo(allowedUnits: allowedUnits, countStyle: countStyle)?.getNumbers().first else { return nil }
        return Double(truncating: num)
    }

    func getSizeInfo(formatter: ByteCountFormatter, compressionQuality: CGFloat = 1.0) -> String? {
        guard let imageData = jpegData(compressionQuality: compressionQuality) else { return nil }
        return formatter.string(fromByteCount: Int64(imageData.count))
    }
}
Usage
guard let image = UIImage(named: "img") else { return }
if let imageSizeInfo = image.getFileSizeInfo() {
    print("\(imageSizeInfo), \(type(of: imageSizeInfo))") // 51.9 MB, String
}
if let imageSizeInfo = image.getFileSizeInfo(allowedUnits: .useBytes, countStyle: .file) {
    print("\(imageSizeInfo), \(type(of: imageSizeInfo))") // 54,411,697 bytes, String
}
if let imageSizeInfo = image.getFileSizeInfo(allowedUnits: .useKB, countStyle: .decimal) {
    print("\(imageSizeInfo), \(type(of: imageSizeInfo))") // 54,412 KB, String
}
if let size = image.getFileSize() {
    print("\(size), \(type(of: size))") // 51.9, Double
}
Swift 3
if let uploadData = UIImagePNGRepresentation(image) {
    let array = [UInt8](uploadData)
    print("Image size in bytes: \(array.count)")
}
try this for getting size from url
func fileSize(url: URL) -> String? {
    var fileSize: Int?
    do {
        let resources = try url.resourceValues(forKeys: [.fileSizeKey])
        fileSize = resources.fileSize
        print("\(String(describing: fileSize))")
    } catch {
        print("Error: \(error)")
    }
    guard let size = fileSize else { return nil }
    // bytes
    if size < 999 {
        return String(format: "%lu bytes", CUnsignedLong(bitPattern: size))
    }
    // KB
    var floatSize = Float(size) / 1000
    if floatSize < 999 {
        return String(format: "%.1f KB", floatSize)
    }
    // MB
    floatSize = floatSize / 1000
    if floatSize < 999 {
        return String(format: "%.1f MB", floatSize)
    }
    // GB
    floatSize = floatSize / 1000
    return String(format: "%.1f GB", floatSize)
}
Use Example
let sizeInString = fileSize(url: url)
print("FileSize = "+sizeInString!)
let selectedImage = info[UIImagePickerControllerOriginalImage] as! UIImage
let selectedImageData: NSData = NSData(data: UIImageJPEGRepresentation(selectedImage, 1)!)
let selectedImageSize: Int = selectedImageData.length
print("Image Size: %f KB", Double(selectedImageSize) / 1024.0)
let data = UIImageJPEGRepresentation(image, 1)
let imageSize = data?.count
Duplicate of How to get the size of a UIImage in KB?
let imageData = UIImageJPEGRepresentation(image, 1)
let imageSize = imageData?.count
UIImageJPEGRepresentation β€” returns the Data object for the specified image in JPEG format. The value 1.0 represents the least compression (close to original image).
imageData?.count β€” return data length (chars count equals bytes).
Important! UIImageJPEGRepresentation or UIImagePNGRepresentation will not return the original image. But if use given Data as source for uploading - than file size be the same as on the server (even using compression).
Swift 4.2
let jpegData = image.jpegData(compressionQuality: 1.0)
let jpegSize: Int = jpegData?.count ?? 0
print("size of jpeg image in KB: %f ", Double(jpegSize) / 1024.0)
Try this code (Swift 4.2)
extension URL {
    var attributes: [FileAttributeKey : Any]? {
        do {
            return try FileManager.default.attributesOfItem(atPath: path)
        } catch let error as NSError {
            print("FileAttribute error: \(error)")
        }
        return nil
    }

    var fileSize: UInt64 {
        return attributes?[.size] as? UInt64 ?? UInt64(0)
    }

    var fileSizeString: String {
        return ByteCountFormatter.string(fromByteCount: Int64(fileSize), countStyle: .file)
    }

    var creationDate: Date? {
        return attributes?[.creationDate] as? Date
    }
}
And use example
guard let aStrUrl = Bundle.main.path(forResource: "example_image", ofType: "jpg") else { return }
let aUrl = URL(fileURLWithPath: aStrUrl)
print("Img size = \((Double(aUrl.fileSize) / 1000.00).rounded()) KB")
//Swift 4
if let pickedImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
    /// check image size
    let imgData = NSData(data: UIImageJPEGRepresentation(pickedImage, 1)!)
    let imageSize: Int = imgData.length
    print("size of image in KB: %f ", Double(imageSize) / 1024.0)
    print("size of image in MB: %f ", Double(imageSize) / 1024.0 / 1024)
}
I make work around data units conversion :
Bytes -> KB -> MB -> GB -> ... -> Extremest Monster Data
enum dataUnits: CaseIterable {
    case B      // Byte
    case KB     // kilobyte
    case MB     // megabyte
    case GB     // gigabyte
    case TB     // terabyte
    case PB     // petabyte
    case EB     // exabyte
    case ZB     // zettabyte
    case YB     // yottabyte
    case BD     // Big Data
    case BBx    // Extra Big Bytes
    case BBxx   // 2 time Extra Big Bytes
    case BBxxx  // 3 time Extra Big Bytes
    case BBxxxx // 4 time Extra Big Bytes
    case MBB    // Monster Big Bytes
}

func convertStorageUnit(data n: Double, inputDataUnit unitLevel: Int, roundPoint: Int = 2, nG: Double = 1000.0) -> String {
    if n >= nG {
        return convertStorageUnit(data: n / 1024, inputDataUnit: unitLevel + 1)
    } else {
        let ut = unitLevel > dataUnits.allCases.count + 1 ? "Extreme Monster Data" : dataUnits.allCases.map { "\($0)" }[unitLevel]
        return "\(String(format: "%.\(roundPoint)f", n)) \(ut)"
    }
}

print(
    convertStorageUnit(data: 99922323343439789798789898989897987945454545920,
                       inputDataUnit: dataUnits.allCases.firstIndex(of: .B)!, roundPoint: 0)
)
output : 8.87 PB
Note: the input should fit within 64 bits; otherwise change the data type accordingly.
Try this
import Darwin
...
let size = malloc_size(&_attr)

Parse a string of 4 numbers into a CGRect [closed]

I have this string (325, 140, 739, 979). I want to parse it and take the four numbers separately.
This string represents a CGRect. I want to take the first number as x, the second as y, the third as width, and the last as height.
What is the best way to achieve this?
Given your string, you can fetch the numbers as an array of optional Int like this:
let str = "(325, 140, 739, 979)"
let nums = split(str) { contains("(), ", $0) }.map { $0.toInt() }
That split will remove any of the characters in the string passed to contains. You now have an array of optionals, which you can check for the correct contents:
let rect: CGRect
if nums.count == 4,
   let x = nums[0], y = nums[1],
   w = nums[2], h = nums[3]
{
    rect = CGRect(x: x, y: y, width: w, height: h)
}
else {
    // report an error, or default the values if you prefer
    fatalError("Malformed input string")
}
Any extraneous characters in your input string will result in nil for one of the integers, or the wrong count in the array, so this should be safe against any garbage input.
For convenience, you could put all this in a failable initializer for CGRect:
extension CGRect {
    init?(string: String) {
        // note, since CGRect also has a contains method, need to specify Swift.contains
        let nums = split(string) { Swift.contains("(), ", $0) }.map { $0.toInt() }
        if nums.count == 4,
           let x = nums[0], y = nums[1],
           w = nums[2], h = nums[3]
        {
            self = CGRect(x: x, y: y, width: w, height: h)
        }
        else {
            return nil
        }
    }
}
let rectangles = [
    "(325, -140, 739, 979)", // valid
    "(1,2,3,asdadaf)",       // invalid (non-integer)
    "1,2,3,4,",              // valid
    "(1,2,3,4,5)",           // invalid (wrong count)
]
// returns an array of 2 valid CGRect and 2 nil
let cgrects = rectangles.map { CGRect(string: $0) }
Obviously there’s lots you could tweak here if you wanted to be more or less permissive in terms of the kind of input you’re willing to convert.
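For reference, a modern equivalent of the same idea (a sketch assuming Swift 5; split and toInt() in the answer above are Swift 1.x APIs):
import CoreGraphics

extension CGRect {
    // Failable init that parses "(x, y, w, h)" by splitting on the separator characters.
    init?(string: String) {
        let nums = string
            .split(whereSeparator: { "(), ".contains($0) })
            .compactMap { Int($0) }
        guard nums.count == 4 else { return nil }
        self.init(x: nums[0], y: nums[1], width: nums[2], height: nums[3])
    }
}

// CGRect(string: "(325, 140, 739, 979)") == CGRect(x: 325, y: 140, width: 739, height: 979)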
Here is my solution for your case:
func parse(str: String) -> [Int] {
    var firstStepStr = str.stringByTrimmingCharactersInSet(NSCharacterSet(charactersInString: "()")) // remove ( and )
    var secondStepArray = firstStepStr.componentsSeparatedByCharactersInSet(NSCharacterSet(charactersInString: ", ")).filter { $0 != "" } // separated by , and ignore ""
    return secondStepArray.map { $0.toInt() ?? 0 } // convert to [Int]; if it cannot convert to Int, return 0
}
let cgrectString = "(325, 140, 739, 979)"
let intArray = parse(cgrectString)
First, we could get rid of the parentheses
let myString = "(325, 140, 739, 979)"
let myReplacementString = String(map(myString.generate()) {
    $0 == "(" || $0 == ")" ? "-" : $0
})
You could also take a substring, which doesn't seem to be any nicer.
Then we could split the string into array
var myArray = myReplacementString.componentsSeparatedByString(", ")
And then use a loop to cast string to int
for item in myArray {
    item.toInt()
}
or just use myArray[i].toInt() to give them straight to constructor etc.
This will get you an array of the four CGFloat values you are looking for:
func stringToCGFloatArray(string: String) -> [CGFloat] {
    return string.componentsSeparatedByCharactersInSet(
        NSCharacterSet(charactersInString: "(), ")).reduce([CGFloat]()) {
            if let x = $1.toInt() {
                return $0 + [CGFloat(x)]
            } else {
                return $0
            }
    }
}
You can then convert it to an optional tuple that can be provided directly to CGRectMake:
func arrayToRectParameters(array: [CGFloat]) -> (CGFloat, CGFloat, CGFloat, CGFloat)? {
    switch array.count {
    case 4:
        return (array[0], array[1], array[2], array[3])
    default:
        return nil
    }
}
Now you can create a CGRect like this:
let str = "(325, 140, 739, 979)"
if let rectParameters = arrayToRectParameters(stringToCGFloatArray(str)) {
    let myCGRect = CGRectMake(rectParameters)
    println(myCGRect) // prints (325.0,140.0,739.0,979.0)
}
