Resize SDWebImage images - Swift / iOS

Hello, I am displaying images in my app using SDWebImage. I have this code to resize an image:
func resizeImage(image: UIImage, newWidth: CGFloat) -> UIImage {
let scale = newWidth / image.size.width
let newHeight = image.size.height * scale
UIGraphicsBeginImageContext(CGSizeMake(newWidth, newHeight))
image.drawInRect(CGRectMake(0, 0, newWidth, newHeight))
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage
}
The problem is that the above function accepts a UIImage as a parameter, while SDWebImage accepts a URL. How can I call the resize function when using SDWebImage? In short, how can I resize the images that I am presenting through SDWebImage here:
cell.profileImageView.sd_setImageWithURL(UIImage().absoluteURL(profileImageUrl as! String), placeholderImage: UIImage.init(named: "default-profile-icon")?.circle!, completed: completionBlock)

Do it like this:
cell.profileImageView.sd_setImageWithURL(
NSURL(string: profileImageUrl as! String),
placeholderImage: UIImage.init(named: "default-profile-icon"),
options: [],
progress: nil,
completed: { (image: UIImage?, error: NSError?, cacheType: SDImageCacheType!, imageURL: NSURL?) in
guard let image = image else { return }
print("Image arrived!")
cell.profileImageView.image = resizeImage(image, newWidth: 200)
}
)
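For reference, on current Swift and SDWebImage (5.x) the same approach looks roughly like this; treat it as a sketch and check the exact signatures against your SDWebImage version:
cell.profileImageView.sd_setImage(
    with: URL(string: profileImageUrl as! String),
    placeholderImage: UIImage(named: "default-profile-icon")
) { image, error, cacheType, imageURL in
    guard let image = image else { return }
    // Assumes the resizeImage function from the question, updated to current Swift syntax.
    cell.profileImageView.image = resizeImage(image: image, newWidth: 200)
}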

SDWebImage supports transforming downloaded images directly through its SDWebImageManagerDelegate protocol. You can use the imageManager:transformDownloadedImage:withURL: method to transform the downloaded image before it is cached.
You can set the image manager delegate like this:
SDWebImageManager.sharedManager.delegate = self;

Using the shared SDWebImageManager is not a good idea, because other parts of the app may be downloading images through it at the same time.
Here is my Swift 3 example with a custom SDWebImageManager in the class and a custom SDWebImageManagerDelegate as the resizer.
import SDWebImage
class ImageResizer: NSObject, SDWebImageManagerDelegate {
private func resizeImage(_ image: UIImage, newHeight: CGFloat) -> UIImage {
let scale = newHeight / image.size.height
let newWidth = image.size.width * scale
UIGraphicsBeginImageContextWithOptions(CGSize(width: newWidth, height: newHeight), false, 0)
image.draw(in: CGRect(x: 0, y: 0, width: newWidth, height: newHeight))
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage!
}
public func imageManager(_ imageManager: SDWebImageManager, transformDownloadedImage image: UIImage?, with imageURL: URL?) -> UIImage? {
guard let _image = image else {
return nil
}
return resizeImage(_image, newHeight: 20)
}
}
class BasicTrainView: XibView {
static let imageManager: SDWebImageManager = SDWebImageManager()
static let imageResizer = ImageResizer()
func xxx() {
BasicTrainView.imageManager.delegate = BasicTrainView.imageResizer
BasicTrainView.imageManager.loadImage(with: logoURL, options: [], progress: nil) { (image, _, error, sdImageCacheType, _, url) -> Void in
guard let _image = image else {
self.carrierLogoImageView.image = nil
return
}
self.carrierLogoImageView.image = _image
}
}
}

SDWebImage has this functionality built in through its image transformers (available in SDWebImage 5.x). This is how to use it:
let imageSize = cell.fanartImageView.bounds.size * UIScreen.main.scale
let transformer = SDImageResizingTransformer(size: imageSize, scaleMode: .fill)
cell.fanartImageView.sd_setImage(with: url, placeholderImage: image,
options: SDWebImageOptions(rawValue: 0),
context: [.imageTransformer: transformer],
progress: nil) { (image, error, cache, url) in
if error != nil {
// handle error
}
}
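Note that multiplying a CGSize by a CGFloat is not defined out of the box; the snippet above assumes a small helper operator along these lines (the same operator also appears in a later answer below):
extension CGSize {
    // Scales both dimensions, e.g. to convert a view size in points to pixels.
    static func * (lhs: CGSize, rhs: CGFloat) -> CGSize {
        return CGSize(width: lhs.width * rhs, height: lhs.height * rhs)
    }
}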

Related

How to get a thumbnail and the original image from UIImagePickerController?

After capturing a photo from the camera, I was compressing the image (to 400 KB and 1 MB targets), which took almost 3 seconds on an iPhone 6 and less than a second on an iPhone 6s.
Is there any way to get a thumbnail and the original image without doing manual compression?
Here is the code used for image compression.
Extension for UIImage
extension UIImage {
// MARK: - UIImage+Resize
func compressTo(_ expectedSizeInMb:Int) -> Data? {
let sizeInBytes = expectedSizeInMb * 1024 * 1024
var needCompress:Bool = true
var imgData:Data?
var compressingValue:CGFloat = 1.0
while (needCompress && compressingValue > 0.0) {
if let data:Data = jpegData(compressionQuality: compressingValue) {
if data.count < sizeInBytes {
needCompress = false
imgData = data
} else {
compressingValue -= 0.1
}
}
}
if let data = imgData {
if (data.count < sizeInBytes) {
return data
}
}
return nil
}
}
usage:
if let imageData = image.compressTo(1) {
print(imageData)
}
For images saved in the Photos library, try:
let phAsset = info[UIImagePickerController.InfoKey.phAsset] as! PHAsset
let options = PHImageRequestOptions()
options.deliveryMode = .fastFormat
options.isSynchronous = false
// replace PHImageManagerMaximumSize below with a smaller CGSize(width:height:) if you want an actual thumbnail
PHImageManager.default().requestImage(for: phAsset, targetSize: PHImageManagerMaximumSize, contentMode: .default, options: options, resultHandler: { image , _ in
let thumbnail = image
// use your thumbnail
})
For images captured from the camera, you can check the pixel dimensions without recalculating the data size:
let image = info[UIImagePickerController.InfoKey.originalImage] as! UIImage
// pixels are the same on each device’s camera
let widthPixels = image.size.width * image.scale
let heightPixels = image.size.height * image.scale
let sizeInBytes = 1024 * 1024
var thumbnail : UIImage! = nil
if Int(widthPixels * heightPixels) > sizeInBytes {
// assign custom width and height you need
let rect = CGRect(x: 0.0, y: 0.0, width: 100, height: 100)
UIGraphicsBeginImageContextWithOptions(rect.size, false, 1)
let context = UIGraphicsGetCurrentContext()
context?.interpolationQuality = .low
image.draw(in: rect)
let resizedImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
thumbnail = resizedImage
} else {
thumbnail = image
}
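If the goal is just a fast thumbnail, ImageIO can downsample directly from the encoded data instead of redrawing the full-size UIImage; a sketch (the 200-pixel limit is only an example):
import ImageIO
import UIKit

func makeThumbnail(from data: Data, maxPixelSize: CGFloat) -> UIImage? {
    guard let source = CGImageSourceCreateWithData(data as CFData, nil) else { return nil }
    let options: [CFString: Any] = [
        kCGImageSourceCreateThumbnailFromImageAlways: true,
        kCGImageSourceCreateThumbnailWithTransform: true, // respect EXIF orientation
        kCGImageSourceThumbnailMaxPixelSize: maxPixelSize
    ]
    guard let cgImage = CGImageSourceCreateThumbnailAtIndex(source, 0, options as CFDictionary) else { return nil }
    return UIImage(cgImage: cgImage)
}

// e.g. let thumb = image.jpegData(compressionQuality: 1).flatMap { makeThumbnail(from: $0, maxPixelSize: 200) }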

Is there a way to request multiple distinct resources in parallel using URLSession.shared.dataTask

I found this piece of code here, which shows how to download images simultaneously without anything breaking:
func loadImageRobsAnswer(with urlString: String?) {
// cancel prior task, if any
weak var oldTask = currentTask
currentTask = nil
oldTask?.cancel()
// reset imageview's image
self.image = nil
// allow supplying of `nil` to remove old image and then return immediately
guard let urlString = urlString else { return }
// check cache
if let cachedImage = DataCache.shared.object(forKey: urlString) {
self.transition(toImage: cachedImage as? UIImage)
//self.image = cachedImage
return
}
// download
let url = URL(string: urlString)!
currentURL = url
let task = URLSession.shared.dataTask(with: url) { [weak self] data, response, error in
self?.currentTask = nil
if let error = error {
if (error as NSError).domain == NSURLErrorDomain && (error as NSError).code == NSURLErrorCancelled {
return
}
print(error)
return
}
guard let data = data, let downloadedImage = UIImage(data: data) else {
print("unable to extract image")
return
}
DataCache.shared.saveObject(object: downloadedImage, forKey: urlString)
if url == self?.currentURL {
DispatchQueue.main.async {
self?.transition(toImage: downloadedImage)
}
}
}
// save and start new task
currentTask = task
task.resume()
}
However, this code is used in a UIImageView extension:
public extension UIImageView {
private static var taskKey = 0
private static var urlKey = 0
private var currentTask: URLSessionTask? {
get { return objc_getAssociatedObject(self, &UIImageView.taskKey) as? URLSessionTask }
set { objc_setAssociatedObject(self, &UIImageView.taskKey, newValue, .OBJC_ASSOCIATION_RETAIN_NONATOMIC) }
}
private var currentURL: URL? {
get { return objc_getAssociatedObject(self, &UIImageView.urlKey) as? URL }
set { objc_setAssociatedObject(self, &UIImageView.urlKey, newValue, .OBJC_ASSOCIATION_RETAIN_NONATOMIC) }
}
}
This is how I have tried to make this code generic, so it won't be limited to a UIImageView but can be used to download multiple resources:
class DataRequest {
private static var taskKey = 0
private static var urlKey = 0
static let shared = DataRequest()
typealias ImageDataCompletion = (_ image: UIImage?, _ error: Error? ) -> Void
private var currentTask: URLSessionTask? {
get { return objc_getAssociatedObject(self, &DataRequest.taskKey) as? URLSessionTask }
set { objc_setAssociatedObject(self, &DataRequest.taskKey, newValue, .OBJC_ASSOCIATION_RETAIN_NONATOMIC) }
}
private var currentURL: URL? {
get { return objc_getAssociatedObject(self, &DataRequest.urlKey) as? URL }
set { objc_setAssociatedObject(self, &DataRequest.urlKey, newValue, .OBJC_ASSOCIATION_RETAIN_NONATOMIC) }
}
func downloadImage(with urlString: String?, completion: @escaping ImageDataCompletion) {
weak var oldTask = currentTask
currentTask = nil
oldTask?.cancel()
guard let urlString = urlString else { return }
if let cachedImage = DataCache.shared.object(forKey: urlString) {
DispatchQueue.main.async {
completion(cachedImage as? UIImage ,nil)
}
// self.transition(toImage: cachedImage as? UIImage)
//self.image = cachedImage
return
}
// download
let url = URL(string: urlString)!
currentURL = url
let task = URLSession.shared.dataTask(with: url) { [weak self] data, response, error in
self?.currentTask = nil
if let error = error {
if (error as NSError).domain == NSURLErrorDomain && (error as NSError).code == NSURLErrorCancelled {
return
}
completion(nil,nil)
return
}
guard let data = data, let downloadedImage = UIImage(data: data) else {
print("unable to extract image")
return
}
DataCache.shared.saveObject(object: downloadedImage, forKey: urlString)
if url == self?.currentURL {
DispatchQueue.main.async {
completion(downloadedImage ,nil)
}
}
}
// save and start new task
currentTask = task
task.resume()
}
}
So that I can now use it in a UIImageView extension like this:
extension UIImageView {
func setImage(url: String?) {
self.image = nil
DataRequest.shared.downloadImage(with: url) { (image, error) in
DispatchQueue.main.async {
self.image = image
}
}
}
}
To conclude: using my approach with a UICollectionView displays the wrong images in cells and duplicates them. How do I prevent this?
You ask:
Is there a way to request multiple distinct resources in parallel using URLSession.shared.dataTask
By default, it does perform requests in parallel.
Let’s step back for a second: In your prior question, you were asking how to implement a Kingfisher-like UIImageView extension. In my answer, I mentioned using objc_getAssociatedObject and objc_setAssociatedObject to achieve that. But in your question here, you’ve taken that associated object logic and put it in your DataRequest object.
Your thought process of pulling the asynchronous image-retrieval logic out of the UIImageView is a good one: you may want to request images for buttons, or you might want a general “fetch image asynchronously” routine, completely separate from any UIKit objects. So abstracting the network-layer code out of the extension is an excellent idea.
But the whole idea behind asynchronous image-retrieval extensions on UIImageView/UIButton is that we want a UIKit control that can not only perform asynchronous requests, but also cancel the prior request (if any) for a reused cell before starting the next one. That way, if we scroll quickly down to images 80 through 99, the requests for cells 0 through 79 are canceled, and the visible images don't get backlogged behind all those old image requests.
But to achieve that, that means that the control needs some way to keep track of the prior request for that reused cell somehow. And because we can’t add stored properties in a UIImageView extension, that’s why we use the objc_getAssociatedObject and objc_setAssociatedObject pattern. But that has to be in the image view.
Unfortunately, in your code above, the associated object is in your DataRequest object. First, as I’ve tried to outline, the whole idea is that the image view must keep track of the prior request for that control. Putting this “keep track of the prior request” inside the DataRequest object defeats that purpose. Second, it’s worth noting that you don’t need associated objects in your own types, like DataRequest. You’d just have a stored property. You only need to go through this associated object silliness when extending another type, such as UIImageView.
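For illustration, inside a type you own that is simply a plain stored property (no associated-object machinery); a sketch only:
import Foundation

class DataRequest {
    // A plain stored property; objc_setAssociatedObject is only needed when extending types you don't own.
    private var currentTask: URLSessionTask?
}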
Below is a quick example that I whipped together showing a UIImageView extension for asynchronous image retrieval. It doesn't abstract the network code out of the extension, but note that the associated-object logic that keeps track of the prior request must remain in the extension.
private var taskKey: Void?
extension UIImageView {
private static let imageProcessingQueue = DispatchQueue(label: Bundle.main.bundleIdentifier! + ".imageprocessing", attributes: .concurrent)
private var savedTask: URLSessionTask? {
get { return objc_getAssociatedObject(self, &taskKey) as? URLSessionTask }
set { objc_setAssociatedObject(self, &taskKey, newValue, .OBJC_ASSOCIATION_RETAIN) }
}
/// Set image asynchronously.
///
/// - Parameters:
/// - url: `URL` for image resource.
/// - placeholder: `UIImage` of placeholder image. If not supplied, `image` will be set to `nil` while request is underway.
/// - shouldResize: Whether the image should be scaled to the size of the image view. Defaults to `true`.
func setImage(_ url: URL, placeholder: UIImage? = nil, shouldResize: Bool = true) {
savedTask?.cancel()
savedTask = nil
image = placeholder
if let image = ImageCache.shared[url] {
DispatchQueue.main.async {
UIView.transition(with: self, duration: 0.1, options: .transitionCrossDissolve, animations: {
self.image = image
}, completion: nil)
}
return
}
var task: URLSessionTask!
let size = bounds.size * UIScreen.main.scale
task = URLSession.shared.dataTask(with: url) { [weak self] data, response, error in
guard
error == nil,
let httpResponse = response as? HTTPURLResponse,
(200..<300) ~= httpResponse.statusCode,
let data = data
else {
return
}
UIImageView.imageProcessingQueue.async { [weak self] in
var image = UIImage(data: data)
if shouldResize {
image = image?.scaledAspectFit(to: size)
}
ImageCache.shared[url] = image
DispatchQueue.main.async {
guard
let self = self,
let savedTask = self.savedTask,
savedTask.taskIdentifier == task.taskIdentifier
else {
return
}
self.savedTask = nil
UIView.transition(with: self, duration: 0.1, options: .transitionCrossDissolve, animations: {
self.image = image
}, completion: nil)
}
}
}
task.resume()
savedTask = task
}
}
class ImageCache {
static let shared = ImageCache()
private let cache = NSCache<NSURL, UIImage>()
private var observer: NSObjectProtocol?
init() {
observer = NotificationCenter.default.addObserver(forName: UIApplication.didReceiveMemoryWarningNotification, object: nil, queue: nil) { [weak self] _ in
self?.cache.removeAllObjects()
}
}
deinit {
NotificationCenter.default.removeObserver(observer!)
}
subscript(url: URL) -> UIImage? {
get {
return cache.object(forKey: url as NSURL)
}
set {
if let data = newValue {
cache.setObject(data, forKey: url as NSURL)
} else {
cache.removeObject(forKey: url as NSURL)
}
}
}
}
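For completeness, a cell would then use the extension like this (a sketch; PhotoCell and photoURLs are assumed names, not part of the answer above):
func collectionView(_ collectionView: UICollectionView, cellForItemAt indexPath: IndexPath) -> UICollectionViewCell {
    let cell = collectionView.dequeueReusableCell(withReuseIdentifier: "PhotoCell", for: indexPath) as! PhotoCell
    // Because setImage(_:placeholder:) cancels the prior task for this reused image view,
    // a stale download can no longer overwrite the image of a recycled cell.
    cell.imageView.setImage(photoURLs[indexPath.item], placeholder: UIImage(named: "placeholder"))
    return cell
}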
And this is my resizing routine:
extension UIImage {
/// Resize the image to be the required size, stretching it as needed.
///
/// - parameter newSize: The new size of the image.
/// - parameter contentMode: The `UIView.ContentMode` to be applied when resizing image.
/// Either `.scaleToFill`, `.scaleAspectFill`, or `.scaleAspectFit`.
///
/// - returns: Return `UIImage` of resized image.
func scaled(to newSize: CGSize, contentMode: UIView.ContentMode = .scaleToFill) -> UIImage? {
switch contentMode {
case .scaleToFill:
return filled(to: newSize)
case .scaleAspectFill, .scaleAspectFit:
let horizontalRatio = size.width / newSize.width
let verticalRatio = size.height / newSize.height
let ratio: CGFloat!
if contentMode == .scaleAspectFill {
ratio = min(horizontalRatio, verticalRatio)
} else {
ratio = max(horizontalRatio, verticalRatio)
}
let sizeForAspectScale = CGSize(width: size.width / ratio, height: size.height / ratio)
let image = filled(to: sizeForAspectScale)
let doesAspectFitNeedCropping = contentMode == .scaleAspectFit && (newSize.width > sizeForAspectScale.width || newSize.height > sizeForAspectScale.height)
if contentMode == .scaleAspectFill || doesAspectFitNeedCropping {
let subRect = CGRect(
x: floor((sizeForAspectScale.width - newSize.width) / 2.0),
y: floor((sizeForAspectScale.height - newSize.height) / 2.0),
width: newSize.width,
height: newSize.height)
return image?.cropped(to: subRect)
}
return image
default:
return nil
}
}
/// Resize the image to be the required size, stretching it as needed.
///
/// - parameter newSize: The new size of the image.
///
/// - returns: The resized `UIImage`.
func filled(to newSize: CGSize) -> UIImage? {
let format = UIGraphicsImageRendererFormat()
format.opaque = false
format.scale = scale
return UIGraphicsImageRenderer(size: newSize, format: format).image { _ in
draw(in: CGRect(origin: .zero, size: newSize))
}
}
/// Crop the image to be the required size.
///
/// - parameter bounds: The bounds to which the new image should be cropped.
///
/// - returns: Cropped `UIImage`.
func cropped(to bounds: CGRect) -> UIImage? {
// if bounds is entirely within image, do simple CGImage `cropping` ...
if CGRect(origin: .zero, size: size).contains(bounds) {
return cgImage?.cropping(to: bounds * scale).flatMap {
UIImage(cgImage: $0, scale: scale, orientation: imageOrientation)
}
}
// ... otherwise, manually render whole image, only drawing what we need
let format = UIGraphicsImageRendererFormat()
format.opaque = false
format.scale = scale
return UIGraphicsImageRenderer(size: bounds.size, format: format).image { _ in
let origin = CGPoint(x: -bounds.minX, y: -bounds.minY)
draw(in: CGRect(origin: origin, size: size))
}
}
/// Resize the image to fill the rectangle of the specified size, preserving the aspect ratio, trimming if needed.
///
/// - parameter newSize: The new size of the image.
///
/// - returns: Return `UIImage` of resized image.
func scaledAspectFill(to newSize: CGSize) -> UIImage? {
return scaled(to: newSize, contentMode: .scaleAspectFill)
}
/// Resize the image to fit within the required size, preserving the aspect ratio, with no trimming taking place.
///
/// - parameter newSize: The new size of the image.
///
/// - returns: Return `UIImage` of resized image.
func scaledAspectFit(to newSize: CGSize) -> UIImage? {
return scaled(to: newSize, contentMode: .scaleAspectFit)
}
/// Create smaller image from `Data`
///
/// - Parameters:
/// - data: The image `Data`.
/// - maxSize: The maximum edge size.
/// - scale: The scale of the image (defaults to device scale if 0 or omitted).
/// - Returns: The scaled `UIImage`.
class func thumbnail(from data: Data, maxSize: CGFloat, scale: CGFloat = 0) -> UIImage? {
guard let imageSource = CGImageSourceCreateWithData(data as CFData, nil) else {
return nil
}
return thumbnail(from: imageSource, maxSize: maxSize, scale: scale)
}
/// Create smaller image from `URL`
///
/// - Parameters:
/// - fileURL: The image file URL.
/// - maxSize: The maximum edge size.
/// - scale: The scale of the image (defaults to device scale if 0 or omitted).
/// - Returns: The scaled `UIImage`.
class func thumbnail(from fileURL: URL, maxSize: CGFloat, scale: CGFloat = 0) -> UIImage? {
guard let imageSource = CGImageSourceCreateWithURL(fileURL as CFURL, nil) else {
return nil
}
return thumbnail(from: imageSource, maxSize: maxSize, scale: scale)
}
private class func thumbnail(from imageSource: CGImageSource, maxSize: CGFloat, scale: CGFloat) -> UIImage? {
let scale = scale == 0 ? UIScreen.main.scale : scale
let options: [NSString: Any] = [
kCGImageSourceThumbnailMaxPixelSize: maxSize * scale,
kCGImageSourceCreateThumbnailFromImageAlways: true
]
if let scaledImage = CGImageSourceCreateThumbnailAtIndex(imageSource, 0, options as CFDictionary) {
return UIImage(cgImage: scaledImage, scale: scale, orientation: .up)
}
return nil
}
}
extension CGSize {
static func * (lhs: CGSize, rhs: CGFloat) -> CGSize {
return CGSize(width: lhs.width * rhs, height: lhs.height * rhs)
}
}
extension CGPoint {
static func * (lhs: CGPoint, rhs: CGFloat) -> CGPoint {
return CGPoint(x: lhs.x * rhs, y: lhs.y * rhs)
}
}
extension CGRect {
static func * (lhs: CGRect, rhs: CGFloat) -> CGRect {
return CGRect(origin: lhs.origin * rhs, size: lhs.size * rhs)
}
}
That having been said, we really should constrain our concurrent requests to something reasonable (4-6 at a time), so that new requests don't start until the prior ones are done (or are canceled), to avoid timeouts. The typical solution is to wrap the requests in asynchronous Operation subclasses, add them to an operation queue, and constrain its maxConcurrentOperationCount to whatever value you choose, as sketched below.
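A minimal sketch of that pattern (ImageDownloadOperation and the 4-at-a-time limit are illustrative only; a production version would also need thread-safe state):
import Foundation

// A shared queue for image downloads, limited to a handful of concurrent requests.
let imageDownloadQueue: OperationQueue = {
    let queue = OperationQueue()
    queue.maxConcurrentOperationCount = 4
    return queue
}()

/// A bare-bones asynchronous operation that stays "executing" until its data task completes.
final class ImageDownloadOperation: Operation {
    private let url: URL
    private let completion: (Data?, Error?) -> Void
    private var task: URLSessionTask?

    private var _executing = false
    private var _finished = false
    override var isAsynchronous: Bool { return true }
    override var isExecuting: Bool { return _executing }
    override var isFinished: Bool { return _finished }

    init(url: URL, completion: @escaping (Data?, Error?) -> Void) {
        self.url = url
        self.completion = completion
        super.init()
    }

    override func start() {
        if isCancelled { finish(); return }
        willChangeValue(forKey: "isExecuting")
        _executing = true
        didChangeValue(forKey: "isExecuting")
        task = URLSession.shared.dataTask(with: url) { [weak self] data, _, error in
            self?.completion(data, error)
            self?.finish()
        }
        task?.resume()
    }

    override func cancel() {
        super.cancel()
        task?.cancel()
    }

    private func finish() {
        willChangeValue(forKey: "isExecuting")
        willChangeValue(forKey: "isFinished")
        _executing = false
        _finished = true
        didChangeValue(forKey: "isExecuting")
        didChangeValue(forKey: "isFinished")
    }
}

// Usage (hypothetical): imageDownloadQueue.addOperation(ImageDownloadOperation(url: someURL) { data, error in /* decode + cache */ })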

How do I create a croppable banner image in uiimagepickercontroller?

When I present a UIImagePickerController, I want the user to be able to crop to a 7.8:1 ratio for the banner image they want to import from their photo library.
This question is similar to many questions relating to creating custom crop rects built to work with UIImagePickerController but all the answers point to outdated libraries or libraries with too much complexity. I want something simple.
Try this:
open func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
var newImage: UIImage?
if picker.sourceType == .camera {
if let possibleImage = info["UIImagePickerControllerOriginalImage"] as? UIImage {
// save photo
}
}
if UI_USER_INTERFACE_IDIOM() == UIUserInterfaceIdiom.pad && picker.sourceType == .photoLibrary {
if let possibleImage = info["UIImagePickerControllerOriginalImage"] as? UIImage {
newImage = possibleImage
}
} else {
if let possibleImage = info["UIImagePickerControllerEditedImage"] as? UIImage {
newImage = possibleImage
} else if let possibleImage = info["UIImagePickerControllerOriginalImage"] as? UIImage {
newImage = possibleImage
}
}
guard var newImage = newImage else { return }
// logic to manipulate image
print("image size: \(newImage.size)")
let newHeight: CGFloat = 100 // change to your preferred banner height
let ratio: CGFloat = 7.8 // banner aspect ratio (width : height)
let newWidth = newHeight * ratio // keep the 7.8:1 proportions
UIGraphicsBeginImageContext(CGSize(width: newWidth, height: newHeight))
newImage.draw(in: CGRect(x: 0, y: 0, width: newWidth, height: newHeight)) // note: this stretches rather than crops
newImage = UIGraphicsGetImageFromCurrentImageContext() ?? newImage
UIGraphicsEndImageContext()
print("image size: \(newImage.size)")
// add logic here
}
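One thing to keep in mind: the "UIImagePickerControllerEditedImage" key is only delivered when editing is enabled on the picker, and the built-in editor only offers a square crop, which is why the code above re-renders to the 7.8:1 size afterwards. A minimal picker setup sketch:
let picker = UIImagePickerController()
picker.sourceType = .photoLibrary
picker.allowsEditing = true   // required for "UIImagePickerControllerEditedImage" to be present
picker.delegate = self        // self: UIImagePickerControllerDelegate & UINavigationControllerDelegate
present(picker, animated: true, completion: nil)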

Resize UIImage before uploading to Firebase storage in swift 3

I have set up my application so that when I press the button "cambiaimmagineutente" a picker controller appears and I can choose the image which I then upload to FIRStorage using the "UIImagePickerControllerReferenceURL". I cannot find a way to resize the image before uploading it to save space and to place it in a smaller image view.
Here is the code:
@IBAction func cambiaImmagineUtente(_ sender: UIButton) {
imagePicker.allowsEditing = false
imagePicker.sourceType = .photoLibrary
present(imagePicker, animated: true, completion:nil)
}
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
picker.dismiss(animated: true, completion:nil)
// if it's a photo from the library, not an image from the camera
if #available(iOS 8.0, *), let referenceUrl = info[UIImagePickerControllerReferenceURL] as? URL {
let assets = PHAsset.fetchAssets(withALAssetURLs: [referenceUrl], options: nil)
let asset = assets.firstObject
asset?.requestContentEditingInput(with: nil, completionHandler: { (contentEditingInput, info) in
let imageFile = contentEditingInput?.fullSizeImageURL
let filePath = FIRAuth.auth()!.currentUser!.uid +
"/\(Int(Date.timeIntervalSinceReferenceDate * 1000))/\(imageFile!.lastPathComponent)"
// [START uploadimage]
self.storageRef.child(filePath)
.putFile(imageFile!, metadata: nil) { (metadata, error) in
if let error = error {
//an error occured
print("Error uploading: \(error)")
return
}
self.uploadSuccess(metadata!, storagePath: filePath)
}
// [END uploadimage]
})
} else {
guard let image = info[UIImagePickerControllerOriginalImage] as? UIImage else { return }
guard let imageData = UIImageJPEGRepresentation(image, 0.8) else { return }
let imagePath = FIRAuth.auth()!.currentUser!.uid +
"/\(Int(Date.timeIntervalSinceReferenceDate * 1000)).jpg"
let metadata = FIRStorageMetadata()
metadata.contentType = "image/jpeg"
self.storageRef.child(imagePath)
.put(imageData, metadata: metadata) { (metadata, error) in
if let error = error {
//an error occured
print("Error uploading: \(error)")
return
}
self.uploadSuccess(metadata!, storagePath: imagePath)
}
}
}
func uploadSuccess(_ metadata: FIRStorageMetadata, storagePath: String) {
print("Upload Succeeded!")
//self.urlTextView.text = metadata.downloadURL()?.absoluteString
UserDefaults.standard.set(storagePath, forKey: "storagePath")
UserDefaults.standard.synchronize()
//self.downloadPicButton.isEnabled = true
}
func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
picker.dismiss(animated: true, completion:nil)
}
You can use this:
func resizeImage(image: UIImage, targetSize: CGSize) -> UIImage {
let size = image.size
let widthRatio = targetSize.width / image.size.width
let heightRatio = targetSize.height / image.size.height
var newSize: CGSize
if(widthRatio > heightRatio) {
newSize = CGSize(width: size.width * heightRatio, height: size.height * heightRatio)
} else {
newSize = CGSize(width: size.width * widthRatio, height: size.height * widthRatio)
}
let rect = CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height)
UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
image.draw(in: rect)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage!
}
Use:
let resizedImage = resizeImage(image: selectedImage, targetSize: CGSize.init(width: 300, height: 300))
Also make sure you add a write rule in your Firebase Storage security rules that caps uploads at a maximum size! A complementary client-side check is sketched below.
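On the client side you can also refuse to upload anything above your limit before calling put; a sketch reusing the names from the question's code (resizedImage comes from the resize call above, imagePath and storageRef as in the question, and the 1 MB cap is arbitrary):
let maxUploadBytes = 1 * 1024 * 1024
guard let imageData = UIImageJPEGRepresentation(resizedImage, 0.8), imageData.count <= maxUploadBytes else {
    print("Image is still too large to upload")
    return
}
let metadata = FIRStorageMetadata()
metadata.contentType = "image/jpeg"
self.storageRef.child(imagePath)
    .put(imageData, metadata: metadata) { metadata, error in
        if let error = error {
            print("Error uploading: \(error)")
            return
        }
        self.uploadSuccess(metadata!, storagePath: imagePath)
    }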

How to Circle the image

Hello, I am using SDWebImage in my app. This is my code to make an image circular:
extension UIImage {
var circle: UIImage? {
let square = CGSize(width: min(size.width, size.height), height: min(size.width, size.height))
let imageView = UIImageView(frame: CGRect(origin: CGPoint(x: 0, y: 0), size: square))
imageView.contentMode = .ScaleAspectFill
imageView.image = self
imageView.layer.cornerRadius = square.width/2
imageView.layer.masksToBounds = true
UIGraphicsBeginImageContext(imageView.bounds.size)
guard let context = UIGraphicsGetCurrentContext() else { return nil }
imageView.layer.renderInContext(context)
let result = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return result
}
}
I copied it from here.
I used to make images circular like this:
let profilePicture = UIImage(data: NSData(contentsOfURL: NSURL(string:"http://i.stack.imgur.com/Xs4RX.jpg")!)!)!
profilePicture.circle
But now that I am using SDWebImage, it is not working:
cell.profileImageView.sd_setImageWithURL(UIImage().absoluteURL(profileImageUrl), placeholderImage: UIImage.init(named: "default-profile-icon")?.circle!)
Please let me know how I can make this extension work with SDWebImage.
You can use the SDWebImageManager to download the image or take it from the cache and apply the circle in the completion block like this:
SDWebImageManager.sharedManager().downloadWithURL(NSURL(string:"img"), options: [], progress: nil) { (image:UIImage!, error:NSError!, cacheType:SDImageCacheType, finished:Bool) -> Void in
if (image != nil){
let circleImage = image.circle
cell.profileImageView.image = circleImage
}
}
Or you can use the version of the sd_setImageWithURL method that takes a completion block as a parameter
let completionBlock: SDWebImageCompletionBlock! = {(image: UIImage!, error: NSError!, cacheType: SDImageCacheType!, imageURL: NSURL!) -> Void in
if (image != nil){
let circleImage = image.circle
cell.profileImageView.image = circleImage
}
}
cell.profileImageView.sd_setImageWithURL(UIImage().absoluteURL(profileImageUrl), placeholderImage: UIImage.init(named: "default-profile-icon")?.circle!, completed: completionBlock)
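For what it's worth, newer SDWebImage versions (5.x) ship a round-corner transformer, so a custom circle extension isn't strictly needed; a sketch (check the names against your SDWebImage version):
let transformer = SDImageRoundCornerTransformer(
    radius: .greatestFiniteMagnitude, // a very large radius produces a full circle
    corners: .allCorners,
    borderWidth: 0,
    borderColor: nil
)
cell.profileImageView.sd_setImage(
    with: URL(string: profileImageUrl as! String),
    placeholderImage: UIImage(named: "default-profile-icon"),
    options: [],
    context: [.imageTransformer: transformer],
    progress: nil,
    completed: nil
)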
