UIImageJPEGRepresentation doubles image resolution - iOS

I am trying to save an image coming from the iPhone camera to a file. I use the following code:
try UIImageJPEGRepresentation(toWrite, 0.8)?.write(to: tempURL, options: NSData.WritingOptions.atomicWrite)
This results in a file with double the resolution of the toWrite UIImage. I confirmed in the watch expressions that creating a new UIImage from the UIImageJPEGRepresentation data doubles its resolution:
-> toWrite.size CGSize (width = 3264, height = 2448)
-> UIImage(data: UIImageJPEGRepresentation(toWrite, 0.8)).size CGSize? (width = 6528, height = 4896)
Any idea why this would happen, and how to avoid it?
Thanks

Your initial image has a scale factor of 2, but when you init an image from data you get an image with a scale factor of 1. The way to solve it is to control the scale yourself and init the image with the scale parameter:
@available(iOS 6.0, *)
public init?(data: Data, scale: CGFloat)
Playground code that demonstrates how you can set the scale:
extension UIImage {
    class func with(color: UIColor, size: CGSize) -> UIImage? {
        let rect = CGRect(origin: .zero, size: size)
        UIGraphicsBeginImageContextWithOptions(size, true, 2.0)
        guard let context = UIGraphicsGetCurrentContext() else { return nil }
        context.setFillColor(color.cgColor)
        context.fill(rect)
        let image = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return image
    }
}
let image = UIImage.with(color: UIColor.orange, size: CGSize(width: 100, height: 100))
if let image = image {
    let scale = image.scale
    if let data = UIImageJPEGRepresentation(image, 0.8) {
        if let newImage = UIImage(data: data, scale: scale) {
            debugPrint(newImage.size)
        }
    }
}
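Applied to the original question, the fix would look something like this (a minimal sketch using the names toWrite and tempURL from the question):
try UIImageJPEGRepresentation(toWrite, 0.8)?.write(to: tempURL, options: NSData.WritingOptions.atomicWrite)

// When reading the file back, pass the original image's scale so the
// point size matches. Without it, UIImage assumes scale = 1 and reports
// double the size for a @2x image.
if let data = try? Data(contentsOf: tempURL),
    let restored = UIImage(data: data, scale: toWrite.scale) {
    debugPrint(restored.size) // now matches toWrite.size
}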

Related

Cropping AVCapturePhoto to overlay rectangle displayed on screen

I am trying to take a picture of a thin piece of metal, cropped to the outline displayed on the screen. I have seen almost every other post on here, but nothing has got it for me yet. This image will then be used for analysis by a library. I can get some cropping to happen, but never to the rectangle displayed. I have tried rotating the image before cropping, and calculating the rect based on the rectangle on screen.
Here is my capture code. PreviewView is the container, videoLayer is for the AVCapture video.
// Photo capture delegate
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
    guard let imgData = photo.fileDataRepresentation(), let uiImg = UIImage(data: imgData), let cgImg = uiImg.cgImage else {
        return
    }
    print("Original image size: ", uiImg.size, "\nCGHeight: ", cgImg.height, " width: ", cgImg.width)
    print("Orientation: ", uiImg.imageOrientation.rawValue)
    guard let img = cropImage(image: uiImg) else {
        return
    }
    showImage(image: img)
}

func cropImage(image: UIImage) -> UIImage? {
    print("Image size before crop: ", image.size)
    // Get the croppedRect from the function below
    let croppedRect = calculateRect(image: image)
    guard let imgRet = image.cgImage?.cropping(to: croppedRect) else {
        return nil
    }
    return UIImage(cgImage: imgRet)
}

func calculateRect(image: UIImage) -> CGRect {
    let originalSize: CGSize
    let visibleLayerFrame = self.rectangleView.bounds
    // Calculate the rect from the rectangleView to translate to the image
    let metaRect = self.videoLayer.metadataOutputRectConverted(fromLayerRect: visibleLayerFrame)
    print("MetaRect: ", metaRect)
    // Check orientation
    if image.imageOrientation == UIImage.Orientation.left || image.imageOrientation == UIImage.Orientation.right {
        originalSize = CGSize(width: image.size.height, height: image.size.width)
    } else {
        originalSize = image.size
    }
    let cropRect: CGRect = CGRect(x: metaRect.origin.x * originalSize.width, y: metaRect.origin.y * originalSize.height, width: metaRect.size.width * originalSize.width, height: metaRect.size.height * originalSize.height).integral
    print("Calculated Rect: ", cropRect)
    return cropRect
}

func showImage(image: UIImage) {
    if takenImage != nil {
        takenImage = nil
    }
    takenImage = UIImageView(image: image)
    takenImage.frame = CGRect(x: 10, y: 50, width: 400, height: 1080)
    takenImage.contentMode = .scaleAspectFit
    print("Cropped Image Size: ", image.size)
    self.previewView.addSubview(takenImage)
}
And here is along the lines of what I keep getting: a crop that never matches the rectangle displayed on screen.
What am I screwing up?
I managed to solve the issue for my use case.
private func cropToPreviewLayer(from originalImage: UIImage, toSizeOf rect: CGRect) -> UIImage? {
    guard let cgImage = originalImage.cgImage else { return nil }
    // previewLayer is the AVCaptureVideoPreviewLayer, on which
    // resizeAspectFill and a portrait videoOrientation have been set.
    let outputRect = previewLayer.metadataOutputRectConverted(fromLayerRect: rect)
    let width = CGFloat(cgImage.width)
    let height = CGFloat(cgImage.height)
    let cropRect = CGRect(x: outputRect.origin.x * width, y: outputRect.origin.y * height, width: outputRect.size.width * width, height: outputRect.size.height * height)
    if let croppedCGImage = cgImage.cropping(to: cropRect) {
        return UIImage(cgImage: croppedCGImage, scale: 1.0, orientation: originalImage.imageOrientation)
    }
    return nil
}
Usage of this code for my case:
let rect = CGRect(x: 25, y: 150, width: 325, height: 230)
let croppedImage = self.cropToPreviewLayer(from: image, toSizeOf: rect)
self.imageView.image = croppedImage
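If the crop still looks off, one quick sanity check (a sketch, assuming the same previewLayer as above) is to round-trip the rect through the layer's conversion APIs; the result should come back approximately equal to the on-screen rect you started with:
let outputRect = previewLayer.metadataOutputRectConverted(fromLayerRect: rect)
let roundTripped = previewLayer.layerRectConverted(fromMetadataOutputRect: outputRect)
print("round-tripped rect: \(roundTripped)") // should approximate the original rect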
The world of UIKit has the TOP LEFT corner as 0,0.
The 0,0 point in the AVFoundation world is the BOTTOM LEFT corner.
So you have to translate by rotating 90 degrees.
That's why your image is bonkers.
Also remember that because of the origin translation the following rules apply (see the sketch right after this list):
X is actually up and down
Y is actually left and right
width and height are swapped
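A literal sketch of those three rules for a normalized (0...1) rect, purely for illustration; in real code, let metadataOutputRectConverted(fromLayerRect:) do the conversion, since the exact mapping also depends on video orientation and gravity:
// Illustrative only: X and Y trade places, and width/height swap with them.
func rotatedForAVFoundation(_ r: CGRect) -> CGRect {
    return CGRect(x: r.origin.y, y: r.origin.x,
                  width: r.size.height, height: r.size.width)
}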
Also be aware that the UIImageView content mode setting WILL impact how your image scales. You might want to use .scaleAspectFill and NOT .scaleAspectFit if you really want to see how your image looks in the UIView.
I used this code snippet to see what was behind the curtain:
// figure out how to cut/crop this
let realImageRect = AVMakeRect(aspectRatio: image.size, insideRect: (self.cameraPreview?.frame)!)
NSLog("real image rectangle = \(realImageRect.debugDescription)")
The 'cameraPreview' reference above is the control you're using for your AV Capture Session.
Good luck!

Memory leak when resizing UIImage

I've read through multiple threads concerning the topic but my problem still persists.
When I'm resizing an image with the following code:
extension UIImage {
    func thumbnailWithMaxSize(image: UIImage, maxSize: CGFloat) -> UIImage {
        let width = image.size.width
        let height = image.size.height
        var sizeX: CGFloat = 0
        var sizeY: CGFloat = 0
        if width > height {
            sizeX = maxSize
            sizeY = maxSize * height/width
        } else {
            sizeY = maxSize
            sizeX = maxSize * width/height
        }
        UIGraphicsBeginImageContext(CGSize(width: sizeX, height: sizeY))
        let rect = CGRect(x: 0.0, y: 0.0, width: sizeX, height: sizeY)
        UIGraphicsBeginImageContext(rect.size)
        draw(in: rect)
        let thumbnail = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return thumbnail
    }
}

override func viewDidLoad() {
    super.viewDidLoad()
    let lionImage = UIImage(named: "lion.jpg")!
    var thumb = UIImage()
    autoreleasepool {
        thumb = lionImage.thumbnailWithMaxSize(image: lionImage, maxSize: 2000)
    }
    myImageView.image = thumb
}
...the memory is not released. So when I navigate through multiple ViewControllers (e.g. with a PageViewController) I end up getting memory warnings and the app eventually crashes.
I also tried to load the image via UIImage(contentsOfFile: path) without success.
Any suggestions?
I noticed your code begins two contexts but only ends one.
Here's my extension, which is basically the same as yours. Since I'm not having memory issues with it, the unbalanced context looks like it may be your problem.
extension UIImage {
    public func resizeToRect(_ size: CGSize) -> UIImage {
        UIGraphicsBeginImageContext(size)
        self.draw(in: CGRect(x: 0, y: 0, width: size.width, height: size.height))
        let resizedImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return resizedImage!
    }
}
The problem is this:
UIGraphicsGetImageFromCurrentImageContext()
returns an autoreleased UIImage. The autorelease pool holds on to this image until your code returns control to the run loop, which you do not do for a long time. To solve this problem, set thumb to nil after using it:
var thumb: UIImage? = nil
var myImage: UIImage? = nil
autoreleasepool {
    thumb = lionImage.thumbnailWithMaxSize(image: lionImage, maxSize: 2000)
    if let thumb = thumb, let data = UIImagePNGRepresentation(thumb) {
        myImage = UIImage(data: data)
    }
    thumb = nil // release the autoreleased thumbnail while still inside the pool
}
myImageView.image = myImage
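As an aside (not from the original answers): on iOS 10 and later you can sidestep manual context management entirely with UIGraphicsImageRenderer, which creates and cleans up the context for you, so there is no begin/end pair to unbalance. A minimal sketch:
import UIKit

extension UIImage {
    // Resize by drawing into a renderer-managed context.
    func resized(to size: CGSize) -> UIImage {
        let renderer = UIGraphicsImageRenderer(size: size)
        return renderer.image { _ in
            self.draw(in: CGRect(origin: .zero, size: size))
        }
    }
}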

swift ios reduce image size before upload [duplicate]

This question already has answers here:
How do I resize the UIImage to reduce upload image size
(21 answers)
Closed 5 years ago.
I am trying to reduce the file size of my images as much as possible before I upload them. Right now I am doing this:
if let firstImageData = UIImageJPEGRepresentation(pickedImage, 0.1) {
    self.imgArray.append(firstImageData)
}
This will take any image coming from the camera or photo album, make it a JPEG, and reduce its size.
I have set the compression quality to 0.1, but when I upload my images the size still ends up around 300-350 KB. Is there any way I could shrink them even more? I am aiming for 50-70 KB.
You can first resize your image to a smaller size using these extensions, which resize either by percentage or to a given width:
extension UIImage {
    func resizeWithPercent(percentage: CGFloat) -> UIImage? {
        let imageView = UIImageView(frame: CGRect(origin: .zero, size: CGSize(width: size.width * percentage, height: size.height * percentage)))
        imageView.contentMode = .scaleAspectFit
        imageView.image = self
        UIGraphicsBeginImageContextWithOptions(imageView.bounds.size, false, scale)
        guard let context = UIGraphicsGetCurrentContext() else { return nil }
        imageView.layer.render(in: context)
        guard let result = UIGraphicsGetImageFromCurrentImageContext() else { return nil }
        UIGraphicsEndImageContext()
        return result
    }
    func resizeWithWidth(width: CGFloat) -> UIImage? {
        let imageView = UIImageView(frame: CGRect(origin: .zero, size: CGSize(width: width, height: CGFloat(ceil(width/size.width * size.height)))))
        imageView.contentMode = .scaleAspectFit
        imageView.image = self
        UIGraphicsBeginImageContextWithOptions(imageView.bounds.size, false, scale)
        guard let context = UIGraphicsGetCurrentContext() else { return nil }
        imageView.layer.render(in: context)
        guard let result = UIGraphicsGetImageFromCurrentImageContext() else { return nil }
        UIGraphicsEndImageContext()
        return result
    }
}
To use it, just call it like this:
myImage = myImage.resizeWithWidth(width: 700)!
Then you can still compress it further using a compression quality of your choice:
let compressData = UIImageJPEGRepresentation(myImage, 0.5) // max value is 1.0 and minimum is 0.0
let compressedImage = UIImage(data: compressData!)
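If you need to hit a specific byte budget, such as the 50-70 KB target from the question, one common approach (a sketch, not from the original answer; the step size and quality floor are arbitrary choices) is to lower the JPEG quality until the data fits:
// Step the JPEG quality down until the encoded data fits maxBytes.
func jpegData(for image: UIImage, maxBytes: Int) -> Data? {
    var quality: CGFloat = 0.9
    while quality > 0.05 {
        if let data = UIImageJPEGRepresentation(image, quality), data.count <= maxBytes {
            return data
        }
        quality -= 0.1
    }
    return UIImageJPEGRepresentation(image, 0.05)
}

let uploadData = jpegData(for: myImage, maxBytes: 70 * 1024) // aim for <= 70 KB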
You can only change the size of the image (smaller size = less data) and compress it using an image compression algorithm like JPEG; there is no other way (a better algorithm = smaller size at the same quality).
I heard Google recently improved on the JPEG algorithm using neural networks (Google's TensorFlow).

How to position image view on top of a photo correctly using Swift

Currently I am able to save the photo with an image on top; however, the top image is not located at the coordinates at which it appears on screen in the preview view.
fileprivate func mergeUIViews() -> UIImage?
{
    let bottomImage = photo
    let topImage = uiViewInstance.image
    let bottomImageHeight = bottomImage.size.height
    let bottomImageWidth = bottomImage.size.width
    let topImageHeight = uiViewInstance.frame.height
    let topImageWidth = uiViewInstance.frame.width
    let topImageOrigin = uiViewInstance.frame.origin
    let bottomImageSize = CGSize(width: bottomImageWidth, height: bottomImageHeight)
    let topImageSize = CGSize(width: topImageWidth, height: topImageHeight)
    // Merge images
    UIGraphicsBeginImageContextWithOptions(bottomImageSize, false, 0.0)
    bottomImage.draw(in: CGRect(origin: CGPoint.zero, size: bottomImageSize))
    topImage.draw(in: CGRect(origin: topImageOrigin, size: topImageSize)) // Where I believe the problem exists
    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return newImage
}
What works, instead of using the actual captured photo's size, is setting the context size to the view controller's view size. This resulted in the desired output: the entire screen showing the merged photos, with the overlaid image at its proper origin.
fileprivate func mergeUIViews() -> UIImage?
{
    let bottomImage = photo
    let topImage = uiViewInstance.image
    let bottomImageSize = self.view.frame.size
    let topImageOrigin = uiViewInstance.frame.origin
    let topImageSize = uiViewInstance.frame.size
    // Merge images
    UIGraphicsBeginImageContextWithOptions(bottomImageSize, false, 0.0)
    bottomImage.draw(in: CGRect(origin: CGPoint.zero, size: bottomImageSize))
    topImage.draw(in: CGRect(origin: topImageOrigin, size: topImageSize)) // The origin now matches the on-screen frame
    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return newImage
}
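Note the trade-off: drawing into a view-sized context caps the merged output at screen resolution. If you wanted to keep the photo's full resolution instead (not covered above), you could scale the overlay's view-space frame up into image space; a sketch, assuming the photo and the view share the same aspect ratio:
fileprivate func mergeAtFullResolution() -> UIImage? {
    let bottomImage = photo
    let topImage = uiViewInstance.image
    // Scale factor from view space to image space (assumes matching aspect ratios).
    let scale = bottomImage.size.width / self.view.frame.width
    let f = uiViewInstance.frame
    let topRect = CGRect(x: f.origin.x * scale, y: f.origin.y * scale,
                         width: f.width * scale, height: f.height * scale)
    UIGraphicsBeginImageContextWithOptions(bottomImage.size, false, 0.0)
    bottomImage.draw(in: CGRect(origin: .zero, size: bottomImage.size))
    topImage.draw(in: topRect)
    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return newImage
}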

Photo resizing and compressing

I am resizing and compressing my photos and getting an unusual result.
When I choose the image from the photo album, the image compresses and resizes fine. However, if I do it on an image that was passed from the camera, the image becomes oddly small (and unwatchable). As a test, I put some compression and resizing calls in a button handler that takes an image from either the camera or the photo album. Below are my code and console output.
@IBAction func testBtnPressed(sender: AnyObject) {
    let img = selectedImageView.image!
    print("before resize image \(img.dataLengh_kb)kb size \(img.size)")
    let resizedImg = img.resizeWithWidth(width: 1080)
    print("1080 After resize image \(resizedImg!.dataLengh_kb)kb size \(resizedImg!.size)")
    let compressedImageData = resizedImg!.mediumQualityJPEGNSData
    print("Compress to medium quality = \(compressedImageData.length / 1024)kb")
}

extension UIImage {
    var mediumQualityJPEGNSData: NSData { return UIImageJPEGRepresentation(self, 0.5)! as NSData }
    func resizeWithWidth(width: CGFloat) -> UIImage? {
        let imageView = UIImageView(frame: CGRect(origin: .zero, size: CGSize(width: width, height: CGFloat(ceil(width/size.width * size.height)))))
        imageView.contentMode = .scaleAspectFit
        imageView.image = self
        UIGraphicsBeginImageContextWithOptions(imageView.bounds.size, false, scale)
        guard let context = UIGraphicsGetCurrentContext() else { return nil }
        imageView.layer.render(in: context)
        guard let result = UIGraphicsGetImageFromCurrentImageContext() else { return nil }
        UIGraphicsEndImageContext()
        return result
    }
}
When photo was selected from photo album
before resize image 5004kb size (3024.0, 3024.0)
1080 After resize image 1023kb size (1080.0, 1080.0)
Compress to medium quality = 119kb
When photo was passed by camera
before resize image 4653kb size (24385.536, 24385.536)
1080 After resize image 25kb size (1080.576, 1080.576)
Compress to medium quality = 4kb
I replaced the image resizing function with the following one and it worked a lot better:
func resizeImage(newHeight: CGFloat) -> UIImage {
    let scale = newHeight / self.size.height
    let newWidth = self.size.width * scale
    UIGraphicsBeginImageContext(CGSize(width: newWidth, height: newHeight))
    self.draw(in: CGRect(x: 0, y: 0, width: newWidth, height: newHeight))
    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return newImage!
}
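Usage would look something like this (a sketch reusing img and the mediumQualityJPEGNSData helper from the question):
let resized = img.resizeImage(newHeight: 1080)
let compressedImageData = resized.mediumQualityJPEGNSData
print("Compressed to medium quality = \(compressedImageData.length / 1024)kb")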
