Slow image uploading to S3 via the iOS Amazon SDK - ios

Uploading images on multiple threads to S3 via the Amazon SDK takes a very long time. Let me know if you are facing the same problem and have found a solution.

Without your code, it is difficult to answer this. But since you say the upload takes a long time, the issue is most likely the size of the image. When you pick an image on an iPhone, it is high quality; its high resolution and file size are what make the upload slow. So before uploading to the AWS bucket, compress the image and reduce its resolution to fit your requirements.
When you pick the image:
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
    let documentsPath = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first
    let imagePath = documentsPath?.appendingPathComponent("image.jpg")
    // Extract the image from the picker, downscale it, compress it to JPEG, and save it.
    if let originalImage = info[.originalImage] as? UIImage,
        let pickedImage = self.scale(image: originalImage, toLessThan: 320),
        let imageData = pickedImage.jpegData(compressionQuality: 0.5),
        let imagePath = imagePath {
        try? imageData.write(to: imagePath)
        DispatchQueue.main.async {
            self.imgProfileImageView.image = pickedImage
        }
    }
    picker.dismiss(animated: true)
}
You can reduce the resolution with this function:
private func scale(image originalImage: UIImage, toLessThan maxResolution: CGFloat) -> UIImage? {
    guard let imageReference = originalImage.cgImage else { return nil }
    let rotate90 = CGFloat.pi / 2.0        // radians
    let rotate180 = CGFloat.pi             // radians
    let rotate270 = 3.0 * CGFloat.pi / 2.0 // radians

    let originalWidth = CGFloat(imageReference.width)
    let originalHeight = CGFloat(imageReference.height)
    let originalOrientation = originalImage.imageOrientation
    var newWidth = originalWidth
    var newHeight = originalHeight

    // Shrink so the longest side is at most maxResolution, keeping the aspect ratio.
    if originalWidth > maxResolution || originalHeight > maxResolution {
        let aspectRatio: CGFloat = originalWidth / originalHeight
        newWidth = aspectRatio > 1 ? maxResolution : maxResolution * aspectRatio
        newHeight = aspectRatio > 1 ? maxResolution / aspectRatio : maxResolution
    }

    let scaleRatio: CGFloat = newWidth / originalWidth
    var scale: CGAffineTransform = .init(scaleX: scaleRatio, y: -scaleRatio)
    scale = scale.translatedBy(x: 0.0, y: -originalHeight)

    // Undo the EXIF orientation so the pixels come out upright.
    var rotateAndMirror: CGAffineTransform
    switch originalOrientation {
    case .up:
        rotateAndMirror = .identity
    case .upMirrored:
        rotateAndMirror = .init(translationX: originalWidth, y: 0.0)
        rotateAndMirror = rotateAndMirror.scaledBy(x: -1.0, y: 1.0)
    case .down:
        rotateAndMirror = .init(translationX: originalWidth, y: originalHeight)
        rotateAndMirror = rotateAndMirror.rotated(by: rotate180)
    case .downMirrored:
        rotateAndMirror = .init(translationX: 0.0, y: originalHeight)
        rotateAndMirror = rotateAndMirror.scaledBy(x: 1.0, y: -1.0)
    case .left:
        (newWidth, newHeight) = (newHeight, newWidth)
        rotateAndMirror = .init(translationX: 0.0, y: originalWidth)
        rotateAndMirror = rotateAndMirror.rotated(by: rotate270)
        scale = .init(scaleX: -scaleRatio, y: scaleRatio)
        scale = scale.translatedBy(x: -originalHeight, y: 0.0)
    case .leftMirrored:
        (newWidth, newHeight) = (newHeight, newWidth)
        rotateAndMirror = .init(translationX: originalHeight, y: originalWidth)
        rotateAndMirror = rotateAndMirror.scaledBy(x: -1.0, y: 1.0)
        rotateAndMirror = rotateAndMirror.rotated(by: rotate270)
    case .right:
        (newWidth, newHeight) = (newHeight, newWidth)
        rotateAndMirror = .init(translationX: originalHeight, y: 0.0)
        rotateAndMirror = rotateAndMirror.rotated(by: rotate90)
        scale = .init(scaleX: -scaleRatio, y: scaleRatio)
        scale = scale.translatedBy(x: -originalHeight, y: 0.0)
    case .rightMirrored:
        (newWidth, newHeight) = (newHeight, newWidth)
        rotateAndMirror = .init(scaleX: -1.0, y: 1.0)
        rotateAndMirror = rotateAndMirror.rotated(by: rotate90)
    @unknown default:
        rotateAndMirror = .identity
    }

    UIGraphicsBeginImageContext(CGSize(width: newWidth, height: newHeight))
    guard let context = UIGraphicsGetCurrentContext() else { return nil }
    context.concatenate(scale)
    context.concatenate(rotateAndMirror)
    context.draw(imageReference, in: CGRect(x: 0, y: 0, width: originalWidth, height: originalHeight))
    let copy = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return copy
}
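Once the image is compressed and written to disk, the upload step itself is small. As a minimal sketch (my addition, not part of the original answer): uploading the saved file with AWSS3TransferUtility, assuming the AWSS3 dependency is installed and the SDK's service configuration was set up at app launch; the bucket name and key below are placeholders.
import AWSS3

func upload(fileURL: URL) {
    // Progress reporting is optional but useful when diagnosing slow uploads.
    let expression = AWSS3TransferUtilityUploadExpression()
    expression.progressBlock = { _, progress in
        print("Upload progress: \(progress.fractionCompleted)")
    }
    AWSS3TransferUtility.default().uploadFile(
        fileURL,
        bucket: "my-bucket",   // placeholder bucket name
        key: "image.jpg",      // placeholder object key
        contentType: "image/jpeg",
        expression: expression
    ) { _, error in
        if let error = error {
            print("Upload failed: \(error)")
        } else {
            print("Upload finished")
        }
    }
}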

Related

How to set outline as per image shape?

I am stuck at this point. I want an outline that follows the image's shape, and output like this video.
I tried the code below, but it does not work smoothly.
extension CGPoint {
    /**
     Rotates the point around the center `origin` by `byDegrees` degrees along the Z axis.
     - Parameters:
       - origin: The center of the rotation;
       - byDegrees: Amount of degrees to rotate around the Z axis.
     - Returns: The rotated point.
     */
    func rotated(around origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
        let dx = x - origin.x
        let dy = y - origin.y
        let radius = sqrt(dx * dx + dy * dy)
        let azimuth = atan2(dy, dx) // in radians
        let newAzimuth = azimuth + byDegrees * .pi / 180.0 // to radians
        let x = origin.x + radius * cos(newAzimuth)
        let y = origin.y + radius * sin(newAzimuth)
        return CGPoint(x: x, y: y)
    }
}
public extension UIImage {
    /**
     Returns the flat colorized version of the image, or self when something went wrong.
     - Parameters:
       - color: The color to use. By default, `UIColor.white`.
     - Returns: The flat colorized version of the image, or self if something went wrong.
     */
    func colorized(with color: UIColor = .white) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(size, false, scale)
        defer {
            UIGraphicsEndImageContext()
        }
        guard let context = UIGraphicsGetCurrentContext(), let cgImage = cgImage else { return self }
        let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        color.setFill()
        context.translateBy(x: 0, y: size.height)
        context.scaleBy(x: 1.0, y: -1.0)
        context.clip(to: rect, mask: cgImage)
        context.fill(rect)
        guard let colored = UIGraphicsGetImageFromCurrentImageContext() else { return self }
        return colored
    }

    /**
     Returns the stroked version of the transparent image with the given stroke color and thickness.
     - Parameters:
       - color: The color to use. By default, `UIColor.white`.
       - thickness: The thickness of the border. Defaults to `2`.
       - quality: The step in degrees (out of 360): the smaller the better, but the slower. Defaults to `10`.
     - Returns: The stroked version of the image, or self if something went wrong.
     */
    func stroked(with color: UIColor = .white, thickness: CGFloat = 2, quality: CGFloat = 10) -> UIImage {
        guard let cgImage = cgImage else { return self }
        // Colorize the stroke image to reflect the border color
        let strokeImage = colorized(with: color)
        guard let strokeCGImage = strokeImage.cgImage else { return self }
        /// Rendering quality of the stroke
        let step = quality == 0 ? 10 : abs(quality)
        let oldRect = CGRect(x: thickness, y: thickness, width: size.width, height: size.height).integral
        let newSize = CGSize(width: size.width + 2 * thickness, height: size.height + 2 * thickness)
        let translationVector = CGPoint(x: thickness, y: 0)
        UIGraphicsBeginImageContextWithOptions(newSize, false, scale)
        guard let context = UIGraphicsGetCurrentContext() else { return self }
        defer {
            UIGraphicsEndImageContext()
        }
        context.translateBy(x: 0, y: newSize.height)
        context.scaleBy(x: 1.0, y: -1.0)
        context.interpolationQuality = .high
        // Stamp the colorized copy around the origin to build up the outline.
        for angle: CGFloat in stride(from: 0, to: 360, by: step) {
            let vector = translationVector.rotated(around: .zero, byDegrees: angle)
            let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
            context.concatenate(transform)
            context.draw(strokeCGImage, in: oldRect)
            let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
            context.concatenate(resetTransform)
        }
        context.draw(cgImage, in: oldRect)
        guard let stroked = UIGraphicsGetImageFromCurrentImageContext() else { return self }
        return stroked
    }
}
Try this one.
Please do NOT add constraints to the imageView except for top/left in the Storyboard.
//
//  ViewController.swift
//  AddBorderANDZoom
//
//  Created by ing.conti on 06/01/22.
//

import UIKit
import CoreGraphics
import AVFoundation

class ViewController: UIViewController {
    @IBOutlet weak var imageView: UIImageView!
    @IBOutlet weak var slider: UISlider!
    let maxZoom: CGFloat = 20.0

    override func viewDidLoad() {
        super.viewDidLoad()
        self.slider.value = 1
        self.imageView.image = UIImage(named: "apple")?.outline(borderSize: self.maxZoom)
    }

    @IBAction func didSlide(_ sender: UISlider) {
        let value = CGFloat(sender.value * 20)
        self.imageView.image = UIImage(named: "apple")?.outline(borderSize: value)
    }
}
/////
extension UIImage {
    func outline(borderSize: CGFloat = 6.0) -> UIImage? {
        let color = UIColor.black
        let W = self.size.width
        let H = self.size.height
        // Grow the canvas proportionally so the border fits around the image.
        let scale = 1 + (borderSize / W)
        let outlinedImageRect = CGRect(x: -borderSize / 2,
                                       y: -borderSize / 2,
                                       width: W * scale,
                                       height: H * scale)
        let imageRect = CGRect(x: 0, y: 0, width: W, height: H)
        UIGraphicsBeginImageContextWithOptions(outlinedImageRect.size, false, scale)
        // Draw the enlarged image, tint it with the border color, then draw the
        // original on top so only the outer rim keeps the tint.
        self.draw(in: outlinedImageRect)
        let context = UIGraphicsGetCurrentContext()!
        context.setBlendMode(.sourceIn)
        context.setFillColor(color.cgColor)
        context.fill(outlinedImageRect)
        self.draw(in: imageRect)
        let newImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return newImage
    }
}

Drawing on an image & implementing AspectFill for CGRect. Can it be done?

The following image is a screen grab from an iPad 9.7 simulator, showing the circles overlayed on an underlying live preview. (The live camera view is not there because it's on the Simulator)
On any sort of iPhone the circles clip off at the screen edges, but on the iPad you can see the clipping. I'm sure this is an AspectFit or AspectFill problem, but I can't find a solution. Somehow, the code isn't seeing the correct iPad screen size?
This is the code that does it: first the call, then the functions. It all works fine, except for that clipping...
//=======================================
// draw the circles on the image then return
imgOverlay.image = self.drawCirclesOnImage(fromImage: nil, targetSize: imgOverlay.bounds.size)
and now we have the two functions that draw the circles.
func getImageWithColor(color: UIColor, size: CGSize) -> UIImage {
    let rect = CGRect(origin: CGPoint(x: 0, y: 0), size: CGSize(width: screenWidth, height: screenHeight))
    UIGraphicsBeginImageContextWithOptions(size, false, 0)
    color.setFill()
    UIRectFill(rect)
    let image: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext()
    return image
}
func drawCirclesOnImage(fromImage: UIImage? = nil, targetSize: CGSize) -> UIImage? {
    if fromImage == nil && targetSize == CGSize.zero {
        return nil
    }
    var tmpimg: UIImage?
    if targetSize == CGSize.zero {
        tmpimg = fromImage
    } else {
        tmpimg = getImageWithColor(color: UIColor.clear, size: targetSize)
    }
    guard let img = tmpimg else {
        return nil
    }
    let imageSize = img.size
    let scale: CGFloat = 0
    UIGraphicsBeginImageContextWithOptions(imageSize, false, scale)
    img.draw(at: CGPoint.zero)
    let w = imageSize.width
    //let w = screenWidth
    let h = imageSize.height
    //let h = screenHeight
    print("Width 1: \(w) Height 1: \(h)")
    let midX = imageSize.width / 2
    let midY = imageSize.height / 2
    //let midX = screenWidth / 2
    //let midY = screenHeight / 2
    // red circles - radius in %
    let circleRads = [0.07, 0.13, 0.17, 0.22, 0.29, 0.36, 0.40, 0.48, 0.60, 0.75]
    // center "dot" - radius is 1.5%
    var circlePath = UIBezierPath(arcCenter: CGPoint(x: midX, y: midY), radius: CGFloat(w * 0.010), startAngle: CGFloat(0), endAngle: CGFloat(Double.pi * 2), clockwise: true)
    UIColor.white.setFill()
    circlePath.stroke()
    circlePath.fill()
    lineColor = getLineColor(color: color)
    lineColor?.setStroke()
    for pct in circleRads {
        let rad = w * CGFloat(pct)
        circlePath = UIBezierPath(arcCenter: CGPoint(x: midX, y: midY), radius: CGFloat(rad), startAngle: CGFloat(0), endAngle: CGFloat(Double.pi * 2), clockwise: true)
        circlePath.lineWidth = 2.5
        circlePath.stroke()
    }
    // draw text time stamp on image
    let now = Date()
    let formatter = DateFormatter()
    formatter.timeZone = TimeZone.current
    formatter.dateFormat = "yyyy-MM-dd HH:mm"
    let dateString = formatter.string(from: now)
    // print(dateString)
    let paragraphStyle = NSMutableParagraphStyle()
    paragraphStyle.alignment = .center
    let attrs = [NSAttributedStringKey.font: UIFont(name: "HelveticaNeue-Thin", size: 26)!, NSAttributedStringKey.paragraphStyle: paragraphStyle]
    let string = dateString
    string.draw(with: CGRect(x: 12, y: 38, width: 448, height: 448), options: .usesLineFragmentOrigin, attributes: attrs, context: nil)
    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return newImage
}

how to add colored border to uiimage in swift

It is pretty easy to add a border to a UIImageView using layers (borderWidth, borderColor, etc.). Is there any way to add the border to the image itself, rather than to the image view? Does somebody know?
Update:
I tried to follow the suggestion below and used an extension. Thank you for that, but I did not get the desired result. Here is my code. What is wrong?
import UIKit

class ViewController: UIViewController {
    var imageView: UIImageView!
    var sizeW = CGFloat()
    var sizeH = CGFloat()

    override func viewDidLoad() {
        super.viewDidLoad()
        sizeW = view.frame.width
        sizeH = view.frame.height
        setImage()
    }

    func setImage() {
        // add image view
        imageView = UIImageView(frame: CGRect(x: 0, y: 0, width: sizeW/2, height: sizeH/2))
        imageView.center = view.center
        imageView.tintColor = UIColor.orange
        imageView.contentMode = UIViewContentMode.scaleAspectFit
        let imgOriginal = UIImage(named: "plum")!.withRenderingMode(.alwaysTemplate)
        let borderImage = imgOriginal.imageWithBorder(width: 2, color: UIColor.blue)
        imageView.image = borderImage
        view.addSubview(imageView)
    }
}

extension UIImage {
    func imageWithBorder(width: CGFloat, color: UIColor) -> UIImage? {
        let square = CGSize(width: min(size.width, size.height) + width * 2, height: min(size.width, size.height) + width * 2)
        let imageView = UIImageView(frame: CGRect(origin: CGPoint(x: 0, y: 0), size: square))
        imageView.contentMode = .center
        imageView.image = self
        imageView.layer.borderWidth = width
        imageView.layer.borderColor = color.cgColor
        UIGraphicsBeginImageContextWithOptions(imageView.bounds.size, false, scale)
        guard let context = UIGraphicsGetCurrentContext() else { return nil }
        imageView.layer.render(in: context)
        let result = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return result
    }
}
The second image with the red border is more or less what I need:
Strongly inspired by @herme5, refactored into more compact Swift 5/iOS 12+ code as follows (fixed a vertical flip issue as well):
public extension UIImage {
    /**
     Returns the flat colorized version of the image, or self when something went wrong.
     - Parameters:
       - color: The color to use. By default, `UIColor.white`.
     - Returns: The flat colorized version of the image, or self if something went wrong.
     */
    func colorized(with color: UIColor = .white) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(size, false, scale)
        defer {
            UIGraphicsEndImageContext()
        }
        guard let context = UIGraphicsGetCurrentContext(), let cgImage = cgImage else { return self }
        let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        color.setFill()
        context.translateBy(x: 0, y: size.height)
        context.scaleBy(x: 1.0, y: -1.0)
        context.clip(to: rect, mask: cgImage)
        context.fill(rect)
        guard let colored = UIGraphicsGetImageFromCurrentImageContext() else { return self }
        return colored
    }

    /**
     Returns the stroked version of the transparent image with the given stroke color and thickness.
     - Parameters:
       - color: The color to use. By default, `UIColor.white`.
       - thickness: The thickness of the border. Defaults to `2`.
       - quality: The step in degrees (out of 360): the smaller the better, but the slower. Defaults to `10`.
     - Returns: The stroked version of the image, or self if something went wrong.
     */
    func stroked(with color: UIColor = .white, thickness: CGFloat = 2, quality: CGFloat = 10) -> UIImage {
        guard let cgImage = cgImage else { return self }
        // Colorize the stroke image to reflect the border color
        let strokeImage = colorized(with: color)
        guard let strokeCGImage = strokeImage.cgImage else { return self }
        /// Rendering quality of the stroke
        let step = quality == 0 ? 10 : abs(quality)
        let oldRect = CGRect(x: thickness, y: thickness, width: size.width, height: size.height).integral
        let newSize = CGSize(width: size.width + 2 * thickness, height: size.height + 2 * thickness)
        let translationVector = CGPoint(x: thickness, y: 0)
        UIGraphicsBeginImageContextWithOptions(newSize, false, scale)
        guard let context = UIGraphicsGetCurrentContext() else { return self }
        defer {
            UIGraphicsEndImageContext()
        }
        context.translateBy(x: 0, y: newSize.height)
        context.scaleBy(x: 1.0, y: -1.0)
        context.interpolationQuality = .high
        // Stamp the colorized copy around the origin to build up the outline.
        for angle: CGFloat in stride(from: 0, to: 360, by: step) {
            let vector = translationVector.rotated(around: .zero, byDegrees: angle)
            let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
            context.concatenate(transform)
            context.draw(strokeCGImage, in: oldRect)
            let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
            context.concatenate(resetTransform)
        }
        context.draw(cgImage, in: oldRect)
        guard let stroked = UIGraphicsGetImageFromCurrentImageContext() else { return self }
        return stroked
    }
}

extension CGPoint {
    /**
     Rotates the point around the center `origin` by `byDegrees` degrees along the Z axis.
     - Parameters:
       - origin: The center of the rotation;
       - byDegrees: Amount of degrees to rotate around the Z axis.
     - Returns: The rotated point.
     */
    func rotated(around origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
        let dx = x - origin.x
        let dy = y - origin.y
        let radius = sqrt(dx * dx + dy * dy)
        let azimuth = atan2(dy, dx) // in radians
        let newAzimuth = azimuth + byDegrees * .pi / 180.0 // to radians
        let x = origin.x + radius * cos(newAzimuth)
        let y = origin.y + radius * sin(newAzimuth)
        return CGPoint(x: x, y: y)
    }
}
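A hypothetical call site for the refactored version (the asset name is a placeholder; the source image should have a transparent background):
let badge = UIImage(named: "sticker")?.stroked(with: .red, thickness: 4, quality: 5)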
Here is a UIImage extension I wrote in Swift 4. As IOSDealBreaker said, this is all about image processing, and some particular cases may occur. You should have a PNG image with a transparent background, and manage the size if it is larger than the original.
First, get a colorized "shade" version of your image.
Then draw and redraw the shade image all around a given origin point (in our case, around (0,0) at a distance equal to the border thickness).
Draw your source image at the origin point so that it appears in the foreground.
You may have to enlarge your image if the borders go outside the original rect.
My method uses a lot of util methods and class extensions. Here is some maths to rotate a vector (which is actually a point) around another point: Rotating a CGPoint around another CGPoint
extension CGPoint {
    func rotated(around origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
        let dx = self.x - origin.x
        let dy = self.y - origin.y
        let radius = sqrt(dx * dx + dy * dy)
        let azimuth = atan2(dy, dx) // in radians
        let newAzimuth = azimuth + (byDegrees * CGFloat.pi / 180.0) // convert it to radians
        let x = origin.x + radius * cos(newAzimuth)
        let y = origin.y + radius * sin(newAzimuth)
        return CGPoint(x: x, y: y)
    }
}
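A quick sanity check of the rotation helper, for example in a playground: rotating (1, 0) by 90 degrees around the origin should land on (0, 1):
let p = CGPoint(x: 1, y: 0).rotated(around: .zero, byDegrees: 90)
print(p) // ≈ (0.0, 1.0), up to floating-point error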
I wrote a custom CIFilter to colorize an image that has a transparent background: Colorize a UIImage in Swift
class ColorFilter: CIFilter {
    var inputImage: CIImage?
    var inputColor: CIColor?

    // Color kernel: replaces every pixel's RGB with the given color,
    // preserving the original alpha channel.
    private let kernel: CIColorKernel = {
        let kernelString =
        """
        kernel vec4 colorize(__sample pixel, vec4 color) {
            pixel.rgb = pixel.a * color.rgb;
            pixel.a *= color.a;
            return pixel;
        }
        """
        return CIColorKernel(source: kernelString)!
    }()

    override var outputImage: CIImage? {
        guard let inputImage = inputImage, let inputColor = inputColor else { return nil }
        let inputs = [inputImage, inputColor] as [Any]
        return kernel.apply(extent: inputImage.extent, arguments: inputs)
    }
}
extension UIImage {
    func colorized(with color: UIColor) -> UIImage {
        guard let cgInput = self.cgImage else {
            return self
        }
        let colorFilter = ColorFilter()
        colorFilter.inputImage = CIImage(cgImage: cgInput)
        colorFilter.inputColor = CIColor(color: color)
        if let ciOutputImage = colorFilter.outputImage {
            let context = CIContext(options: nil)
            let cgImg = context.createCGImage(ciOutputImage, from: ciOutputImage.extent)
            return UIImage(cgImage: cgImg!, scale: self.scale, orientation: self.imageOrientation)
                .alpha(color.rgba.alpha)
                .withRenderingMode(self.renderingMode)
        } else {
            return self
        }
    }
}
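The colorized(with:) above also calls alpha(_:) and color.rgba, two of the util helpers the author mentions but doesn't include. A minimal sketch of what they might look like (names and semantics assumed, not from the original answer):
extension UIImage {
    // Assumed helper: redraws the image at the given opacity.
    func alpha(_ value: CGFloat) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(size, false, scale)
        defer { UIGraphicsEndImageContext() }
        draw(at: .zero, blendMode: .normal, alpha: value)
        return UIGraphicsGetImageFromCurrentImageContext() ?? self
    }
}

extension UIColor {
    // Assumed helper: exposes the RGBA components as a tuple.
    var rgba: (red: CGFloat, green: CGFloat, blue: CGFloat, alpha: CGFloat) {
        var r: CGFloat = 0, g: CGFloat = 0, b: CGFloat = 0, a: CGFloat = 0
        getRed(&r, green: &g, blue: &b, alpha: &a)
        return (r, g, b, a)
    }
}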
At this point you should have everything to make this work:
extension UIImage {
    func stroked(with color: UIColor, size: CGFloat) -> UIImage {
        let strokeImage = self.colorized(with: color)
        let oldRect = CGRect(x: size, y: size, width: self.size.width, height: self.size.height).integral
        let newSize = CGSize(width: self.size.width + (2 * size), height: self.size.height + (2 * size))
        let translationVector = CGPoint(x: size, y: 0)
        UIGraphicsBeginImageContextWithOptions(newSize, false, self.scale)
        if let context = UIGraphicsGetCurrentContext() {
            context.interpolationQuality = .high
            let step = 10 // reduce the step to increase quality
            // Stamp the colorized copy around the origin to build up the outline.
            for angle in stride(from: 0, to: 360, by: step) {
                let vector = translationVector.rotated(around: .zero, byDegrees: CGFloat(angle))
                let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
                context.concatenate(transform)
                context.draw(strokeImage.cgImage!, in: oldRect)
                let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
                context.concatenate(resetTransform)
            }
            context.draw(self.cgImage!, in: oldRect)
            let newImage = UIImage(cgImage: context.makeImage()!, scale: self.scale, orientation: self.imageOrientation)
            UIGraphicsEndImageContext()
            return newImage.withRenderingMode(self.renderingMode)
        }
        UIGraphicsEndImageContext()
        return self
    }
}
Borders on images belong to the image-processing side of iOS. It's not as easy as adding a border to a UIView; it goes pretty deep, but if you're willing to go the distance, here is a library and a hint for the journey:
https://github.com/BradLarson/GPUImage
Try GPUImageThresholdEdgeDetectionFilter,
or try OpenCV: https://docs.opencv.org/2.4/doc/tutorials/ios/image_manipulation/image_manipulation.html
Use this simple extension for UIImage
extension UIImage {
    func outline() -> UIImage? {
        UIGraphicsBeginImageContext(size)
        let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        self.draw(in: rect, blendMode: .normal, alpha: 1.0)
        let context = UIGraphicsGetCurrentContext()
        context?.setStrokeColor(red: 1.0, green: 0.5, blue: 1.0, alpha: 1.0)
        context?.setLineWidth(5.0)
        context?.stroke(rect)
        let newImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return newImage
    }
}
It will give you an image with a pink border.
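A hypothetical call site (the asset name is a placeholder):
imageView.image = UIImage(named: "photo")?.outline()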

Cut rounded image with the face from CIDetector and CIFaceFeature

How can I cut the frame that I receive as faceViewBounds to make a big circle around the face? It's like a badge with the person's face.
Maybe I should get the center of faceViewBounds, find that center in theImageView.image, draw a circle with a big diameter, and then cut away everything outside the circle, but I don't know how to do it in code. Any suggestions?
func detectFaceFrom(ImageView theImageView: UIImageView) {
    guard let personImage = CIImage(image: theImageView.image!) else {
        return
    }
    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyLow]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    let faces = faceDetector?.features(in: personImage)
    let ciImageSize = personImage.extent.size
    // Core Image coordinates are flipped relative to UIKit.
    var transform = CGAffineTransform(scaleX: 1, y: -1)
    transform = transform.translatedBy(x: 0, y: -ciImageSize.height)
    if faces?.count == 1 {
        for face in faces as! [CIFaceFeature] {
            var faceViewBounds = face.bounds.applying(transform)
            let viewSize = theImageView.bounds.size
            let scale = min(viewSize.width / ciImageSize.width,
                            viewSize.height / ciImageSize.height)
            let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
            let offsetY = (viewSize.height - ciImageSize.height * scale) / 2
            faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
            faceViewBounds.origin.x += offsetX
            faceViewBounds.origin.y += offsetY
            let faceBox = UIView(frame: faceViewBounds)
            faceBox.layer.borderWidth = 3
            faceBox.layer.borderColor = UIColor.green.cgColor
            faceBox.backgroundColor = UIColor.clear
            drawCircleFromCenter(faceViewBounds.center ???
        }
        return cuttedCircleWithFace
    } else {
        return theImageView.image
    }
}
I just saw an ad on Facebook with the exact same thing I want to accomplish:
The problem is that you should use your image.size instead of theImageView.bounds.size. You should also handle the CIDetectorImageOrientation option when requesting features.
extension UIImage {
    var faces: [UIImage] {
        guard let ciimage = CIImage(image: self) else { return [] }
        // Map UIImage.Orientation to the EXIF orientation values CIDetector expects.
        var orientation: NSNumber {
            switch imageOrientation {
            case .up: return 1
            case .upMirrored: return 2
            case .down: return 3
            case .downMirrored: return 4
            case .leftMirrored: return 5
            case .right: return 6
            case .rightMirrored: return 7
            case .left: return 8
            @unknown default: return 1
            }
        }
        return CIDetector(ofType: CIDetectorTypeFace, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyLow])?
            .features(in: ciimage, options: [CIDetectorImageOrientation: orientation])
            .compactMap {
                // Pad the detected bounds a little so the crop isn't too tight.
                let rect = $0.bounds.insetBy(dx: -10, dy: -10)
                UIGraphicsBeginImageContextWithOptions(rect.size, false, scale)
                defer { UIGraphicsEndImageContext() }
                UIImage(ciImage: ciimage.cropped(to: rect)).draw(in: CGRect(origin: .zero, size: rect.size))
                guard let face = UIGraphicsGetImageFromCurrentImageContext() else { return nil }
                // Now that you have your face image you need to properly apply a circle mask to it
                let size = face.size
                let breadth = min(size.width, size.height)
                let breadthSize = CGSize(width: breadth, height: breadth)
                UIGraphicsBeginImageContextWithOptions(breadthSize, false, scale)
                defer { UIGraphicsEndImageContext() }
                guard let cgImage = face.cgImage?.cropping(to: CGRect(origin: CGPoint(x: size.width > size.height ? (size.width - size.height).rounded(.down) / 2 : 0, y: size.height > size.width ? (size.height - size.width).rounded(.down) / 2 : 0), size: breadthSize))
                else { return nil }
                let faceRect = CGRect(origin: .zero, size: CGSize(width: min(size.width, size.height), height: min(size.width, size.height)))
                UIBezierPath(ovalIn: faceRect).addClip()
                UIImage(cgImage: cgImage).draw(in: faceRect)
                return UIGraphicsGetImageFromCurrentImageContext()
            } ?? []
    }
}
let profilePicture = UIImage(data: try! Data(contentsOf: URL(string:"http://i.stack.imgur.com/Xs4RX.jpg")!))!
if let face = profilePicture.faces.first {
print(face.size)
}
If you just want to focus on a face inside an image, first set up an image view and mask it into a circle:
let image = UIImage(named: "face.jpg")
let imageView = UIImageView(frame: CGRect(x: 0, y: 0, width: 50.0, height: 50.0))
imageView.image = image
imageView.contentMode = .scaleAspectFill
imageView.layer.cornerRadius = imageView.bounds.height * 0.5
imageView.layer.masksToBounds = true
Next, you run the CIDetector:
func focusOnFace(in imageView: UIImageView) {
    guard let image = imageView.image,
        var personImage = CIImage(image: image) else { return }
    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyLow]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    // This will just take the first detected face but you can do something more sophisticated
    guard let face = faceDetector?.features(in: personImage).first as? CIFaceFeature else { return }
    // Make the facial rect a square so it will mask nicely to a circle
    // (may not be strictly necessary as `CIFaceFeature` bounds is typically a square)
    var rect = face.bounds
    rect.size.height = max(face.bounds.height, face.bounds.width)
    rect.size.width = max(face.bounds.height, face.bounds.width)
    rect = rect.insetBy(dx: -30, dy: -30) // Adds padding around the face so it's not so tightly cropped
    // Crop to the detected face (cropped(to:) is the Swift 4 name of cropping(to:))
    personImage = personImage.cropped(to: rect)
    // Set the new cropped image as the image view image
    imageView.image = UIImage(ciImage: personImage)
}
Example: the before/after screenshots of focusOnFace (and an updated before/after pair) are omitted here.

camera image resizing

I have an app I'm working on with a feature that lets the user either take a photo or pick an image from the photo library. The code works, but the image always displays larger than the UIImageView box. The box is set to "scale aspect fit" in the storyboard, but the camera image refuses to conform.
Here's the code:
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String: Any]) {
    if let chosenImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
        imageField.image = chosenImage
    } else {
        // error message goes here
    }
    self.dismiss(animated: true, completion: nil)
}
How do I get chosenImage to conform to the box or to aspect fit?
Try it this way:
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String: Any]) {
    if let chosenImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
        imageField.image = chosenImage
        imageField.contentMode = UIViewContentMode.scaleToFill
        imageField.clipsToBounds = true
    } else {
        // error message goes here
    }
    self.dismiss(animated: true, completion: nil)
}
For why scaleAspectFill and clipsToBounds = true matter, have a look at this question: Why scale to fill give bigger image than UIImageView size? (using swift)
If you want to learn more, check this: How to scale a UIImageView proportionally?
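If you would rather resize the bitmap itself instead of relying on contentMode, here is a minimal sketch (my addition, not from the answer above) that uses AVMakeRect from AVFoundation to compute the aspect-fit size and UIGraphicsImageRenderer (iOS 10+) to redraw:
import AVFoundation
import UIKit

// Scales an image so it fits inside targetSize, preserving its aspect ratio.
func aspectFitImage(_ image: UIImage, in targetSize: CGSize) -> UIImage {
    let fittedRect = AVMakeRect(aspectRatio: image.size,
                                insideRect: CGRect(origin: .zero, size: targetSize))
    let renderer = UIGraphicsImageRenderer(size: fittedRect.size)
    return renderer.image { _ in
        image.draw(in: CGRect(origin: .zero, size: fittedRect.size))
    }
}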
The code below resizes an image captured by the camera and is compatible with Swift 4.0.
It checks the image's EXIF orientation via UIImageOrientation and, according to that value, transforms and scales the image, so the returned image has the same orientation as your camera view.
func scaleAndRotateImage(image: UIImage, MaxResolution iIntMaxResolution: Int) -> UIImage {
    let kMaxResolution = iIntMaxResolution
    let imgRef = image.cgImage!
    let width: CGFloat = CGFloat(imgRef.width)
    let height: CGFloat = CGFloat(imgRef.height)
    var transform = CGAffineTransform.identity
    var bounds = CGRect(x: 0, y: 0, width: width, height: height)
    // Cap the longest side at kMaxResolution while keeping the aspect ratio.
    if Int(width) > kMaxResolution || Int(height) > kMaxResolution {
        let ratio: CGFloat = width / height
        if ratio > 1 {
            bounds.size.width = CGFloat(kMaxResolution)
            bounds.size.height = bounds.size.width / ratio
        } else {
            bounds.size.height = CGFloat(kMaxResolution)
            bounds.size.width = bounds.size.height * ratio
        }
    }
    let scaleRatio: CGFloat = bounds.size.width / width
    let imageSize = CGSize(width: CGFloat(imgRef.width), height: CGFloat(imgRef.height))
    var boundHeight: CGFloat
    let orient = image.imageOrientation
    switch orient {
    case .up:
        // EXIF = 1
        transform = CGAffineTransform.identity
    case .upMirrored:
        // EXIF = 2
        transform = CGAffineTransform(translationX: imageSize.width, y: 0.0)
        transform = transform.scaledBy(x: -1.0, y: 1.0)
    case .down:
        // EXIF = 3
        transform = CGAffineTransform(translationX: imageSize.width, y: imageSize.height)
        transform = transform.rotated(by: CGFloat.pi)
    case .downMirrored:
        // EXIF = 4
        transform = CGAffineTransform(translationX: 0.0, y: imageSize.height)
        transform = transform.scaledBy(x: 1.0, y: -1.0)
    case .leftMirrored:
        // EXIF = 5
        boundHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = boundHeight
        transform = CGAffineTransform(translationX: imageSize.height, y: imageSize.width)
        transform = transform.scaledBy(x: -1.0, y: 1.0)
        transform = transform.rotated(by: 3.0 * CGFloat.pi / 2.0)
    case .left:
        // EXIF = 6
        boundHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = boundHeight
        transform = CGAffineTransform(translationX: 0.0, y: imageSize.width)
        transform = transform.rotated(by: 3.0 * CGFloat.pi / 2.0)
    case .rightMirrored:
        // EXIF = 7
        boundHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = boundHeight
        transform = CGAffineTransform(scaleX: -1.0, y: 1.0)
        transform = transform.rotated(by: CGFloat.pi / 2.0)
    case .right:
        // EXIF = 8
        boundHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = boundHeight
        transform = CGAffineTransform(translationX: imageSize.height, y: 0.0)
        transform = transform.rotated(by: CGFloat.pi / 2.0)
    @unknown default:
        print("Error in processing image")
    }
    UIGraphicsBeginImageContext(bounds.size)
    let context = UIGraphicsGetCurrentContext()
    // Flip into the Core Graphics coordinate space before applying the orientation transform.
    if orient == .right || orient == .left {
        context?.scaleBy(x: -scaleRatio, y: scaleRatio)
        context?.translateBy(x: -height, y: 0)
    } else {
        context?.scaleBy(x: scaleRatio, y: -scaleRatio)
        context?.translateBy(x: 0, y: -height)
    }
    context?.concatenate(transform)
    context?.draw(imgRef, in: CGRect(x: 0, y: 0, width: width, height: height))
    let imageCopy = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return imageCopy!
}
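A hypothetical call site, wiring this into the picker delegate from the question (the 1024 cap is an arbitrary choice):
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String: Any]) {
    if let chosenImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
        // Normalize orientation and cap the longest side at 1024 px before display.
        imageField.image = scaleAndRotateImage(image: chosenImage, MaxResolution: 1024)
    }
    self.dismiss(animated: true, completion: nil)
}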
