I want to achieve a CIAffineClamp effect where the image can be rotated, scaled, and translated and still fill the area. After trying, I still have some problems.
Here is the code in question:
func translateAndClamp(image: UIImage) -> UIImage {
    guard let ciImage = CIImage(image: image) else {
        return image
    }

    var transform = CGAffineTransform.identity
    transform = transform.concatenating(CGAffineTransform(translationX: -(ciImage.extent.origin.x + ciImage.extent.width/2), y: -(ciImage.extent.origin.y + ciImage.extent.height/2)))
    transform = transform.concatenating(CGAffineTransform(translationX: 100, y: 0))
    transform = transform.concatenating(CGAffineTransform(scaleX: 0.6, y: 0.6))
    // If I uncomment this line, the whole clamp effect is gone. Does the rotation angle break the effect?
    // transform = transform.concatenating(CGAffineTransform(rotationAngle: 0.2))
    transform = transform.concatenating(CGAffineTransform(translationX: (ciImage.extent.origin.x + ciImage.extent.width/2), y: (ciImage.extent.origin.y + ciImage.extent.height/2)))

    let outputImage = ciImage.transformed(by: transform).clampedToExtent()

    let context = CIContext(options: [CIContextOption.useSoftwareRenderer: true])
    let extent = ciImage.extent
    guard let result = context.createCGImage(outputImage, from: extent) else {
        return image
    }
    return UIImage(cgImage: result, scale: image.scale, orientation: image.imageOrientation)
}
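(For what it's worth, a common fix for this kind of problem is to clamp before transforming and crop back to the original extent afterwards, so the edge extension is computed in the untransformed space and survives the rotation. Below is a minimal, untested sketch of that ordering; the hypothetical transformAndClamp mirrors the values used above.)
func transformAndClamp(image: UIImage) -> UIImage {
    guard let ciImage = CIImage(image: image) else {
        return image
    }
    let extent = ciImage.extent
    let center = CGPoint(x: extent.midX, y: extent.midY)

    // Same center-pivoted transform as above, rotation included
    var transform = CGAffineTransform(translationX: -center.x, y: -center.y)
    transform = transform.concatenating(CGAffineTransform(rotationAngle: 0.2))
    transform = transform.concatenating(CGAffineTransform(scaleX: 0.6, y: 0.6))
    transform = transform.concatenating(CGAffineTransform(translationX: 100, y: 0))
    transform = transform.concatenating(CGAffineTransform(translationX: center.x, y: center.y))

    // Clamp BEFORE transforming so the edge extension survives the rotation,
    // then crop the infinite clamped image back to the original extent
    let outputImage = ciImage.clampedToExtent()
        .transformed(by: transform)
        .cropped(to: extent)

    let context = CIContext(options: [CIContextOption.useSoftwareRenderer: true])
    guard let result = context.createCGImage(outputImage, from: extent) else {
        return image
    }
    return UIImage(cgImage: result, scale: image.scale, orientation: image.imageOrientation)
}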
Currently, I have a color wheel implemented using the following code:
class ColorWheelPalette: UIView {
    var cgImage: CGImage?

    override func draw(_ rect: CGRect) {
        guard let context = UIGraphicsGetCurrentContext() else { return }
        if self.cgImage == nil {
            // https://stackoverflow.com/a/54819655/72437
            let filter = CIFilter(name: "CIHueSaturationValueGradient", parameters: [
                "inputColorSpace": CGColorSpaceCreateDeviceRGB(),
                "inputDither": 0,
                "inputRadius": 160,
                "inputSoftness": 0,
                "inputValue": 1
            ])!
            let ciImage = filter.outputImage!
            let ciContext = CIContext(options: nil)
            self.cgImage = ciContext.createCGImage(ciImage, from: ciImage.extent)
        }
        context.draw(cgImage!, in: rect)
    }
}
It looks like this:
However, the output image is not what I desire: I want the green color region to stay at the top. Hence, I do a quick vertical flip using the following code:
class ColorWheelPalette: UIView {
    var cgImage: CGImage?

    override func draw(_ rect: CGRect) {
        guard let context = UIGraphicsGetCurrentContext() else { return }
        if self.cgImage == nil {
            // https://stackoverflow.com/a/54819655/72437
            let filter = CIFilter(name: "CIHueSaturationValueGradient", parameters: [
                "inputColorSpace": CGColorSpaceCreateDeviceRGB(),
                "inputDither": 0,
                "inputRadius": 160,
                "inputSoftness": 0,
                "inputValue": 1
            ])!
            let ciImage = filter.outputImage!
            let ciContext = CIContext(options: nil)
            self.cgImage = ciContext.createCGImage(ciImage, from: ciImage.extent)
        }
        context.saveGState()
        // Flip vertically
        context.translateBy(x: 0, y: rect.height)
        context.scaleBy(x: 1.0, y: -1.0)
        context.draw(cgImage!, in: rect)
        context.restoreGState()
    }
}
The above code achieves what I want (green region at the top).
However, for performance reasons, instead of performing the translate and scale transform operations on every draw call,
is there a way to perform the vertical flip on self.cgImage directly, just once?
The ideal code would look like the following:
class ColorWheelPalette: UIView {
    ...
    override func draw(_ rect: CGRect) {
        ...
        if self.cgImage == nil {
            ...
            self.cgImage = ciContext.createCGImage(ciImage, from: ciImage.extent)
            // Question: Can we perform a one-time vertical flip on self.cgImage right here?
        }
        context.draw(cgImage!, in: rect)
    }
}
Just create a UIImage from your CGImage and draw it directly:
class ColorWheelPalette: UIView {
    var image: UIImage?

    override func draw(_ rect: CGRect) {
        if image == nil {
            guard
                let filter = CIFilter(name: "CIHueSaturationValueGradient", parameters: [
                    "inputColorSpace": CGColorSpaceCreateDeviceRGB(),
                    "inputDither": 0,
                    "inputRadius": 160,
                    "inputSoftness": 0,
                    "inputValue": 1]),
                let ciImage = filter.outputImage,
                let cgImage = CIContext(options: nil)
                    .createCGImage(ciImage, from: ciImage.extent)
            else { return }
            let size = ciImage.extent.size
            let format = UIGraphicsImageRendererFormat.default()
            format.opaque = false
            image = UIGraphicsImageRenderer(size: size, format: format).image { ctx in
                // Flip vertically, once, at render time
                var transform = CGAffineTransform(scaleX: 1, y: -1)
                transform = transform.translatedBy(x: 0, y: -size.height)
                ctx.cgContext.concatenate(transform)
                ctx.cgContext.draw(cgImage, in: CGRect(origin: .zero, size: size))
            }
        }
        image?.draw(in: rect)
    }
}
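A follow-up note: if even that redraw feels wasteful, a one-time vertical flip can also be expressed without any extra render pass by wrapping the same CGImage in a UIImage with a mirrored orientation, which UIKit honors at draw time. An untested sketch:
// Hypothetical alternative: .downMirrored flips the bitmap vertically when drawn,
// so no offscreen renderer pass is needed at all
let flipped = UIImage(cgImage: cgImage, scale: 1, orientation: .downMirrored)
flipped.draw(in: rect)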
I'm trying to make an AR app using ARKit that allows the user to draw lipstick on their face. How do I map screen coordinates to texture coordinates on the face mesh?
func transformToTextureCoordinates(screenCoordinates: CGPoint) -> CGPoint {
    // Hit-test from the screen into the face geometry
    let hitTestResults = sceneView.hitTest(screenCoordinates, options: nil)
    guard let result = hitTestResults.first else {
        return CGPoint(x: -1.0, y: -1.0)
    }
    let world_coords = result.worldCoordinates
    // --- HELP! ---
}
func drawLine(from fromPoint: CGPoint, to toPoint: CGPoint) {
    // Transform the screen coordinates to texture coordinates
    let fromPoint_transformed = transformToTextureCoordinates(screenCoordinates: fromPoint)
    let toPoint_transformed = transformToTextureCoordinates(screenCoordinates: toPoint)

    // Draw the line on the texture image
    UIGraphicsBeginImageContext(view.frame.size)
    guard let context = UIGraphicsGetCurrentContext() else {
        return
    }
    textureImage?.draw(in: view.bounds)
    context.move(to: fromPoint_transformed)
    context.addLine(to: toPoint_transformed)
    context.setLineCap(.round)
    context.setBlendMode(.normal)
    context.setLineWidth(brushWidth)
    context.setStrokeColor(color.cgColor)
    context.strokePath()
    textureImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
}
It's actually super simple: no transformation matrix is needed. The hit test provides the texture coordinates (u, v) in the 0-to-1 range, so multiplying them by the texture width and height gives you the pixel coordinates on the texture:
func transformToTextureCoordinates(screenCoordinates: CGPoint) -> CGPoint {
    let hitTestResults = sceneView.hitTest(screenCoordinates, options: nil)
    guard let result = hitTestResults.first else {
        return CGPoint(x: -1, y: -1)
    }
    let textureCoordinates = result.textureCoordinates(withMappingChannel: 0)
    return CGPoint(x: textureCoordinates.x * textureImage.size.width,
                   y: textureCoordinates.y * textureImage.size.height)
}
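For completeness, a sketch of how these two functions might be driven from touch handling, assuming sceneView, textureImage, brushWidth, and color are properties of the hosting view controller (the property names are illustrative):
var lastPoint: CGPoint = .zero

override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    guard let touch = touches.first else { return }
    lastPoint = touch.location(in: sceneView)
}

override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
    guard let touch = touches.first else { return }
    let currentPoint = touch.location(in: sceneView)
    // Feed consecutive screen points into the texture-space line drawing
    drawLine(from: lastPoint, to: currentPoint)
    lastPoint = currentPoint
}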
How can I obtain an image that is blended with the image underneath it and rotated around its center?
I'm trying to show an image that is blended with whatever view is underneath it. In my app the user is able to move, expand, and rotate the blending image.
To do this I have one main image view, and on top of it one smaller image view that is used for the blending. The blending image view's image property is updated every time its position changes.
The blending works fine as long as the image is not rotated; as soon as the rotation changes, the image returned by my function is no longer at the correct position.
I have tried 2 different versions of the same function.
Version 1: updateBlendingView
create a context with the size of the blending image view
draw the main image view at an offset so that the context is at the position of the blending image view
move the context to its center
rotate the context to the rotation of the blending image view
draw the blending image view with a blend mode
get the image from the context and update the image view
Version 2: updateBlendingView2
create a context with the size of the blending image view
move the context to its center
inversely rotate the context
draw the main image view at an offset from the center
undo the rotation
draw the blending view at its origin, offset from the origin of the context
get the image from the context and update the blending image view
Current result:
Expected result:
Note: keep in mind that the intensity of the blending in Sketch might differ from the app, but the issue is not that; it's about the image positioning :)
func updateBlendingView() {
    guard let mainViewImage = getImageFromView(view: mainImageView) else { return }

    // Reset image to original state before blending
    blendingImageView.image = UIImage(named: "square")!
    guard let blendingImage = getImageFromView(view: blendingImageView) else { return }

    UIGraphicsBeginImageContextWithOptions(blendingImageView.bounds.size, true, UIScreen.main.scale)
    guard let context = UIGraphicsGetCurrentContext() else { UIGraphicsEndImageContext(); return }

    let blendingOriginTransform = CGAffineTransform(translationX: (blendingImageView.bounds.width * 0.5), y: (blendingImageView.bounds.height * 0.5))
    let radians: Float = atan2f(Float(blendingImageView.transform.b), Float(blendingImageView.transform.a))
    let rotateTransform: CGAffineTransform = CGAffineTransform(rotationAngle: CGFloat(radians))

    let mainViewOrigin = CGPoint(x: -blendingImageView.frame.origin.x, y: -blendingImageView.frame.origin.y)
    mainViewImage.draw(at: mainViewOrigin)

    context.concatenate(blendingOriginTransform)
    context.concatenate(rotateTransform)

    let blendingDrawingOrigin = CGPoint(x: -blendingImageView.bounds.width * 0.5, y: -blendingImageView.bounds.height * 0.5)
    blendingImage.draw(at: blendingDrawingOrigin, blendMode: .multiply, alpha: 0.5)

    guard let blendedImage = UIGraphicsGetImageFromCurrentImageContext() else {
        UIGraphicsEndImageContext()
        return
    }
    UIGraphicsEndImageContext()
    blendingImageView.image = blendedImage
}
func updateBlendingView2() {
    guard let mainViewImage = getImageFromView(view: mainImageView) else { return }

    // Reset image to original state before blending
    blendingImageView.image = UIImage(named: "square")!
    guard let blendingImage = getImageFromView(view: blendingImageView) else { return }

    UIGraphicsBeginImageContextWithOptions(blendingImageView.bounds.size, true, UIScreen.main.scale)
    guard let context = UIGraphicsGetCurrentContext() else { UIGraphicsEndImageContext(); return }

    let blendingOriginTransform = CGAffineTransform(translationX: (blendingImageView.bounds.width * 0.5), y: (blendingImageView.bounds.height * 0.5))
    let radians: Float = atan2f(Float(blendingImageView.transform.b), Float(blendingImageView.transform.a))
    let rotateTransform: CGAffineTransform = CGAffineTransform(rotationAngle: CGFloat(radians))

    context.concatenate(blendingOriginTransform)
    context.concatenate(rotateTransform.inverted())

    let mainViewOrigin = CGPoint(x: -blendingImageView.frame.origin.x - (blendingImageView.bounds.width * 0.5), y: -blendingImageView.frame.origin.y - (blendingImageView.bounds.height * 0.5))
    mainViewImage.draw(at: mainViewOrigin)

    // Undo the rotation
    context.concatenate(rotateTransform)

    let blendingDrawingOrigin = CGPoint(x: -blendingImageView.bounds.width * 0.5, y: -blendingImageView.bounds.height * 0.5)
    blendingImage.draw(at: blendingDrawingOrigin, blendMode: .multiply, alpha: 0.5)

    guard let blendedImage = UIGraphicsGetImageFromCurrentImageContext() else {
        UIGraphicsEndImageContext()
        return
    }
    UIGraphicsEndImageContext()
    blendingImageView.image = blendedImage
}
func getImageFromView(view: UIView) -> UIImage? {
    UIGraphicsBeginImageContextWithOptions(view.bounds.size, true, UIScreen.main.scale)
    guard let context = UIGraphicsGetCurrentContext() else { UIGraphicsEndImageContext(); return nil }
    view.layer.render(in: context)
    let snapshotImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return snapshotImage
}
Any help would be appreciated, many thanks.
You can find a small demo project illustrating this question at this link:
https://github.com/Gregortega/BlendDemo
I have also looked into different tutorials on Core Graphics to understand the issue:
Swift: Translating and Rotating a CGContext, A Visual Explanation (iOS/Xcode)
I will answer my own question. A friend gave me a solution for the rotation issue.
The solution was to account for the difference (change) in position of the blending image view.
func updateBlendingView2(xDiff: CGFloat = 0, yDiff: CGFloat = 0) {
    guard let mainViewImage = getImageFromView(view: mainImageView) else { return }

    // Reset image to original state before blending
    blendingImageView.image = UIImage(named: "square")!
    guard let blendingImage = getImageFromView(view: blendingImageView) else { return }

    UIGraphicsBeginImageContextWithOptions(blendingImageView.bounds.size, true, UIScreen.main.scale)
    guard let context = UIGraphicsGetCurrentContext() else { UIGraphicsEndImageContext(); return }

    // X, Y translation
    let blendingOriginTransform = CGAffineTransform(translationX: (blendingImageView.bounds.width * 0.5), y: (blendingImageView.bounds.height * 0.5))
    context.concatenate(blendingOriginTransform)

    // Rotation
    let radians: Float = atan2f(Float(blendingImageView.transform.b), Float(blendingImageView.transform.a))
    let rotateTransform: CGAffineTransform = CGAffineTransform(rotationAngle: CGFloat(radians))
    context.concatenate(rotateTransform.inverted())

    var mainViewOrigin = self.lastMainViewOrign
    mainViewOrigin = CGPoint(x: mainViewOrigin.x + xDiff, y: mainViewOrigin.y + yDiff)
    mainViewImage.draw(at: mainViewOrigin)
    self.lastMainViewOrign = mainViewOrigin

    // Undo the rotation
    context.concatenate(rotateTransform)

    let blendingDrawingOrigin = CGPoint(x: -blendingImageView.bounds.width * 0.5, y: -blendingImageView.bounds.height * 0.5)
    blendingImage.draw(at: blendingDrawingOrigin, blendMode: .multiply, alpha: 0.5)

    guard let blendedImage = UIGraphicsGetImageFromCurrentImageContext() else {
        UIGraphicsEndImageContext()
        return
    }
    UIGraphicsEndImageContext()
    blendingImageView.image = blendedImage
}
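As a side note, the UIGraphicsBeginImageContextWithOptions pattern used throughout could be replaced by UIGraphicsImageRenderer on iOS 10+. A rough, untested equivalent of the snapshot helper:
func getImageFromView(view: UIView) -> UIImage? {
    let format = UIGraphicsImageRendererFormat.default()
    format.opaque = true // matches the `true` opaque flag above; the default scale is the screen scale
    let renderer = UIGraphicsImageRenderer(bounds: view.bounds, format: format)
    return renderer.image { ctx in
        view.layer.render(in: ctx.cgContext)
    }
}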
It is pretty easy to add a border to a UIImageView using layers (borderWidth, borderColor, etc.). Is there any possibility to add the border to the image itself, not to the image view? Does somebody know?
Update:
I tried to follow the suggestion below and used an extension. Thank you for that, but I did not get the desired result. Here is my code. What is wrong?
import UIKit

class ViewController: UIViewController {
    var imageView: UIImageView!
    var sizeW = CGFloat()
    var sizeH = CGFloat()

    override func viewDidLoad() {
        super.viewDidLoad()
        sizeW = view.frame.width
        sizeH = view.frame.height
        setImage()
    }

    func setImage() {
        // Add the image view
        imageView = UIImageView(frame: CGRect(x: 0, y: 0, width: sizeW/2, height: sizeH/2))
        imageView.center = view.center
        imageView.tintColor = UIColor.orange
        imageView.contentMode = UIViewContentMode.scaleAspectFit
        let imgOriginal = UIImage(named: "plum")!.withRenderingMode(.alwaysTemplate)
        let borderImage = imgOriginal.imageWithBorder(width: 2, color: UIColor.blue)
        imageView.image = borderImage
        view.addSubview(imageView)
    }
}
extension UIImage {
    func imageWithBorder(width: CGFloat, color: UIColor) -> UIImage? {
        let square = CGSize(width: min(size.width, size.height) + width * 2,
                            height: min(size.width, size.height) + width * 2)
        let imageView = UIImageView(frame: CGRect(origin: CGPoint(x: 0, y: 0), size: square))
        imageView.contentMode = .center
        imageView.image = self
        imageView.layer.borderWidth = width
        imageView.layer.borderColor = color.cgColor
        UIGraphicsBeginImageContextWithOptions(imageView.bounds.size, false, scale)
        guard let context = UIGraphicsGetCurrentContext() else { return nil }
        imageView.layer.render(in: context)
        let result = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return result
    }
}
The second image with the red border is more or less what I need:
Strongly inspired by @herme5, refactored into more compact Swift 5/iOS 12+ code as follows (fixed the vertical flip issue as well):
public extension UIImage {
    /**
     Returns the flat colorized version of the image, or self when something went wrong.
     - Parameters:
        - color: The color to use. Defaults to `UIColor.white`.
     - Returns: The flat colorized version of the image, or self if something went wrong.
     */
    func colorized(with color: UIColor = .white) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(size, false, scale)
        defer {
            UIGraphicsEndImageContext()
        }
        guard let context = UIGraphicsGetCurrentContext(), let cgImage = cgImage else { return self }
        let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        color.setFill()
        context.translateBy(x: 0, y: size.height)
        context.scaleBy(x: 1.0, y: -1.0)
        context.clip(to: rect, mask: cgImage)
        context.fill(rect)
        guard let colored = UIGraphicsGetImageFromCurrentImageContext() else { return self }
        return colored
    }

    /**
     Returns the stroked version of the transparent image, with the given stroke color and thickness.
     - Parameters:
        - color: The color to use. Defaults to `UIColor.white`.
        - thickness: The thickness of the border. Defaults to `2`.
        - quality: The angular step in degrees (out of 360): the smaller the better, but the slower. Defaults to `10`.
     - Returns: The stroked version of the image, or self if something went wrong.
     */
    func stroked(with color: UIColor = .white, thickness: CGFloat = 2, quality: CGFloat = 10) -> UIImage {
        guard let cgImage = cgImage else { return self }

        // Colorize the stroke image to reflect the border color
        let strokeImage = colorized(with: color)
        guard let strokeCGImage = strokeImage.cgImage else { return self }

        /// Rendering quality of the stroke
        let step = quality == 0 ? 10 : abs(quality)

        let oldRect = CGRect(x: thickness, y: thickness, width: size.width, height: size.height).integral
        let newSize = CGSize(width: size.width + 2 * thickness, height: size.height + 2 * thickness)
        let translationVector = CGPoint(x: thickness, y: 0)

        UIGraphicsBeginImageContextWithOptions(newSize, false, scale)
        guard let context = UIGraphicsGetCurrentContext() else { return self }
        defer {
            UIGraphicsEndImageContext()
        }
        context.translateBy(x: 0, y: newSize.height)
        context.scaleBy(x: 1.0, y: -1.0)
        context.interpolationQuality = .high

        // Draw the colorized "shade" image repeatedly, rotated around the center
        for angle: CGFloat in stride(from: 0, to: 360, by: step) {
            let vector = translationVector.rotated(around: .zero, byDegrees: angle)
            let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
            context.concatenate(transform)
            context.draw(strokeCGImage, in: oldRect)
            let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
            context.concatenate(resetTransform)
        }

        // Draw the original image on top
        context.draw(cgImage, in: oldRect)
        guard let stroked = UIGraphicsGetImageFromCurrentImageContext() else { return self }
        return stroked
    }
}
extension CGPoint {
    /**
     Rotates the point around the center `origin` by `byDegrees` degrees along the Z axis.
     - Parameters:
        - origin: The center of the rotation.
        - byDegrees: Amount of degrees to rotate around the Z axis.
     - Returns: The rotated point.
     */
    func rotated(around origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
        let dx = x - origin.x
        let dy = y - origin.y
        let radius = sqrt(dx * dx + dy * dy)
        let azimuth = atan2(dy, dx) // in radians
        let newAzimuth = azimuth + byDegrees * .pi / 180.0 // to radians
        let x = origin.x + radius * cos(newAzimuth)
        let y = origin.y + radius * sin(newAzimuth)
        return CGPoint(x: x, y: y)
    }
}
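Usage might look like this (assuming a transparent-background template image named "star" in the asset catalog and an existing imageView; both names are illustrative):
// Hypothetical usage of the two extensions above
let original = UIImage(named: "star")
let tinted = original?.colorized(with: .orange)
let outlined = original?.stroked(with: .blue, thickness: 3, quality: 5)
imageView.image = outlined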
Here is a UIImage extension I wrote in Swift 4. As IOSDealBreaker said, this is all about image processing, and some particular cases may occur. You should have a PNG image with a transparent background, and manage the size if it is larger than the original.
First, get a colorised "shade" version of your image.
Then draw and redraw the shade image all around a given origin point (in our case around (0,0), at a distance that is the border thickness).
Draw your source image at the origin point so that it appears in the foreground.
You may have to enlarge your image if the borders go outside the original rect.
My method uses a lot of util methods and class extensions. Here is some maths to rotate a vector (which is actually a point) around another point: Rotating a CGPoint around another CGPoint
extension CGPoint {
    func rotated(around origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
        let dx = self.x - origin.x
        let dy = self.y - origin.y
        let radius = sqrt(dx * dx + dy * dy)
        let azimuth = atan2(dy, dx) // in radians
        let newAzimuth = azimuth + (byDegrees * CGFloat.pi / 180.0) // convert it to radians
        let x = origin.x + radius * cos(newAzimuth)
        let y = origin.y + radius * sin(newAzimuth)
        return CGPoint(x: x, y: y)
    }
}
I wrote my own custom CIFilter to colorise an image which has a transparent background: Colorize a UIImage in Swift
class ColorFilter: CIFilter {
    var inputImage: CIImage?
    var inputColor: CIColor?

    private let kernel: CIColorKernel = {
        let kernelString = """
            kernel vec4 colorize(__sample pixel, vec4 color) {
                pixel.rgb = pixel.a * color.rgb;
                pixel.a *= color.a;
                return pixel;
            }
            """
        return CIColorKernel(source: kernelString)!
    }()

    override var outputImage: CIImage? {
        guard let inputImage = inputImage, let inputColor = inputColor else { return nil }
        let inputs = [inputImage, inputColor] as [Any]
        return kernel.apply(extent: inputImage.extent, arguments: inputs)
    }
}
extension UIImage {
    func colorized(with color: UIColor) -> UIImage {
        guard let cgInput = self.cgImage else {
            return self
        }
        let colorFilter = ColorFilter()
        colorFilter.inputImage = CIImage(cgImage: cgInput)
        colorFilter.inputColor = CIColor(color: color)
        if let ciOutputImage = colorFilter.outputImage {
            let context = CIContext(options: nil)
            let cgImg = context.createCGImage(ciOutputImage, from: ciOutputImage.extent)
            // Note: alpha(_:) and rgba are among the util extensions mentioned above
            return UIImage(cgImage: cgImg!, scale: self.scale, orientation: self.imageOrientation).alpha(color.rgba.alpha).withRenderingMode(self.renderingMode)
        } else {
            return self
        }
    }
}
At this point you should have everything to make this work:
extension UIImage {
    func stroked(with color: UIColor, size: CGFloat) -> UIImage {
        let strokeImage = self.colorized(with: color)
        let oldRect = CGRect(x: size, y: size, width: self.size.width, height: self.size.height).integral
        let newSize = CGSize(width: self.size.width + (2 * size), height: self.size.height + (2 * size))
        let translationVector = CGPoint(x: size, y: 0)

        UIGraphicsBeginImageContextWithOptions(newSize, false, self.scale)
        if let context = UIGraphicsGetCurrentContext() {
            context.interpolationQuality = .high
            let step = 10 // reduce the step to increase quality
            for angle in stride(from: 0, to: 360, by: step) {
                let vector = translationVector.rotated(around: .zero, byDegrees: CGFloat(angle))
                let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
                context.concatenate(transform)
                context.draw(strokeImage.cgImage!, in: oldRect)
                let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
                context.concatenate(resetTransform)
            }
            context.draw(self.cgImage!, in: oldRect)
            let newImage = UIImage(cgImage: context.makeImage()!, scale: self.scale, orientation: self.imageOrientation)
            UIGraphicsEndImageContext()
            return newImage.withRenderingMode(self.renderingMode)
        }
        UIGraphicsEndImageContext()
        return self
    }
}
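Usage is then a one-liner (assuming a transparent-background PNG named "plum" and an existing iconView; both names are illustrative):
// Hypothetical usage: a 4pt red outline around the transparent image
let icon = UIImage(named: "plum")
iconView.image = icon?.stroked(with: .red, size: 4)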
Adding borders to images belongs to the image-processing area of iOS. It's not as easy as adding a border to a UIView; it's pretty deep. But if you're willing to go the distance, here is a library and a hint for the journey:
https://github.com/BradLarson/GPUImage
try using GPUImageThresholdEdgeDetectionFilter
or try OpenCV https://docs.opencv.org/2.4/doc/tutorials/ios/image_manipulation/image_manipulation.html
Use this simple extension on UIImage:
extension UIImage {
    func outline() -> UIImage? {
        UIGraphicsBeginImageContext(size)
        let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        self.draw(in: rect, blendMode: .normal, alpha: 1.0)
        let context = UIGraphicsGetCurrentContext()
        context?.setStrokeColor(red: 1.0, green: 0.5, blue: 1.0, alpha: 1.0)
        context?.setLineWidth(5.0)
        context?.stroke(rect)
        let newImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return newImage
    }
}
It will give you an image with a pink border.
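If you need other colors or widths, a parameterized variant is straightforward; here is a hypothetical generalization of the same approach:
extension UIImage {
    // Hypothetical generalization of outline(): the caller picks color and width
    func outlined(with color: UIColor, width: CGFloat) -> UIImage? {
        let rect = CGRect(origin: .zero, size: size)
        UIGraphicsBeginImageContextWithOptions(size, false, scale)
        defer { UIGraphicsEndImageContext() }
        self.draw(in: rect, blendMode: .normal, alpha: 1.0)
        guard let context = UIGraphicsGetCurrentContext() else { return nil }
        context.setStrokeColor(color.cgColor)
        context.setLineWidth(width)
        context.stroke(rect)
        return UIGraphicsGetImageFromCurrentImageContext()
    }
}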
How do I cut the frame that I receive as faceViewBounds to make a big circle around the face? It should be like a badge with the person's face.
Maybe I should get the center of faceViewBounds, then find that center in theImageView.image, draw a circle with a big diameter, and then cut away everything outside of the circle, but I don't know how to do it in code. Any suggestions?
func detectFaceFrom(ImageView theImageView: UIImageView) {
    guard let personImage = CIImage(image: theImageView.image!) else {
        return
    }
    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyLow]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    let faces = faceDetector?.features(in: personImage)
    let ciImageSize = personImage.extent.size
    var transform = CGAffineTransform(scaleX: 1, y: -1)
    transform = transform.translatedBy(x: 0, y: -ciImageSize.height)

    if faces?.count == 1 {
        for face in faces as! [CIFaceFeature] {
            var faceViewBounds = face.bounds.applying(transform)
            let viewSize = theImageView.bounds.size
            let scale = min(viewSize.width / ciImageSize.width,
                            viewSize.height / ciImageSize.height)
            let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
            let offsetY = (viewSize.height - ciImageSize.height * scale) / 2
            faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
            faceViewBounds.origin.x += offsetX
            faceViewBounds.origin.y += offsetY

            let faceBox = UIView(frame: faceViewBounds)
            faceBox.layer.borderWidth = 3
            faceBox.layer.borderColor = UIColor.green.cgColor
            faceBox.backgroundColor = UIColor.clear
            drawCircleFromCenter(faceViewBounds.center ???
        }
        return cuttedCircleWithFace
    } else {
        return theImageView.image
    }
}
I just saw an ad on Facebook with the exact same thing I want to accomplish:
The problem is that you should use your image.size instead of theImageView.bounds.size. You should also handle the CIDetectorImageOrientation feature option.
extension UIImage {
    var faces: [UIImage] {
        guard let ciimage = CIImage(image: self) else { return [] }
        var orientation: NSNumber {
            switch imageOrientation {
            case .up:            return 1
            case .upMirrored:    return 2
            case .down:          return 3
            case .downMirrored:  return 4
            case .leftMirrored:  return 5
            case .right:         return 6
            case .rightMirrored: return 7
            case .left:          return 8
            }
        }
        return CIDetector(ofType: CIDetectorTypeFace, context: nil, options: [CIDetectorAccuracy: CIDetectorAccuracyLow])?
            .features(in: ciimage, options: [CIDetectorImageOrientation: orientation])
            .compactMap {
                // Add some padding around the detected face
                let rect = $0.bounds.insetBy(dx: -10, dy: -10)
                UIGraphicsBeginImageContextWithOptions(rect.size, false, scale)
                defer { UIGraphicsEndImageContext() }
                UIImage(ciImage: ciimage.cropped(to: rect)).draw(in: CGRect(origin: .zero, size: rect.size))
                guard let face = UIGraphicsGetImageFromCurrentImageContext() else { return nil }
                // Now that you have your face image you need to properly apply a circle mask to it
                let size = face.size
                let breadth = min(size.width, size.height)
                let breadthSize = CGSize(width: breadth, height: breadth)
                UIGraphicsBeginImageContextWithOptions(breadthSize, false, scale)
                defer { UIGraphicsEndImageContext() }
                guard let cgImage = face.cgImage?.cropping(to: CGRect(origin: CGPoint(x: size.width > size.height ? (size.width - size.height).rounded(.down) / 2 : 0,
                                                                                      y: size.height > size.width ? (size.height - size.width).rounded(.down) / 2 : 0),
                                                                      size: breadthSize))
                else { return nil }
                let faceRect = CGRect(origin: .zero, size: CGSize(width: min(size.width, size.height), height: min(size.width, size.height)))
                UIBezierPath(ovalIn: faceRect).addClip()
                UIImage(cgImage: cgImage).draw(in: faceRect)
                return UIGraphicsGetImageFromCurrentImageContext()
            } ?? []
    }
}
let profilePicture = UIImage(data: try! Data(contentsOf: URL(string: "http://i.stack.imgur.com/Xs4RX.jpg")!))!
if let face = profilePicture.faces.first {
    print(face.size)
}
If you just want to focus on a face inside an image, you should first set up an image view and mask it into a circle:
let image = UIImage(named: "face.jpg")
let imageView = UIImageView(frame: CGRect(x: 0, y: 0, width: 50.0, height: 50.0))
imageView.image = image
imageView.contentMode = .scaleAspectFill
imageView.layer.cornerRadius = imageView.bounds.height * 0.5
imageView.layer.masksToBounds = true
Next you run the CIDetector
func focusOnFace(in imageView: UIImageView) {
    guard let image = imageView.image,
          var personImage = CIImage(image: image) else { return }

    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyLow]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)

    // This will just take the first detected face, but you can do something more sophisticated
    guard let face = faceDetector?.features(in: personImage).first as? CIFaceFeature else { return }

    // Make the facial rect a square so it will mask nicely to a circle
    // (may not be strictly necessary, as `CIFaceFeature` bounds is typically a square)
    var rect = face.bounds
    rect.size.height = max(face.bounds.height, face.bounds.width)
    rect.size.width = max(face.bounds.height, face.bounds.width)
    rect = rect.insetBy(dx: -30, dy: -30) // Adds padding around the face so it's not so tightly cropped

    // Crop to the detected face
    personImage = personImage.cropped(to: rect)

    // Set the new cropped image as the image view image
    imageView.image = UIImage(ciImage: personImage)
}
Example
Before running focusOnFace:
After running focusOnFace:
Updated Example
Before running focusOnFace:
After running focusOnFace:
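As an aside, on iOS 11+ the Vision framework is the more modern route for face detection than CIDetector. A minimal sketch (the function name is illustrative; note that VNFaceObservation.boundingBox is normalized with a bottom-left origin, so it has to be scaled and flipped into pixel coordinates):
import Vision

func detectFaceRects(in image: UIImage, completion: @escaping ([CGRect]) -> Void) {
    guard let cgImage = image.cgImage else { completion([]); return }
    let size = CGSize(width: cgImage.width, height: cgImage.height)
    let request = VNDetectFaceRectanglesRequest { request, _ in
        let rects = (request.results as? [VNFaceObservation] ?? []).map { face -> CGRect in
            // Convert the normalized, bottom-left-origin box to pixel coordinates
            let box = face.boundingBox
            return CGRect(x: box.origin.x * size.width,
                          y: (1 - box.origin.y - box.height) * size.height,
                          width: box.width * size.width,
                          height: box.height * size.height)
        }
        completion(rects)
    }
    try? VNImageRequestHandler(cgImage: cgImage, orientation: .up).perform([request])
}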