I need to convert the CGPoints received from a VNRectangleObservation (bottomLeft, bottomRight, topLeft, topRight) to another coordinate system (e.g. a view's coordinates on screen).
I define a request:
// Rectangle Request
let rectangleDetectionRequest = VNDetectRectanglesRequest(completionHandler: handleRectangles)
rectangleDetectionRequest.minimumSize = 0.5
rectangleDetectionRequest.maximumObservations = 1
I get the sampleBuffer from the camera in the delegate call and perform the detection request (self.requests below is assumed to hold the rectangleDetectionRequest defined above):
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

    var requestOptions: [VNImageOption: Any] = [:]
    if let cameraIntrinsicData = CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, nil) {
        requestOptions = [.cameraIntrinsics: cameraIntrinsicData]
    }

    // .right is the same as CGImagePropertyOrientation(rawValue: 6)
    let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: .right, options: requestOptions)
    do {
        try imageRequestHandler.perform(self.requests)
    } catch {
        print(error)
    }
}
Later, in the completion handler, I receive the results:
func handleRectangles(request: VNRequest, error: Error?) {
    guard let results = request.results as? [VNRectangleObservation] else { return }

    let flipTransform = CGAffineTransform(scaleX: 1, y: -1).translatedBy(x: 0, y: -self.previewView.frame.height)
    let scaleTransform = CGAffineTransform.identity.scaledBy(x: self.previewView.frame.width, y: self.previewView.frame.height)

    for rectangle in results {
        let rectangleBounds = rectangle.boundingBox.applying(scaleTransform).applying(flipTransform)
        // convertedTopLeft = conversion(rectangle.topLeft)
        // convertedTopRight = conversion(rectangle.topRight)
        // convertedBottomLeft = conversion(rectangle.bottomLeft)
        // convertedBottomRight = conversion(rectangle.bottomRight)
    }
}
This works for boundingBox, which is a CGRect, but I need to transform the individual CGPoints to the coordinate system of another view.
The problem is that I don't know how to get the transformation from the CMSampleBuffer's coordinate system to the previewView's coordinate system.
Thanks!
That was simply a matter of applying the transform to the CGPoints themselves, where size is the CGSize of the destination view into which I need to transpose the four points:
let transform = CGAffineTransform.identity
    .scaledBy(x: 1, y: -1)
    .translatedBy(x: 0, y: -size.height)
    .scaledBy(x: size.width, y: size.height)
let convertedTopLeft = rectangle.topLeft.applying(transform)
let convertedTopRight = rectangle.topRight.applying(transform)
let convertedBottomLeft = rectangle.bottomLeft.applying(transform)
let convertedBottomRight = rectangle.bottomRight.applying(transform)
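As a usage sketch (hedged: previewView and an overlayLayer CAShapeLayer added to it are my assumptions, and Vision completion handlers may run off the main thread), the converted corners can be drawn as an overlay:

DispatchQueue.main.async {
    // Build a quad through the four converted corners.
    let path = UIBezierPath()
    path.move(to: convertedTopLeft)
    path.addLine(to: convertedTopRight)
    path.addLine(to: convertedBottomRight)
    path.addLine(to: convertedBottomLeft)
    path.close()

    // overlayLayer: a CAShapeLayer previously added to previewView (assumed).
    self.overlayLayer.path = path.cgPath
    self.overlayLayer.strokeColor = UIColor.red.cgColor
    self.overlayLayer.fillColor = UIColor.clear.cgColor
}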
@mihaicris's answer works, but only in portrait mode. In landscape, we need to do it a little differently:
let transform: CGAffineTransform
if UIApplication.shared.statusBarOrientation.isLandscape {
    // In landscape it is the x axis that needs mirroring.
    transform = CGAffineTransform.identity
        .scaledBy(x: -1, y: 1)
        .translatedBy(x: -size.width, y: 0)
        .scaledBy(x: size.width, y: size.height)
} else {
    // Portrait: flip y, as in the original answer.
    transform = CGAffineTransform.identity
        .scaledBy(x: 1, y: -1)
        .translatedBy(x: 0, y: -size.height)
        .scaledBy(x: size.width, y: size.height)
}
let convertedTopLeft = rectangle.topLeft.applying(transform)
let convertedTopRight = rectangle.topRight.applying(transform)
let convertedBottomLeft = rectangle.bottomLeft.applying(transform)
let convertedBottomRight = rectangle.bottomRight.applying(transform)
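As an aside, Vision also ships a small helper, VNImagePointForNormalizedPoint, that performs the scaling step of this conversion (it does not flip y or handle orientation, so the flips above are still needed). A minimal sketch:

import Vision

// Scale a normalized Vision point into a size.width × size.height point space.
let scaledTopLeft = VNImagePointForNormalizedPoint(rectangle.topLeft,
                                                   Int(size.width),
                                                   Int(size.height))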
I assume you use a layer for the camera, and that the layer is an AVCaptureVideoPreviewLayer (https://developer.apple.com/documentation/avfoundation/avcapturevideopreviewlayer).
So if you want to convert a single point, use this function: layerPointConverted(fromCaptureDevicePoint:) (https://developer.apple.com/documentation/avfoundation/avcapturevideopreviewlayer/1623502-layerpointconverted). Please notice that the y is inverted because of the VNRectangleObservation coordinate system.
let convertedTopLeft: CGPoint = cameraLayer.layerPointConverted(fromCaptureDevicePoint: CGPoint(x: rectangle.topLeft.x, y: 1 - rectangle.topLeft.y))
let convertedTopRight: CGPoint = cameraLayer.layerPointConverted(fromCaptureDevicePoint: CGPoint(x: rectangle.topRight.x, y: 1 - rectangle.topRight.y))
let convertedBottomLeft: CGPoint = cameraLayer.layerPointConverted(fromCaptureDevicePoint: CGPoint(x: rectangle.bottomLeft.x, y: 1 - rectangle.bottomLeft.y))
let convertedBottomRight: CGPoint = cameraLayer.layerPointConverted(fromCaptureDevicePoint: CGPoint(x: rectangle.bottomRight.x, y: 1 - rectangle.bottomRight.y))
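To avoid repeating the flip for every corner, the conversion can be wrapped in a small helper (the helper name is mine; cameraLayer is the preview layer from above):

func convert(_ normalizedPoint: CGPoint) -> CGPoint {
    // Vision uses a bottom-left origin; capture-device space is top-left,
    // so flip y before asking the preview layer to convert.
    let flipped = CGPoint(x: normalizedPoint.x, y: 1 - normalizedPoint.y)
    return cameraLayer.layerPointConverted(fromCaptureDevicePoint: flipped)
}

let corners = [rectangle.topLeft, rectangle.topRight,
               rectangle.bottomRight, rectangle.bottomLeft].map(convert)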
Hope it helped
I am stuck at this point. I want an outline as per the image, and I want output as per this video.
I tried this code, but it was not working smoothly:
extension CGPoint {
    /**
     Rotates the point around the center `origin` by `byDegrees` degrees along the Z axis.
     - Parameters:
       - origin: The center of the rotation;
       - byDegrees: Amount of degrees to rotate around the Z axis.
     - Returns: The rotated point.
     */
    func rotated(around origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
        let dx = x - origin.x
        let dy = y - origin.y
        let radius = sqrt(dx * dx + dy * dy)
        let azimuth = atan2(dy, dx) // in radians
        let newAzimuth = azimuth + byDegrees * .pi / 180.0 // to radians
        let x = origin.x + radius * cos(newAzimuth)
        let y = origin.y + radius * sin(newAzimuth)
        return CGPoint(x: x, y: y)
    }
}
public extension UIImage {
    /**
     Returns the flat colorized version of the image, or self when something went wrong.
     - Parameters:
       - color: The color to use. By default, uses `UIColor.white`.
     - Returns: The flat colorized version of the image, or self if something went wrong.
     */
    func colorized(with color: UIColor = .white) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(size, false, scale)
        defer {
            UIGraphicsEndImageContext()
        }
        guard let context = UIGraphicsGetCurrentContext(), let cgImage = cgImage else { return self }
        let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        color.setFill()
        context.translateBy(x: 0, y: size.height)
        context.scaleBy(x: 1.0, y: -1.0)
        context.clip(to: rect, mask: cgImage)
        context.fill(rect)
        guard let colored = UIGraphicsGetImageFromCurrentImageContext() else { return self }
        return colored
    }

    /**
     Returns the stroked version of the transparent image with the given stroke color and thickness.
     - Parameters:
       - color: The color to use. By default, uses `UIColor.white`.
       - thickness: The thickness of the border. Defaults to `2`.
       - quality: The angular step in degrees (out of 360): the smaller the better, but the slower. Defaults to `10`.
     - Returns: The stroked version of the image, or self if something went wrong.
     */
    func stroked(with color: UIColor = .white, thickness: CGFloat = 2, quality: CGFloat = 10) -> UIImage {
        guard let cgImage = cgImage else { return self }

        // Colorize the stroke image to reflect the border color
        let strokeImage = colorized(with: color)
        guard let strokeCGImage = strokeImage.cgImage else { return self }

        /// Rendering quality of the stroke
        let step = quality == 0 ? 10 : abs(quality)

        let oldRect = CGRect(x: thickness, y: thickness, width: size.width, height: size.height).integral
        let newSize = CGSize(width: size.width + 2 * thickness, height: size.height + 2 * thickness)
        let translationVector = CGPoint(x: thickness, y: 0)

        UIGraphicsBeginImageContextWithOptions(newSize, false, scale)
        guard let context = UIGraphicsGetCurrentContext() else { return self }
        defer {
            UIGraphicsEndImageContext()
        }
        context.translateBy(x: 0, y: newSize.height)
        context.scaleBy(x: 1.0, y: -1.0)
        context.interpolationQuality = .high

        // Stamp the colorized copy around a circle to build the outline
        for angle: CGFloat in stride(from: 0, to: 360, by: step) {
            let vector = translationVector.rotated(around: .zero, byDegrees: angle)
            let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
            context.concatenate(transform)
            context.draw(strokeCGImage, in: oldRect)
            let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
            context.concatenate(resetTransform)
        }
        context.draw(cgImage, in: oldRect)
        guard let stroked = UIGraphicsGetImageFromCurrentImageContext() else { return self }
        return stroked
    }
}
Try this one.
Please do NOT add constraints to the imageView except for top/left in the Storyboard.
//
//  ViewController.swift
//  AddBorderANDZoom
//
//  Created by ing.conti on 06/01/22.
//

import UIKit
import CoreGraphics
import AVFoundation

class ViewController: UIViewController {

    @IBOutlet weak var imageView: UIImageView!
    @IBOutlet weak var slider: UISlider!
    let maZoom = 20.0

    override func viewDidLoad() {
        super.viewDidLoad()
        self.slider.value = 1
        self.imageView.image = UIImage(named: "apple")?.outline(borderSize: self.maZoom)
    }

    @IBAction func didSlide(_ sender: UISlider) {
        let value = CGFloat(sender.value * 20)
        self.imageView.image = UIImage(named: "apple")?.outline(borderSize: value)
    }
}
/////

extension UIImage {

    func outline(borderSize: CGFloat = 6.0) -> UIImage? {
        let color = UIColor.black
        let W = self.size.width
        let H = self.size.height
        let scale = 1 + (borderSize / W)
        let outlinedImageRect = CGRect(x: -borderSize / 2,
                                       y: -borderSize / 2,
                                       width: W * scale,
                                       height: H * scale)
        let imageRect = CGRect(x: 0, y: 0, width: W, height: H)

        // NB: `scale` here is the zoom factor computed above, not the screen scale.
        UIGraphicsBeginImageContextWithOptions(outlinedImageRect.size, false, scale)
        self.draw(in: outlinedImageRect)

        let context = UIGraphicsGetCurrentContext()!
        context.setBlendMode(.sourceIn)
        context.setFillColor(color.cgColor)
        context.fill(outlinedImageRect)

        self.draw(in: imageRect)

        let newImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return newImage
    }
}
I am using the code below to modify UICollectionViewLayoutAttributes. Method 1 works but Method 2 does not; can anyone explain the difference?
guard let newAttrs = attributes?.copy() as? UICollectionViewLayoutAttributes,
      let imageViewInfo = imageViewInfo else { return nil }
let center = imageViewInfo.center

// Method 1
newAttrs.transform = CGAffineTransform(scaleX: scale, y: scale)
let x = (newAttrs.center.x - center.x) * scale + igCenter.x
let y = (newAttrs.center.y - center.y) * scale + igCenter.y
newAttrs.center = CGPoint(x: x, y: y)

// Method 2
newAttrs.transform = CGAffineTransform(translationX: -center.x, y: -center.y)
    .scaledBy(x: scale, y: scale)
    .translatedBy(x: igCenter.x, y: igCenter.y)
https://developer.apple.com/documentation/uikit/uiview/1622459-transform
Per the docs above, a view's transform is applied about its center point (the layer's anchor point), not its origin, and translatedBy(x:y:) concatenates in the transform's own, already-scaled coordinate space. So the translations in Method 2 are both scaled and applied about the wrong point. To change the position of the view/cell, modify 'center' instead.
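For completeness, a hedged sketch of what a working single-transform variant could look like: since the transform is applied about the cell's current center, translate by the delta between the old and new centers and prepend the scale (scaledBy prepends, so the scale runs first):

// Hypothetical equivalent of Method 1 as one transform (center untouched).
let targetX = (newAttrs.center.x - center.x) * scale + igCenter.x
let targetY = (newAttrs.center.y - center.y) * scale + igCenter.y
newAttrs.transform = CGAffineTransform(translationX: targetX - newAttrs.center.x,
                                       y: targetY - newAttrs.center.y)
    .scaledBy(x: scale, y: scale)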
I'm trying to make an AR app using ARKit that allows the user to draw lipstick on their faces. How do I map the screen coordinates to the texture coordinates of the face mesh?
func transformToTextureCoordinates(screenCoordinates: CGPoint) -> CGPoint {
    // Hit test from the screen into the face geometry
    let hitTestResults = sceneView.hitTest(screenCoordinates, options: nil)
    guard let result = hitTestResults.first else {
        return CGPoint(x: -1.0, y: -1.0)
    }
    let world_coords = result.worldCoordinates
    // --- HELP! ---
}
func drawLine(from fromPoint: CGPoint, to toPoint: CGPoint) {
    // Transform the screen coordinates to texture coordinates
    let fromPoint_transformed = transformToTextureCoordinates(screenCoordinates: fromPoint)
    let toPoint_transformed = transformToTextureCoordinates(screenCoordinates: toPoint)

    // Draw the line on the texture image
    UIGraphicsBeginImageContext(view.frame.size)
    guard let context = UIGraphicsGetCurrentContext() else {
        return
    }
    textureImage?.draw(in: view.bounds)
    context.move(to: fromPoint_transformed)
    context.addLine(to: toPoint_transformed)
    context.setLineCap(.round)
    context.setBlendMode(.normal)
    context.setLineWidth(brushWidth)
    context.setStrokeColor(color.cgColor)
    context.strokePath()
    textureImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
}
It's actually super simple: no need for a transformation matrix. The hit test provides the texture coordinates u, v in the 0-to-1 range, so multiplying by the texture width and height gives you the pixel coordinates on the texture:
func transformToTextureCoordinates(screenCoordinates: CGPoint) -> CGPoint {
    let hitTestResults = sceneView.hitTest(screenCoordinates, options: nil)
    guard let result = hitTestResults.first else {
        return CGPoint(x: -1, y: -1)
    }
    let textureCoordinates = result.textureCoordinates(withMappingChannel: 0)
    return CGPoint(x: textureCoordinates.x * textureImage.size.width,
                   y: textureCoordinates.y * textureImage.size.height)
}
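One caveat for drawLine above (an assumption on my part, since the rest of the pipeline isn't shown): once transformToTextureCoordinates returns pixel coordinates on the texture, the drawing context should match the texture's size rather than the view's, along the lines of:

// Draw in texture pixel space, not view space (textureImage assumed non-nil).
let textureSize = textureImage!.size
UIGraphicsBeginImageContextWithOptions(textureSize, false, 1)
textureImage?.draw(in: CGRect(origin: .zero, size: textureSize))
// ... move/addLine with the transformed points, stroke, then grab the image.
UIGraphicsEndImageContext()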
It is pretty easy to add a border to a UIImageView using layers (borderWidth, borderColor etc.). Is there any possibility to add a border to the image itself, not to the image view? Does somebody know?
Update:
I tried to follow the suggestion below and used an extension. Thank you for that, but I did not get the desired result. Here is my code. What is wrong?
import UIKit

class ViewController: UIViewController {

    var imageView: UIImageView!
    var sizeW = CGFloat()
    var sizeH = CGFloat()

    override func viewDidLoad() {
        super.viewDidLoad()
        sizeW = view.frame.width
        sizeH = view.frame.height
        setImage()
    }

    func setImage() {
        // Add the image view
        imageView = UIImageView(frame: CGRect(x: 0, y: 0, width: sizeW / 2, height: sizeH / 2))
        imageView.center = view.center
        imageView.tintColor = UIColor.orange
        imageView.contentMode = UIViewContentMode.scaleAspectFit
        let imgOriginal = UIImage(named: "plum")!.withRenderingMode(.alwaysTemplate)
        let borderImage = imgOriginal.imageWithBorder(width: 2, color: UIColor.blue)
        imageView.image = borderImage
        view.addSubview(imageView)
    }
}

extension UIImage {
    func imageWithBorder(width: CGFloat, color: UIColor) -> UIImage? {
        let square = CGSize(width: min(size.width, size.height) + width * 2, height: min(size.width, size.height) + width * 2)
        let imageView = UIImageView(frame: CGRect(origin: CGPoint(x: 0, y: 0), size: square))
        imageView.contentMode = .center
        imageView.image = self
        imageView.layer.borderWidth = width
        imageView.layer.borderColor = color.cgColor
        UIGraphicsBeginImageContextWithOptions(imageView.bounds.size, false, scale)
        guard let context = UIGraphicsGetCurrentContext() else { return nil }
        imageView.layer.render(in: context)
        let result = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return result
    }
}
The second image, with the red border, is more or less what I need.
Strongly inspired by @herme5, refactored into more compact Swift 5/iOS 12+ code as follows (fixed the vertical flip issue as well):
public extension UIImage {
    /**
     Returns the flat colorized version of the image, or self when something went wrong.
     - Parameters:
       - color: The color to use. By default, uses `UIColor.white`.
     - Returns: The flat colorized version of the image, or self if something went wrong.
     */
    func colorized(with color: UIColor = .white) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(size, false, scale)
        defer {
            UIGraphicsEndImageContext()
        }
        guard let context = UIGraphicsGetCurrentContext(), let cgImage = cgImage else { return self }
        let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        color.setFill()
        context.translateBy(x: 0, y: size.height)
        context.scaleBy(x: 1.0, y: -1.0)
        context.clip(to: rect, mask: cgImage)
        context.fill(rect)
        guard let colored = UIGraphicsGetImageFromCurrentImageContext() else { return self }
        return colored
    }

    /**
     Returns the stroked version of the transparent image with the given stroke color and thickness.
     - Parameters:
       - color: The color to use. By default, uses `UIColor.white`.
       - thickness: The thickness of the border. Defaults to `2`.
       - quality: The angular step in degrees (out of 360): the smaller the better, but the slower. Defaults to `10`.
     - Returns: The stroked version of the image, or self if something went wrong.
     */
    func stroked(with color: UIColor = .white, thickness: CGFloat = 2, quality: CGFloat = 10) -> UIImage {
        guard let cgImage = cgImage else { return self }

        // Colorize the stroke image to reflect the border color
        let strokeImage = colorized(with: color)
        guard let strokeCGImage = strokeImage.cgImage else { return self }

        /// Rendering quality of the stroke
        let step = quality == 0 ? 10 : abs(quality)

        let oldRect = CGRect(x: thickness, y: thickness, width: size.width, height: size.height).integral
        let newSize = CGSize(width: size.width + 2 * thickness, height: size.height + 2 * thickness)
        let translationVector = CGPoint(x: thickness, y: 0)

        UIGraphicsBeginImageContextWithOptions(newSize, false, scale)
        guard let context = UIGraphicsGetCurrentContext() else { return self }
        defer {
            UIGraphicsEndImageContext()
        }
        context.translateBy(x: 0, y: newSize.height)
        context.scaleBy(x: 1.0, y: -1.0)
        context.interpolationQuality = .high

        // Stamp the colorized copy around a circle to build the outline
        for angle: CGFloat in stride(from: 0, to: 360, by: step) {
            let vector = translationVector.rotated(around: .zero, byDegrees: angle)
            let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
            context.concatenate(transform)
            context.draw(strokeCGImage, in: oldRect)
            let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
            context.concatenate(resetTransform)
        }
        context.draw(cgImage, in: oldRect)
        guard let stroked = UIGraphicsGetImageFromCurrentImageContext() else { return self }
        return stroked
    }
}
extension CGPoint {
    /**
     Rotates the point around the center `origin` by `byDegrees` degrees along the Z axis.
     - Parameters:
       - origin: The center of the rotation;
       - byDegrees: Amount of degrees to rotate around the Z axis.
     - Returns: The rotated point.
     */
    func rotated(around origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
        let dx = x - origin.x
        let dy = y - origin.y
        let radius = sqrt(dx * dx + dy * dy)
        let azimuth = atan2(dy, dx) // in radians
        let newAzimuth = azimuth + byDegrees * .pi / 180.0 // to radians
        let x = origin.x + radius * cos(newAzimuth)
        let y = origin.y + radius * sin(newAzimuth)
        return CGPoint(x: x, y: y)
    }
}
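For reference, a typical call site for the refactored extension (the image name is a placeholder):

// Blue 4-point outline around a transparent-background source image.
let outlined = UIImage(named: "plum")?.stroked(with: .blue, thickness: 4)
imageView.image = outlined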
Here is a UIImage extension I wrote in Swift 4. As IOSDealBreaker said, this is all about image processing, and some particular cases may occur. You should have a PNG image with a transparent background, and manage the size if it ends up larger than the original.
First get a colorised "shade" version of your image.
Then draw and redraw the shade image all around a given origin point (in our case around (0,0), at a distance that is the border thickness).
Draw your source image at the origin point so that it appears in the foreground.
You may have to enlarge your image if the borders go out of the original rect.
My method uses a lot of util methods and class extensions. Here is some maths to rotate a vector (which is actually a point) around another point: Rotating a CGPoint around another CGPoint
extension CGPoint {
    func rotated(around origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
        let dx = self.x - origin.x
        let dy = self.y - origin.y
        let radius = sqrt(dx * dx + dy * dy)
        let azimuth = atan2(dy, dx) // in radians
        let newAzimuth = azimuth + (byDegrees * CGFloat.pi / 180.0) // convert it to radians
        let x = origin.x + radius * cos(newAzimuth)
        let y = origin.y + radius * sin(newAzimuth)
        return CGPoint(x: x, y: y)
    }
}
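A quick sanity check for the rotation math: rotating (1, 0) about the origin by 90 degrees should land on (0, 1), up to floating-point error:

let p = CGPoint(x: 1, y: 0).rotated(around: .zero, byDegrees: 90)
// p.x ≈ 0, p.y ≈ 1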
I wrote my custom CIFilter to colorise an image which has a transparent background: Colorize a UIImage in Swift
class ColorFilter: CIFilter {
    var inputImage: CIImage?
    var inputColor: CIColor?

    private let kernel: CIColorKernel = {
        let kernelString =
        """
        kernel vec4 colorize(__sample pixel, vec4 color) {
            pixel.rgb = pixel.a * color.rgb;
            pixel.a *= color.a;
            return pixel;
        }
        """
        return CIColorKernel(source: kernelString)!
    }()

    override var outputImage: CIImage? {
        guard let inputImage = inputImage, let inputColor = inputColor else { return nil }
        let inputs = [inputImage, inputColor] as [Any]
        return kernel.apply(extent: inputImage.extent, arguments: inputs)
    }
}
extension UIImage {
    func colorized(with color: UIColor) -> UIImage {
        guard let cgInput = self.cgImage else {
            return self
        }
        let colorFilter = ColorFilter()
        colorFilter.inputImage = CIImage(cgImage: cgInput)
        colorFilter.inputColor = CIColor(color: color)
        if let ciOutputImage = colorFilter.outputImage {
            let context = CIContext(options: nil)
            let cgImg = context.createCGImage(ciOutputImage, from: ciOutputImage.extent)
            // `rgba` and `alpha(_:)` are among the author's util extensions mentioned above
            return UIImage(cgImage: cgImg!, scale: self.scale, orientation: self.imageOrientation).alpha(color.rgba.alpha).withRenderingMode(self.renderingMode)
        } else {
            return self
        }
    }
}
At this point you should have everything to make this work:
extension UIImage {
    func stroked(with color: UIColor, size: CGFloat) -> UIImage {
        let strokeImage = self.colorized(with: color)
        let oldRect = CGRect(x: size, y: size, width: self.size.width, height: self.size.height).integral
        let newSize = CGSize(width: self.size.width + (2 * size), height: self.size.height + (2 * size))
        let translationVector = CGPoint(x: size, y: 0)

        UIGraphicsBeginImageContextWithOptions(newSize, false, self.scale)
        if let context = UIGraphicsGetCurrentContext() {
            context.interpolationQuality = .high
            let step = 10 // reduce the step to increase quality
            for angle in stride(from: 0, to: 360, by: step) {
                let vector = translationVector.rotated(around: .zero, byDegrees: CGFloat(angle))
                let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
                context.concatenate(transform)
                context.draw(strokeImage.cgImage!, in: oldRect)
                let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
                context.concatenate(resetTransform)
            }
            context.draw(self.cgImage!, in: oldRect)
            let newImage = UIImage(cgImage: context.makeImage()!, scale: self.scale, orientation: self.imageOrientation)
            UIGraphicsEndImageContext()
            return newImage.withRenderingMode(self.renderingMode)
        }
        UIGraphicsEndImageContext()
        return self
    }
}
Borders on images belong to the image-processing area of iOS. It's not as easy as borders on a UIView; it's pretty deep, but if you're willing to go the distance, here is a library and a hint for the journey:
https://github.com/BradLarson/GPUImage
Try using GPUImageThresholdEdgeDetectionFilter,
or try OpenCV: https://docs.opencv.org/2.4/doc/tutorials/ios/image_manipulation/image_manipulation.html
Use this simple extension for UIImage
extension UIImage {
    func outline() -> UIImage? {
        UIGraphicsBeginImageContext(size)
        let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        self.draw(in: rect, blendMode: .normal, alpha: 1.0)

        let context = UIGraphicsGetCurrentContext()
        context?.setStrokeColor(red: 1.0, green: 0.5, blue: 1.0, alpha: 1.0)
        context?.setLineWidth(5.0)
        context?.stroke(rect)

        let newImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return newImage
    }
}
It will give you an image with a pink border.
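Note that stroke(_:) centers the line on the rect's edge, so half of the 5-point line falls outside the image and gets clipped; if you want the whole border visible, inset the rect by half the line width:

// Keep the full 5 pt border inside the image bounds.
context?.stroke(rect.insetBy(dx: 2.5, dy: 2.5))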
I am using the CIDetector class in iOS to find CIRectangleFeatures in my UIImage. Afterwards, I aim to show the cornerPoints drawn into a layer which I then, in turn, add to my UIImageView. Unfortunately, the coordinates given with the CIRectangleFeature are in image space, and even though I am trying to convert them using the CGContextConvertPointToUserSpace function, the rectangle drawn is quite off from the actual rectangle in the image.
Here is my code; when an image is taken, workWithImage is called:
func analyzeImage(image: UIImage) -> [RectanglePoint]
{
    guard let ciImage = CIImage(image: image)
        else { return [] }

    let context = CIContext(options: nil)
    let detector = CIDetector(ofType: CIDetectorTypeRectangle, context: context, options: nil)
    let features = detector.featuresInImage(ciImage)

    UIGraphicsBeginImageContext(ciImage.extent.size)
    let currentContext = UIGraphicsGetCurrentContext()

    var points: [RectanglePoint] = []
    for feature in features as! [CIRectangleFeature]
    {
        let topLeft = CGContextConvertPointToUserSpace(currentContext, feature.topLeft)
        let topRight = CGContextConvertPointToUserSpace(currentContext, feature.topRight)
        let bottomRight = CGContextConvertPointToUserSpace(currentContext, feature.bottomRight)
        let bottomLeft = CGContextConvertPointToUserSpace(currentContext, feature.bottomLeft)

        let point = RectanglePoint(bottomLeft: bottomLeft, topLeft: topLeft, bottomRight: bottomRight, topRight: topRight)
        points.append(point)
    }
    UIGraphicsEndImageContext()
    return points
}

func workWithImage(image: UIImage)
{
    let imageView = UIImageView(frame: view.frame)
    imageView.image = image

    let path = UIBezierPath()
    let shapeLayer = CAShapeLayer()
    shapeLayer.frame = imageView.bounds

    for i in analyzeImage(image)
    {
        path.moveToPoint(i.topLeft)
        path.addLineToPoint(i.topRight)
        path.addLineToPoint(i.bottomRight)
        path.addLineToPoint(i.bottomLeft)
        path.addLineToPoint(i.topLeft)
    }
    shapeLayer.path = path.CGPath
    shapeLayer.fillColor = UIColor.clearColor().CGColor
    shapeLayer.strokeColor = UIColor.redColor().CGColor
    imageView.layer.addSublayer(shapeLayer)
}
If your path is only off in the Y dimension:
I think they haven't fully ported CIDetector to UIKit yet. The feature's coordinates are in the Cocoa (bottom-left-origin) coordinate system; simply doing container.height - point.y will convert them.
I also gave your struct a more fitting name. The rest of the stuff in there I used to figure out what was going on; it might be useful to you.
Code:
func analyzeImage(image: UIImage) -> [Quadrilateral]
{
    guard let ciImage = CIImage(image: image)
        else { return [] }

    let flip = true // set to false to prevent flipping the coordinates

    let context = CIContext(options: nil)
    let detector = CIDetector(ofType: CIDetectorTypeRectangle, context: context, options: [CIDetectorAccuracy: CIDetectorAccuracyHigh])
    let features = detector.featuresInImage(ciImage)

    UIGraphicsBeginImageContext(ciImage.extent.size)
    let currentContext = UIGraphicsGetCurrentContext()

    var frames: [Quadrilateral] = []
    for feature in features as! [CIRectangleFeature]
    {
        var topLeft = CGContextConvertPointToUserSpace(currentContext, feature.topLeft)
        var topRight = CGContextConvertPointToUserSpace(currentContext, feature.topRight)
        var bottomRight = CGContextConvertPointToUserSpace(currentContext, feature.bottomRight)
        var bottomLeft = CGContextConvertPointToUserSpace(currentContext, feature.bottomLeft)

        if flip {
            topLeft = CGPoint(x: topLeft.x, y: image.size.height - topLeft.y)
            topRight = CGPoint(x: topRight.x, y: image.size.height - topRight.y)
            bottomLeft = CGPoint(x: bottomLeft.x, y: image.size.height - bottomLeft.y)
            bottomRight = CGPoint(x: bottomRight.x, y: image.size.height - bottomRight.y)
        }

        let frame = Quadrilateral(topLeft: topLeft, topRight: topRight, bottomLeft: bottomLeft, bottomRight: bottomRight)
        frames.append(frame)
    }
    UIGraphicsEndImageContext()
    return frames
}
Quadrilateral struct:
struct Quadrilateral {
    var topLeft: CGPoint = CGPointZero
    var topRight: CGPoint = CGPointZero
    var bottomLeft: CGPoint = CGPointZero
    var bottomRight: CGPoint = CGPointZero

    var path: UIBezierPath {
        get {
            let tempPath = UIBezierPath()
            tempPath.moveToPoint(topLeft)
            tempPath.addLineToPoint(topRight)
            tempPath.addLineToPoint(bottomRight)
            tempPath.addLineToPoint(bottomLeft)
            tempPath.addLineToPoint(topLeft)
            return tempPath
        }
    }

    init(topLeft topLeft_I: CGPoint, topRight topRight_I: CGPoint, bottomLeft bottomLeft_I: CGPoint, bottomRight bottomRight_I: CGPoint) {
        topLeft = topLeft_I
        topRight = topRight_I
        bottomLeft = bottomLeft_I
        bottomRight = bottomRight_I
    }

    var frame: CGRect {
        get {
            let highestPoint = max(topLeft.y, topRight.y, bottomLeft.y, bottomRight.y)
            let lowestPoint = min(topLeft.y, topRight.y, bottomLeft.y, bottomRight.y)
            let farthestPoint = max(topLeft.x, topRight.x, bottomLeft.x, bottomRight.x)
            let closestPoint = min(topLeft.x, topRight.x, bottomLeft.x, bottomRight.x)

            // you might want to set origin to (0,0)
            let origin = CGPoint(x: closestPoint, y: lowestPoint)
            // size is the extent of the corners, so the frame tightly bounds the quad
            let size = CGSize(width: farthestPoint - closestPoint, height: highestPoint - lowestPoint)
            return CGRect(origin: origin, size: size)
        }
    }

    var size: CGSize {
        get {
            return frame.size
        }
    }

    var origin: CGPoint {
        get {
            return frame.origin
        }
    }
}
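With the struct's path property, the drawing loop in workWithImage collapses to the following (same Swift 2-era syntax as the rest of the answer):

let path = UIBezierPath()
for quad in analyzeImage(image) {
    path.appendPath(quad.path)
}
shapeLayer.path = path.CGPath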