Cannot find CGPoint in CGPath - ios

I have the following function for drawing a line between two points:
override func draw(from fromPoint: CGPoint, to toPoint: CGPoint) {
    let path = UIBezierPath()
    path.move(to: fromPoint)
    path.addLine(to: toPoint)
    path.close()
    layer.path = path.cgPath
    layer.strokeColor = pencil.color.cgColor
    layer.lineWidth = pencil.strokeSize
}
This is called from touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?).
It works fine: the line is drawn at the correct screen position.
I also have an erase feature that lets the user erase previous drawings, and I'm trying to determine whether a touch position is contained in any of the drawn paths:
private func findLayer(in touch: UITouch) -> CAShapeLayer? {
    let point = touch.location(in: view)
    // All the drawn paths are contained in CAShapeLayers.
    // Circle and rectangle layers have a corresponding frame that contains the UIBezierPath.
    if let hitLayer = view?.layer.hitTest(point) as? CAShapeLayer,
        hitLayer.path?.contains(point) == true || hitLayer.frame.contains(point) {
        return hitLayer
    }
    guard let sublayers = view?.layer.sublayers else { return nil }
    // This extra check is for layers that don't have a frame (lines, angles and free drawing).
    for layer in sublayers {
        if let shapeLayer = layer as? CAShapeLayer,
            shapeLayer.path?.contains(point) == true || shapeLayer.frame.contains(point) {
            return shapeLayer
        }
    }
    return nil
}
The problem is that when the user draws a line, the findLayer function never returns the layer containing the line, although it works perfectly when the user draws a circle or a rectangle.
I don't want to give line drawings a frame, because then the hit box could be too big and the user could delete the drawing even if the touch isn't near the actual path.
How can I find out whether a touch point is part of a CAShapeLayer's path?
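(One way to approach this, using the stroked-outline technique from the related answer below, is to expand each stroked path into a filled outline and test containment against that. This is only a sketch: layerContains is a hypothetical helper, and the 10-point minimum width is an assumed hit slop, not part of the original code.)
private func layerContains(_ shapeLayer: CAShapeLayer, point: CGPoint) -> Bool {
    guard let path = shapeLayer.path else { return false }
    // Expand the stroked line into a filled outline so contains(_:) tests
    // the visible stroke instead of the (empty) fill area.
    let outline = path.copy(strokingWithWidth: max(shapeLayer.lineWidth, 10), // finger-sized minimum hit area (assumed value)
                            lineCap: .round,
                            lineJoin: .round,
                            miterLimit: 0)
    return outline.contains(point)
}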

Related

CAShapeLayer. Does the line pass through the point?

I use a CAShapeLayer to draw a line on the screen. In touchesEnded I want to check whether the line passes through a given point. In my code, contains always returns true when I press anywhere on the screen. Perhaps the problem is in line.frame = (view?.bounds)!. How can I fix it?
Sorry for my bad English.
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    let touch = touches.first
    let firstPosition = touch?.location(in: self)
    if atPoint(firstPosition!) == lvl1 {
        let firstPositionX = firstPosition?.x
        let firstPositionY = frame.size.height - (firstPosition?.y)!
        view?.layer.addSublayer(line)
        line.lineWidth = 8
        let color = #colorLiteral(red: 0.8078431487, green: 0.02745098062, blue: 0.3333333433, alpha: 1).cgColor
        line.strokeColor = color
        line.fillColor = nil
        line.frame = (view?.bounds)!
        path.move(to: CGPoint(x: firstPositionX!, y: firstPositionY))
    }
}

override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
    let touch = touches.first
    let firstPosition = touch?.location(in: self)
    if atPoint(firstPosition!) == lvl1 {
        let firstPositionX = firstPosition?.x
        let firstPositionY = frame.size.height - (firstPosition?.y)!
        path.addLine(to: CGPoint(x: firstPositionX!, y: firstPositionY))
        line.path = path.cgPath
    }
}

override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
    if line.contains(screenCenterPoint) {
        print("ok")
    }
}
Problem
The method func contains(_ p: CGPoint) -> Bool of CAShapeLayer returns true if the bounds of the layer contain the point (see the documentation).
So you cannot use it to check whether the line contains a point.
There is, however, another method with the same name on CGPath that returns whether the specified point is interior to the path. But since you only stroke your path and don't fill its interior, this method will not give the desired result either.
Solution
The trick is to create an outline of your path using:
let outline = path.cgPath.copy(strokingWithWidth: line.lineWidth, lineCap: .butt, lineJoin: .round, miterLimit: 0)
And then check if the interior of the outline contains your screenCenterPoint
if outline.contains(screenCenterPoint) {
    print("ok")
}
Performance considerations
Since you are checking containment only when touches end, creating an outline of the path should not add too much overhead.
If you want to check containment in real time, for example inside touchesMoved, computing an outline may add noticeable overhead, because that method is called many times per second. Also, the longer the path becomes, the longer it takes to compute the outline.
So in real time it is better to build an outline of only the last drawn segment and check whether that outline contains your point.
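For example (a minimal sketch; lastPoint, currentPoint, width and touchPoint stand in for your own values):
// Outline only the newest segment and hit-test against it.
let segment = UIBezierPath()
segment.move(to: lastPoint)
segment.addLine(to: currentPoint)
let segmentOutline = segment.cgPath.copy(strokingWithWidth: width,
                                         lineCap: .round,
                                         lineJoin: .round,
                                         miterLimit: 0)
if segmentOutline.contains(touchPoint) {
    // the touch lies on the segment that was just drawn
}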
If you want to reduce the overhead further, you can write your own containment function. Containment of a point in a straight line segment is fairly simple and reduces to the following formula:
Given a line from start to end with a stroke width, and a point p, calculate:
dx = end.x - start.x
dy = end.y - start.y
a = dy * p.x - dx * p.y + end.x * start.y - end.y * start.x
b = hypot(dy, dx)
The line contains the point p if:
abs(a / b) < width / 2 and p lies within the bounding box of the segment (expanded by half the width, so the box covers the stroke's thickness).
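A direct Swift translation of that check might look like this (a sketch; lineContains is a hypothetical helper, and the expanded bounding box is a small refinement for thick strokes):
import UIKit

func lineContains(start: CGPoint, end: CGPoint, width: CGFloat, point p: CGPoint) -> Bool {
    let dx = end.x - start.x
    let dy = end.y - start.y
    let b = hypot(dy, dx)
    // Degenerate (zero-length) segment: fall back to a simple distance check.
    guard b > 0 else { return hypot(p.x - start.x, p.y - start.y) <= width / 2 }
    // Perpendicular distance from p to the infinite line through start and end is abs(a / b).
    let a = dy * p.x - dx * p.y + end.x * start.y - end.y * start.x
    // Bounding box of the segment, expanded by half the stroke width.
    let box = CGRect(x: min(start.x, end.x), y: min(start.y, end.y), width: abs(dx), height: abs(dy))
        .insetBy(dx: -width / 2, dy: -width / 2)
    return abs(a / b) < width / 2 && box.contains(p)
}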

Swift Drawing App is Laggy

I am currently working on a drawing app. I'm building it for iOS, and am using Swift 3 to do so.
It's just a basic drawing app, but I'm trying to add an extra feature. I started with a UIScrollView and added an image view to that scroll view; all of the drawing is done in the image view. When you first launch the app, the scroll view is completely zoomed in. When you switch to "zoom mode", you can pinch to zoom. The problem is that when you draw while zoomed in, the drawing is really fuzzy. To fix this, I can use a line of code like this:
UIGraphicsBeginImageContextWithOptions((self.view.frame.size), false, 7.0)
This makes the drawing look great while zoomed in, but it makes the app run very laggy. What confuses me, though, is that if I change the above code to this:
UIGraphicsBeginImageContextWithOptions((self.view.frame.size), false, 0.0)
And zoom out all the way, the drawing looks exactly the same (granted, I'm zoomed all the way out), but it's no longer laggy. I know this probably isn't coming across super clearly, so here's a video showing what happens in the first scenario: https://youtu.be/E_9FKf1pUTY and in the second: https://youtu.be/OofFTS4Q0OA
So basically, I'm wondering if there's a way to treat the zoomed-in area as if it were its own view. It seems as if the app is updating the entire image view rather than just the part that is visible at any given time. Is there a way to update only the portion of the image view that is being drawn on? Sorry if this is a bit of a confusing post; feel free to ask questions if anything is unclear. For clarity's sake, I'll include all of the drawing code below:
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    print("Touches began")
    swiped = true
    if let touch = touches.first {
        lastPoint = touch.location(in: scrollView)
        lastPoint.x = lastPoint.x / scrollView.zoomScale
        lastPoint.y = lastPoint.y / scrollView.zoomScale
    }
}

func drawLines(fromPoint: CGPoint, toPoint: CGPoint) {
    print("\(fromPoint.x), \(fromPoint.y)")
    //UIGraphicsBeginImageContext(self.view.frame.size)
    UIGraphicsBeginImageContextWithOptions((scrollView.frame.size), false, 0.0)
    imageView.image?.draw(in: CGRect(x: 0, y: 0, width: self.view.frame.width, height: self.view.frame.height))
    let context = UIGraphicsGetCurrentContext()
    context?.move(to: CGPoint(x: fromPoint.x, y: fromPoint.y))
    context?.addLine(to: CGPoint(x: toPoint.x, y: toPoint.y))
    context?.setBlendMode(CGBlendMode.normal)
    context?.setLineCap(CGLineCap.round)
    if erase == true {
        context?.setLineWidth(30)
    }
    if erase == false {
        context?.setLineWidth(CGFloat(sizeVar))
    }
    if color == "black" {
        context?.setStrokeColor(UIColor.black.cgColor)
    }
    if color == "white" {
        context?.setStrokeColor(UIColor.white.cgColor)
    }
    if color == "blue" {
        context?.setStrokeColor(UIColor.blue.cgColor)
    }
    if color == "cyan" {
        context?.setStrokeColor(UIColor.cyan.cgColor)
    }
    if color == "green" {
        context?.setStrokeColor(UIColor.green.cgColor)
    }
    if color == "magenta" {
        context?.setStrokeColor(UIColor.magenta.cgColor)
    }
    if color == "red" {
        context?.setStrokeColor(UIColor.red.cgColor)
    }
    if color == "yellow" {
        context?.setStrokeColor(UIColor.yellow.cgColor)
    }
    context?.strokePath()
    imageView.image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
}

override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
    swiped = true
    if let touch = touches.first {
        var currentPoint = touch.location(in: scrollView)
        currentPoint.x = currentPoint.x / scrollView.zoomScale
        currentPoint.y = currentPoint.y / scrollView.zoomScale
        drawLines(fromPoint: lastPoint, toPoint: currentPoint)
        lastPoint = currentPoint
    }
}

override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
    if !swiped {
        drawLines(fromPoint: lastPoint, toPoint: lastPoint)
    }
}
The scale parameter of UIGraphicsBeginImageContextWithOptions(_:_:_:) is not a quality setting, and you should not put arbitrary values there. It is a scale factor, akin to the @1x, @2x, and @3x settings for images: it tells the system the scaling factor to use when mapping image pixels to screen pixels. In almost all cases you should pass 0, which means "use the native scale of the screen" (@2x for normal Retina, @3x for the iPhone 6+ and 7+). You should never set it to an arbitrary value like 7. That creates an image with far more pixels than the screen needs and forces the system to scale it for screen drawing every time, which takes more time and is slower.
Next, creating a new image for each new line segment is a dreadfully inefficient way to draw. It constantly allocates and releases large blocks of memory and then has to completely redraw the screen each time. Instead, I would set up a view that has a CAShapeLayer as its backing layer and update the path that is installed in the layer.
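For instance, a minimal sketch of that idea (the CanvasView class and its property names are illustrative, not from the original project):
import UIKit

class CanvasView: UIView {
    // Back the view with a CAShapeLayer instead of a plain CALayer.
    override class var layerClass: AnyClass { return CAShapeLayer.self }

    private var shapeLayer: CAShapeLayer { return layer as! CAShapeLayer }
    private let path = UIBezierPath()

    override func didMoveToWindow() {
        super.didMoveToWindow()
        shapeLayer.strokeColor = UIColor.black.cgColor
        shapeLayer.fillColor = nil
        shapeLayer.lineWidth = 3
    }

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
        guard let touch = touches.first else { return }
        path.move(to: touch.location(in: self))
    }

    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
        guard let touch = touches.first else { return }
        path.addLine(to: touch.location(in: self))
        // Only the vector path changes; no full-size bitmap is rebuilt on every touch.
        shapeLayer.path = path.cgPath
    }
}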

SKNode follow another SKNode within SKConstraint bounds

I am attempting to simulate an eye with SpriteKit.
The pupil of the eye tracks the users's finger as it moves across the screen, but must stay within the bounds of the eye.
I have attempted to solve this unsuccessfully with the use of SKConstraint.
Edit
My thought was to apply SKConstraints to the pupil to restrict its movement to the bounds of the eye. Any touches (i.e. touchesMoved(), etc.) are applied to the pupil in the form of SKAction.moveTo(), and SpriteKit handles keeping the pupil within the eye's bounds.
let touchPoint = CGPoint()
SKAction.moveTo( touchPoint, duration: 2)
The code for the video is available: https://gist.github.com/anonymous/f2356e07d1ac0e67c25b1940662d72cb
A picture is worth a thousand words...
Imagine the pupil is the small, white, filled circle. The blue box simulates a user moving their finger across the screen.
Ideally, the pupil follows the blue box around the screen and follows the path as defined by the yellow circle.
iOS 10 | Swift 3 | Xcode 8
Instead of constraining by distance, you can use an orientation constraint to rotate a node to face toward the touch location. By adding the "pupil" node with an x offset to the node being constrained, the pupil will move toward the touch point. Here's an example of how to do that:
let follow = SKSpriteNode(color: .blue, size: CGSize(width: 10, height: 10))
let pupil = SKShapeNode(circleOfRadius: 10)
let container = SKNode()
let maxDistance: CGFloat = 25

override func didMove(to view: SKView) {
    addChild(container)
    // Add the pupil at an offset
    pupil.position = CGPoint(x: maxDistance, y: 0)
    container.addChild(pupil)
    // Node that shows the touch point
    addChild(follow)
    // Add an orientation constraint to the container
    let range = SKRange(constantValue: 0)
    let constraint = SKConstraint.orient(to: follow, offset: range)
    container.constraints = [constraint]
}

override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    for t in touches {
        let location = t.location(in: self)
        follow.position = location
        adjustPupil(location: location)
    }
}

override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
    for t in touches {
        let location = t.location(in: self)
        follow.position = location
        adjustPupil(location: location)
    }
}

// Adjust the position of the pupil within the iris
func adjustPupil(location: CGPoint) {
    let dx = location.x - container.position.x
    let dy = location.y - container.position.y
    let distance = sqrt(dx * dx + dy * dy)
    let x = min(distance, maxDistance)
    pupil.position = CGPoint(x: x, y: 0)
}
I wanted to wait for a response before posting an answer, but @0x141E and I debated how constraints work, so here is the code. Use it to find out where you are going wrong.
import SpriteKit

class GameScene: SKScene {
    static let pupilRadius: CGFloat = 30
    static let eyeRadius: CGFloat = 100
    let follow = SKSpriteNode(color: .blue, size: CGSize(width: 10, height: 10))
    let pupil = SKShapeNode(circleOfRadius: pupilRadius)

    override func didMove(to view: SKView) {
        let eye = SKShapeNode(circleOfRadius: GameScene.eyeRadius)
        eye.strokeColor = .white
        eye.fillColor = .white
        addChild(eye)
        pupil.position = CGPoint(x: 0, y: 0)
        pupil.fillColor = .blue
        eye.addChild(pupil)
        addChild(follow)
        pupil.constraints = [SKConstraint.distance(SKRange(lowerLimit: 0, upperLimit: GameScene.eyeRadius - GameScene.pupilRadius), to: eye)]
    }

    func moveFollowerAndPupil(_ location: CGPoint) {
        follow.position = location
        pupil.position = convert(location, to: pupil.parent!)
    }

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
        touches.forEach({ moveFollowerAndPupil($0.location(in: self)) })
    }

    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
        touches.forEach({ moveFollowerAndPupil($0.location(in: self)) })
    }
}
As you can see, there is no use of square root on my end; hopefully Apple is smart enough not to use it either, since it is not needed, so in theory this should be faster than doing the distance formula manually.

Overriding drawRect for drawing app

I am building a demo drawing application. I use touchesMoved:withEvent: to collect points and add them to a CGMutablePathRef. To stroke the path, I override drawRect, add the path to the context, and stroke it:
override func drawRect(rect: CGRect) {
    self.backgroundColor?.set()
    UIRectFill(rect)
    let context: CGContextRef = UIGraphicsGetCurrentContext()
    for line in pathArray {
        CGContextAddPath(context, line.structPath)
        CGContextSetLineWidth(context, line.structLineWidth)
        CGContextSetStrokeColorWithColor(context, line.structLineColor.CGColor)
        CGContextSetAlpha(context, lineOpacity)
    }
    CGContextSetLineCap(context, kCGLineCapRound)
    CGContextStrokePath(context)
    self.empty = false
}

override func touchesBegan(touches: Set<UITouch>, withEvent event: UIEvent?) {
    if let touch = touches.first as UITouch! {
        previousPoint = touch.previousLocationInView(self)
        previousPreviousPoint = touch.previousLocationInView(self)
        currentPoint = touch.locationInView(self)
    }
    self.touchesMoved(touches, withEvent: event)
}

override func touchesMoved(touches: Set<UITouch>, withEvent event: UIEvent?) {
    if let touch = touches.first as UITouch! {
        previousPreviousPoint = previousPoint
        previousPoint = touch.previousLocationInView(self)
        currentPoint = touch.locationInView(self)
        let mid1: CGPoint = getMidPoint(previousPoint, p2: previousPreviousPoint)
        let mid2: CGPoint = getMidPoint(currentPoint, p2: previousPoint)
        let subpath: CGMutablePathRef = CGPathCreateMutable()
        CGPathMoveToPoint(subpath, nil, mid1.x, mid1.y)
        CGPathAddQuadCurveToPoint(subpath, nil, previousPoint.x, previousPoint.y, mid2.x, mid2.y)
        let bounds: CGRect = CGPathGetBoundingBox(subpath)
        let drawBox: CGRect = CGRectInset(bounds, -2.0 * lineWidth, -2.0 * lineWidth)
        let newLine = line(newPath: subpath)
        pathArray.append(newLine)
        self.setNeedsDisplayInRect(drawBox)
    }
}
The above code mostly works as expected, but I am seeing one unexpected result. The "draw box", which takes the bounding box and applies a CGRectInset, changes the line color of other paths that have already been drawn:
I understand (sort of) why this is happening, but cannot find a solution to this problem. Any suggestions would be most appreciated!
You want to stroke each path separately, like this:
override func drawRect(rect: CGRect) {
    let context: CGContextRef = UIGraphicsGetCurrentContext()
    // Set parameters in the context that are the same for all lines
    CGContextSetLineCap(context, kCGLineCapRound)
    CGContextSetAlpha(context, lineOpacity)
    for line in pathArray {
        // Set parameters in the context that are specific to this line
        CGContextSetLineWidth(context, line.structLineWidth)
        CGContextSetStrokeColorWithColor(context, line.structLineColor.CGColor)
        // Add the line's path to the context's current path:
        CGContextAddPath(context, line.structPath)
        // And stroke it.
        CGContextStrokePath(context)
        // (This has the side effect of clearing the context's current path.)
    }
}
The CGContext keeps track of a current path. Think of it as a CGMutablePath that you don't have direct access to. You can affect it by calling functions like CGContextAddPath or many others.
The CGContext's current path is not actually visible until you tell the context to draw it, by calling a function like CGContextStrokePath. At that time, the context uses the current path and the other values in the context (stroke color, alpha, line width, etc.) to figure out what to draw, and draws it.
Your code was adding each line's path into the current path, so you ended up with the current path containing several disconnected subpaths. Then you called CGContextStrokePath once at the end, and all of those subpaths were drawn using the values of the parameters that you most recently set. So you saw only the last line's width and color.

Allowing users to draw rect on a UIImage, with the intention of cropping the image

I'm sure this has been asked a number of times from various different perspectives, but I'm unable to find an answer on here as yet.
What I want to achieve
What I would like to do is to display a UIImage, and allow the user to draw a rectangle on the image, and eventually crop their selection.
Research so far
I've found previous questions here on SO that handle the cropping; however, they often deal with static cropping areas that don't change, which leads to the following constraints of such a mechanism:
- The area of the image you're interested in may be positioned anywhere. For example, if you're trying to crop a road sign, it may be centered in one image but aligned left in another, so you can't predict which area to crop.
- The size and scale of the area of interest may change. For example, one image may be a close-up of that road sign, so the cropping area would be larger, while another image may have been taken from a distance, meaning the cropping area would be smaller.
With the combination of the above two variables, it's almost impossible to accurately predict where the area of interest in the image will be, so I'm relying on the user to define it by "drawing" a box around the area we're interested in, in this case a road sign.
This is all peachy on Android, since you can delegate all the hard work with a nice intent, such as:
Intent intent = new Intent("com.android.camera.action.CROP");
However, I can't find an equivalent for iOS.
I've found this bit of code from this source:
- (UIImage *)imageByDrawingCircleOnImage:(UIImage *)image
{
    // begin a graphics context of sufficient size
    UIGraphicsBeginImageContext(image.size);

    // draw original image into the context
    [image drawAtPoint:CGPointZero];

    // get the context for CoreGraphics
    CGContextRef ctx = UIGraphicsGetCurrentContext();

    // set stroking color and draw circle
    [[UIColor redColor] setStroke];

    // make circle rect 5 px from border
    CGRect circleRect = CGRectMake(0, 0, image.size.width, image.size.height);
    circleRect = CGRectInset(circleRect, 5, 5);

    // draw circle
    CGContextStrokeEllipseInRect(ctx, circleRect);

    // make image out of bitmap context
    UIImage *retImage = UIGraphicsGetImageFromCurrentImageContext();

    // free the context
    UIGraphicsEndImageContext();

    return retImage;
}
I believe this is a good starting point for cropping (it deals with circles, however), but it relies on predefining the area you want to crop when calling CGRectMake.
This previous question also details how to do the actual cropping.
I'm assuming that to allow the user to draw the rect, I'd need to integrate with Gestures?
The Question :
How can I allow the user, to draw a rect over an image view, with the intent of cropping that area?
You could give BJImageCropper a try:
A simple UIView subclass that allows a user to crop an image. If you use it, I'd love to know! Twitter: @barrettjacobsen
This post is already 5 years old, but for future reference, this is how I managed to get it done. The following code is a combination of Rob's answer and some image cropping.
Xcode 9 and Swift 4 are used here.
- Add 2 view controllers.
- Add an image view and 2 buttons to the first view controller, and another image view to the last view controller.
- Link all views to the source file.
View controller
import UIKit

extension UIView {
    func snapshot(afterScreenUpdates: Bool = false) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(bounds.size, isOpaque, 0)
        drawHierarchy(in: bounds, afterScreenUpdates: afterScreenUpdates)
        let image = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return image
    }
}

extension UIImage {
    func crop(rect: CGRect) -> UIImage {
        var rect = rect
        rect.origin.x *= self.scale
        rect.origin.y *= self.scale
        rect.size.width *= self.scale
        rect.size.height *= self.scale
        let imageRef = self.cgImage!.cropping(to: rect)
        let image = UIImage(cgImage: imageRef!, scale: self.scale, orientation: self.imageOrientation)
        return image
    }
}

class ViewController: UIViewController {
    var rec: CGRect!
    var cropImage: UIImage!
    @IBOutlet weak var imageView: UIImageView!

    private let shapeLayer: CAShapeLayer = {
        let _shapeLayer = CAShapeLayer()
        _shapeLayer.fillColor = UIColor.clear.cgColor
        _shapeLayer.strokeColor = UIColor.green.cgColor
        _shapeLayer.lineWidth = 2
        return _shapeLayer
    }()

    private var startPoint: CGPoint!

    override func viewDidLoad() {
        super.viewDidLoad()
        imageView.layer.addSublayer(shapeLayer)
    }

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
        clear()
        startPoint = touches.first?.location(in: imageView)
    }

    func clear() {
        imageView.layer.sublayers = nil
        imageView.image = UIImage(named: "aa")
        imageView.layer.addSublayer(shapeLayer)
    }

    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
        guard let startPoint = startPoint, let touch = touches.first else { return }
        let point: CGPoint
        if let predictedTouch = event?.predictedTouches(for: touch)?.last {
            point = predictedTouch.location(in: imageView)
        } else {
            point = touch.location(in: imageView)
        }
        updatePath(from: startPoint, to: point)
    }

    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
        guard let startPoint = startPoint, let touch = touches.first else { return }
        let point = touch.location(in: imageView)
        updatePath(from: startPoint, to: point)
        imageView.image = imageView.snapshot(afterScreenUpdates: true)
        shapeLayer.path = nil
    }

    override func touchesCancelled(_ touches: Set<UITouch>, with event: UIEvent?) {
        shapeLayer.path = nil
    }

    private func updatePath(from startPoint: CGPoint, to point: CGPoint) {
        let size = CGSize(width: point.x - startPoint.x, height: point.y - startPoint.y)
        rec = CGRect(origin: startPoint, size: size)
        shapeLayer.path = UIBezierPath(rect: rec).cgPath
    }

    @IBAction func btnTapped(_ sender: Any) {
        clear()
    }

    @IBAction func btnCropTapped(_ sender: Any) {
        let newRec = CGRect(origin: CGPoint(x: rec.origin.x, y: rec.origin.y), size: rec.size)
        cropImage = imageView.image?.crop(rect: newRec)
        print(rec)
        print(newRec)
        self.performSegue(withIdentifier: "toImage", sender: nil)
    }

    override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
        if segue.identifier == "toImage" {
            if let destination = segue.destination as? ImageViewController {
                destination.croppedImage = cropImage
            }
        }
    }
}
