I need to move the point of view of the SCNView with a pan gesture in a Mac Catalyst app.
On iPhone and iPad there is a default two-finger drag for this.
I have tried:
let point = gesture.location(in: self)
let v1 = self.projectPoint(SCNVector3Zero)
let vector = self.unprojectPoint(SCNVector3Make(Float(point.x), Float(point.y), v1.z))
self.pointOfView?.position = SCNVector3(vector.x, vector.y, self.pointOfView!.position.z)
If I apply this to the SceneKit root node it works, but I need to move only the point of view, not the position of the root node.
Please suggest a solution.
For anyone who still hasn't got it working, here is the solution:
var previousLocation = SCNVector3(x: 0, y: 0, z: 0)

@objc func panned(gesture: UIPanGestureRecognizer) {
    let view = self.sceneView1!
    let translation = gesture.translation(in: view)
    let location = gesture.location(in: view)
    let secLocation = CGPoint(x: location.x + translation.x, y: location.y + translation.y)

    // Unproject both screen points at the near (z = 0) and far (z = 1) clip planes,
    // giving one ray per screen point.
    let P1 = view.unprojectPoint(SCNVector3(x: Float(location.x), y: Float(location.y), z: 0.0))
    let P2 = view.unprojectPoint(SCNVector3(x: Float(location.x), y: Float(location.y), z: 1.0))
    let Q1 = view.unprojectPoint(SCNVector3(x: Float(secLocation.x), y: Float(secLocation.y), z: 0.0))
    let Q2 = view.unprojectPoint(SCNVector3(x: Float(secLocation.x), y: Float(secLocation.y), z: 1.0))

    // Intersect both rays with the world XY plane (z = 0).
    let t1 = -P1.z / (P2.z - P1.z)
    let t2 = -Q1.z / (Q2.z - Q1.z)
    let x1 = P1.x + t1 * (P2.x - P1.x)
    let y1 = P1.y + t1 * (P2.y - P1.y)
    let P0 = SCNVector3Make(x1, y1, 0)
    let x2 = Q1.x + t2 * (Q2.x - Q1.x)
    let y2 = Q1.y + t2 * (Q2.y - Q1.y)
    let Q0 = SCNVector3Make(x2, y2, 0)

    // SCNVector3 has no built-in subtraction, so compute the difference componentwise
    // and negate it: the camera moves opposite to the drag.
    let diffR = SCNVector3Make(-(Q0.x - P0.x), -(Q0.y - P0.y), -(Q0.z - P0.z))

    let cameraNode = view.pointOfView
    switch gesture.state {
    case .began:
        previousLocation = cameraNode!.position
    case .changed:
        cameraNode?.position = SCNVector3Make(previousLocation.x + diffR.x,
                                              previousLocation.y + diffR.y,
                                              previousLocation.z + diffR.z)
    default:
        break
    }
}
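For completeness, a minimal sketch of wiring up the recognizer (assuming sceneView1 is the SCNView outlet used above):

override func viewDidLoad() {
    super.viewDidLoad()
    // Attach the pan handler defined above to the scene view.
    let pan = UIPanGestureRecognizer(target: self, action: #selector(panned(gesture:)))
    sceneView1.addGestureRecognizer(pan)
}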
I am trying to implement a video-call screen like WhatsApp's. I am trying to drag the receiver's view, and based on the drag it should move to that position. For example, if the user drags the receiver view toward the bottom left (or any other side), it should snap to the nearest corner of the main (sender) view, but my conditions are not working correctly. The behaviour should be exactly like the receiver preview moving on WhatsApp's video-call screen.
@IBOutlet weak var sampleView: UIView!

var initialCenter = CGPoint()
var viewOrigin: CGPoint!

// Assigned once (e.g. in viewDidLoad) to remember the view's starting origin.
viewOrigin = sampleView.frame.origin

@IBAction func panView(_ sender: UIPanGestureRecognizer) {
    guard sender.view != nil else { return }
    let viewToDrag = sender.view!
    // Get the changes in the X and Y directions relative to
    // the superview's coordinate space.
    let endPoint = sender.translation(in: viewToDrag.superview)
    let velocity = sender.velocity(in: viewToDrag.superview)
    if sender.state == .began {
        // Save the view's original position.
        self.initialCenter = viewToDrag.center
    }
    // Update the position for the .began, .changed, and .ended states
    if sender.state != .cancelled {
        // Add the X and Y translation to the view's original position.
        // k and b describe the container's two diagonals (y = kx and y = -kx + b).
        let k: CGFloat = view.bounds.size.height / view.bounds.size.width
        let b = view.bounds.size.height
        if endPoint.y < k * endPoint.x && endPoint.y < -1 * k * endPoint.x + b {
            print("top edge")
            viewToDrag.center = CGPoint(x: viewToDrag.center.x + endPoint.x, y: viewToDrag.center.y + endPoint.y)
            sender.setTranslation(CGPoint(x: 0, y: 0), in: viewToDrag)
        } else if endPoint.y <= k * endPoint.x && endPoint.y >= -1 * k * endPoint.x + b {
            print("right edge")
            viewToDrag.center = CGPoint(x: viewToDrag.center.x + endPoint.x, y: viewToDrag.center.y + endPoint.y)
            sender.setTranslation(CGPoint(x: 0, y: 0), in: viewToDrag)
        } else if endPoint.y > k * endPoint.x && endPoint.y > -1 * k * endPoint.x + b {
            print("bottom edge")
            viewToDrag.center = CGPoint(x: viewToDrag.center.x + endPoint.x, y: viewToDrag.center.y + endPoint.y)
            sender.setTranslation(CGPoint(x: 0, y: 0), in: viewToDrag)
        } else if endPoint.y > k * endPoint.x && endPoint.y < -1 * k * endPoint.x + b {
            print("left edge")
            UIView.animate(withDuration: 0.3, animations: {
                self.sampleView.frame.origin = self.viewOrigin
            })
            viewToDrag.center = CGPoint(x: viewToDrag.center.x + endPoint.x, y: viewToDrag.center.y + endPoint.y)
            sender.setTranslation(CGPoint(x: 0, y: 0), in: viewToDrag)
        }
    } else {
        // On cancellation, return the piece to its original location.
        viewToDrag.center = initialCenter
    }
}
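For the snap-to-the-nearest-corner behaviour the question asks about, one option is to let the view follow the finger while dragging and, when the gesture ends, animate it to whichever corner is closest. A minimal sketch (the helper name and the 16-point inset are my assumptions, not part of the original code):

// Hypothetical corner-snapping helper. Call it when the gesture ends,
// e.g. in the .ended state of panView(_:).
private func snapToNearestCorner(_ viewToDrag: UIView) {
    guard let container = viewToDrag.superview else { return }
    let inset: CGFloat = 16
    let halfWidth = viewToDrag.bounds.width / 2
    let halfHeight = viewToDrag.bounds.height / 2
    let corners = [
        CGPoint(x: inset + halfWidth, y: inset + halfHeight),                                                 // top left
        CGPoint(x: container.bounds.width - inset - halfWidth, y: inset + halfHeight),                        // top right
        CGPoint(x: inset + halfWidth, y: container.bounds.height - inset - halfHeight),                       // bottom left
        CGPoint(x: container.bounds.width - inset - halfWidth, y: container.bounds.height - inset - halfHeight) // bottom right
    ]
    // Pick the corner with the smallest squared distance to the view's current center.
    guard let target = corners.min(by: { lhs, rhs in
        let ldx = lhs.x - viewToDrag.center.x, ldy = lhs.y - viewToDrag.center.y
        let rdx = rhs.x - viewToDrag.center.x, rdy = rhs.y - viewToDrag.center.y
        return ldx * ldx + ldy * ldy < rdx * rdx + rdy * rdy
    }) else { return }
    UIView.animate(withDuration: 0.3) {
        viewToDrag.center = target
    }
}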
I want to rotate a point by -90 degrees.
Initial
Final
Let's look at the top-left and top-right points of the Initial grid. Their coordinates are:
let topLeft = CGPoint(x: 2, y: 1)
let topRight = CGPoint(x: 3, y: 1)
After the rotation their coordinates should become:
topLeft (1, 0)
topRight (2, 0)
How can I do it?
I have tried several answers, but none of them gives my expected result.
These did not work:
Rotating a CGPoint around another CGPoint
What is the best way to rotate a CGPoint on a grid?
Here is some code from my playground:
let topLeft = CGPoint(x: 2, y: 1)
let topRight = CGPoint(x: 3, y: 1)
func rotatePoint1(_ point: CGPoint, _ degrees: CGFloat) -> CGPoint {
    let s = CGFloat(sinf(Float(degrees)))
    let c = CGFloat(cosf(Float(degrees)))
    return CGPoint(x: c * point.x - s * point.y, y: s * point.x + c * point.y)
}

func rotatePoint2(_ point: CGPoint, _ degrees: CGFloat, _ origin: CGPoint) -> CGPoint {
    let dx = point.x - origin.x
    let dy = point.y - origin.y
    let radius = sqrt(dx * dx + dy * dy)
    let azimuth = atan2(dy, dx) // in radians
    let newAzimuth = azimuth + degrees * CGFloat(M_PI / 180.0) // convert it to radians
    let x = origin.x + radius * cos(newAzimuth)
    let y = origin.y + radius * sin(newAzimuth)
    return CGPoint(x: x, y: y)
}

func rotatePoint3(_ point: CGPoint, _ degrees: CGFloat) -> CGPoint {
    let translateTransform = CGAffineTransform(translationX: point.x, y: point.y)
    let rotationTransform = CGAffineTransform(rotationAngle: degrees)
    let customRotation = (rotationTransform.concatenating(translateTransform.inverted())).concatenating(translateTransform)
    return point.applying(customRotation)
}
print(rotatePoint1(topLeft, -90))
print(rotatePoint1(topRight, -90))
You are really describing two rotations with your example:
The points are rotated by -90 degrees around the center of the 3x3 grid. When this happens, the topLeft point becomes bottomLeft, and topRight becomes topLeft.
Then you rotate those points around the center of the square 90 degrees (ie. the other direction) to make them topLeft and topRight again.
Using this function from this answer:
func rotatePoint(target: CGPoint, aroundOrigin origin: CGPoint, byDegrees: CGFloat) -> CGPoint {
    let dx = target.x - origin.x
    let dy = target.y - origin.y
    let radius = sqrt(dx * dx + dy * dy)
    let azimuth = atan2(dy, dx) // in radians
    let newAzimuth = azimuth + byDegrees * .pi / 180 // convert it to radians
    let x = origin.x + radius * cos(newAzimuth)
    let y = origin.y + radius * sin(newAzimuth)
    return CGPoint(x: x, y: y)
}
let topLeft = CGPoint(x: 2, y: 1)
let topRight = CGPoint(x: 3, y: 1)
let squareCenter = CGPoint(x: 2.5, y: 1.5)
// First rotate around the center of the 3 x 3 square
let centerOfRotation = CGPoint(x: 1.5, y: 1.5)
let tl1 = rotatePoint(target: topLeft, aroundOrigin: centerOfRotation, byDegrees: -90) // (x 1 y 1)
let tr1 = rotatePoint(target: topRight, aroundOrigin: centerOfRotation, byDegrees: -90) // (x 1 y 0)
let sc1 = rotatePoint(target: squareCenter, aroundOrigin: centerOfRotation, byDegrees: -90) // (x 1.5 y 0.5)
// Now rotate the 1x1 square the other way around new position of square center
let tl2 = rotatePoint(target: tl1, aroundOrigin: sc1, byDegrees: 90) // (x 1 y 0)
let tr2 = rotatePoint(target: tr1, aroundOrigin: sc1, byDegrees: 90) // (x 2 y 0)
Note: As @MBo noted in the comments, if your cell is always 1x1, it is sufficient to rotate the center of your cell and then just add and subtract the 0.5 offsets to find the four corners.
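A minimal sketch of that shortcut, using the rotatePoint function and the values from above (it relies on the ±90 degree rotation keeping the cell axis-aligned):

// Rotate only the cell's center, then offset by ±0.5 to recover the corners (1x1 cell assumed).
let rotatedCenter = rotatePoint(target: squareCenter, aroundOrigin: centerOfRotation, byDegrees: -90) // (x 1.5 y 0.5)
let newTopLeft  = CGPoint(x: rotatedCenter.x - 0.5, y: rotatedCenter.y - 0.5) // (x 1 y 0), same as tl2
let newTopRight = CGPoint(x: rotatedCenter.x + 0.5, y: rotatedCenter.y - 0.5) // (x 2 y 0), same as tr2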
You can just transform your view like so:
yourView.transform = CGAffineTransform(rotationAngle: -CGFloat.pi/2)
EDIT:
My bad.
Pass the angle in radians (using CGFloat.pi) rather than in degrees:
print(rotatePoint1(topLeft, -CGFloat.pi/2))
Use sin and cos functions directly
let s = sin(degrees)
let c = cos(degrees)
The iOS coordinate system is flipped compared to the standard mathematical one, so you will have to adjust the angle (you can see this in the simulation):
print(rotatePoint1(topLeft, -CGFloat.pi/2)) // (1.0000000000000002, -2.0)
print(rotatePoint1(topRight, -CGFloat.pi/2)) // (1.0000000000000002, -3.0)
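Putting those fixes together, a sketch of the corrected version of the question's rotatePoint1 (angle passed in radians, still rotating about the origin (0, 0)):

func rotatePoint1(_ point: CGPoint, _ radians: CGFloat) -> CGPoint {
    let s = sin(radians)
    let c = cos(radians)
    return CGPoint(x: c * point.x - s * point.y, y: s * point.x + c * point.y)
}

print(rotatePoint1(topLeft, -CGFloat.pi/2))  // (1.0000000000000002, -2.0)
print(rotatePoint1(topRight, -CGFloat.pi/2)) // (1.0000000000000002, -3.0)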
I'm trying to create a curly bracket in Swift from two points. The idea works fine with a straight horizontal line, because it's currently not dynamic in any way. My issue lies in finding the control points and the center dynamically, depending on the locations of the p1 and p2 points.
This is my current code:
override func viewDidLoad() {
    super.viewDidLoad()

    let path = UIBezierPath()
    let p1 = CGPointMake(100, 100)
    let p2 = CGPointMake(300, 100)
    let c1 = CGPointMake(150, 80)
    let c2 = CGPointMake(250, 80)

    var midPoint = midPointForPoints(p1, p2: p2)
    var midP1 = midPoint
    midP1.x -= 10
    var midP2 = midPoint
    midP2.x += 10
    midPoint.y -= 20

    path.moveToPoint(p1)
    path.addQuadCurveToPoint(midP1, controlPoint: c1)
    path.addLineToPoint(midPoint)
    path.addLineToPoint(midP2)
    path.addQuadCurveToPoint(p2, controlPoint: c2)

    let shape = CAShapeLayer()
    shape.lineWidth = 5
    shape.strokeColor = UIColor.redColor().CGColor
    shape.fillColor = UIColor.clearColor().CGColor
    shape.path = path.CGPath
    self.view.layer.addSublayer(shape)
}

func midPointForPoints(p1: CGPoint, p2: CGPoint) -> CGPoint {
    let deltaX = (p1.x + p2.x)/2
    let deltaY = (p1.y + p2.y)/2
    let midPoint = CGPointMake(deltaX, deltaY)
    return midPoint
}
This doesn't take the angle between the points into account, so if I were to create the two points as:
let p1 = CGPointMake(100, 100)
let p2 = CGPointMake(300, 300)
It would not find the proper control points and midpoint.
I hope someone can point me in the right direction. The idea is, of course, to eventually know only the two points (p1, p2) and create every other point dynamically; I've just typed in fixed values for the moment to make it easier for myself. I've added images of the issue to better show you.
First create a path for a brace that starts at (0, 0) and ends at (1, 0). Then apply an affine transformation that moves, scales, and rotates the path to span your designed endpoints. It needs to transform (0, 0) to your start point and (1, 0) to your end point. Creating the transformation efficiently requires some trigonometry, but I've done the homework for you:
extension UIBezierPath {
    class func brace(from start: CGPoint, to end: CGPoint) -> UIBezierPath {
        let path = self.init()
        path.move(to: .zero)
        path.addCurve(to: CGPoint(x: 0.5, y: -0.1), controlPoint1: CGPoint(x: 0, y: -0.2), controlPoint2: CGPoint(x: 0.5, y: 0.1))
        path.addCurve(to: CGPoint(x: 1, y: 0), controlPoint1: CGPoint(x: 0.5, y: 0.1), controlPoint2: CGPoint(x: 1, y: -0.2))
        let scaledCosine = end.x - start.x
        let scaledSine = end.y - start.y
        let transform = CGAffineTransform(a: scaledCosine, b: scaledSine, c: -scaledSine, d: scaledCosine, tx: start.x, ty: start.y)
        path.apply(transform)
        return path
    }
}
Result:
Here's the entire Swift playground I used to make the demo:
import UIKit
import PlaygroundSupport
extension UIBezierPath {
class func brace(from start: CGPoint, to end: CGPoint) -> UIBezierPath {
let path = self.init()
path.move(to: .zero)
path.addCurve(to: CGPoint(x: 0.5, y: -0.1), controlPoint1: CGPoint(x: 0, y: -0.2), controlPoint2: CGPoint(x: 0.5, y: 0.1))
path.addCurve(to: CGPoint(x: 1, y: 0), controlPoint1: CGPoint(x: 0.5, y: 0.1), controlPoint2: CGPoint(x: 1, y: -0.2))
let scaledCosine = end.x - start.x
let scaledSine = end.y - start.y
let transform = CGAffineTransform(a: scaledCosine, b: scaledSine, c: -scaledSine, d: scaledCosine, tx: start.x, ty: start.y)
path.apply(transform)
return path
}
}
class ShapeView: UIView {
override class var layerClass: Swift.AnyClass { return CAShapeLayer.self }
lazy var shapeLayer: CAShapeLayer = { self.layer as! CAShapeLayer }()
}
class ViewController: UIViewController {
override func loadView() {
let view = UIView(frame: CGRect(x: 0, y: 0, width: 600, height: 200))
view.backgroundColor = .white
for (i, handle) in handles.enumerated() {
handle.autoresizingMask = [ .flexibleTopMargin, .flexibleLeftMargin, .flexibleBottomMargin, .flexibleRightMargin ]
let frame = CGRect(x: view.bounds.width * 0.1 + CGFloat(i) * view.bounds.width * 0.8 - 22, y: view.bounds.height / 2 - 22, width: 44, height: 44)
handle.frame = frame
handle.shapeLayer.path = CGPath(ellipseIn: handle.bounds, transform: nil)
handle.shapeLayer.lineWidth = 2
handle.shapeLayer.lineDashPattern = [2, 6]
handle.shapeLayer.lineCap = kCALineCapRound
handle.shapeLayer.strokeColor = UIColor.blue.cgColor
handle.shapeLayer.fillColor = nil
view.addSubview(handle)
let panner = UIPanGestureRecognizer(target: self, action: #selector(pannerDidFire(panner:)))
handle.addGestureRecognizer(panner)
}
brace.shapeLayer.lineWidth = 2
brace.shapeLayer.lineCap = kCALineCapRound
brace.shapeLayer.strokeColor = UIColor.black.cgColor
brace.shapeLayer.fillColor = nil
view.addSubview(brace)
setBracePath()
self.view = view
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
setBracePath()
}
private let handles: [ShapeView] = [
ShapeView(),
ShapeView()
]
private let brace = ShapeView()
private func setBracePath() {
brace.shapeLayer.path = UIBezierPath.brace(from: handles[0].center, to: handles[1].center).cgPath
}
#objc private func pannerDidFire(panner: UIPanGestureRecognizer) {
let view = panner.view!
let offset = panner.translation(in: view)
panner.setTranslation(.zero, in: view)
var center = view.center
center.x += offset.x
center.y += offset.y
view.center = center
setBracePath()
}
}
let vc = ViewController()
PlaygroundPage.current.liveView = vc.view
The key to the problem is that when the figure is rotated, your basis vectors rotate with it. When your figure is axis-aligned, your basis vectors are u = (1, 0) and v = (0, 1).
So when you perform midPoint.y -= 20, you can see it as the same as midPoint.x -= v.x * 20; midPoint.y -= v.y * 20 where v is (0, 1). The results are the same; check for yourself.
This implementation will do what your code does, only axis independent.
let path = UIBezierPath()
let p1 = CGPointMake(100, 100)
let p2 = CGPointMake(300, 100)
let o = p1.plus(p2).divide(2.0) // origin (midpoint of p1 and p2)
let u = p2.minus(o) // base vector 1
let v = u.turn90() // base vector 2
let c1 = o.minus(u.times(0.5)).minus(v.times(0.2)) // CGPointMake(150, 80)
let c2 = o.plus(u.times(0.5)).minus(v.times(0.2)) // CGPointMake(250, 80)
var midPoint = o.minus(v.times(0.2))
var midP1 = o.minus(u.times(0.2))
var midP2 = o.plus(u.times(0.2))
Note: I set the factors to match the initial values in your implementation.
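For example, with the question's values p1 = (100, 100) and p2 = (300, 100): o = (200, 100), u = (100, 0) and v = (0, 100), so c1 = o - 0.5*u - 0.2*v = (150, 80), which matches the hard-coded control point from the question.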
Also added this CGPoint extension for convenience. Hope it helps.
extension CGPoint {
    public func plus(p: CGPoint) -> CGPoint {
        return CGPoint(x: self.x + p.x, y: self.y + p.y)
    }
    public func minus(p: CGPoint) -> CGPoint {
        return CGPoint(x: self.x - p.x, y: self.y - p.y)
    }
    public func times(f: CGFloat) -> CGPoint {
        return CGPoint(x: self.x * f, y: self.y * f)
    }
    public func divide(f: CGFloat) -> CGPoint {
        return self.times(1.0/f)
    }
    public func turn90() -> CGPoint {
        // Rotates the vector by 90 degrees: (x, y) -> (-y, x).
        return CGPoint(x: -self.y, y: x)
    }
}
I use the following CAGradientLayer:
let layer = CAGradientLayer()
layer.colors = [
UIColor.redColor().CGColor,
UIColor.greenColor().CGColor,
UIColor.blueColor().CGColor
]
layer.startPoint = CGPointMake(0, 1)
layer.endPoint = CGPointMake(1, 0)
layer.locations = [0.0, 0.6, 1.0]
But when I set the bounds property of the layer, it just stretches a square gradient. I need a result like the Sketch 3 image above.
How can I achieve this?
Update: Use context.drawLinearGradient() instead of CAGradientLayer in a manner similar to the following. It will draw gradients that are consistent with Sketch/Photoshop.
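For example, a minimal sketch of that approach (my own illustration, using the question's colors and a bottom-left to top-right direction):

import UIKit

class GradientView: UIView {
    override func draw(_ rect: CGRect) {
        guard let context = UIGraphicsGetCurrentContext() else { return }
        let colors = [UIColor.red.cgColor, UIColor.green.cgColor, UIColor.blue.cgColor] as CFArray
        let locations: [CGFloat] = [0.0, 0.6, 1.0]
        guard let gradient = CGGradient(colorsSpace: CGColorSpaceCreateDeviceRGB(),
                                        colors: colors,
                                        locations: locations) else { return }
        // Start/end are absolute points, so the angle is not distorted by the view's aspect ratio.
        let start = CGPoint(x: 0, y: bounds.height)   // bottom left
        let end = CGPoint(x: bounds.width, y: 0)      // top right
        context.drawLinearGradient(gradient, start: start, end: end, options: [])
    }
}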
If you absolutely must use CAGradientLayer, then here is the math you'll need to use...
It took some time to figure out, but from careful observation, I found out that Apple's implementation of gradients in CAGradientLayer is pretty odd:
First it converts the view to a square.
Then it applies the gradient using start/end points.
The middle gradient will indeed form a 90 degree angle in this resolution.
Finally, it squishes the view down to the original size.
This means that the middle gradient will no longer form a 90 degree angle in the new size. This contradicts the behavior of virtually every other paint application: Sketch, Photoshop, etc.
If you want to implement start/end points as it works in Sketch, you'll need to translate the start/end points to account for the fact that Apple is going to squish the view.
Steps to perform (Diagrams)
Code
import UIKit
/// Last updated 4/3/17.
/// See https://stackoverflow.com/a/43176174 for more information.
public enum LinearGradientFixer {
public static func fixPoints(start: CGPoint, end: CGPoint, bounds: CGSize) -> (CGPoint, CGPoint) {
// Naming convention:
// - a: point a
// - ab: line segment from a to b
// - abLine: line that passes through a and b
// - lineAB: line that passes through A and B
// - lineSegmentAB: line segment that passes from A to B
if start.x == end.x || start.y == end.y {
// Apple's implementation of horizontal and vertical gradients works just fine
return (start, end)
}
// 1. Convert to absolute coordinates
let startEnd = LineSegment(start, end)
let ab = startEnd.multiplied(multipliers: (x: bounds.width, y: bounds.height))
let a = ab.p1
let b = ab.p2
// 2. Calculate perpendicular bisector
let cd = ab.perpendicularBisector
// 3. Scale to square coordinates
let multipliers = calculateMultipliers(bounds: bounds)
let lineSegmentCD = cd.multiplied(multipliers: multipliers)
// 4. Create scaled perpendicular bisector
let lineSegmentEF = lineSegmentCD.perpendicularBisector
// 5. Unscale back to rectangle
let ef = lineSegmentEF.divided(divisors: multipliers)
// 6. Extend line
let efLine = ef.line
// 7. Extend two lines from a and b parallel to cd
let aParallelLine = Line(m: cd.slope, p: a)
let bParallelLine = Line(m: cd.slope, p: b)
// 8. Find the intersection of these lines
let g = efLine.intersection(with: aParallelLine)
let h = efLine.intersection(with: bParallelLine)
if let g = g, let h = h {
// 9. Convert to relative coordinates
let gh = LineSegment(g, h)
let result = gh.divided(divisors: (x: bounds.width, y: bounds.height))
return (result.p1, result.p2)
}
return (start, end)
}
private static func unitTest() {
let w = 320.0
let h = 60.0
let bounds = CGSize(width: w, height: h)
let a = CGPoint(x: 138.5, y: 11.5)
let b = CGPoint(x: 151.5, y: 53.5)
let ab = LineSegment(a, b)
let startEnd = ab.divided(divisors: (x: bounds.width, y: bounds.height))
let start = startEnd.p1
let end = startEnd.p2
let points = fixPoints(start: start, end: end, bounds: bounds)
let pointsSegment = LineSegment(points.0, points.1)
let result = pointsSegment.multiplied(multipliers: (x: bounds.width, y: bounds.height))
print(result.p1) // expected: (90.6119039567129, 26.3225059181603)
print(result.p2) // expected: (199.388096043287, 38.6774940818397)
}
}
private func calculateMultipliers(bounds: CGSize) -> (x: CGFloat, y: CGFloat) {
if bounds.height <= bounds.width {
return (x: 1, y: bounds.width/bounds.height)
} else {
return (x: bounds.height/bounds.width, y: 1)
}
}
private struct LineSegment {
let p1: CGPoint
let p2: CGPoint
init(_ p1: CGPoint, _ p2: CGPoint) {
self.p1 = p1
self.p2 = p2
}
init(p1: CGPoint, m: CGFloat, distance: CGFloat) {
self.p1 = p1
let line = Line(m: m, p: p1)
let measuringPoint = line.point(x: p1.x + 1)
let measuringDeltaH = LineSegment(p1, measuringPoint).distance
let deltaX = distance/measuringDeltaH
self.p2 = line.point(x: p1.x + deltaX)
}
var length: CGFloat {
let dx = p2.x - p1.x
let dy = p2.y - p1.y
return sqrt(dx * dx + dy * dy)
}
var distance: CGFloat {
return p1.x <= p2.x ? length : -length
}
var midpoint: CGPoint {
return CGPoint(x: (p1.x + p2.x)/2, y: (p1.y + p2.y)/2)
}
var slope: CGFloat {
return (p2.y-p1.y)/(p2.x-p1.x)
}
var perpendicularSlope: CGFloat {
return -1/slope
}
var line: Line {
return Line(p1, p2)
}
var perpendicularBisector: LineSegment {
let p1 = LineSegment(p1: midpoint, m: perpendicularSlope, distance: -distance/2).p2
let p2 = LineSegment(p1: midpoint, m: perpendicularSlope, distance: distance/2).p2
return LineSegment(p1, p2)
}
func multiplied(multipliers: (x: CGFloat, y: CGFloat)) -> LineSegment {
return LineSegment(
CGPoint(x: p1.x * multipliers.x, y: p1.y * multipliers.y),
CGPoint(x: p2.x * multipliers.x, y: p2.y * multipliers.y))
}
func divided(divisors: (x: CGFloat, y: CGFloat)) -> LineSegment {
return multiplied(multipliers: (x: 1/divisors.x, y: 1/divisors.y))
}
}
private struct Line {
let m: CGFloat
let b: CGFloat
/// y = mx+b
init(m: CGFloat, b: CGFloat) {
self.m = m
self.b = b
}
/// y-y1 = m(x-x1)
init(m: CGFloat, p: CGPoint) {
// y = m(x-x1) + y1
// y = mx-mx1 + y1
// y = mx + (y1 - mx1)
// b = y1 - mx1
self.m = m
self.b = p.y - m*p.x
}
init(_ p1: CGPoint, _ p2: CGPoint) {
self.init(m: LineSegment(p1, p2).slope, p: p1)
}
func y(x: CGFloat) -> CGFloat {
return m*x + b
}
func point(x: CGFloat) -> CGPoint {
return CGPoint(x: x, y: y(x: x))
}
func intersection(with line: Line) -> CGPoint? {
// Line 1: y = mx + b
// Line 2: y = nx + c
// mx+b = nx+c
// mx-nx = c-b
// x(m-n) = c-b
// x = (c-b)/(m-n)
let n = line.m
let c = line.b
if m-n == 0 {
// lines are parallel
return nil
}
let x = (c-b)/(m-n)
return point(x: x)
}
}
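A minimal usage sketch (the layer setup mirrors the question; fixPoints is the entry point defined above):

let gradientLayer = CAGradientLayer()
gradientLayer.frame = CGRect(x: 0, y: 0, width: 320, height: 60)
gradientLayer.colors = [UIColor.red.cgColor, UIColor.green.cgColor, UIColor.blue.cgColor]
gradientLayer.locations = [0.0, 0.6, 1.0]
// Correct the unit-space start/end points for this layer's aspect ratio before assigning them.
let (start, end) = LinearGradientFixer.fixPoints(start: CGPoint(x: 0, y: 1),
                                                 end: CGPoint(x: 1, y: 0),
                                                 bounds: gradientLayer.bounds.size)
gradientLayer.startPoint = start
gradientLayer.endPoint = end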
Proof it works regardless of rectangle size
I tried this with a view size=320x60, gradient=[red#0,green#0.5,blue#1], startPoint = (0,1), and endPoint = (1,0).
Sketch 3:
Actual generated iOS screenshot using the code above:
Note that the angle of the green line looks 100% accurate. The difference lies in how the red and blue are blended. I can't tell if that's because I'm calculating the start/end points incorrectly, or if it's just a difference in how Apple blends gradients vs. how Sketch blends gradients.
Here's the math to fix the endPoint
let width = bounds.width
let height = bounds.height
let dx = endPoint.x - startPoint.x
let dy = endPoint.y - startPoint.y
if width == 0 || height == 0 || width == height || dx == 0 || dy == 0 {
return
}
let ux = dx * width / height
let uy = dy * height / width
let coef = (dx * ux + dy * uy) / (ux * ux + uy * uy)
endPoint = CGPoint(x: startPoint.x + coef * ux, y: startPoint.y + coef * uy)
The full code of the layoutSubviews method is:
override func layoutSubviews() {
    super.layoutSubviews()
    let gradientOffset = self.bounds.height / self.bounds.width / 2
    self.gradientLayer.startPoint = CGPointMake(0, 0.5 + gradientOffset)
    self.gradientLayer.endPoint = CGPointMake(1, 0.5 - gradientOffset)
    self.gradientLayer.frame = self.bounds
}
I am having a slightly more mathematical problem with 3D programming, and I am hoping you can help me!
I am trying to create a 3D game using SceneKit with an isometric camera angle.
This code creates my orthographic camera:
var cameraNode = SCNNode()
cameraNode.camera = SCNCamera()
cameraNode.name = "Camera"
cameraNode.position = SCNVector3Make(-5.0, -5.0, 10.0)
cameraNode.eulerAngles = SCNVector3Make(PI / 3.0, 0.0, -PI / 4.0)
cameraNode.camera?.usesOrthographicProjection = true
cameraNode.camera?.orthographicScale = 7.0
scene.rootNode.addChildNode(cameraNode)
Now I want to move the camera using a pan gesture, producing a scrolling feel. To make this possible the camera shouldn't move vertically, only horizontally. The touch location on screen and the unprojected position in the 3D world should stay the same while moving.
I thought about converting the 2D translation into a 3D difference and ignoring the vertical component. This code actually works and almost produces the desired result, but the speed is not correct. If I pan, the camera seems to accelerate and not react correctly:
var previousTranslation = CGPointMake(0.0, 0.0)

func pan(gesture: UIPanGestureRecognizer) {
    let view = self.view as SCNView
    let translation = gesture.translationInView(view)
    let location = gesture.locationInView(view)

    let diffTrans = translation - previousTranslation
    previousTranslation = translation

    let cameraNode = scene.rootNode.childNodeWithName("Camera", recursively: false)

    let worldPointTrans = view.unprojectPoint(SCNVector3Make(-Float(diffTrans.x), -Float(diffTrans.y), 0.0))
    let worldPoint0 = view.unprojectPoint(SCNVector3Make(0.0, 0.0, 0.0))

    var diff = worldPointTrans - worldPoint0
    diff.x = diff.x / Float(cameraNode!.camera!.orthographicScale)
    diff.y = diff.y / Float(cameraNode!.camera!.orthographicScale)
    diff.z = 0

    cameraNode?.position += diff
}
Does anybody know a sophisticated way of converting a screen translation into a horizontal 3D translation, ignoring the vertical axis?
Thank you in advance :)
EDIT:
The pan now works for horizontal translation, but not for vertical, because I set the difference on the z axis to zero.
I found my own solution!
I calculate a ray at the start location of the gesture (P1-P2) and a ray at the translated location (Q1-Q2). I then intersect both rays with the XY plane to obtain the points P0 and Q0.
The difference between P0 and Q0 is the unprojected translation.
This technique should also work with a non-orthographic camera, but I haven't tested that yet.
It seems to me that it works, but if anybody could mathematically confirm this assumption, I would be glad to read that :)
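For reference, the intersection used below: each ray is parameterized as P(t) = P1 + t * (P2 - P1), where P1 and P2 are the unprojected points at screen depths z = 0 and z = 1. Requiring the world z component to be zero gives t = -P1.z / (P2.z - P1.z), and substituting t back yields the intersection P0 on the XY plane (and analogously Q0).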
Here is the code:
var previousLocation = SCNVector3(x: 0, y: 0, z: 0)

func pan(gesture: UIPanGestureRecognizer) {
    let view = self.view as SCNView
    let translation = gesture.translationInView(view)
    let location = gesture.locationInView(view)
    let secLocation = location + translation

    // Rays through the original and the translated screen points (near plane z = 0, far plane z = 1).
    let P1 = view.unprojectPoint(SCNVector3(x: Float(location.x), y: Float(location.y), z: 0.0))
    let P2 = view.unprojectPoint(SCNVector3(x: Float(location.x), y: Float(location.y), z: 1.0))
    let Q1 = view.unprojectPoint(SCNVector3(x: Float(secLocation.x), y: Float(secLocation.y), z: 0.0))
    let Q2 = view.unprojectPoint(SCNVector3(x: Float(secLocation.x), y: Float(secLocation.y), z: 1.0))

    // Intersect both rays with the XY plane (z = 0).
    let t1 = -P1.z / (P2.z - P1.z)
    let t2 = -Q1.z / (Q2.z - Q1.z)

    let x1 = P1.x + t1 * (P2.x - P1.x)
    let y1 = P1.y + t1 * (P2.y - P1.y)
    let P0 = SCNVector3Make(x1, y1, 0)

    let x2 = Q1.x + t2 * (Q2.x - Q1.x) // note: t2 here, not t1
    let y2 = Q1.y + t2 * (Q2.y - Q1.y)
    let Q0 = SCNVector3Make(x2, y2, 0)

    // The +, - and *= operators on CGPoint/SCNVector3 are custom overloads (not shown here).
    var diffR = Q0 - P0
    diffR *= -1

    let cameraNode = view.scene!.rootNode.childNodeWithName("Camera", recursively: false)

    switch gesture.state {
    case .Began:
        previousLocation = cameraNode!.position
    case .Changed:
        cameraNode?.position = previousLocation + diffR
    default:
        break
    }
}
I've calculated the equations for isometric panning; the code is below.
// Camera pan - isometric logic
func pan(gesture: UIPanGestureRecognizer) {
    let view = self.sceneView as SCNView
    let cameraNode = view.scene!.rootNode.childNode(withName: "Camera", recursively: false)
    let translation = gesture.translation(in: view)
    let constant: Float = 30.0

    // Convert the screen-space pan into world-space x/y movement for the isometric camera.
    var translateX = Float(translation.y) * sin(.pi/4.0) / cos(.pi/3.0) - Float(translation.x) * cos(.pi/4.0)
    var translateY = Float(translation.y) * cos(.pi/4.0) / cos(.pi/3.0) + Float(translation.x) * sin(.pi/4.0)
    translateX = translateX / constant
    translateY = translateY / constant

    switch gesture.state {
    case .began:
        previousLocation = cameraNode!.position
    case .changed:
        cameraNode?.position = SCNVector3Make((previousLocation.x + translateX), (previousLocation.y + translateY), (previousLocation.z))
    default:
        break
    }
}
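Roughly, the division by cos(.pi/3) undoes the foreshortening of vertical screen motion caused by the camera tilt, the sin/cos(.pi/4) terms split each screen axis between the world x and y axes for the 45-degree azimuth, and the divisor constant of 30 matches the 30x magnification used for orthographicScale below.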
And to get the scaling right, you need to use the screen height as a variable for orthographicScale. The scaling I used here is 30x magnification; note that the same 30 is used for the constant in the code above.
let screenSize: CGRect = UIScreen.main.bounds
let screenHeight = screenSize.height
let cameraNode = SCNNode()
cameraNode.camera = SCNCamera()
cameraNode.name = "Camera"
let cameraDist = Float(20.0)
let cameraPosX = cameraDist*(-1.0)*cos(.pi/4.0)*cos(.pi/6.0)
let cameraPosY = cameraDist*(-1.0)*sin(.pi/4.0)*cos(.pi/6.0)
let cameraPosZ = cameraDist*sin(.pi/6)
cameraNode.position = SCNVector3Make(cameraPosX, cameraPosY, cameraPosZ)
cameraNode.eulerAngles = SCNVector3Make(.pi / 3.0, 0.0, -.pi / 4.0)
cameraNode.camera?.usesOrthographicProjection = true
cameraNode.camera?.orthographicScale = Double(screenHeight)/(2.0*30.0) //30x magnification constant. larger number = larger object
scene.rootNode.addChildNode(cameraNode)
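For example, on an 812-point-tall screen this gives orthographicScale = 812 / (2 * 30) ≈ 13.5, i.e. the camera sees about 27 world units vertically, so one world unit spans roughly 30 points on screen.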