I'm trying to learn ARKit and make a small demo app to draw in 3D.
The following is the code I wrote and so far there are no problems:
import UIKit
import ARKit
/// Demo view controller that lets the user "draw" red spheres in AR space while
/// the DRAW button is held, erase everything with DEL, and otherwise shows a
/// small purple "pointer" sphere at the centre of the camera frustum.
class ViewController: UIViewController, ARSCNViewDelegate {

    // Outlets. The original read `#IBOutlet`, which is not valid Swift:
    // attributes are spelled with `@`.
    @IBOutlet weak var sceneView: ARSCNView!
    @IBOutlet weak var DRAW: UIButton!
    @IBOutlet weak var DEL: UIButton!

    /// World-tracking configuration used by the AR session.
    let config = ARWorldTrackingConfiguration()

    override func viewDidLoad() {
        super.viewDidLoad()
        // Assign the delegate before starting the session so that no early
        // renderer callbacks are missed (the original ran the session first).
        self.sceneView.delegate = self
        self.sceneView.session.run(config)
    }

    /// Called once per rendered frame; places content one unit in front of the camera.
    func renderer(_ renderer: SCNSceneRenderer, willRenderScene scene: SCNScene, atTime time: TimeInterval) {
        guard let pointOfView = sceneView.pointOfView else { return }
        let transform = pointOfView.transform
        // The negated third column of the transform is the camera's forward
        // direction; the fourth column is its world-space position.
        let cameraOrientation = SCNVector3(-transform.m31, -transform.m32, -transform.m33)
        let cameraLocation = SCNVector3(transform.m41, transform.m42, transform.m43)
        // A point one unit in front of the camera, in world space.
        let cameraCurrentPosition = cameraOrientation + cameraLocation

        // Button state (isTouchInside) and scene mutation belong on the main thread.
        DispatchQueue.main.async {
            if self.DRAW.isTouchInside {
                let sphereNode = SCNNode(geometry: SCNSphere(radius: 0.02))
                sphereNode.position = cameraCurrentPosition
                sphereNode.geometry?.firstMaterial?.diffuse.contents = UIColor.red
                self.sceneView.scene.rootNode.addChildNode(sphereNode)
                print("RED Button is Pressed")
            } else if self.DEL.isTouchInside {
                // Remove every node previously added to the scene.
                self.sceneView.scene.rootNode.enumerateChildNodes { node, _ in
                    node.removeFromParentNode()
                }
            } else {
                // Replace the previous pointer with a fresh one at the new position.
                let pointer = SCNNode(geometry: SCNSphere(radius: 0.01))
                pointer.name = "pointer"
                pointer.position = cameraCurrentPosition
                pointer.geometry?.firstMaterial?.diffuse.contents = UIColor.purple
                self.sceneView.scene.rootNode.enumerateChildNodes { node, _ in
                    if node.name == "pointer" {
                        node.removeFromParentNode()
                    }
                }
                self.sceneView.scene.rootNode.addChildNode(pointer)
            }
        }
    }
}
/// Component-wise vector addition for SCNVector3.
func + (lhs: SCNVector3, rhs: SCNVector3) -> SCNVector3 {
    return SCNVector3(lhs.x + rhs.x, lhs.y + rhs.y, lhs.z + rhs.z)
}
As you can see, I set the scene and configure it,
I create a button to draw when pressed, a pointer (or viewfinder) that takes the center of the scene and a button to delete the nodes inserted.
Now I would like to be able to move the cameraCurrentPosition to a different point from the center: I would like to move it if possible with a touch on the screen taking the position of the finger.
If possible, could someone help me with the code?
Generally speaking, you can't programmatically move the Camera within an ARSCN, the camera transform is the physical position of the device relative to the virtual scene.
With that being said, one way you could draw the user touches to the screen is using the touchesMoved method within your View Controller.
/// Root nodes, one per stroke: each set of touches draws under its own root.
var touchRoots: [SCNNode] = []

override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    // Only react when a touch actually arrived and the camera node exists.
    guard touches.first != nil, let pov = self.sceneView.pointOfView else { return }
    let cameraTransform = pov.transform
    // Negated third column = camera forward direction; fourth column = position.
    let forward = SCNVector3(-cameraTransform.m31, -cameraTransform.m32, -cameraTransform.m33)
    let position = SCNVector3(cameraTransform.m41, cameraTransform.m42, cameraTransform.m43)
    // Centre of the camera frustum, one unit in front of the device.
    let frustumCenter = forward + position

    DispatchQueue.main.async {
        guard let sceneView = self.sceneView else { return }
        // Empty root node that will collect the incoming points for this stroke.
        let strokeRoot = SCNNode()
        strokeRoot.position = frustumCenter
        // Touches projected in Z appear smaller than expected;
        // scale the root up to compensate.
        strokeRoot.scale = SCNVector3(1.25, 1.25, 1.25)
        sceneView.scene.rootNode.addChildNode(strokeRoot)
        // Keep the stroke always facing the camera; gimbal locking avoids
        // rotation glitches from the look-at constraint.
        let lookAt = SCNLookAtConstraint(target: sceneView.pointOfView)
        lookAt.isGimbalLockEnabled = true
        strokeRoot.constraints = [lookAt]
        self.touchRoots.append(strokeRoot)
    }
}
/// Adds a small white sphere to the active stroke root for every finger move.
/// (The original declared `override func func touchesMoved` — the duplicated
/// `func` keyword does not compile.)
override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
    if let touch = touches.first {
        // Touch position expressed as an offset from the centre of the view.
        let translation = touch.location(in: self.view)
        let translationFromCenter = CGPoint(x: translation.x - (0.5 * self.view.frame.width),
                                            y: translation.y - (0.5 * self.view.frame.height))
        // Mutate the scene graph on the main thread.
        DispatchQueue.main.async {
            guard let touchRootNode = self.touchRoots.last else { return }
            let sphereNode = SCNNode(geometry: SCNSphere(radius: 0.015))
            // Flip the screen offset and shrink it (1 pt -> 1 mm) so the point
            // maps sensibly into the root node's local space.
            sphereNode.position = SCNVector3(-1 * Float(translationFromCenter.x / 1000),
                                             -1 * Float(translationFromCenter.y / 1000),
                                             0)
            sphereNode.geometry?.firstMaterial?.diffuse.contents = UIColor.white
            touchRootNode.addChildNode(sphereNode) // add point to the active root
        }
    }
}
Note: solution only handles a single touch, but it is simple enough to extend the example to add multi-touch support.
Related
I have a UIView that I want to position on top of a certain area in an SKScene.
This is the point that I have in my SKScene:
guard let point = elementCGPoints.centroid() else {
return
}
And I'm trying to use this SKView method to convert from scene coordinates to view coordinates; based on this SO discussion:
let convertedPoint = canvasScene.convertPoint(toView: point)
selectionBox.frame = CGRect(x: convertedPoint.x, y: convertedPoint.y, width: 100, height: 100)
However, this ends up always displaying at a point far beyond the range of my view.
Any intuition about how to do this right?
Here are two ways. The first is responding to UITouch inside an SKScene:
/// Mirrors the first touch in two coordinate spaces: the window (for the
/// overlaid UIView, via the delegate) and the scene (for the SKNode).
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    guard let firstTouch = touches.first else { return }
    // Window coordinates drive the UIKit view; scene coordinates drive the node.
    gameSceneDelegate?.moveView(toPosition: firstTouch.location(in: firstTouch.window))
    shape.position = firstTouch.location(in: self)
}
this is easiest. but your question suggests you want to use convertPoint in which case you could convert an arbitrary point from scene to view like this
//inside SKScene
// Pick an arbitrary point in scene coordinates...
let random_point = CGPoint(x: CGFloat.random(in: 0...100), y: CGFloat.random(in: 0...100))
// ...and convert it into the hosting SKView's coordinate space.
let converted_point = self.convertPoint(toView: random_point)
gameSceneDelegate?.moveView(toPosition: converted_point) //move a UIView to window position (via delegate pattern)
shape.position = random_point //move a SKNode to same position in scene
and the UIViewController delegate would look like this
// Delegate through which the scene asks its hosting view controller to move a
// UIView. Constraining the protocol to UIViewController keeps adoption to UIKit
// controllers.
protocol GameSceneDelegate : UIViewController {
func moveView(toPosition position:CGPoint)
}
// The hosting view controller adopts the delegate and repositions its view.
class GameViewController: UIViewController, GameSceneDelegate {
[...]
// Moves `myView` so its top-left corner sits at the given point.
func moveView(toPosition p:CGPoint) {
myView?.frame.origin = p
}
}
This class renders my SCN file as well.
import UIKit
import ARKit
/// Minimal AR controller: shows a bundled SceneKit model (`duck.scn`) in an
/// ARSCNView backed by world tracking.
class SimpleViewController: UIViewController {

    // The original read `#IBOutlet`; Swift attributes are spelled with `@`.
    @IBOutlet var sceneView: ARSCNView!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Force-unwrap is acceptable here: a missing bundled asset is a
        // programmer error that should fail loudly during development.
        sceneView.scene = SCNScene(named: "duck.scn", inDirectory: "models.scnassets/furniture")!
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        // Start (or restart) world tracking whenever the view becomes visible.
        sceneView.session.run(ARWorldTrackingConfiguration())
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        // Pause the session to save power while the view is off-screen.
        sceneView.session.pause()
    }
}
But I don't know how I can rotate or scale my object (the duck.scn file). I would like the user to be able to interact with my object.
If you want to scale an SCNNode you can do something like this:
/// Scales `currentNode` in response to a pinch gesture.
///
/// - Parameter gesture: the UIPinchGestureRecognizer driving the scale.
///
/// (The original used `#objc`; the attribute is spelled `@objc`.)
@objc func scaleObject(gesture: UIPinchGestureRecognizer) {
    guard let nodeToScale = currentNode else { return }
    if gesture.state == .changed {
        // Multiply the node's current scale by the incremental gesture scale.
        let pinchScaleX: CGFloat = gesture.scale * CGFloat((nodeToScale.scale.x))
        let pinchScaleY: CGFloat = gesture.scale * CGFloat((nodeToScale.scale.y))
        let pinchScaleZ: CGFloat = gesture.scale * CGFloat((nodeToScale.scale.z))
        nodeToScale.scale = SCNVector3Make(Float(pinchScaleX), Float(pinchScaleY), Float(pinchScaleZ))
        // Reset so the next callback reports a delta rather than a cumulative scale.
        gesture.scale = 1
    }
    if gesture.state == .ended { }
}
Whereby current node refers to an SCNNode.
If you want to move an SCNNode you can do something like this:
// Drags `currentNode` along detected planes: hit-tests the touch location
// against existing planes and snaps the node to the hit's world position.
override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
//1. Get The Current Touch Point In The AR View
guard let currentTouchPoint = touches.first?.location(in: self.augmentedRealityView),
//2. Hit-Test Against Existing Detected Planes; Bail Out If Nothing Is Hit
let hitTest = augmentedRealityView.hitTest(currentTouchPoint, types: .existingPlane).first else { return }
//3. Convert To World Coordinates
let worldTransform = hitTest.worldTransform
//4. Set The New Position (the translation lives in the matrix's fourth column)
let newPosition = SCNVector3(worldTransform.columns.3.x, worldTransform.columns.3.y, worldTransform.columns.3.z)
//5. Apply To The Node (`float3` is the pre-SIMD3<Float> spelling from this Swift era)
currentNode.simdPosition = float3(newPosition.x, newPosition.y, newPosition.z)
}
If you would like to rotate an SCNNode you first should create a variable(s) to store the original angle e.g:
//Stores the node's Y rotation when the last gesture ended, so the next
//rotation gesture continues from where the previous one left off.
var currentAngleY: Float = 0.0
Then you can do something like this:
/// Rotates `currentNode` around its Y axis with a rotation gesture.
///
/// - Parameter gesture: the UIRotationGestureRecognizer driving the rotation.
///
/// (The original used `#objc`; the attribute is spelled `@objc`.)
@objc func rotateNode(_ gesture: UIRotationGestureRecognizer) {
    //1. Get The Current Rotation From The Gesture
    let rotation = Float(gesture.rotation)
    //2. While The Gesture Changes, Offset The Stored Angle By The Live Rotation
    if gesture.state == .changed {
        currentNode.eulerAngles.y = currentAngleY + rotation
    }
    //3. When The Gesture Ends, Remember The Final Angle For The Next Gesture
    if (gesture.state == .ended) {
        currentAngleY = currentNode.eulerAngles.y
    }
}
If you want to interact directly with your SCNScene (although I believe not in ARKit) you can use the following method:
var allowsCameraControl: Bool { get set }
Whereby:
If you set this property to true, SceneKit creates a camera node and
handles mouse or touch events to allow the user to pan, zoom, and
rotate their view of the scene. (Enabling user camera control does not
modify camera objects already existing in the scene graph or the nodes
containing them.)
An example would thus be:
sceneView.scene = SCNScene(named: "duck.scn", inDirectory: "models.scnassets/furniture")!
sceneView.allowsCameraControl = true;
I am trying to create an Image view which I can move and scale on screen. The problem is that when I change the scale of the Image, the movement system seems to be broken.
I wrote some code to drag the object from an anchor point which could be different from the center of the UIImage, but the scale ruined the process.
/*
See LICENSE folder for this sample’s licensing information.
Abstract:
Main view controller for the AR experience.
*/
import ARKit
import SceneKit
import UIKit
import ModelIO
/// Face-tracking AR view controller that also manages draggable, scalable,
/// rotatable 2D "stickers" layered over the AR view.
class ViewController: UIViewController, ARSessionDelegate, UIGestureRecognizerDelegate {

    // MARK: Outlets
    // The original read `#IBOutlet`; Swift attributes are spelled with `@`.
    @IBOutlet var sceneView: ARSCNView!
    @IBOutlet weak var blurView: UIVisualEffectView!
    @IBOutlet weak var dropdown: UIPickerView!
    @IBOutlet weak var AddStickerButton: UIButton!
    @IBOutlet weak var deleteStickerButton: UIImageView!

    /// Offset between the touch point and the centre of the sticker being dragged.
    var offset: CGPoint = CGPoint.zero
    /// True while a drag is in progress and the delete target is visible.
    var isDeleteVisible: Bool = false
    /// Asset names for the available stickers.
    let array: [String] = ["HappyHeart_Lisa", "Logo_bucato", "Sweety_2_Lisa", "Sweety_Lisa", "Tonglue_Lisa"]

    // First child view controller that is a StatusViewController.
    lazy var statusViewController: StatusViewController = {
        return childViewControllers.lazy.flatMap({ $0 as? StatusViewController }).first!
    }()

    /// All stickers currently on screen.
    var stickers = [Sticker]()

    // MARK: Properties
    var myScene: SCNScene!

    /// Convenience accessor for the session owned by ARSCNView.
    var session: ARSession {
        // NOTE: the original evaluated `sceneView.session.configuration` here as
        // a statement with no effect; that dead line has been removed.
        //sceneView.scene.background.contents = UIColor.black
        return sceneView.session
    }

    // Tracks which virtual content (mask type) is currently selected.
    var nodeForContentType = [VirtualContentType: VirtualFaceNode]()
    // Delegates per-frame face updates to VirtualContentUpdater.swift.
    let contentUpdater = VirtualContentUpdater()

    var selectedVirtualContent: VirtualContentType = .faceGeometry {
        didSet {
            // Set the selected content based on the content type.
            contentUpdater.virtualFaceNode = nodeForContentType[selectedVirtualContent]
        }
    }

    // MARK: - View Controller Life Cycle

    override func viewDidLoad() {
        super.viewDidLoad()
        sceneView.delegate = contentUpdater
        sceneView.session.delegate = self
        sceneView.automaticallyUpdatesLighting = true
        createFaceGeometry()

        // Set the initial face content, if any.
        contentUpdater.virtualFaceNode = nodeForContentType[selectedVirtualContent]

        // Hook up status view controller callback(s).
        statusViewController.restartExperienceHandler = { [unowned self] in
            self.restartExperience()
        }

        // A pinch scales and a rotation rotates whichever sticker is being moved.
        let pinchGesture = UIPinchGestureRecognizer(target: self, action: #selector(scale))
        let rotationGesture = UIRotationGestureRecognizer(target: self, action: #selector(rotate))
        pinchGesture.delegate = self
        rotationGesture.delegate = self
        view.addGestureRecognizer(pinchGesture)
        view.addGestureRecognizer(rotationGesture)
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        /*
         AR experiences typically involve moving the device without
         touch input for some time, so prevent auto screen dimming.
         */
        UIApplication.shared.isIdleTimerDisabled = true
        resetTracking()
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        session.pause()
    }

    // MARK: - Setup

    /// - Tag: CreateARSCNFaceGeometry
    func createFaceGeometry() {
        // This relies on the earlier check of `ARFaceTrackingConfiguration.isSupported`.
        let device = sceneView.device!
        let maskGeometry = ARSCNFaceGeometry(device: device)!
        let glassesGeometry = ARSCNFaceGeometry(device: device)!
        nodeForContentType = [
            .faceGeometry: Mask(geometry: maskGeometry),
            .overlayModel: GlassesOverlay(geometry: glassesGeometry),
            .blendShapeModel: RobotHead(),
            .sfere: RobotHead()
        ]
    }

    // MARK: - ARSessionDelegate

    func session(_ session: ARSession, didFailWithError error: Error) {
        guard error is ARError else { return }
        let errorWithInfo = error as NSError
        let messages = [
            errorWithInfo.localizedDescription,
            errorWithInfo.localizedFailureReason,
            errorWithInfo.localizedRecoverySuggestion
        ]
        // Drop nil entries and present the remaining messages together.
        let errorMessage = messages.flatMap({ $0 }).joined(separator: "\n")
        DispatchQueue.main.async {
            self.displayErrorMessage(title: "The AR session failed.", message: errorMessage)
        }
    }

    func sessionWasInterrupted(_ session: ARSession) {
        blurView.isHidden = false
        statusViewController.showMessage("""
        SESSION INTERRUPTED
        The session will be reset after the interruption has ended.
        """, autoHide: false)
    }

    func sessionInterruptionEnded(_ session: ARSession) {
        blurView.isHidden = true
        DispatchQueue.main.async {
            self.resetTracking()
        }
    }

    /// - Tag: ARFaceTrackingSetup
    func resetTracking() {
        statusViewController.showMessage("STARTING A NEW SESSION")
        guard ARFaceTrackingConfiguration.isSupported else { return }
        let configuration = ARFaceTrackingConfiguration()
        configuration.isLightEstimationEnabled = true
        session.run(configuration, options: [.resetTracking, .removeExistingAnchors])
    }

    // MARK: - Interface Actions

    /// - Tag: restartExperience
    func restartExperience() {
        // Disable Restart button for a while in order to give the session enough time to restart.
        statusViewController.isRestartExperienceButtonEnabled = false
        DispatchQueue.main.asyncAfter(deadline: .now() + 5.0) {
            self.statusViewController.isRestartExperienceButtonEnabled = true
        }
        resetTracking()
    }

    // MARK: - Error handling

    func displayErrorMessage(title: String, message: String) {
        // Blur the background.
        blurView.isHidden = false
        // Present an alert informing about the error that has occurred.
        let alertController = UIAlertController(title: title, message: message, preferredStyle: .alert)
        let restartAction = UIAlertAction(title: "Restart Session", style: .default) { _ in
            alertController.dismiss(animated: true, completion: nil)
            self.blurView.isHidden = true
            self.resetTracking()
        }
        alertController.addAction(restartAction)
        present(alertController, animated: true, completion: nil)
    }

    // MARK: - Stickers

    /// Creates a new sticker and installs it in the view hierarchy.
    func createNewSticker() {
        stickers.append(Sticker(view: self.view, viewCtrl: self))
    }

    // The original read `#IBAction`; Swift attributes are spelled with `@`.
    @IBAction func addNewSticker(_ sender: Any) {
        createNewSticker()
    }

    // MARK: - Touch handling (moving stickers)

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
        // `touches` is already Set<UITouch>; the original's `as! Set<UITouch>`
        // self-cast was redundant.
        for touch in touches {
            var location = touch.location(in: self.view)
            for sticker in stickers {
                if sticker.imageView.frame.contains(location) && !isSomeOneMoving() {
                    // Convert the grab point into an offset from the sticker's
                    // centre, expressed in the (possibly scaled) frame's units.
                    offset = touch.location(in: sticker.imageView)
                    let offsetPercentage = CGPoint(x: offset.x / sticker.imageView.bounds.width,
                                                   y: offset.y / sticker.imageView.bounds.height)
                    let offsetScaled = CGPoint(x: sticker.imageView.frame.width * offsetPercentage.x,
                                               y: sticker.imageView.frame.height * offsetPercentage.y)
                    offset.x = (sticker.imageView.frame.width / 2) - offsetScaled.x
                    offset.y = (sticker.imageView.frame.height / 2) - offsetScaled.y
                    location = touch.location(in: self.view)
                    location.x = (location.x + offset.x)
                    location.y = (location.y + offset.y)
                    sticker.imageView.center = location
                    disableAllStickersMovements()
                    isDeleteVisible = true
                    sticker.isStickerMoving = true
                    deleteStickerButton.isHidden = false
                }
            }
        }
    }

    /// Clears the "moving" flag on every sticker.
    func disableAllStickersMovements() {
        for sticker in stickers {
            sticker.isStickerMoving = false
        }
    }

    /// Returns true when any sticker is currently being dragged.
    func isSomeOneMoving() -> Bool {
        for sticker in stickers {
            if sticker.isStickerMoving {
                return true
            }
        }
        return false
    }

    /// Last touch location, tested against the delete target on release.
    var lastLocationTouched: CGPoint = CGPoint.zero
    var lastStickerTouched: Sticker = Sticker()

    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
        for touch in touches {
            var location = touch.location(in: self.view)
            for sticker in stickers {
                if sticker.imageView.frame.contains(location) && sticker.isStickerMoving {
                    lastLocationTouched = location
                    location = touch.location(in: self.view)
                    location.x = (location.x + offset.x)
                    location.y = (location.y + offset.y)
                    sticker.imageView.center = location
                }
                // Dim the sticker while it hovers over the delete target.
                if deleteStickerButton.frame.contains(lastLocationTouched) && isDeleteVisible && sticker.isStickerMoving {
                    sticker.imageView.alpha = CGFloat(0.5)
                } else {
                    sticker.imageView.alpha = CGFloat(1)
                }
            }
        }
    }

    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
        // Delete any sticker dropped on the delete target, then reset drag state.
        for sticker in stickers {
            if deleteStickerButton.frame.contains(lastLocationTouched) && isDeleteVisible && sticker.isStickerMoving {
                removeASticker(sticker: sticker)
                disableAllStickersMovements()
            }
        }
        disableAllStickersMovements()
        isDeleteVisible = false
        deleteStickerButton.isHidden = true
    }

    /// Removes a sticker from both the view hierarchy and the model array.
    func removeASticker(sticker: Sticker) {
        sticker.imageView.removeFromSuperview()
        let stickerPosition = stickers.index(of: sticker)!
        stickers.remove(at: stickerPosition)
        for sticker in stickers {
            sticker.isStickerMoving = false
        }
    }

    /// Transform captured when a pinch begins; scaling is applied relative to it.
    var identity = CGAffineTransform.identity

    // The original read `#objc`; Swift attributes are spelled with `@`.
    @objc func scale(_ gesture: UIPinchGestureRecognizer) {
        for sticker in stickers {
            if sticker.isStickerMoving {
                switch gesture.state {
                case .began:
                    identity = sticker.imageView.transform
                case .changed, .ended:
                    sticker.imageView.transform = identity.scaledBy(x: gesture.scale, y: gesture.scale)
                case .cancelled:
                    break
                default:
                    break
                }
            }
        }
    }

    /// Allow pinch and rotation to be recognised simultaneously.
    func gestureRecognizer(_ gestureRecognizer: UIGestureRecognizer, shouldRecognizeSimultaneouslyWith otherGestureRecognizer: UIGestureRecognizer) -> Bool {
        return true
    }

    // The original read `#objc`; Swift attributes are spelled with `@`.
    @objc func rotate(_ gesture: UIRotationGestureRecognizer) {
        for sticker in stickers {
            if sticker.isStickerMoving {
                sticker.imageView.transform = sticker.imageView.transform.rotated(by: gesture.rotation)
            }
        }
    }
}
and then the sticker class
import UIKit
import Foundation
// A draggable image sticker: owns a UIImageView installed in the host view.
class Sticker : NSObject, UIGestureRecognizerDelegate{
// NOTE(review): appears unused by this class itself — confirm before removing.
var location = CGPoint(x: 0 , y: 0);
// NOTE(review): appears unused; `isStickerMoving` below is the flag actually read.
var sticker_isMoving = false;
// The view that renders the sticker on screen.
let imageView = UIImageView()
// True while the owning controller is dragging this sticker.
var isStickerMoving : Bool = false;
// Creates the sticker's image view and adds it to `view`.
// NOTE(review): `viewCtrl` is accepted but never stored or used here — confirm it is needed.
init(view : UIView, viewCtrl : ViewController ) {
super.init()
imageView.image = UIImage(named: "BroccolFace_Lisa.png")
imageView.isUserInteractionEnabled = true
imageView.contentMode = UIViewContentMode.scaleAspectFit
// Note: x/y set the frame's ORIGIN, so the sticker's top-left corner (not its
// centre) lands at the view's centre.
imageView.frame = CGRect(x: view.center.x, y: view.center.y, width: 200, height: 200)
view.addSubview(imageView)
}
// Parameterless initializer used for placeholder values (e.g. `lastStickerTouched`).
override init(){
}
}
This is because the imageView.bounds and the touch.location(in: imageView) are in unscaled values. This will overcome the problem:
offset = touch.location(in: imageView)
let offsetPercentage = CGPoint(x: offset.x / imageView.bounds.width, y: offset.y / imageView.bounds.height)
let offsetScaled = CGPoint(x: imageView.frame.width * offsetPercentage.x, y: imageView.frame.height * offsetPercentage.y)
offset.x = (imageView.frame.width / 2) - offsetScaled.x
offset.y = (imageView.frame.height / 2) - offsetScaled.y
Basically it converts the offset into a percentage based on the unscaled values and then converts that into scaled values based on the imageView frame (which is modified by the scale). It then uses that to calculate the offset.
EDIT (NUMBER TWO)
This is more complete way to do it and it should solve any issues that may arise due to scaling or rotation.
Add this structure to hold the details of the dragging for images:
// Records one image view participating in a drag, together with its centre
// position at the moment the drag began.
struct DragInfo {
let imageView: UIImageView
let startPoint: CGPoint
}
Add these instance variables (you can also remove offset if you want):
// Location (in the main view) where the current drag started.
var dragStartPoint: CGPoint = CGPoint.zero
// Image views picked up by the current drag, with their starting centres.
var currentDragItems: [DragInfo] = []
// The single touch driving the drag; nil when no drag is in progress.
var dragTouch: UITouch?
Change touchesBegan to this:
/// Starts a drag: records the initiating touch, where it landed, and every
/// image view under it together with each view's starting centre.
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    // Ignore new touches while a drag is already in progress.
    guard self.dragTouch == nil, let touch = touches.first else { return }
    self.dragTouch = touch
    let startLocation = touch.location(in: self.view)
    self.dragStartPoint = startLocation
    // Pick up every image view that sits under the touch point.
    let hitViews = self.imageList.filter { $0.frame.contains(startLocation) }
    for hitView in hitViews {
        self.currentDragItems.append(DragInfo(imageView: hitView, startPoint: hitView.center))
    }
}
Change touchesMoved to this:
/// Continues a drag: offsets every picked-up image view from its starting
/// centre by however far the initiating touch has travelled.
override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
    // Only the touch that started the drag moves things around.
    guard let dragTouch = self.dragTouch, touches.contains(dragTouch) else { return }
    let location = dragTouch.location(in: self.view)
    // Total displacement since the drag began.
    let dx = location.x - self.dragStartPoint.x
    let dy = location.y - self.dragStartPoint.y
    for dragInfo in self.currentDragItems {
        dragInfo.imageView.center = CGPoint(x: dragInfo.startPoint.x + dx,
                                            y: dragInfo.startPoint.y + dy)
    }
}
Change touchesEnded to this:
/// Ends the drag when the initiating touch lifts, clearing all drag state.
override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
    // Only the touch that started the drag may end it.
    if let activeTouch = self.dragTouch, touches.contains(activeTouch) {
        self.currentDragItems.removeAll()
        self.dragTouch = nil
    }
}
Set the following properties on the gesture recognisers used:
// Let touch events pass through to the view while the recognisers are active;
// otherwise touchesEnded is delayed or never delivered.
scaleGesture.delaysTouchesEnded = false
scaleGesture.cancelsTouchesInView = false
rotationGesture.delaysTouchesEnded = false
rotationGesture.cancelsTouchesInView = false
Some explanation about how it works.
With all the touch events it only considers the first touch because dragging from multiple touches doesn't make much sense (what if two touches were over the same image view and move differently). It records this touch and then only considers that touch for dragging things around.
When touchesBegan is called it checks no touch for dragging exists (indicating a drag in progress) and it finds all image views that are under the touch and for each one it records the details of itself and it start centre position in a DragInfo structure and stores it in the currentDragItems array. It also records the position the touch started in the main view and the touch that initiated it.
When touchesMoved is called it only considers the touch that started the dragging and it calculates the offset from the original position the touch started in the main view and then goes down the list of images involved in the dragging and calculates their new centre based on their original starting position and the offset calculated and sets that as the new centre.
When touchesEnded is called assuming it is the dragging touch that is ended it clears the array of DragInfo objects to ready for the next drag.
You need to set the delaysTouchesEnded and cancelsTouchesInView properties on all gesture recognisers so that all touches are passed through to the view otherwise the touchesEnded methods in particular are not called.
Doing the calculations like this removes the problems of scale and rotation as you are just concerned with offsets from initial positions. It also works if multiple image views are dragged at the same time as their details are kept separately.
Now there are some things to be aware of:
You will need to put in all the other code you app required as this is just a basic example to show the idea.
This assumes that you only want to drag image views that you pick up at the start. If you want to collect image views as you drag around you would need to develop a much more complicated system.
As I stated only one drag operation can be in progress at a time and it takes the first touch registered as this source touch. This source touch is then used to filter out any other touches that may happen. This is done to keep things simple and otherwise you would have to account for all kinds of strange situations like if multiple touches were on the same image view.
I hope this all makes sense and you can adapt it to solve your problem.
Here is an extension that I use to pan, pinch and rotate an image with UIPanGestureRecognizer, UIPinchGestureRecognizer and UIRotationGestureRecognizer
// MARK: - Gesture handling (pan / pinch / rotate the gesture's view)
extension ViewController: UIGestureRecognizerDelegate {

    /// Allow pan, pinch and rotation to be recognised at the same time.
    func gestureRecognizer(_ gestureRecognizer: UIGestureRecognizer, shouldRecognizeSimultaneouslyWith otherGestureRecognizer: UIGestureRecognizer) -> Bool {
        return true
    }

    /// Moves the gesture's view by the pan translation, clamping its centre
    /// to `myImageView`'s bounds.
    func panGesture(gesture: UIPanGestureRecognizer) {
        switch gesture.state {
        case .ended: fallthrough
        case .changed:
            let translation = gesture.translation(in: gesture.view)
            if let view = gesture.view {
                var finalPoint = CGPoint(x: view.center.x + translation.x, y: view.center.y + translation.y)
                // Keep the centre inside the image view's bounds.
                finalPoint.x = min(max(finalPoint.x, 0), self.myImageView.bounds.size.width)
                finalPoint.y = min(max(finalPoint.y, 0), self.myImageView.bounds.size.height)
                view.center = finalPoint
                // Reset so each callback delivers an incremental translation.
                gesture.setTranslation(CGPoint.zero, in: gesture.view)
            }
        default: break
        }
    }

    /// Scales the gesture's view incrementally with a pinch.
    func pinchGesture(gesture: UIPinchGestureRecognizer) {
        switch gesture.state {
        case .changed:
            // Bind the view once rather than mixing optional chaining with a
            // force-unwrap (the original wrote
            // `gesture.view?.transform = gesture.view!.transform...`).
            guard let view = gesture.view else { return }
            view.transform = view.transform.scaledBy(x: gesture.scale, y: gesture.scale)
            // Reset so the next callback reports a delta scale.
            gesture.scale = 1
        default: break
        }
    }

    /// Rotates the gesture's view incrementally with a rotation gesture.
    func rotateGesture(gesture: UIRotationGestureRecognizer) {
        switch gesture.state {
        case .changed:
            guard let view = gesture.view else { return }
            view.transform = view.transform.rotated(by: gesture.rotation)
            // Reset so the next callback reports a delta rotation.
            gesture.rotation = 0
        default: break
        }
    }
}
setting the UIGestureRecognizerDelegate will help you do the three of gestures at the same time.
I'm quite new to iOS level development, currently trying to create my first SceneKit game - however, i'm not sure what approach should i use to create the "non-3D" pages like menu, level select page, settings etc.
I currently have one GameViewController, and i use empty SCNScene with overlaySKScene to display my SpriteKit menu. Upon clicking the play button in menu i transition to another empty SCNScene where i would like to display list of levels. Should i again use ovelaySKSCene, is this the correct approach?
I also noticed that even if i set scnView.overlaySKScene = nil before transitioning to levelScene , if i tap in the spot of the previous placement of pla button, the nodePressed in overlayScene still contains the playButton node..
Forgive me for the rookie questions, i haven't been able to find a good example of this type of scenarios so far to learn from.
My code so far:
import UIKit
import QuartzCore
import SceneKit
import SpriteKit
/// Hosts the SceneKit game and drives its SpriteKit overlay menus.
class GameViewController: UIViewController {

    /// The hosting SCNView.
    /// NOTE(review): this computed property re-applies the background colour on
    /// every access; consider configuring it once in viewDidLoad instead.
    var scnView: SCNView {
        let scnView = view as! SCNView
        scnView.backgroundColor = UIColor.init(displayP3Red: 0.98, green: 0.64, blue: 0.04, alpha: 1)
        return scnView
    }

    var menuScene: SCNScene!
    var levelsScene: SCNScene!
    var ground: SCNNode!
    var light: SCNNode!
    var scnScene: SCNScene!
    var cameraNode: SCNNode!
    var brickNode: SCNNode!
    var selectedBrickNode: SCNNode!
    var bricksArray: [SCNNode] = []
    /// SpriteKit overlays for the main menu and the level-select page.
    var controls: SKScene!
    var levels: SKScene!

    override func viewDidLoad() {
        super.viewDidLoad()
        setupView()
        showMenu()
        setupCamera()
    }

    override var shouldAutorotate: Bool {
        return true
    }

    override var prefersStatusBarHidden: Bool {
        return true
    }

    func setupView() {
        scnView.showsStatistics = true
        scnView.allowsCameraControl = false
        scnView.autoenablesDefaultLighting = true
    }

    /// Shows the main menu: an empty SCNScene with a SpriteKit overlay.
    func showMenu() {
        menuScene = SCNScene()
        scnView.scene = menuScene
        self.controls = Controls(size: self.view.bounds.size)
        self.controls.isUserInteractionEnabled = false
        scnView.overlaySKScene = self.controls
    }

    /// Transitions to the level-select page with its own overlay scene.
    func showLevels() {
        levelsScene = SCNScene()
        self.levels = Levels(size: self.view.bounds.size)
        self.levels.isUserInteractionEnabled = false
        scnView.overlaySKScene = self.levels
        let transition = SKTransition.push(with: .left, duration: 1.0)
        self.scnView.present(levelsScene, with: transition, incomingPointOfView: nil, completionHandler: nil)
    }

    func setupCamera() {
        cameraNode = SCNNode()
        let camera = SCNCamera()
        camera.zFar = 10000
        cameraNode.camera = camera
        cameraNode.position = SCNVector3(x: 0, y: 10, z: 20)
        // Keep the camera aimed at the ground; gimbal locking avoids roll.
        let constraint = SCNLookAtConstraint(target: ground)
        constraint.isGimbalLockEnabled = true
        cameraNode.constraints = [constraint]
        let ambientLight = SCNLight()
        ambientLight.color = UIColor.darkGray
        ambientLight.type = SCNLight.LightType.ambient
        cameraNode.light = ambientLight
        menuScene.rootNode.addChildNode(cameraNode)
    }

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
        print("touch began ")
        // Hit-test against the CURRENT overlay scene. The original asked
        // `self.controls` for the node, so the play button was still found even
        // after `overlaySKScene` had been set to nil.
        guard let overlay = scnView.overlaySKScene else { return }
        for touch: UITouch in touches {
            let location = touch.location(in: overlay)
            let nodePressed = overlay.atPoint(location)
            print(nodePressed.name ?? "unnamed node")
            if (nodePressed.name == "playButton") {
                scnView.overlaySKScene = nil
                showLevels()
            }
        }
    }
}
“the nodePressed in overlayScene still contains the playButton node...”
You are setting the pointer to the overlay scene to nil, and in `touchesBegan` you get the location from the overlay scene, but then you use `atPoint` on the SKScene stored in the `controls` property, so it still finds the play button. So replace `self.controls` with `scnView.overlaySKScene` in `touchesBegan`.
That said, I don’t know what in general is considered best practice by others but personally, once I need more than just a couple of buttons, I stop using the spritekit overlay scene and instead use regular UIKit elements to build menus on top of the SCNView.
I have 2 different textures for my character that overlap/are displayed too fast
while moving the character. How can I set a duration for the animation, so the textures always switch at the same speed while moving the character?
This is my code:
// Swift 2-era SpriteKit code: on every touch movement, sets the player's
// texture to the current frame and moves the player toward the touch at a
// random speed between minDuration and maxDuration.
override func touchesMoved(touches: NSSet, withEvent event: UIEvent) {
for touch: AnyObject in touches {
let location = touch.locationInNode(self)
// Texture for the current animation frame, chosen by the counter `i`.
let animatePlayerStart = SKAction.setTexture(SKTexture(imageNamed: "Player\(i).png"))
// Determine speed for character movement
var minDuration:CGFloat = 0.7;
var maxDuration:CGFloat = 1.8;
var rangeDuration:CGFloat = maxDuration - minDuration;
// Random duration in [minDuration, minDuration + rangeDuration)
// (`%` on CGFloat is Swift 2's floating-point modulo).
var actualDuration:NSTimeInterval = NSTimeInterval((CGFloat(arc4random())%rangeDuration) + minDuration)
let move = SKAction.moveTo(location, duration:actualDuration)
player.runAction(SKAction.sequence([animatePlayerStart, move]))
// i determines which texture is going to be displayed (alternates between 1 and 2)
if(self.i == 2) {
self.i = 1
}
else{
self.i++
}
}
}
You are changing texture in touchesMoved which is called fast, thus the effect you are currently getting. To change textures after pre-defined period of time you can use this method:
+ animateWithTextures:timePerFrame:
import SpriteKit
// Swift 2-era scene demonstrating a fixed-rate two-frame texture animation via
// animateWithTextures(_:timePerFrame:), decoupled from touch frequency.
class GameScene: SKScene {
let hero = SKSpriteNode(imageNamed: "heroState_A")
let textureA = SKTexture(imageNamed: "heroState_A")
let textureB = SKTexture(imageNamed: "heroState_B")
override func didMoveToView(view: SKView) {
/* Setup your scene here */
//Because hero is already initialized with textureA, start from textureB.
//timePerFrame fixes the switch rate at 0.5 s per texture.
let animation = SKAction.animateWithTextures([textureB,textureA], timePerFrame:0.5)
hero.position = CGPoint(x: CGRectGetMidX(frame), y: CGRectGetMidY(frame))
addChild(hero)
//Start animation
hero.runAction(SKAction.repeatActionForever(animation),withKey:"heroAnimation")
//Removing heroAnimation
//You can stop this animation by hero.removeAllActions, but if you run animation with key, you can remove just that particular action, which gives you more control.
let stop = SKAction.runBlock({
if(self.hero.actionForKey("heroAnimation") != nil){
self.hero.removeActionForKey("heroAnimation")
}
})
//Just an example: in a real app you will do this at certain events (e.g. when the player stops moving).
runAction(SKAction.sequence([SKAction.waitForDuration(5),stop]))
}
}