iOS 11 ARKit: Drag Object in 3D View

I have a node object in a 3D view and I need to drag that object.
So far I have tried the approach from here: Placing, Dragging and Removing SCNNodes in ARKit,
and converted it to Swift:
@objc func handleDragGesture(_ gestureRecognizer: UIGestureRecognizer) {
    let tapPoint = gestureRecognizer.location(in: self.sceneView)
    switch gestureRecognizer.state {
    case .began:
        print("Object began to move")
        let hitResults = self.sceneView.hitTest(tapPoint, options: nil)
        if hitResults.isEmpty { return }
        let hitResult = hitResults.first
        if let node = hitResult?.node.parent?.parent?.parent {
            self.photoNode = node
        }
    case .changed:
        print("Moving object position changed")
        if let _ = self.photoNode {
            let hitResults = self.sceneView.hitTest(tapPoint, types: .featurePoint)
            let hitResult = hitResults.last
            if let transform = hitResult?.worldTransform {
                let matrix = SCNMatrix4FromMat4(transform)
                let vector = SCNVector3Make(matrix.m41, matrix.m42, matrix.m43)
                self.photoNode?.position = vector
            }
        }
    case .ended:
        print("Done moving object")
    default:
        break
    }
}
but it is not working properly. What is the correct way to do this?

You can do this using a UIPanGestureRecognizer. See the basic Swift Playground code below for handling an SCNNode.
import UIKit
import ARKit
import SceneKit
import PlaygroundSupport

public var textNode: SCNNode?

// Main ARKit ViewController
class ViewController: UIViewController, ARSCNViewDelegate, ARSessionDelegate {

    var textNode: SCNNode!
    var counter = 0

    @IBOutlet var sceneView: ARSCNView!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Set the view's delegate
        sceneView.delegate = self
        // Show statistics such as FPS and timing information
        sceneView.showsStatistics = true
        // Create a new scene
        sceneView.scene = SCNScene()
        // Add lighting
        sceneView.autoenablesDefaultLighting = true

        let text = SCNText(string: "Drag Me with Pan Gesture!", extrusionDepth: 1)
        // Create material
        let material = SCNMaterial()
        material.diffuse.contents = UIColor.green
        text.materials = [material]

        // Create node object
        textNode = SCNNode()
        textNode.name = "textNode"
        textNode.scale = SCNVector3(x: 0.004, y: 0.004, z: 0.004)
        textNode.geometry = text
        textNode.position = SCNVector3(x: 0, y: 0.02, z: -1)

        // Add the new node to the root node
        self.sceneView.scene.rootNode.addChildNode(textNode)

        // Add pan gesture for dragging the textNode about
        sceneView.addGestureRecognizer(UIPanGestureRecognizer(target: self, action: #selector(panGesture(_:))))
    }

    override func loadView() {
        sceneView = ARSCNView(frame: CGRect(x: 0.0, y: 0.0, width: 500.0, height: 600.0))
        // Set the view's delegate
        sceneView.delegate = self
        let config = ARWorldTrackingConfiguration()
        config.planeDetection = .horizontal
        // Now we'll get messages when planes are detected...
        sceneView.session.delegate = self
        self.view = sceneView
        sceneView.session.run(config)
    }

    @objc func panGesture(_ gesture: UIPanGestureRecognizer) {
        gesture.minimumNumberOfTouches = 1
        let results = self.sceneView.hitTest(gesture.location(in: gesture.view), types: ARHitTestResult.ResultType.featurePoint)
        guard let result: ARHitTestResult = results.first else {
            return
        }
        let position = SCNVector3Make(result.worldTransform.columns.3.x, result.worldTransform.columns.3.y, result.worldTransform.columns.3.z)
        textNode.position = position
    }
}

PlaygroundPage.current.liveView = ViewController()
PlaygroundPage.current.needsIndefiniteExecution = true
EDIT:
The above drag function only worked if you had one object in the view, so it was not really necessary to hit the node to start dragging; it just dragged from wherever you touched the screen. If you have multiple objects in the view and you want to drag nodes independently, you can change the panGesture function to the following, which detects the tapped node first:
// Drags nodes independently
@objc func panGesture(_ gesture: UIPanGestureRecognizer) {
    gesture.minimumNumberOfTouches = 1
    let results = self.sceneView.hitTest(gesture.location(in: gesture.view), types: ARHitTestResult.ResultType.featurePoint)
    guard let result: ARHitTestResult = results.first else {
        return
    }
    let hits = self.sceneView.hitTest(gesture.location(in: gesture.view), options: nil)
    if let tappedNode = hits.first?.node {
        let position = SCNVector3Make(result.worldTransform.columns.3.x, result.worldTransform.columns.3.y, result.worldTransform.columns.3.z)
        tappedNode.position = position
    }
}

REF: https://stackoverflow.com/a/48220751/5589073
This code works for me
private func drag(sender: UIPanGestureRecognizer) {
    switch sender.state {
    case .began:
        let location = sender.location(in: self.sceneView)
        guard let hitNodeResult = self.sceneView.hitTest(location, options: nil).first else { return }
        self.PCoordx = hitNodeResult.worldCoordinates.x
        self.PCoordy = hitNodeResult.worldCoordinates.y
        self.PCoordz = hitNodeResult.worldCoordinates.z
    case .changed:
        // When you pan on the screen with your finger, the hit test gives the new
        // coordinates of the touched location in the sceneView.
        // coord - PCoord gives the distance panned in the sceneView.
        let hitNode = sceneView.hitTest(sender.location(in: sceneView), options: nil)
        if let coordx = hitNode.first?.worldCoordinates.x,
           let coordy = hitNode.first?.worldCoordinates.y,
           let coordz = hitNode.first?.worldCoordinates.z {
            let action = SCNAction.moveBy(x: CGFloat(coordx - self.PCoordx),
                                          y: CGFloat(coordy - self.PCoordy),
                                          z: CGFloat(coordz - self.PCoordz),
                                          duration: 0.0)
            self.photoNode.runAction(action)
            self.PCoordx = coordx
            self.PCoordy = coordy
            self.PCoordz = coordz
        }
        sender.setTranslation(CGPoint.zero, in: self.sceneView)
    case .ended:
        self.PCoordx = 0.0
        self.PCoordy = 0.0
        self.PCoordz = 0.0
    default:
        break
    }
}
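For completeness, the snippet above assumes the view controller already declares the dragged node and the previous hit-test world coordinates as stored properties, roughly like this (the names come from the code above, the types are assumptions):
// Assumed stored properties on the view controller (not shown in the original snippet):
var photoNode: SCNNode!    // the node being dragged
var PCoordx: Float = 0.0   // world coordinates of the previous hit-test result
var PCoordy: Float = 0.0
var PCoordz: Float = 0.0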

Related

How can I dynamically track SCNNodes in a scene for removal?

Obligatory disclaimer: this is my first time writing an app; the relevant code is below. It runs mostly as I want it to, but does not achieve my goal of dynamic object tracking.
I'm using Swift and SceneKit to build a simple puzzle game, similar to a 3D version of Candy Crush.
I have a class Cube that extends SCNNode. On initialization, this class randomly draws a 5x5x5 cube of SCNBoxes, with each box being red, green, or blue (all 6 sides of a box are one color).
The goal of the game is to get the highest score by removing "chains" of SCNBoxes of like colors. When a chain is removed, the cubes should respond to gravity and drop to fill in the voids created by the removed chain. This is where I need to dynamically track position: as the cubes fall into the gaps, their neighbors change.
My approach: build a struct CubeDetails that has properties var color: String and var position: SCNVector3. Next, build a dictionary masterCubeDict = [SCNNode: CubeDetails] that holds all of the cubes of one color (the color is provided by a hit-test result).
Every time a user taps a cube, grab its color, refresh the masterCubeDict, and then use math on the SCNVector3 position to determine which cubes are neighbors.
I think my algorithm for finding 'cube neighbors' using math on the SCNVector3 is where I'm off. There must be a better way for SceneKit nodes to identify/find each other, right?
Also, I would like the physics of the cubes to let them fall with no bounce or sliding at all. They should only move straight up/down, and collisions should never happen. I thought I implemented that properly through the friction, restitution, and mass of the cubes, but I'm not getting the outcome I want.
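(For reference, a minimal sketch of locking a cube to vertical movement only, using SCNPhysicsBody's velocityFactor and angularVelocityFactor; whether this fully removes the sliding in this scene is untested:)
// Sketch: allow translation along y only, no rotation, no bounce.
cube.physicsBody = SCNPhysicsBody(type: .dynamic, shape: nil)
cube.physicsBody?.velocityFactor = SCNVector3(0, 1, 0)          // movement on the y-axis only
cube.physicsBody?.angularVelocityFactor = SCNVector3(0, 0, 0)   // no spinning
cube.physicsBody?.restitution = 0.0                             // no bounce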
class Cube
import SceneKit
class Cube : SCNNode {
let cubeWidth:Float = 0.95
let spaceBetweenCubes:Float = 0.05
var cubecolor:UIColor = UIColor.black
var masterCubeDict: [SCNNode: CubeDetails] = [:]
struct CubeDetails {
var color:String
var position:SCNVector3
}
override init() {
super.init()
let cubeOffsetDistance = self.cubeOffsetDistance()
var cubeColorString: String = ""
var xPos:Float = -cubeOffsetDistance
var yPos:Float = -cubeOffsetDistance
var zPos:Float = -cubeOffsetDistance
let xFloor:Float = -1.5
let yFloor:Float = -1.5
let zFloor:Float = -1.5
let floorGeo = SCNBox(width: 20, height: 0, length: 20, chamferRadius: 0)
let floor = SCNNode(geometry: floorGeo)
floor.position = SCNVector3(x: xFloor, y: yFloor, z: zFloor)
floor.name = "floor"
floor.opacity = 0.0
floor.physicsBody = SCNPhysicsBody(type: .kinematic, shape: nil)
floor.physicsBody?.collisionBitMask = 1
floor.physicsBody?.friction = 1.0
self.addChildNode(floor)
for _ in 0..<5 {
for _ in 0..<5 {
for _ in 0..<5 {
let cubeGeometry = SCNBox(width: CGFloat(cubeWidth), height: CGFloat(cubeWidth), length: CGFloat(cubeWidth), chamferRadius: 0)
let material = SCNMaterial()
material.diffuse.contents = randomColor()
//unwrap material (type any) and cast to uicolor for switch
if let unwrapColor: UIColor = material.diffuse.contents as? UIColor {
switch unwrapColor {
case UIColor.red:
cubeColorString = "red"
case UIColor.green:
cubeColorString = "green"
case UIColor.blue:
cubeColorString = "blue"
default:
cubeColorString = "black"
}
} else { print("Error unwrapping color") }
cubeGeometry.materials = [material, material, material, material, material, material]
let cube = SCNNode(geometry: cubeGeometry)
cube.name = cubeColorString
cube.physicsBody = SCNPhysicsBody(type: .dynamic, shape: nil)
cube.physicsBody?.restitution = 0.0
cube.physicsBody?.isAffectedByGravity = true
cube.physicsBody?.mass = 25.0
cube.physicsBody?.friction = 1.0
cube.physicsBody?.collisionBitMask = 1
cube.position = SCNVector3(x: xPos, y: yPos, z: zPos)
let details = CubeDetails(color: cubeColorString, position: cube.position)
//add cube details to the master dict
masterCubeDict[cube] = details
//print(masterCubeDict)
xPos += cubeWidth + spaceBetweenCubes
self.addChildNode(cube)
}
xPos = -cubeOffsetDistance
yPos += cubeWidth + spaceBetweenCubes
}
xPos = -cubeOffsetDistance
yPos = -cubeOffsetDistance
zPos += cubeWidth + spaceBetweenCubes
}
}
private func cubeOffsetDistance()->Float {
return (cubeWidth + spaceBetweenCubes) / 2
}
private func randomColor() -> UIColor{
var tmpColor: UIColor
let num = Int.random(in:0...2)
switch num {
case 0:
tmpColor = UIColor.red
case 1:
tmpColor = UIColor.blue
case 2:
tmpColor = UIColor.green
default:
tmpColor = UIColor.black
}
return tmpColor
}
required init?(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
GameViewController
import UIKit
import QuartzCore
import SceneKit
var myMasterCubeDict: [SCNNode: Cube.CubeDetails] = [:]
class GameViewController: UIViewController {
let gameCube = Cube()
override func viewDidLoad() {
super.viewDidLoad()
// create a new scene
// let scene = SCNScene(named: "art.scnassets/ship.scn")!
let scene = SCNScene()
// create and add a camera to the scene
let cameraNode = SCNNode()
cameraNode.camera = SCNCamera()
scene.rootNode.addChildNode(cameraNode)
// place the camera
cameraNode.position = SCNVector3(x: 2, y: 0, z: 20)
// create and add a light to the scene
let lightNode = SCNNode()
lightNode.light = SCNLight()
lightNode.light!.type = .omni
lightNode.position = SCNVector3(x: 0, y: 10, z: 10)
scene.rootNode.addChildNode(lightNode)
// create and add an ambient light to the scene
let ambientLightNode = SCNNode()
ambientLightNode.light = SCNLight()
ambientLightNode.light!.type = .ambient
ambientLightNode.light!.color = UIColor.darkGray
scene.rootNode.addChildNode(ambientLightNode)
// init cube
myMasterCubeDict = gameCube.masterCubeDict
scene.rootNode.addChildNode(gameCube)
// retrieve the SCNView
let scnView = self.view as! SCNView
// set the scene to the view
scnView.scene = scene
// allows the user to manipulate the camera
scnView.allowsCameraControl = true
// show statistics such as fps and timing information
scnView.showsStatistics = true
// configure the view
scnView.backgroundColor = UIColor.black
// add a tap gesture recognizer
let tapGesture = UITapGestureRecognizer(target: self, action: #selector(handleTap(_:)))
scnView.addGestureRecognizer(tapGesture)
}
@objc
func handleTap(_ gestureRecognize: UIGestureRecognizer) {
// retrieve the SCNView
let scnView = self.view as! SCNView
// check what nodes are tapped
let p = gestureRecognize.location(in: scnView)
let hitResults = scnView.hitTest(p, options: [:])
// check that we clicked on at least one object
if hitResults.count > 0 {
// retrieved the first clicked object
let result = hitResults[0]
//get dict of same-color node
var dictOfSameColor = findAndReturnChain(boi: result.node)
// print(dictOfSameColor)
var finalNodes: [SCNNode] = [result.node]
var resFlag = 1
repeat {
var xSame: Bool = false
var ySame: Bool = false
var zSame: Bool = false
resFlag = 0
for node in finalNodes {
// var nodeX = node.position.x
for (key, value) in dictOfSameColor {
if(abs(node.position.x - value.position.x) < 0.7) {
xSame = true
}
if(abs(node.position.y - value.position.y) < 0.7) {
ySame = true
}
if(abs(node.position.z - value.position.z) < 0.7) {
zSame = true
}
//print("X-val: \(xDif) \nY-val: \(yDif) \nZ-val: \(zDif) \nColor: \(key.name) \n\n\n\n")
if (xSame && ySame ) {
if !(zSame) {
if (abs((node.position.z-value.position.z)) < 2) {
finalNodes.append(key)
dictOfSameColor.removeValue(forKey: key)
resFlag = 1
}
}
}
if (xSame && zSame) {
if !(ySame) {
if (abs((node.position.y-value.position.y)) < 2) {
finalNodes.append(key)
dictOfSameColor.removeValue(forKey: key)
resFlag = 1
}
}
}
if (ySame && zSame) {
if !(xSame) {
if (abs((node.position.x-value.position.x)) < 2) {
finalNodes.append(key)
dictOfSameColor.removeValue(forKey: key)
resFlag = 1
}
}
}
xSame = false
ySame = false
zSame = false
}
}
//print(finalNodes)
} while resFlag == 1
//print(finalNodes)
for node in finalNodes {
if node.name != "floor" {
node.removeFromParentNode()
}
}
//IMPLEMENT: Reset dicts to current state of the cube
myMasterCubeDict = updateMasterCubeDict(cube: gameCube)
dictOfSameColor.removeAll()
}
}
func findAndReturnChain(boi: SCNNode) -> [SCNNode:Cube.CubeDetails] {
var ret: [SCNNode:Cube.CubeDetails] = [:]
//find cubes with the same color
for (key, value) in myMasterCubeDict {
if value.color == boi.name {
ret[key] = value
}
}
return ret
}
func updateMasterCubeDict(cube: Cube) -> [SCNNode:Cube.CubeDetails] {
myMasterCubeDict.removeAll()
var newNode: SCNNode = SCNNode()
var newDetails = Cube.CubeDetails(color: "", position: SCNVector3Zero)
cube.enumerateChildNodes { (cube, stop) in
newNode = cube
if let newName = cube.name {
newDetails.color = newName
}
newDetails.position = cube.position
myMasterCubeDict[newNode] = newDetails
}
return myMasterCubeDict
}
override var shouldAutorotate: Bool {
return true
}
override var prefersStatusBarHidden: Bool {
return true
}
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
if UIDevice.current.userInterfaceIdiom == .phone {
return .allButUpsideDown
} else {
return .all
}
}
}
I did a game somewhat like this. You could probably get the math to work, but the way I did it was to map out each node and keep an array containing its adjacent nodes. Doing it this way, I'm sure that when I remove a node and loop through its array of adjacent nodes, I get the right ones. A rough sketch of that idea is shown below.
I don't subclass SCNNode - some do, but I create the class I want that contains info about my node and add the node to SceneKit there; that separates the actual node from other work I may want to do with the class. Some nodes have a lot of detail that I may want to manage separately (multiple particle systems, movements, etc.). Then I just keep my classes of nodes in an array, and each class has direct access to its own node.
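Here is that sketch. CubeRef and the chain(from:) helper are illustrative names, not code from my actual game; the point is just that each wrapper holds its node plus an explicit neighbours list, so finding a same-coloured chain is a simple graph walk rather than position math:
import SceneKit

// Illustrative sketch: track each cube and its neighbours explicitly instead of
// deriving neighbours from positions every time.
class CubeRef {
    let node: SCNNode
    let color: String
    var neighbours: [CubeRef] = []   // filled in once, when the grid is built

    init(node: SCNNode, color: String) {
        self.node = node
        self.color = color
    }
}

// When a cube is tapped, its same-coloured chain is found by walking neighbours:
func chain(from start: CubeRef) -> [CubeRef] {
    var visited: Set<ObjectIdentifier> = [ObjectIdentifier(start)]
    var result = [start]
    var queue = [start]
    while let current = queue.popLast() {
        for neighbour in current.neighbours where neighbour.color == start.color {
            if visited.insert(ObjectIdentifier(neighbour)).inserted {
                result.append(neighbour)
                queue.append(neighbour)
            }
        }
    }
    return result
}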
Sorry - I don't know about the bounce; there are a lot of choices with the physics engine.

Move camera to tapped SCNNode

I'm using SceneKit and Swift to try to move the camera so it is 'focused' on the selected node. I have the defaultCameraController enabled, and I was trying to adjust the camera's position via dolly, rotate, and translateInCameraSpaceBy, but there was no animated transition; it just jumped to the new position.
Is there any way for the camera to glide into position, like how Google Maps slides and then zooms over to a searched location?
Any help would be greatly appreciated :)
Here's my code:
import UIKit
import SceneKit
class ViewController: UIViewController {
var gameView: SCNView!
var scene: SCNScene!
var cameraNode: SCNNode!
override func viewDidLoad() {
super.viewDidLoad()
// Scene
scene = SCNScene()
// Camera
cameraNode = SCNNode()
cameraNode.camera = SCNCamera()
cameraNode.position = SCNVector3(0, 0, 10)
scene.rootNode.addChildNode(cameraNode)
// Light
/*
let lightNode = SCNNode()
lightNode.light = SCNLight()
lightNode.light?.type = .omni
lightNode.position = SCNVector3(0, 10, 2)
scene.rootNode.addChildNode(lightNode)
*/
// Stars
//let stars = SCNParticleSystem(named: "starsParticles.scnp", inDirectory: nil)!
//scene.rootNode.addParticleSystem(stars)
// Earth
let earthNode = itemPlate()
earthNode.position = SCNVector3(0, 0, 0)
scene.rootNode.addChildNode(earthNode)
// Create orbiting moonOne
let moonNodeOne = itemPlate()
moonNodeOne.position = SCNVector3(3, 0, 0)
earthNode.addChildNode(moonNodeOne)
// Create orbiting moonTwo
let moonNodeTwo = itemPlate()
moonNodeTwo.position = SCNVector3(5, 3, 2)
earthNode.addChildNode(moonNodeTwo)
// Create orbiting moonThree
let moonNodeThree = itemPlate()
moonNodeThree.position = SCNVector3(-4, -3, 5)
earthNode.addChildNode(moonNodeThree)
// Scene formation
gameView = self.view as! SCNView
gameView.scene = scene
gameView.showsStatistics = true
gameView.allowsCameraControl = true
gameView.autoenablesDefaultLighting = true
gameView.defaultCameraController.interactionMode = .fly
gameView.defaultCameraController.inertiaEnabled = true
gameView.defaultCameraController.maximumVerticalAngle = 89
gameView.defaultCameraController.minimumVerticalAngle = -89
scene.background.contents = UIImage(named: "orangeBg.jpg")
}
override var prefersStatusBarHidden: Bool {
return true
}
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
let touch = touches.first!
let location = touch.location(in: gameView)
let hitList = gameView.hitTest(location, options: nil)
if let hitObject = hitList.first {
let node = hitObject.node
// Update camera position
//gameView.defaultCameraController.translateInCameraSpaceBy(x: node.position.x, y: node.position.y, z: node.position.z + 5)
let onScreenPoint:CGPoint = CGPoint(x: 1.0, y: 1.0)
let viewport:CGSize = CGSize(width: 50, height: 50)
gameView.defaultCameraController.dolly(by: 1.0, onScreenPoint: onScreenPoint, viewport: viewport)
//let newCameraPosition = SCNVector3Make(node.position.x, node.position.y, node.position.z + 10)
print("NODE_HIT_OBJECT_COORDS: \(node.position.x), \(node.position.y) \(node.position.y)")
//let moveToAction = SCNAction.move(by: newCameraPosition, duration: 1.0)
}
}
}
You can implement something like this in your code (sorry, I used a macOS project instead of an iOS one, but it's almost the same):
func handleClick(_ gestureRecognizer: NSGestureRecognizer) {
    let scnView = self.view as! SCNView
    let p = gestureRecognizer.location(in: scnView)
    let hitResults = scnView.hitTest(p, options: [:])
    if hitResults.count > 0 {
        let result = hitResults[0]
        let nodePosition = result.node.position.z
        var matrix = matrix_identity_float4x4
        SCNTransaction.begin()
        SCNTransaction.animationDuration = 1.5 // duration in seconds
        matrix.columns.3.z = Float(nodePosition + 5.0)
        scnView.pointOfView?.position.z = CGFloat(matrix.columns.3.z)
        SCNTransaction.commit()
    }
}
Or, as a second logical option, you can use SceneKit's constraints:
func handleClick(_ gestureRecognizer: NSGestureRecognizer) {
    let scnView = self.view as! SCNView
    let p = gestureRecognizer.location(in: scnView)
    let hitResults = scnView.hitTest(p, options: [:])
    if hitResults.count > 0 {
        let result = hitResults[0]
        let tappedNode = result.node
        let constraint1 = SCNLookAtConstraint(target: tappedNode)
        let constraint2 = SCNDistanceConstraint(target: tappedNode)
        constraint2.minimumDistance = 5
        constraint2.maximumDistance = 9
        SCNTransaction.begin()
        SCNTransaction.animationDuration = 1.5
        scnView.pointOfView?.constraints = [constraint2, constraint1]
        SCNTransaction.commit()
    }
}
P.S. These two approaches aren't out-of-the-box solutions, but rather hints on how to implement what you want.

How to detect touch on SKShapeNode in Video Sphere?

I use this simple code for playing a 360 video. I need to add a point to the video - there is no problem with that. But how do I track taps on it? In this example, the point is added in the viewDidLoad method.
I tried touchesBegan, but this method does not work. I really hope for your help.
class ViewControllerTwo: UIViewController {
let motionManager = CMMotionManager()
let cameraNode = SCNNode()
var sphereNode: SCNNode!
@IBOutlet weak var sceneView: SCNView!
func createSphereNode(material: AnyObject?) -> SCNNode {
let sphere = SCNSphere(radius: 100.0)
sphere.segmentCount = 96
sphere.firstMaterial!.isDoubleSided = true
sphere.firstMaterial!.diffuse.contents = material
let sphereNode = SCNNode(geometry: sphere)
sphereNode.position = SCNVector3Make(0,0,0)
return sphereNode
}
func configureScene(node sphereNode: SCNNode) {
let scene = SCNScene()
sceneView.scene = scene
sceneView.showsStatistics = true
sceneView.allowsCameraControl = true
cameraNode.camera = SCNCamera()
cameraNode.position = SCNVector3Make(0, 0, 0)
scene.rootNode.addChildNode(sphereNode)
scene.rootNode.addChildNode(cameraNode)
}
func startCameraTracking() {
motionManager.deviceMotionUpdateInterval = 1.0 / 60.0
motionManager.startDeviceMotionUpdates(to: .main) { [weak self] (data, error) in
guard let data = data else { return }
let attitude: CMAttitude = data.attitude
self?.cameraNode.eulerAngles = SCNVector3Make(Float(attitude.roll + Double.pi/2.0), -Float(attitude.yaw), -Float(attitude.pitch))
}
}
override func viewDidLoad() {
super.viewDidLoad()
let url = URL(fileURLWithPath: Bundle.main.path(forResource: "google-help-vr", ofType: "mp4")!)
let player = AVPlayer(url: url )
let videoNode = SKVideoNode(avPlayer: player)
let size = CGSize(width: 1025, height: 512)
videoNode.size = size
videoNode.position = CGPoint(x: size.width / 2, y: size.height / 2)
let spriteScene = SKScene(size: size)
spriteScene.addChild(videoNode)
// How to detect when tapped?
let circ = SKShapeNode(rectOf: CGSize(width: 50, height: 50), cornerRadius: 25)
circ.fillColor = .red
circ.isUserInteractionEnabled = true
videoNode.addChild(circ)
sphereNode = createSphereNode(material:spriteScene)
configureScene(node: sphereNode)
guard motionManager.isDeviceMotionAvailable else {
return
}
startCameraTracking()
player.play()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
sceneView.play(self)
}
}
I made a subclass of SKShapeNode in order to track taps through the touchesBegan method, but without success.
You can use a UITapGestureRecognizer to get the 2D point, then use SCNSceneRenderer's hitTest(_:options:) to get all of the possible intersections along that ray. Note that the method is on the SCNSceneRenderer protocol, which SCNView conforms to, so you may have missed it in the SCNView documentation.
@IBAction func tap(_ recognizer: UITapGestureRecognizer) {
    let location = recognizer.location(in: sceneView)
    if let firstResult = sceneView.hitTest(location, options: nil).first {
        // Do stuff with firstResult here
    }
}
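To decide whether that hit actually landed on the SKShapeNode inside the sphere's SKScene material, one approach (a sketch, not tested against this exact setup) is to read the hit's texture coordinates and map them into the SKScene. This assumes spriteScene and circ are promoted from locals in viewDidLoad to stored properties, and depending on how the material is mapped you may need to flip the y coordinate:
@IBAction func tap(_ recognizer: UITapGestureRecognizer) {
    let location = recognizer.location(in: sceneView)
    guard let hit = sceneView.hitTest(location, options: nil).first else { return }
    // Normalized (0...1) texture coordinates of the touch on the sphere's material
    let texCoords = hit.textureCoordinates(withMappingChannel: 0)
    // Map into the SKScene backing the sphere's diffuse material
    // (spriteScene and circ would need to be stored properties rather than locals)
    let scenePoint = CGPoint(x: texCoords.x * spriteScene.size.width,
                             y: texCoords.y * spriteScene.size.height)
    // Ask SpriteKit which nodes sit under that point
    if spriteScene.nodes(at: scenePoint).contains(where: { $0 === circ }) {
        print("Red shape was tapped")
    }
}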

Using Multiple AR Image Anchors to display dynamic 3D Overlay

I'm working on a project wherein we have to detect a certain number of custom QR codes (as ARImageAnchors) and then use the positions of these anchors to dynamically display a 3D overlay. To be exact, we are planning to dynamically display a 3D model of human anatomy over the anchors, which will be placed on a mannequin. For example, if the mannequin we are placing the QR codes on is smaller or bigger than the default size of the 3D model, we would like the model to adapt based on the distances between the images. Below is the sample code I'm thinking of working from (source: https://www.appcoda.com/arkit-image-recognition/).
import UIKit
import ARKit
class ViewController: UIViewController {
@IBOutlet weak var sceneView: ARSCNView!
@IBOutlet weak var label: UILabel!
let fadeDuration: TimeInterval = 0.3
let rotateDuration: TimeInterval = 3
let waitDuration: TimeInterval = 0.5
lazy var fadeAndSpinAction: SCNAction = {
return .sequence([
.fadeIn(duration: fadeDuration),
.rotateBy(x: 0, y: 0, z: CGFloat.pi * 360 / 180, duration: rotateDuration),
.wait(duration: waitDuration),
.fadeOut(duration: fadeDuration)
])
}()
lazy var fadeAction: SCNAction = {
return .sequence([
.fadeOpacity(by: 0.8, duration: fadeDuration),
.wait(duration: waitDuration),
.fadeOut(duration: fadeDuration)
])
}()
lazy var treeNode: SCNNode = {
guard let scene = SCNScene(named: "tree.scn"),
let node = scene.rootNode.childNode(withName: "tree", recursively: false) else { return SCNNode() }
let scaleFactor = 0.005
node.scale = SCNVector3(scaleFactor, scaleFactor, scaleFactor)
node.eulerAngles.x = -.pi / 2
return node
}()
lazy var bookNode: SCNNode = {
guard let scene = SCNScene(named: "book.scn"),
let node = scene.rootNode.childNode(withName: "book", recursively: false) else { return SCNNode() }
let scaleFactor = 0.1
node.scale = SCNVector3(scaleFactor, scaleFactor, scaleFactor)
return node
}()
lazy var mountainNode: SCNNode = {
guard let scene = SCNScene(named: "mountain.scn"),
let node = scene.rootNode.childNode(withName: "mountain", recursively: false) else { return SCNNode() }
let scaleFactor = 0.25
node.scale = SCNVector3(scaleFactor, scaleFactor, scaleFactor)
node.eulerAngles.x += -.pi / 2
return node
}()
override func viewDidLoad() {
super.viewDidLoad()
sceneView.delegate = self
configureLighting()
}
func configureLighting() {
sceneView.autoenablesDefaultLighting = true
sceneView.automaticallyUpdatesLighting = true
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
resetTrackingConfiguration()
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
sceneView.session.pause()
}
@IBAction func resetButtonDidTouch(_ sender: UIBarButtonItem) {
resetTrackingConfiguration()
}
func resetTrackingConfiguration() {
guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else { return }
let configuration = ARWorldTrackingConfiguration()
configuration.detectionImages = referenceImages
let options: ARSession.RunOptions = [.resetTracking, .removeExistingAnchors]
sceneView.session.run(configuration, options: options)
label.text = "Move camera around to detect images"
}
}
extension ViewController: ARSCNViewDelegate {
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
DispatchQueue.main.async {
guard let imageAnchor = anchor as? ARImageAnchor,
let imageName = imageAnchor.referenceImage.name else { return }
// TODO: Comment out code
// let planeNode = self.getPlaneNode(withReferenceImage: imageAnchor.referenceImage)
// planeNode.opacity = 0.0
// planeNode.eulerAngles.x = -.pi / 2
// planeNode.runAction(self.fadeAction)
// node.addChildNode(planeNode)
// TODO: Overlay 3D Object
let overlayNode = self.getNode(withImageName: imageName)
overlayNode.opacity = 0
overlayNode.position.y = 0.2
overlayNode.runAction(self.fadeAndSpinAction)
node.addChildNode(overlayNode)
self.label.text = "Image detected: \"\(imageName)\""
}
}
func getPlaneNode(withReferenceImage image: ARReferenceImage) -> SCNNode {
let plane = SCNPlane(width: image.physicalSize.width,
height: image.physicalSize.height)
let node = SCNNode(geometry: plane)
return node
}
func getNode(withImageName name: String) -> SCNNode {
var node = SCNNode()
switch name {
case "Book":
node = bookNode
case "Snow Mountain":
node = mountainNode
case "Trees In the Dark":
node = treeNode
default:
break
}
return node
}
}
I know that the 3D overlay is displayed in the renderer function above, but it is only displayed on top of a single detected image anchor. Now my question is: is it possible to reference multiple ARImageAnchors to dynamically display a single 3D model?
Being a novice in ARKit and Swift in general, I'm not sure how to go about this problem yet. I'm hoping someone might have an idea of how to work around this and point me in the right direction. Any help will be greatly appreciated!
Thanks in advance!
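One direction worth sketching (not a tested solution; anchorPositions, the marker names "MarkerTop" and "MarkerBottom", sharedModelNode, and the scaling math are all assumptions) is to record the world position of each detected ARImageAnchor in renderer(_:didAdd:for:) and, once two markers are known, derive a scale factor for the single shared model from the distance between them:
// Assumed properties on the view controller: a dictionary of detected marker positions
// and a single model node that has already been added to the scene's root node.
var anchorPositions: [String: simd_float3] = [:]
var sharedModelNode = SCNNode()

func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    guard let imageAnchor = anchor as? ARImageAnchor,
          let imageName = imageAnchor.referenceImage.name else { return }
    // Record where this marker sits in world space
    let t = imageAnchor.transform.columns.3
    anchorPositions[imageName] = simd_float3(t.x, t.y, t.z)
    // Once both (hypothetical) markers are visible, size the shared model to span them
    if let top = anchorPositions["MarkerTop"], let bottom = anchorPositions["MarkerBottom"] {
        let measured = simd_distance(top, bottom)   // metres between the two QR codes
        let nominal: Float = 0.5                    // distance the model was authored for (assumed)
        DispatchQueue.main.async {
            self.sharedModelNode.simdScale = simd_float3(repeating: measured / nominal)
            self.sharedModelNode.simdWorldPosition = (top + bottom) / 2
        }
    }
}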

how to 3d rotate arkit text (Swift4)

My code below takes text and places 3D text in augmented reality. The problem is that I cannot rotate the text; I can only move it around in a 2D direction. I would like to be able to move and rotate the text, but I don't know how to do this. I have not seen any online tutorials on this problem. This is currently a Swift 4 project.
import UIKit
import SceneKit
import ARKit
class TextNode: SCNNode{
var textGeometry: SCNText!
init(text: String, depth: CGFloat = 1, font: String = "Helvetica", textSize: CGFloat = 3, colour: UIColor) {
super.init()
textGeometry = SCNText(string: text , extrusionDepth: depth)
textGeometry.font = UIFont(name: font, size: textSize)
textGeometry.flatness = 0
textGeometry.firstMaterial?.diffuse.contents = colour
self.geometry = textGeometry
self.scale = SCNVector3(0.01, 0.01 , 0.01)
}
required init?(coder aDecoder: NSCoder) { fatalError("init(coder:) has not been implemented") }
}
extension ViewController: UITextFieldDelegate{
func textField(_ textField: UITextField, shouldChangeCharactersIn range: NSRange, replacementString string: String) -> Bool {
if let textEntered = textField.text,
let textRange = Range(range, in: textEntered) {
let updatedText = textEntered.replacingCharacters(in: textRange, with: string)
print("User Has Entered \(updatedText)")
DispatchQueue.main.async {
self.textNode.textGeometry.string = updatedText
}
}
return true
}
}
class ViewController: UIViewController {
/////
var name: String?
@IBOutlet var theBar: UITextField!
@IBOutlet var augmentedRealityView: ARSCNView!
//2. Create Our ARWorld Tracking Configuration
let configuration = ARWorldTrackingConfiguration()
//3. Create Our Session
let augmentedRealitySession = ARSession()
//4. Create A Variable To Store The Current Nodes Rotation Around It's Y-Axis
var currentAngleY: Float = 0.0
var isRotating = false
var currentNode: SCNNode?
/////
var textNode: TextNode!
fileprivate func judo() {
augmentedRealityView.scene.rootNode.addChildNode(textNode)
textNode.position = SCNVector3(0, 0, -1.5)
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
augmentedRealityView.session.pause()
}
@IBAction func changeTextColour(){
let snapShot = self.augmentedRealityView.snapshot()
UIImageWriteToSavedPhotosAlbum(snapShot, self, #selector(image(_:didFinishSavingWithError:contextInfo:)), nil)
}
@objc func image(_ image: UIImage, didFinishSavingWithError error: Error?, contextInfo: UnsafeRawPointer) {
if let error = error {
print("Error Saving ARKit Scene \(error)")
} else {
print("ARKit Scene Successfully Saved")
}}
@objc func scaleCurrentNode(_ gesture: UIPinchGestureRecognizer) {
if !isRotating, let selectedNode = currentNode{
if gesture.state == .changed {
let pinchScaleX: CGFloat = gesture.scale * CGFloat((selectedNode.scale.x))
let pinchScaleY: CGFloat = gesture.scale * CGFloat((selectedNode.scale.y))
let pinchScaleZ: CGFloat = gesture.scale * CGFloat((selectedNode.scale.z))
selectedNode.scale = SCNVector3Make(Float(pinchScaleX), Float(pinchScaleY), Float(pinchScaleZ))
gesture.scale = 1
}
if gesture.state == .ended {}
}
}
@objc func rotateNode(_ gesture: UIRotationGestureRecognizer){
if let selectedNode = currentNode{
//1. Get The Current Rotation From The Gesture
let rotation = Float(gesture.rotation)
//2. If The Gesture State Has Changed Set The Nodes EulerAngles.y
if gesture.state == .changed{
isRotating = true
selectedNode.eulerAngles.y = currentAngleY + rotation
}
//3. If The Gesture Has Ended Store The Last Angle Of The CurrentNode
if(gesture.state == .ended) {
currentAngleY = selectedNode.eulerAngles.y
isRotating = false
}
}
}
override func viewDidLoad() {
super.viewDidLoad()
augmentedRealityView.automaticallyUpdatesLighting = true
theBar.delegate = self as? UITextFieldDelegate
textNode = TextNode(text: theBar.text!, colour: .white)
augmentedRealityView.scene.rootNode.addChildNode(textNode)
textNode.position = SCNVector3(0, 0, -1.5)
augmentedRealityView.automaticallyUpdatesLighting = true
//1. Run The ARSession
augmentedRealityView.session = augmentedRealitySession
augmentedRealitySession.run(configuration, options: [.resetTracking, .removeExistingAnchors])
//2. Add A UIPinchGestureRecognizer So We Can Scale Our TextNode
let scaleGesture = UIPinchGestureRecognizer(target: self, action: #selector(scaleCurrentNode(_:)))
self.view.addGestureRecognizer(scaleGesture)
//3. Add A Tap Gesture Recognizer So We Can Place Our TextNode
let tapGesture = UITapGestureRecognizer(target: self, action: #selector(placeOrAssignNode(_:)))
self.view.addGestureRecognizer(tapGesture)
//4. Add A Rotation Gesture Recognizer So We Can Rotate Our TextNode
let rotateGesture = UIRotationGestureRecognizer(target: self, action: #selector(rotateNode(_:)))
self.view.addGestureRecognizer(rotateGesture)
}
@objc func placeOrAssignNode(_ gesture: UITapGestureRecognizer){
//1. Get The Current Location Of The Tap
let currentTouchLocation = gesture.location(in: self.augmentedRealityView)
//2. If We Hit An SCNNode Set It As The Current Node So We Can Interact With It
if let nodeHitTest = self.augmentedRealityView.hitTest(currentTouchLocation, options: nil).first?.node{
currentNode = nodeHitTest
return
}
//3. Do An ARHitTest For Features Points So We Can Place An SCNNode
if let hitTest = self.augmentedRealityView.hitTest(currentTouchLocation, types: .featurePoint).first {
//4. Get The World Transform
let hitTestPosition = hitTest.worldTransform.columns.3
//5. Add The TestNode At The Desired Position
createTextFromPosition(SCNVector3(hitTestPosition.x, hitTestPosition.y, hitTestPosition.z))
return
}
}
func createTextFromPosition(_ position: SCNVector3){
let textNode = SCNNode()
//1. Create The Text Geometry With String & Depth Parameters
let textGeometry = SCNText(string: theBar.text! , extrusionDepth: 1)
//2. Set The Font With Our Set Font & Size
textGeometry.font = UIFont(name: "Helvetica", size: 1)
//3. Set The Flatness To Zero (This Makes The Text Look Smoother)
textGeometry.flatness = 0
//4. Set The Colour Of The Text
textGeometry.firstMaterial?.diffuse.contents = UIColor.white
//5. Set The Text's Material
textNode.geometry = textGeometry
//6. Set The Pivot At The Center
let min = textNode.boundingBox.min
let max = textNode.boundingBox.max
textNode.pivot = SCNMatrix4MakeTranslation(
min.x + (max.x - min.x)/2,
min.y + (max.y - min.y)/2,
min.z + (max.z - min.z)/2
)
//7. Scale The Text So We Can Actually See It!
textNode.scale = SCNVector3(0.005, 0.005 , 0.005)
//8. Add It To The Hierachy & Position It
self.augmentedRealityView.scene.rootNode.addChildNode(textNode)
textNode.position = position
//9. Set It As The Current Node
currentNode = textNode
}
func changeColour(_ value: Int){
if value == 0{
textNode.textGeometry.firstMaterial?.diffuse.contents = UIColor.lightGray
}
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
if let name = UserDefaults.standard.value(forKey: "name") as? String{
theBar.text = name
}
theBar.delegate = self as? UITextFieldDelegate
if name == String(1) {
textNode = TextNode(text: theBar.text!, colour: .red)
augmentedRealityView.scene.rootNode.addChildNode(textNode)
textNode.position = SCNVector3(0, 0, -1.5)
}
let configuration = ARWorldTrackingConfiguration()
configuration.planeDetection = .horizontal
augmentedRealityView.session.run(configuration)
}
}
//////////////////////////////////////
An SCNNode object has a rotation property you can utilize. I don't see where you're setting it in your code.
https://developer.apple.com/documentation/scenekit/scnnode/1408034-rotation
// Sample code: rotates the node to the `heading`
public func nodeRotation(heading: Double) -> SCNVector4 {
    return SCNVector4(0, 1, 0, Double.pi - ((heading * (Double.pi / 180)) - Double.pi))
}

textNode.rotation = nodeRotation(heading: 45.0)
