ARKit image recognition - image tracking - iOS

I'm trying to build an ARKit app. The idea is to recognize images and place a 3D object above them. This works fine, but with some images the 3D object isn't tracked.
I know the images themselves aren't tracked (when the image moves), but when I move the iPhone, for some images the 3D object stays in the middle of the image, while for others it "flies around". I tested PNG/JPG, different sizes, good/bad quality; I cannot figure out why some images don't work. Any ideas?
import UIKit
import SceneKit
import ARKit
class ViewController: UIViewController, ARSCNViewDelegate {
@IBOutlet var sceneView: ARSCNView!
private var planeNode: SCNNode?
private var imageNode: SCNNode?
private var animationInfo: AnimationInfo?
private var currentMediaName: String?
override func viewDidLoad() {
super.viewDidLoad()
let scene = SCNScene()
sceneView.scene = scene
sceneView.delegate = self
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
// Load reference images to look for from "AR Resources" folder
guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else {
fatalError("Missing expected asset catalog resources.")
}
// Create a session configuration
let configuration = ARWorldTrackingConfiguration()
// Add previously loaded images to ARScene configuration as detectionImages
configuration.detectionImages = referenceImages
// Run the view's session
sceneView.session.run(configuration)
let tap = UITapGestureRecognizer(target: self, action: #selector(handleTap(rec:)))
//Add recognizer to sceneview
sceneView.addGestureRecognizer(tap)
}
//Method called when tap
@objc func handleTap(rec: UITapGestureRecognizer) {
//GET Reference-Image Name
loadReferenceImage()
if rec.state == .ended {
let location: CGPoint = rec.location(in: sceneView)
let hits = self.sceneView.hitTest(location, options: nil)
if !hits.isEmpty {
let tappedNode = hits.first?.node
}
}
}
func loadReferenceImage() {
print("CLICK")
}
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
guard let imageAnchor = anchor as? ARImageAnchor else {
return
}
currentMediaName = imageAnchor.referenceImage.name
// 1. Load plane's scene.
let planeScene = SCNScene(named: "art.scnassets/plane.scn")!
let planeNode = planeScene.rootNode.childNode(withName: "planeRootNode", recursively: true)!
// 2. Calculate size based on planeNode's bounding box.
let (min, max) = planeNode.boundingBox
let size = SCNVector3Make(max.x - min.x, max.y - min.y, max.z - min.z)
// 3. Calculate the ratio of difference between real image and object size.
// Ignore Y axis because it will be pointed out of the image.
let widthRatio = Float(imageAnchor.referenceImage.physicalSize.width)/size.x
let heightRatio = Float(imageAnchor.referenceImage.physicalSize.height)/size.z
// Pick smallest value to be sure that object fits into the image.
let finalRatio = [widthRatio, heightRatio].min()!
// 4. Set transform from imageAnchor data.
planeNode.transform = SCNMatrix4(imageAnchor.transform)
// 5. Animate appearance by scaling model from 0 to previously calculated value.
let appearanceAction = SCNAction.scale(to: CGFloat(finalRatio), duration: 0.4)
appearanceAction.timingMode = .easeOut
// Set initial scale to 0.
planeNode.scale = SCNVector3Make(0.0, 0.0, 0.0)
// Add to root node.
sceneView.scene.rootNode.addChildNode(planeNode)
// Run the appearance animation.
planeNode.runAction(appearanceAction)
self.planeNode = planeNode
self.imageNode = node
}
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor, updateAtTime time: TimeInterval) {
guard let imageNode = imageNode, let planeNode = planeNode else {
return
}
// 1. Unwrap animationInfo. Calculate animationInfo if it is nil.
guard let animationInfo = animationInfo else {
refreshAnimationVariables(startTime: time,
initialPosition: planeNode.simdWorldPosition,
finalPosition: imageNode.simdWorldPosition,
initialOrientation: planeNode.simdWorldOrientation,
finalOrientation: imageNode.simdWorldOrientation)
return
}
// 2. Calculate new animationInfo if image position or orientation changed.
if !simd_equal(animationInfo.finalModelPosition, imageNode.simdWorldPosition) || animationInfo.finalModelOrientation != imageNode.simdWorldOrientation {
refreshAnimationVariables(startTime: time,
initialPosition: planeNode.simdWorldPosition,
finalPosition: imageNode.simdWorldPosition,
initialOrientation: planeNode.simdWorldOrientation,
finalOrientation: imageNode.simdWorldOrientation)
}
// 3. Calculate interpolation based on passedTime/totalTime ratio.
let passedTime = time - animationInfo.startTime
var t = min(Float(passedTime/animationInfo.duration), 1)
// Applying curve function to time parameter to achieve "ease out" timing
t = sin(t * .pi * 0.5)
// 4. Calculate and set new model position and orientation.
let f3t = simd_make_float3(t, t, t)
planeNode.simdWorldPosition = simd_mix(animationInfo.initialModelPosition, animationInfo.finalModelPosition, f3t)
planeNode.simdWorldOrientation = simd_slerp(animationInfo.initialModelOrientation, animationInfo.finalModelOrientation, t)
//planeNode.simdWorldOrientation = imageNode.simdWorldOrientation
guard let currentImageAnchor = anchor as? ARImageAnchor else { return }
let name = currentImageAnchor.referenceImage.name!
print("TEST")
print(name)
}
func refreshAnimationVariables(startTime: TimeInterval, initialPosition: float3, finalPosition: float3, initialOrientation: simd_quatf, finalOrientation: simd_quatf) {
let distance = simd_distance(initialPosition, finalPosition)
// Average speed of movement is 0.15 m/s.
let speed = Float(0.15)
// Total time is calculated as distance/speed. Min time is set to 0.1s and max is set to 2s.
let animationDuration = Double(min(max(0.1, distance/speed), 2))
// Store animation information for later usage.
animationInfo = AnimationInfo(startTime: startTime,
duration: animationDuration,
initialModelPosition: initialPosition,
finalModelPosition: finalPosition,
initialModelOrientation: initialOrientation,
finalModelOrientation: finalOrientation)
}
}

Instead of adding the node directly to scene.rootNode, add it to the node associated with the anchor:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
node.addChildNode(yourNode)
}
There is no need to give it a position since it will be placed in the center of the detected image.
Read about the importance of anchors here: What's the difference between using ARAnchor to insert a node and directly insert a node?
Two other points:
Make sure the physical size of the image set in the AR Resources group is accurate.
Move the phone around a bit before detecting an image. This gives ARKit a chance to understand the scene and helps with better tracking.
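Putting the main suggestion together with the code from the question, here is a minimal sketch of how renderer(_:didAdd:for:) could look (asset and node names are taken from the question; treat this as a sketch, not a drop-in implementation):
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    guard let imageAnchor = anchor as? ARImageAnchor else { return }
    // Load the model.
    let planeScene = SCNScene(named: "art.scnassets/plane.scn")!
    let planeNode = planeScene.rootNode.childNode(withName: "planeRootNode", recursively: true)!
    // Scale the model so it fits inside the detected image, as in the question.
    let (min, max) = planeNode.boundingBox
    let size = SCNVector3Make(max.x - min.x, max.y - min.y, max.z - min.z)
    let widthRatio = Float(imageAnchor.referenceImage.physicalSize.width) / size.x
    let heightRatio = Float(imageAnchor.referenceImage.physicalSize.height) / size.z
    let finalRatio = [widthRatio, heightRatio].min()!
    planeNode.scale = SCNVector3Make(finalRatio, finalRatio, finalRatio)
    // Key change: parent the model to the node ARKit provides for the anchor.
    // ARKit keeps this node aligned with the detected image, so no manual
    // transform (and no interpolation in updateAtTime) is needed.
    node.addChildNode(planeNode)
}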

Related

Swift - How to update ARAnchor to follow Camera's position

I followed this code from @rickster which 100% works and looks great. In the video he's animating an SCNNode that is set using an ARAnchor from one position to another and back. I tried to do something similar, except I want the node that is set with the ARAnchor to follow/update its position to another node that is a child of the camera.
I'm having a problem updating the position in func renderer(_ renderer: SCNSceneRenderer, willRenderScene scene: SCNScene, atTime time: TimeInterval) { }
I tried to animate the node that is set with the ARAnchor to follow the other node, but it's not working; it follows backwards and in reverse:
let animation = CABasicAnimation(keyPath: #keyPath(SCNNode.transform))
animation.fromValue = nodeSetWithARAnchor.transform
animation.toValue = nodeTiedToCamera.transform
animation.duration = 1
nodeSetWithARAnchor.removeAllAnimations()
nodeSetWithARAnchor.addAnimation(animation, forKey: nil)
I then tried to remove the ARAnchor and reset its node's .worldPosition and .simdWorldTransform, but the node disappears. It's in steps 7 & 8 below.
How can I get the nodeSetWithARAnchor to update its ARAnchor and position to always follow the nodeTiedToCamera?
Update: In step 6, now that I set the nodeSetWithARAnchor's position to match the nodeTiedToCamera's world position and its .transform to match the nodeTiedToCamera's world transform, @rickster's animation code works best because I don't have to remove any anchors. There is another problem though. The nodeSetWithARAnchor responds when I move the device, but it responds backwards and in reverse.
When I turn the device up the image goes right and when I turn the device down the image goes left. When I turn the device left the image goes up and when I turn the device right the image goes down. It's following the image I have tied to the camera but it's following it incorrectly.
let configuration = ARWorldTrackingConfiguration()
var nodeSetWithARAnchor: SCNNode?
var nodeTiedToCamera: SCNNode?
var anchors: [ARAnchor] = []
override func viewDidLoad() {
super.viewDidLoad()
configuration.planeDetection = [.horizontal, .vertical]
configuration.maximumNumberOfTrackedImages = 1
// 1. once this anchor is added, renderer(_:didAdd:for:) initializes the nodeSetWithARAnchor 30cm in front of the device camera's initial position
DispatchQueue.main.asyncAfter(deadline: .now() + 3) {
var translation = matrix_identity_float4x4
translation.columns.3.z = -0.3
let transform = simd_mul(self.sceneView.session.currentFrame!.camera.transform, translation)
let anchor = ARAnchor(transform: transform)
self.sceneView.session.add(anchor: anchor)
}
// 2. the nodeTiedToCamera will always go wherever the device's camera goes
let plane = SCNPlane(width: 0.1, height: 0.1)
nodeTiedToCamera = SCNNode(geometry: plane)
nodeTiedToCamera!.position = SCNVector3(x: -0.15, y: 0.45, z: -1.25) // I don't want it directly in front of the camera
nodeTiedToCamera!.geometry?.firstMaterial?.diffuse.contents = UIColor.red
sceneView.pointOfView?.addChildNode(nodeTiedToCamera!)
}
// 3. I init the nodeSetWithARAnchor, add it to the node provided for the anchor, and keep a copy of its anchor
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
DispatchQueue.main.async {
if self.nodeSetWithARAnchor == nil {
// create geometry ...
self.nodeSetWithARAnchor = SCNNode(geometry: geometry)
node.addChildNode(self.nodeSetWithARAnchor!)
}
self.anchors.removeAll()
self.anchors.append(anchor)
}
}
func renderer(_ renderer: SCNSceneRenderer, willRenderScene scene: SCNScene, atTime time: TimeInterval) {
DispatchQueue.main.async {
// 4. get the only child that is tied to the camera which is the nodeTiedToCamera
guard let pointOfView = self.sceneView.pointOfView else { return }
guard let child = pointOfView.childNodes.first else { return }
// 5. get its .worldPosition && its .worldTransform
let nodeTiedToCameraWorldPosition = child.worldPosition
let nodeTiedToCameraWorldTransform = child.worldTransform
if let nodeSetWithARAnchor = self.nodeSetWithARAnchor, let anchorToRemove = self.anchors.first {
// 6. set the nodeSetWithARAnchor's position to match the nodeTiedToCamera's world position and its .transform to match the nodeTiedToCamera's world transform
nodeSetWithARAnchor.position = nodeTiedToCameraWorldPosition
nodeSetWithARAnchor.transform = nodeTiedToCameraWorldTransform
let animation = CABasicAnimation(keyPath: #keyPath(SCNNode.transform))
animation.fromValue = nodeSetWithARAnchor.transform
animation.toValue = self.nodeTiedToCamera!.transform
animation.duration = 1
nodeSetWithARAnchor.removeAllAnimations()
nodeSetWithARAnchor.addAnimation(animation, forKey: nil)
// 7. remove all ARAnchors
//self.sceneView.session.remove(anchor: anchorToRemove)
//self.anchors.removeAll()
// 8. add a new anchor to the session and set it with the nodeSetWithARAnchor.simdWorldTransform
//let anchor = ARAnchor(transform: nodeSetWithARAnchor.simdWorldTransform)
//self.sceneView.session.add(anchor: anchor)
}
}
}
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
guard let node = self.nodeSetWithARAnchor else { return }
if let pointOfView = sceneView.pointOfView {
let isVisible = sceneView.isNode(node, insideFrustumOf: pointOfView)
print("Is node visible: \(isVisible)")
}
}
It was an all-day thing, but I got it working. I only had to switch around two lines of code in renderer(_:willRenderScene:atTime:) and run them in the exact order below. I didn't have to remove and add anchors or run any animation code.
func renderer(_ renderer: SCNSceneRenderer, willRenderScene scene: SCNScene, atTime time: TimeInterval) {
DispatchQueue.main.async { [weak self] in
guard let safeSelf = self else { return }
guard let pointOfView = safeSelf.sceneView.pointOfView else { return }
guard let child = pointOfView.childNodes.first else { return } // child is the nodeTiedToCamera
if let nodeSetWithARAnchor = safeSelf.nodeSetWithARAnchor {
// *** I just had to switch around these 2 lines of code and run them in this exact order ***
nodeSetWithARAnchor.transform = child.worldTransform
nodeSetWithARAnchor.worldPosition = child.worldPosition
}
}
}

ARKit: How to place a .obj file on a plane which has multiple materials

I want to place a car object on the plane.
I am setting up the scene view like this.
func setUpSceneView() {
let configuration = ARWorldTrackingConfiguration()
configuration.planeDetection = .horizontal
sceneView.session.run(configuration)
sceneView.delegate = self
sceneView.debugOptions = [ARSCNDebugOptions.showFeaturePoints]
}
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
// 1
guard let planeAnchor = anchor as? ARPlaneAnchor else { return }
// 2
let width = CGFloat(planeAnchor.extent.x)
let height = CGFloat(planeAnchor.extent.z)
let plane = SCNPlane(width: width, height: height)
// 3
plane.materials.first?.diffuse.contents = UIColor.transparentLightBlue
let planeNode = SCNNode(geometry: plane)
// 4
let anchorNode = SCNScene(named: "art.scnassets/car.scn")!.rootNode
// 5
let x = CGFloat(planeAnchor.center.x)
let y = CGFloat(planeAnchor.center.y)
let z = CGFloat(planeAnchor.center.z)
planeNode.position = SCNVector3(x,y,z)
planeNode.eulerAngles.x = -.pi / 2
node.addChildNode(anchorNode)
}
https://app.box.com/s/vdloxlqxk9rh6h4k5ggwrxm1hslktn8g
I am able to place the car, but it is all over the camera scene. Can anyone tell me the problem with the coordinate system or the 3D object?
This depends on how you want to place the car. Right now your app will place an anchor, and wherever it decides to create its first anchor is where your car will arrive. If you would like to update your anchor as you scan, you need to implement didUpdate, and your anchor's node will move to the center of the extent of your plane:
func renderer(_ renderer: SCNSceneRenderer, didUpdate node: SCNNode, for anchor: ARAnchor){}
However, it looks as though you have already defined the size you want your anchor to be. I've never tried it that way before, and I'm not sure if you can programmatically move your anchor to a desired location. In my mind it would cause issues if the app doesn't already know it's a horizontal surface, but I've never tested it.
Instead, what I would recommend is to create your plane as you did above (without declaring its size). Update your plane size with the didUpdate function above. Then, if you want to place your car in a predetermined spot, run a hit test.
Here is a good resource to walk you through:
https://www.appcoda.com/arkit-horizontal-plane/
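For illustration, a minimal sketch of that approach. It assumes the visualisation plane node is added as the first child of the anchor's node in didAdd; the tap handler and its gesture-recognizer wiring are hypothetical:
// Resize the visualisation plane whenever ARKit refines the plane anchor.
func renderer(_ renderer: SCNSceneRenderer, didUpdate node: SCNNode, for anchor: ARAnchor) {
    guard let planeAnchor = anchor as? ARPlaneAnchor,
          let planeNode = node.childNodes.first,
          let plane = planeNode.geometry as? SCNPlane else { return }
    plane.width = CGFloat(planeAnchor.extent.x)
    plane.height = CGFloat(planeAnchor.extent.z)
    planeNode.position = SCNVector3(planeAnchor.center.x, planeAnchor.center.y, planeAnchor.center.z)
}

// Place the car at a tapped spot using a hit test against the detected planes.
@objc func handleTap(_ recognizer: UITapGestureRecognizer) {
    let location = recognizer.location(in: sceneView)
    guard let hit = sceneView.hitTest(location, types: .existingPlaneUsingExtent).first,
          let carNode = SCNScene(named: "art.scnassets/car.scn")?.rootNode.childNodes.first else { return }
    let translation = hit.worldTransform.columns.3
    carNode.position = SCNVector3(translation.x, translation.y, translation.z)
    sceneView.scene.rootNode.addChildNode(carNode)
}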

place SCNNode fixed in place without ARPlaneAnchor

I'm currently trying to keep an SCNNode fixed in place while using ARImageTrackingConfiguration, which has no plane detection, but it doesn't seem to work properly: the SCNNode moves while the camera moves.
Below is the code:
/// show cylinder line
func showCylinderLine(point a: SCNVector3, point b: SCNVector3, object: VirtualObject) {
let length = Vector3Helper.distanceBetweenPoints(a: a, b: b)
let cylinderLine = GeometryHelper.drawCyclinderBetweenPoints(a: a, b: b, length: length, radius: 0.001, radialSegments: 10)
cylinderLine.name = "line"
cylinderLine.geometry?.firstMaterial?.diffuse.contents = UIColor.red
self.sceneView.scene.rootNode.addChildNode(cylinderLine)
cylinderLine.look(at: b, up: self.sceneView.scene.rootNode.worldUp, localFront: cylinderLine.worldUp)
}
Is it possible to make the cylinderLine SCNNode fixed in place without an ARPlaneAnchor?
(Note: I tried adding an ARAnchor in the nodeFor anchor delegate method and it still moves as the camera moves.)
Can you show your nodeFor anchor method?
That is where nodes are "fixed" to the image, so I am guessing an error is there somewhere. Here is one example application of that delegate:
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
let node = SCNNode()
if let _ = anchor as? ARImageAnchor {
if let objectScene = SCNScene(named: "ARassets.scnassets/object.scn") {
//creates a new node that is connected to the image. This is what makes your object "fixed"
let objectNode = objectScene.rootNode.childNodes.first!
objectNode.position = SCNVector3Zero
objectNode.position.y = 0.15
node.addChildNode(objectNode)
}
}
return node
}
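For the delegate above to fire, the session also has to be running an image-tracking configuration with reference images loaded. A minimal sketch, assuming the images live in an asset catalog group named "AR Resources" and sceneView is the ARSCNView from the question:
func runImageTracking() {
    // Load the reference images from the asset catalog (assumed group name).
    guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "AR Resources", bundle: nil) else {
        fatalError("Missing expected asset catalog resources.")
    }
    // ARImageTrackingConfiguration has no plane detection; it tracks the images
    // themselves, so nodes parented to an image anchor's node follow the image.
    let configuration = ARImageTrackingConfiguration()
    configuration.trackingImages = referenceImages
    configuration.maximumNumberOfTrackedImages = 1
    sceneView.session.run(configuration, options: [.resetTracking, .removeExistingAnchors])
}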
Let me know if this helps ;)

Method "isNode(:insideFrustumOf:)" always true

I'm trying to verify if a specific node is inside the current frustum of the scene.
Therefore I use the method isNode(_:insideFrustumOf:) from Apple.
In every call to renderer(_:didAdd:for:) I save the corresponding node and later test it with isNode(_:insideFrustumOf:).
But the result is always true, which is obviously wrong.
Why can't I test the nodes added by ARKit?
UPDATE:
Here is the code that was asked for; if it helps, great!
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
//...
nodes.append(node)
//...
}
nodes is an array of SCNNodes.
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
for node in nodes {
let result = sceneView.isNode(node, insideFrustumOf: sceneView.pointOfView!)
//...
}
}
This is where the evaluation of the nodes takes place. The result is always true.
Since you haven't posted all your code, it's hard to provide a 'definitive' answer.
Having said this, I have created an example for you which works perfectly well.
First I created an [SCNNode] to store any SCNNodes added to the screen:
//Array To Store Any Added Nodes To The Scene Hierarchy
var nodesRendered = [SCNNode]()
I then created 3 different SCNNodes:
/// Creates A Red, Blue & Green SCNNode
func createNodes(){
//1. Create A Red Sphere
let redNode = SCNNode()
let redGeometry = SCNSphere(radius: 0.2)
redGeometry.firstMaterial?.diffuse.contents = UIColor.red
redNode.geometry = redGeometry
redNode.position = SCNVector3(-1.5, 0, -1.5)
redNode.name = "RedNode"
//2. Create A Green Sphere
let greenNode = SCNNode()
let greenGeometry = SCNSphere(radius: 0.2)
greenGeometry.firstMaterial?.diffuse.contents = UIColor.green
greenNode.geometry = greenGeometry
greenNode.position = SCNVector3(0, 0, -1.5)
greenNode.name = "GreenNode"
//3. Create A Blue Sphere
let blueNode = SCNNode()
let blueGeometry = SCNSphere(radius: 0.2)
blueGeometry.firstMaterial?.diffuse.contents = UIColor.blue
blueNode.geometry = blueGeometry
blueNode.position = SCNVector3(1.5, 0, -1.5)
blueNode.name = "BlueNode"
//4. Add Them To The Hierarchy
augmentedRealityView.scene.rootNode.addChildNode(redNode)
augmentedRealityView.scene.rootNode.addChildNode(greenNode)
augmentedRealityView.scene.rootNode.addChildNode(blueNode)
//5. Store A Reference To The Nodes
nodesRendered.append(redNode)
nodesRendered.append(blueNode)
nodesRendered.append(greenNode)
}
Having done this, I then created a function to determine whether these were in the frustum of the camera:
/// Detects If A Node Is In View Of The Camera
func detectNodeInFrustumOfCamera(){
guard let cameraPointOfView = self.augmentedRealityView.pointOfView else { return }
for node in nodesRendered{
if augmentedRealityView.isNode(node, insideFrustumOf: cameraPointOfView){
print("\(node.name!) Is In View Of Camera")
}else{
print("\(node.name!) Is Not In View Of Camera")
}
}
}
Finally in the delegate callback I called the function like so:
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
detectNodeInFrustumOfCamera()
}
Which yielded results such as:
RedNode Is Not In View Of Camera
BlueNode Is Not In View Of Camera
GreenNode Is In View Of Camera
Hope it points you in the right direction...
So I ran into this issue myself while working on an ARKit project. It seems like the function isNode(_:insideFrustumOf:) will always return true for nodes that were automatically added by ARKit.
My workaround was, instead of attempting to track the node added by ARKit, to create a new node with a clear material that has the same "volume" as the detected object, then keep a reference to it and check whether that node is inside the point of view.
Add these variables:
/// The object that was detected.
var refObject: ARReferenceObject?
/// The reference node for the detected object.
var refNode: SCNNode?
/// additional node which we'll use to check the POV against.
var detectionNode: SCNNode?
Implement the delegate function didAdd:
public func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
if let objectAnchor = anchor as? ARObjectAnchor {
guard let name = objectAnchor.referenceObject.name, name == "my_object"
else { return }
print("detected object for the first time")
// create geometry
let cube = SCNBox(width: 0.1, height: 0.1, length: 0.1, chamferRadius: 0.0)
// apply transparent material
let material = SCNMaterial()
material.transparency = 0.0
cube.firstMaterial = material
// add child node
let detectionNode = SCNNode(geometry: cube)
node.addChildNode(detectionNode)
// store references
self.detectionNode = detectionNode // this is the reference we really need
self.refNode = node
self.refObject = objectAnchor.referenceObject
}
}
Finally implement this delegate function updateAtTime:
public func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
guard let node = self.detectionNode else { return }
if let pointOfView = sceneView.pointOfView {
let isVisible = sceneView.isNode(node, insideFrustumOf: pointOfView)
print("Is node visible: \(isVisible)")
}
}

Get All ARAnchors of focused Camera in ARKit

When the application launches, a vertical surface is first detected on one wall. Then the camera focuses on a second wall, and another surface is detected there. The first wall is no longer visible to the ARCamera, but this code gives me the anchors of the first wall. I need the anchors of the second wall, which is currently visible/focused in the camera.
if let anchor = sceneView.session.currentFrame?.anchors.first {
let node = sceneView.node(for: anchor)
addNode(position: SCNVector3Zero, anchorNode: node)
} else {
debugPrint("anchor node is nil")
}
The clue to the answer is in the beginning line of your if let statement.
Let's break this down:
When you say let anchor = sceneView.session.currentFrame?.anchors.first, you are referencing an optional array of ARAnchor, which naturally can have more than one element.
Since you are always calling first, i.e. index [0], you will always get the 1st ARAnchor that was added to the array.
Since you now have 2 anchors, you would naturally need the last (latest) element. As such you can try this as a starter:
if let anchor = sceneView.session.currentFrame?.anchors.last {
let node = sceneView.node(for: anchor)
addNode(position: SCNVector3Zero, anchorNode: node)
} else {
debugPrint("anchor node is nil")
}
Update:
Another poster has interpreted the question differently, believing the question to be: how can I detect if an ARPlaneAnchor is in view? So let's approach it another way.
First we need to take into consideration that the ARCamera has a frustum in which our content is shown.
As such, we would then need to determine whether an ARPlaneAnchor was in view of that frustum.
First we will create 2 variables:
var planesDetected = [ARPlaneAnchor: SCNNode]()
var planeID: Int = 0
The 1st to store the ARPlaneAnchor and its associated SCNNode, and the 2nd in order to provide a unique ID for each plane.
In the ARSCNViewDelegate we can visualise an ARPlaneAnchor and then store its information, e.g.:
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
//1. Get The Current ARPlaneAnchor
guard let anchor = anchor as? ARPlaneAnchor else { return }
//2. Create An SCNode & Geometry To Visualize The Plane
let planeNode = SCNNode()
let planeGeometry = SCNPlane(width: CGFloat(anchor.extent.x), height: CGFloat(anchor.extent.z))
planeGeometry.firstMaterial?.diffuse.contents = UIColor.cyan
planeNode.geometry = planeGeometry
//3. Set The Position Based On The Anchors Extent & Rotate It
planeNode.position = SCNVector3(anchor.center.x, anchor.center.y, anchor.center.z)
planeNode.eulerAngles.x = -.pi / 2
//4. Add The PlaneNode To The Node & Give It A Unique ID
node.addChildNode(planeNode)
planeNode.name = String(planeID)
//5. Store The Anchor & Node
planesDetected[anchor] = planeNode
//6. Increment The Plane ID
planeID += 1
}
Now we have stored the detected planes, we then of course need to determine if any of these are in view of the ARCamera e.g:
/// Detects If An Object Is In View Of The Camera Frustum
func detectPlaneInFrustumOfCamera(){
//1. Get The Current Point Of View
if let currentPointOfView = augmentedRealityView.pointOfView{
//2. Loop Through All The Detected Planes
for (_, planeNode) in planesDetected{
if augmentedRealityView.isNode(planeNode, insideFrustumOf: currentPointOfView){
print("ARPlaneAnchor With ID \(planeNode.name!) Is In View")
}else{
print("ARPlaneAnchor With ID \(planeNode.name!) Is Not In View")
}
}
}
}
Finally, we need to call this function, which we could do in the following delegate method, for example renderer(_:updateAtTime:):
func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
detectPlaneInFrustumOfCamera()
}
Hopefully both of these will point you in the right direction...
In order to get the node that is currently in the point of view, you can do something like this:
var targettedAnchorNode: SCNNode?
if let anchors = sceneView.session.currentFrame?.anchors {
for anchor in anchors {
if let anchorNode = sceneView.node(for: anchor), let pointOfView = sceneView.pointOfView, sceneView.isNode(anchorNode, insideFrustumOf: pointOfView) {
targettedAnchorNode = anchorNode
break
}
}
if let targettedAnchorNode = targettedAnchorNode {
addNode(position: SCNVector3Zero, anchorNode: targettedAnchorNode)
} else {
debugPrint("Targetted node not found")
}
} else {
debugPrint("Anchors not found")
}
If you would like to get all focused nodes, collect into an array every node that satisfies the condition above; see the sketch below.
Good luck!
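As a small sketch of that last suggestion, the helper below (its name is hypothetical) gathers every anchor node that is currently inside the camera frustum:
func visibleAnchorNodes(in sceneView: ARSCNView) -> [SCNNode] {
    guard let anchors = sceneView.session.currentFrame?.anchors,
          let pointOfView = sceneView.pointOfView else { return [] }
    // Keep only the nodes whose anchors are currently inside the camera frustum.
    return anchors.compactMap { anchor in
        guard let node = sceneView.node(for: anchor),
              sceneView.isNode(node, insideFrustumOf: pointOfView) else { return nil }
        return node
    }
}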
