Inverse Kinematics in ARKit - iOS

I have a SceneKit model with 4 joints.
When I use it in an SCNView with the code below and change the position of the parent node (Joint), I can see it animate according to the joints.
private lazy var scene: SCNScene = {
    let scene = SCNScene(named: "art.scnassets/ears")!
    return scene
}()
In viewDidLoad:
let cameraNode = SCNNode()
cameraNode.camera = SCNCamera()
cameraNode.position = SCNVector3(x: 0, y: 0, z: 5)
rootNode.addChildNode(cameraNode)
scnView.scene = scene
scnView.allowsCameraControl = true
scnView.backgroundColor = .white
let joint = contentNode!.childNode(withName: "Joint", recursively: true)!
let ik: SCNIKConstraint = .inverseKinematicsConstraint(chainRootNode: joint)
joint.childNode(withName: "head", recursively: true)!.constraints = [ik]
The problem occurs when I use the same model in ARKit. In this case, I use a separate view controller from the one above and place the model on the head. The model collapses when I show it in ARKit with the code below. I expect it to track head movement and bend accordingly.
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    guard let sceneView = renderer as? ARSCNView,
          anchor is ARFaceAnchor else { return nil }
    faceGeometry = ARSCNFaceGeometry(device: sceneView.device!)!
    contentNode = SCNReferenceNode(named: "art/ears")
    //contentNode!.physicsBody = .kinematic()
    let joint = contentNode!.childNode(withName: "Joint", recursively: true)!
    let ik: SCNIKConstraint = .inverseKinematicsConstraint(chainRootNode: joint)
    joint.childNode(withName: "head", recursively: true)!.constraints = [ik]
    return contentNode
}
I have this extension
extension SCNReferenceNode {
    convenience init(named resourceName: String, loadImmediately: Bool = true) {
        let url = Bundle.main.url(forResource: resourceName, withExtension: "scn", subdirectory: "Models.scnassets")!
        self.init(url: url)!
        if loadImmediately {
            self.load()
        }
    }
}
The normal look of the model is like this:
Correctly applied inverse kinematics snapshot
I tried setting setMaxAllowedRotationAngle (between 20 and 45 degrees) on the joints; this prevents the collapse behavior, but it also prevents bending.
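For reference, a minimal sketch of how such a per-joint limit is applied to the constraint; the joint name "Joint1" is an assumption about the model's hierarchy, not something from the original scene:
// Hypothetical joint name; the angle is given in degrees per the SCNIKConstraint documentation.
if let midJoint = joint.childNode(withName: "Joint1", recursively: true) {
    ik.setMaxAllowedRotationAngle(30, forJoint: midJoint)
}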
The y component of the scene's gravity is -9.8; x and z are 0.
What might be the problem that prevents me from creating the same effect in ARKit?

Related

ARKit + SceneKit: Using reconstructed scene for physics?

I'm using ARKit with SceneKit and would like to let my 3D objects physically interact with the reconstructed scene created by devices with LiDAR sensors (config.sceneReconstruction = .mesh). For example, having a virtual ball bounce off of the geometry of the reconstructed scene.
In RealityKit, this seems to be possible using sceneUnderstanding:
arView.environment.sceneUnderstanding.options.insert(.physics)
How can I achieve the same thing when using SceneKit?
As far as I know, there is no built-in support for enabling this using SceneKit. However, you can fairly easily put together a custom solution using the ARMeshAnchor instances created by ARKit.
First, configure ARKit to enable scene reconstruction:
let config = ARWorldTrackingConfiguration()
if ARWorldTrackingConfiguration.supportsSceneReconstruction(.mesh) {
    config.sceneReconstruction = .mesh
} else {
    // Handle device that doesn't support scene reconstruction
}
// and enable physics visualization for debugging
sceneView.debugOptions = [.showPhysicsShapes]
Then, in your ARSCNViewDelegate, use renderer(_:nodeFor:) to create a SceneKit node for newly created ARMeshAnchor instances:
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    guard let meshAnchor = anchor as? ARMeshAnchor else {
        return nil
    }
    let geometry = createGeometryFromAnchor(meshAnchor: meshAnchor)
    // Optionally hide the node from rendering as well
    geometry.firstMaterial?.colorBufferWriteMask = []
    let node = SCNNode(geometry: geometry)
    // Make sure physics apply to the node
    // You must use concavePolyhedron here!
    node.physicsBody = SCNPhysicsBody(type: .static, shape: SCNPhysicsShape(geometry: geometry, options: [.type: SCNPhysicsShape.ShapeType.concavePolyhedron]))
    return node
}
// Taken from https://developer.apple.com/forums/thread/130599
func createGeometryFromAnchor(meshAnchor: ARMeshAnchor) -> SCNGeometry {
    let meshGeometry = meshAnchor.geometry
    let vertices = meshGeometry.vertices
    let normals = meshGeometry.normals
    let faces = meshGeometry.faces

    // Use the MTL buffers that ARKit gives us
    let vertexSource = SCNGeometrySource(buffer: vertices.buffer, vertexFormat: vertices.format, semantic: .vertex, vertexCount: vertices.count, dataOffset: vertices.offset, dataStride: vertices.stride)
    let normalsSource = SCNGeometrySource(buffer: normals.buffer, vertexFormat: normals.format, semantic: .normal, vertexCount: normals.count, dataOffset: normals.offset, dataStride: normals.stride)

    // Copy the bytes as we may use them later
    let faceData = Data(bytes: faces.buffer.contents(), count: faces.buffer.length)

    // Create the geometry element
    let geometryElement = SCNGeometryElement(data: faceData, primitiveType: toSCNGeometryPrimitiveType(faces.primitiveType), primitiveCount: faces.count, bytesPerIndex: faces.bytesPerIndex)
    return SCNGeometry(sources: [vertexSource, normalsSource], elements: [geometryElement])
}

func toSCNGeometryPrimitiveType(_ ar: ARGeometryPrimitiveType) -> SCNGeometryPrimitiveType {
    switch ar {
    case .triangle: return .triangles
    default: fatalError("Unknown type")
    }
}
Finally, update the scene nodes whenever the reconstructed geometry changes, in the ARSCNViewDelegate's renderer(_:didUpdate:for:) method:
func renderer(_ renderer: SCNSceneRenderer, didUpdate node: SCNNode, for anchor: ARAnchor) {
    guard let meshAnchor = anchor as? ARMeshAnchor else {
        return
    }
    let geometry = createGeometryFromAnchor(meshAnchor: meshAnchor)
    geometry.firstMaterial?.colorBufferWriteMask = []
    node.geometry = geometry
    node.physicsBody!.physicsShape = SCNPhysicsShape(geometry: geometry, options: [.type: SCNPhysicsShape.ShapeType.concavePolyhedron])
}
Any physics objects you create in SceneKit should now be able to interact with the reconstructed scene.
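As a quick smoke test (my own addition, not part of the original answer; the node setup is illustrative), you can drop a small dynamic sphere and let it come to rest on the mesh-backed physics shapes created above:
// Sketch: a dynamic ball that should collide with the static mesh-anchor bodies.
func dropTestBall(in sceneView: ARSCNView) {
    let ball = SCNNode(geometry: SCNSphere(radius: 0.05))
    ball.geometry?.firstMaterial?.diffuse.contents = UIColor.red
    ball.position = SCNVector3(0, 1, -0.5)              // roughly in front of and above the world origin
    ball.physicsBody = SCNPhysicsBody(type: .dynamic, shape: nil) // shape inferred from the sphere geometry
    sceneView.scene.rootNode.addChildNode(ball)
}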

How to recognize a lot of objects and add a lot of nodes at the same time?

This is the simple object I use for ARImageTrackingConfiguration().
In the code I add a plane and a paper plane onto the recognized object:
func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
    let node = SCNNode()
    if let imageAnchor = anchor as? ARImageAnchor {
        let plane = SCNPlane(width: imageAnchor.referenceImage.physicalSize.width,
                             height: imageAnchor.referenceImage.physicalSize.height)
        plane.firstMaterial?.diffuse.contents = UIColor.green.withAlphaComponent(0.8)
        let planeNode = SCNNode(geometry: plane)
        planeNode.eulerAngles.x = -.pi / 2
        planeNode.position.y = 0

        let paperPlaneScene = SCNScene(named: "Scenes.scnassets/paperPlane.scn")!
        let paperPlaneNode = paperPlaneScene.rootNode.childNodes.first!
        paperPlaneNode.position = SCNVector3Zero
        paperPlaneNode.position.z = 0.1
        paperPlaneNode.eulerAngles.y = -.pi / 2
        paperPlaneNode.eulerAngles.z = .pi

        planeNode.addChildNode(paperPlaneNode)
        node.addChildNode(planeNode)
    }
    return node
}
But the result is the following:
Why is there only one recognized object at a time, and not all of them? They are recognized one by one, but never all at once. Why?
In the latest version of ARKit (5.0) you can simultaneously detect up to 100 images using classes such as ARImageTrackingConfiguration or ARWorldTrackingConfiguration.
To detect up to 100 images at a time, use the following instance property:
var maximumNumberOfTrackedImages: Int { get set }
In real code it looks like this:
guard let reference = ARReferenceImage.referenceImages(inGroupNamed: "ARRes", bundle: nil)
else { fatalError("Missing resources") }

let config = ARWorldTrackingConfiguration()
config.maximumNumberOfTrackedImages = 10
config.detectionImages = reference
arView.session.run(config, options: [.resetTracking, .removeExistingAnchors])
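If you prefer ARImageTrackingConfiguration over world tracking, the setup is nearly the same, except the images go into trackingImages. A sketch, reusing the same "ARRes" reference image set loaded above:
let imageConfig = ARImageTrackingConfiguration()
imageConfig.trackingImages = reference                 // tracked images instead of detectionImages
imageConfig.maximumNumberOfTrackedImages = 10
arView.session.run(imageConfig, options: [.resetTracking, .removeExistingAnchors])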
And here's an extension with the renderer(_:didAdd:for:) instance method:
extension ViewController: ARSCNViewDelegate {

    func renderer(_ renderer: SCNSceneRenderer,
                  didAdd node: SCNNode,
                  for anchor: ARAnchor) {
        guard let imageAnchor = anchor as? ARImageAnchor,
              let imageName = imageAnchor.referenceImage.name
        else { return }
        let geometryNode = nodeGetter(name: imageName)
        node.addChildNode(geometryNode)
    }

    func nodeGetter(name: String) -> SCNNode {
        var node = SCNNode()
        switch name {
        case "geometry_01": node = modelOne
        case "geometry_02": node = modelTwo
        case "geometry_03": node = modelThree
        case "geometry_04": node = modelFour
        case "geometry_05": node = modelFive
        case "geometry_06": node = modelSix
        case "geometry_07": node = modelSeven
        case "geometry_08": node = modelEight
        default: break
        }
        return node
    }
}

Swift: Displaying the vertex labels properly on the AR face mesh [duplicate]

I am trying to generate the face mesh from the AR face tutorial with proper vertex labels using SCNText. I followed the online tutorial and have the following:
extension EmojiBlingViewController: ARSCNViewDelegate {

    func renderer(_ renderer: SCNSceneRenderer, didUpdate node: SCNNode, for anchor: ARAnchor) {
        guard let faceAnchor = anchor as? ARFaceAnchor,
              let faceGeometry = node.geometry as? ARSCNFaceGeometry else {
            return
        }
        faceGeometry.update(from: faceAnchor.geometry)
    }

    func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
        guard let device = sceneView.device else {
            return nil
        }
        let faceGeometry = ARSCNFaceGeometry(device: device)
        let node = SCNNode(geometry: faceGeometry)
        for x in [1076, 1070, 1163] {
            let text = SCNText(string: "\(x)", extrusionDepth: 1)
            let txtnode = SCNNode(geometry: text)
            txtnode.scale = SCNVector3(x: 0.001, y: 0.001, z: 0.001)
            txtnode.name = "\(x)"
            node.addChildNode(txtnode)
            txtnode.geometry?.firstMaterial?.fillMode = .fill
        }
        node.geometry?.firstMaterial?.fillMode = .lines
        return node
    }
}
However, those vertices (1076, 1070, 1163) do not show properly; the labels overlap at the centre.
So my question is, would anyone know how to show the labels (x in [1076, 1070, 1163]) at the correct locations on the mesh?
Thanks!
Update: the numbers are overlapped and they move along with the front camera.
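One common approach (a sketch I'm adding here, not from the original post) is to read the vertex positions from the ARFaceGeometry each frame and move the named text nodes onto them, for example inside renderer(_:didUpdate:for:):
func renderer(_ renderer: SCNSceneRenderer, didUpdate node: SCNNode, for anchor: ARAnchor) {
    guard let faceAnchor = anchor as? ARFaceAnchor,
          let faceGeometry = node.geometry as? ARSCNFaceGeometry else { return }
    faceGeometry.update(from: faceAnchor.geometry)

    // Reposition each label at its vertex; the text nodes were named "\(x)" when created above.
    for index in [1076, 1070, 1163] {
        guard let labelNode = node.childNode(withName: "\(index)", recursively: false) else { continue }
        let vertex = faceAnchor.geometry.vertices[index]
        // Vertices are expressed in the face anchor's (face node's) coordinate space,
        // so they can be assigned directly to a child node's position.
        labelNode.position = SCNVector3(vertex.x, vertex.y, vertex.z)
    }
}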

Unable to draw bounding box while detecting an object with ARKit 2

I am developing an iOS app which will highlight objects in the real world with an outlined box (not a specific one, as we do with .arobject files). The idea is to implement only the bounding box drawing from this documentation/example.
I got some ideas from this Stack Overflow answer, but I am still unable to draw an outlined bounding box around the scanned object.
// Declaration
let configuration = ARObjectScanningConfiguration()
let augmentedRealitySession = ARSession()

// viewWillAppear(_ animated: Bool)
configuration.planeDetection = .horizontal
sceneView.session.run(configuration, options: .resetTracking)

// Renderer
func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
    //print("\(self.detectionObjects.debugDescription)")
    guard let objectAnchor = anchor as? ARObjectAnchor else { return }

    //2. Create A Bounding Box Around Our Object
    let scale = CGFloat(objectAnchor.referenceObject.scale.x)
    let boundingBoxNode = BlackMirrorzBoundingBox(points: objectAnchor.referenceObject.rawFeaturePoints.points, scale: scale)
    node.addChildNode(boundingBoxNode)
}
// BlackMirrorzBoundingBox class
init(points: [float3], scale: CGFloat, color: UIColor = .cyan) {
    super.init()

    var localMin = float3(repeating: Float.greatestFiniteMagnitude)
    var localMax = float3(repeating: -Float.greatestFiniteMagnitude)
    for point in points {
        localMin = min(localMin, point)
        localMax = max(localMax, point)
    }
    self.simdPosition += (localMax + localMin) / 2
    let extent = localMax - localMin

    let wireFrame = SCNNode()
    let box = SCNBox(width: CGFloat(extent.x), height: CGFloat(extent.y), length: CGFloat(extent.z), chamferRadius: 0)
    box.firstMaterial?.diffuse.contents = color
    box.firstMaterial?.isDoubleSided = true
    wireFrame.geometry = box
    setupShaderOnGeometry(box)
    self.addChildNode(wireFrame)
}

func setupShaderOnGeometry(_ geometry: SCNBox) {
    guard let path = Bundle.main.path(forResource: "wireframe_shader", ofType: "metal", inDirectory: "art.scnassets"),
          let shader = try? String(contentsOfFile: path, encoding: .utf8) else {
        return
    }
    geometry.firstMaterial?.shaderModifiers = [.surface: shader]
}
With the above logic I am getting a box only on the plane surface, instead of an outlined box as in this picture.
You are probably missing the wireframe_shader shader file.
Make sure you add it to the project inside art.scnassets, then try to reload the app.
You can find a similar shader in this repository; don't forget to change the name of the shader file or the resource name in the code.
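As an alternative (my own suggestion, not part of the original answer), if you'd rather not bundle a Metal surface shader at all, a rough wireframe look can be had with SCNMaterial's built-in fillMode; it renders the box's triangle edges rather than a clean outline, but needs no extra resource file:
func setupWireframeMaterial(_ geometry: SCNBox, color: UIColor = .cyan) {
    geometry.firstMaterial?.diffuse.contents = color
    geometry.firstMaterial?.isDoubleSided = true
    geometry.firstMaterial?.fillMode = .lines   // draw edges instead of filled faces
}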

Loading a .scn file as an SCNNode loses the animations

When I load Horse.scn into an SCNNode, the animations are lost.
func addHorse(for parentNode: SCNNode, position: SCNVector3) {
    guard let virtualObjectScene = SCNScene(named: "Horse.scn") else {
        return
    }
    let wrapperNode = SCNNode()
    for child in virtualObjectScene.rootNode.childNodes {
        child.geometry?.firstMaterial?.lightingModel = .physicallyBased
        child.movabilityHint = .movable
        wrapperNode.addChildNode(child)
    }
    wrapperNode.scale = SCNVector3(0.001, 0.001, 0.001)
    wrapperNode.position = position
    parentNode.addChildNode(wrapperNode)
}
If I load the scene from a Horse.dae file, everything is fine. Setting the root node's playing property to true doesn't work.
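As a diagnostic sketch (my addition, not part of the original post), you can walk the loaded hierarchy to see whether the copied nodes still carry animation players, and start them explicitly:
// If this prints nothing, the animations are not attached to the child nodes being copied.
virtualObjectScene.rootNode.enumerateHierarchy { child, _ in
    for key in child.animationKeys {
        print("Found animation \(key) on node \(child.name ?? "unnamed")")
        child.animationPlayer(forKey: key)?.play()
    }
}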
