Saving and Loading ARWorldMap in RealityKit

I am using SwiftUI and RealityKit to save my world map, along with one entity (a box) that I place in the world.
Inside my Coordinator I have the following code to create a box and add it to an anchor entity:
@objc func onTap(_ recognizer: UITapGestureRecognizer) {
    guard let arView = arView else {
        return
    }
    let location = recognizer.location(in: arView)
    let results = arView.raycast(from: location, allowing: .estimatedPlane, alignment: .horizontal)
    if let result = results.first {
        let arAnchor = ARAnchor(name: "boxAnchor", transform: result.worldTransform)
        let anchorEntity = AnchorEntity(anchor: arAnchor)
        let box = ModelEntity(mesh: MeshResource.generateBox(size: 0.3),
                              materials: [SimpleMaterial(color: .green, isMetallic: true)])
        arView.session.add(anchor: arAnchor)
        anchorEntity.addChild(box)
        arView.scene.addAnchor(anchorEntity)
    }
}
When I tap the Save button, the world map is saved:
func saveWorldMap() {
    guard let arView = arView else {
        return
    }
    arView.session.getCurrentWorldMap { worldMap, error in
        if let error = error {
            print(error)
            return
        }
        if let worldMap = worldMap {
            guard let data = try? NSKeyedArchiver.archivedData(withRootObject: worldMap, requiringSecureCoding: true) else {
                return
            }
            // Save the data into user defaults.
            let userDefaults = UserDefaults.standard
            userDefaults.set(data, forKey: "worldMap")
            userDefaults.synchronize()
        }
    }
}
And finally, the loadWorldMap function is supposed to load the map with the anchors and the entities attached to them. Unfortunately, it does load the ARAnchors, but there are no entities attached to them. The main reason is that I attached the entity to the AnchorEntity and not to the ARAnchor. How can I save an entity, like a box, attached to the ARAnchor?
func loadWorldMap() {
    guard let arView = arView else {
        return
    }
    let userDefaults = UserDefaults.standard
    if let data = userDefaults.data(forKey: "worldMap") {
        print(data)
        print("loading world map")
        guard let worldMap = try? NSKeyedUnarchiver.unarchivedObject(ofClass: ARWorldMap.self, from: data) else {
            return
        }
        for anchor in worldMap.anchors {
            print(anchor.name)
        }
        let configuration = ARWorldTrackingConfiguration()
        configuration.initialWorldMap = worldMap
        configuration.planeDetection = .horizontal
        arView.session.run(configuration)
    }
}
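
One common pattern for restoring the content is to re-create the entities when ARKit re-adds the persisted anchors, rather than trying to archive the RealityKit entities themselves. A minimal sketch, assuming the Coordinator is an NSObject subclass that is set as arView.session.delegate:

extension Coordinator: ARSessionDelegate {
    // Called both for anchors restored from the initial world map and for newly added ones.
    func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {
        guard let arView = arView else { return }

        for anchor in anchors where anchor.name == "boxAnchor" {
            // Re-create the visual content for every persisted "boxAnchor".
            let anchorEntity = AnchorEntity(anchor: anchor)
            let box = ModelEntity(mesh: .generateBox(size: 0.3),
                                  materials: [SimpleMaterial(color: .green, isMetallic: true)])
            anchorEntity.addChild(box)
            arView.scene.addAnchor(anchorEntity)
        }
    }
}

If you keep this delegate, onTap can drop its own addChild/addAnchor calls and just add the named ARAnchor to the session, so the same code path builds the box for both freshly placed and restored anchors.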

Related

Text Recognition in Images in Swift

I'm new to Swift and I was trying to make an app that can parse the text from a screenshot. I have the following code so far, and I wasn't able to figure out the proper way to call the recognition function in my ContentView; any help is appreciated:
import SwiftUI
import PhotosUI
import Vision

struct ContentView: View {
    @State var selectedItems: PhotosPickerItem?
    @State var selectedPhotoData: Data?

    func recogText(selData: Data?) {
        if let selData = selData, let image = UIImage(data: selData) {
            guard let cgImage = image.cgImage else { return }
            let handler = VNImageRequestHandler(cgImage: cgImage)
            let request = VNDetectTextRectanglesRequest { request, error in
                guard let observations = request.results as? [VNRecognizedTextObservation],
                      error == nil else { return }
                let text = observations.compactMap({
                    $0.topCandidates(1).first?.string
                }).joined(separator: ", ")
                print(text.count)
            }
            do {
                try handler.perform([request])
            } catch {
                print("Unable to perform the requests: \(error).")
            }
        }
    }

    var body: some View {
        VStack {
            // Icon
            mainImage()
            // Button
            PhotosPicker(selection: $selectedItems, matching: .images) {
                Label("Select a photo", systemImage: "photo")
            }
            .tint(.blue)
            .controlSize(.large)
            .buttonStyle(.borderedProminent)
            .onChange(of: selectedItems) { newItem in
                Task {
                    if let data = try? await newItem?.loadTransferable(type: Data.self) {
                        selectedPhotoData = data
                        let _ = recogText(selData: data)
                    }
                }
            }
        }
    }
}
I expected a print of the parsed text, but there was no output.
Here is an example function that might help you. You need to import Vision, and of course you have to replace TestImage with your own image.
func recogText() {
    let textRecognitionRequest = VNRecognizeTextRequest { (request, error) in
        // Insert code to process the text recognition results here
        guard let observations = request.results as? [VNRecognizedTextObservation] else { return }
        for observation in observations {
            let topCandidate = observation.topCandidates(1).first
            if let recognizedText = topCandidate?.string {
                print(recognizedText)
            }
        }
    }
    textRecognitionRequest.recognitionLevel = .accurate

    guard let cgImage = UIImage(named: "TestImage")?.cgImage else { return }
    let imageRequestHandler = VNImageRequestHandler(cgImage: cgImage, options: [:])
    do {
        try imageRequestHandler.perform([textRecognitionRequest])
    } catch {
        print(error)
    }
}
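
To call this from the SwiftUI view in the question, one option is a variant that takes the Data returned by PhotosPicker instead of a bundled image name. A sketch (the function name and parameter are illustrative):

import Vision
import UIKit

// Hypothetical variant of the function above that works on the Data
// returned by PhotosPicker instead of a bundled asset.
func recogText(selData: Data?) {
    guard let selData, let cgImage = UIImage(data: selData)?.cgImage else { return }

    let request = VNRecognizeTextRequest { request, _ in
        guard let observations = request.results as? [VNRecognizedTextObservation] else { return }
        let text = observations
            .compactMap { $0.topCandidates(1).first?.string }
            .joined(separator: ", ")
        print(text)
    }
    request.recognitionLevel = .accurate

    do {
        try VNImageRequestHandler(cgImage: cgImage, options: [:]).perform([request])
    } catch {
        print("Unable to perform the request: \(error)")
    }
}

Invoking it from the existing .onChange(of: selectedItems) task should then print the recognized text; the original code printed nothing largely because VNDetectTextRectanglesRequest produces VNTextObservation results (bounding boxes only), so the cast to [VNRecognizedTextObservation] fails and the closure returns early.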

How VNClassificationObservation identifier works in VNCoreMLRequest

I'm new to Core ML model implementation. I'm working on an example of ARKit with Core ML. I have created a Core ML model with a set of images. There are two folders: one folder (named 'James') containing 3 images of my friend from the front, left, and right angles, and another folder (named 'Unknown') containing some random faces. I set the maximum iterations to 20 and, for the augmentation options, selected only crop.
I have the following code where I'm integrating that model. My requirement is that when I scan my friend's face it should display his name as given in the Core ML model, and when any other random face is scanned it should display "Unknown" or some text like "This is not me".
With this code, when I scan my friend's face it does not show his name as James, and when I scan any random face it does not show Unknown either. Where exactly am I going wrong? What is the issue?
import UIKit
import ARKit
import SceneKit
import Vision

class ViewController: UIViewController {
    let sceneView = ARSCNView(frame: UIScreen.main.bounds)

    override func viewDidLoad() {
        super.viewDidLoad()
        sceneView.delegate = self
        sceneView.showsStatistics = true
        guard ARFaceTrackingConfiguration.isSupported else { return }
        let configuration = ARFaceTrackingConfiguration()
        configuration.isLightEstimationEnabled = true
        sceneView.session.run(configuration, options: [.resetTracking, .removeExistingAnchors])
        view.addSubview(sceneView)
    }
}

extension ViewController: ARSCNViewDelegate {
    func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
        guard let device = sceneView.device else {
            return nil
        }
        let faceGeometry = ARSCNFaceGeometry(device: device)
        let node = SCNNode(geometry: faceGeometry)
        node.geometry?.firstMaterial?.fillMode = .lines
        return node
    }

    func renderer(_ renderer: SCNSceneRenderer, didUpdate node: SCNNode, for anchor: ARAnchor) {
        guard let faceAnchor = anchor as? ARFaceAnchor,
              let faceGeometry = node.geometry as? ARSCNFaceGeometry else {
            return
        }
        faceGeometry.update(from: faceAnchor.geometry)

        let text = SCNText(string: "", extrusionDepth: 2)
        let font = UIFont(name: "Avenir-Heavy", size: 20)
        text.font = font
        let material = SCNMaterial()
        material.diffuse.contents = UIColor.green
        text.materials = [material]
        text.firstMaterial?.isDoubleSided = true

        let textNode = SCNNode(geometry: faceGeometry)
        textNode.position = SCNVector3(-0.1, -0.1, -0.5)
        print(textNode.position)
        textNode.scale = SCNVector3(0.002, 0.002, 0.002)
        textNode.geometry = text

        guard let model = try? VNCoreMLModel(for: FaceRecognitionPerson_1().model) else {
            fatalError("Unable to load model")
        }

        let coreMlRequest = VNCoreMLRequest(model: model) { [weak self] request, error in
            guard let results = request.results as? [VNClassificationObservation],
                  let topResult = results.first else {
                fatalError("Unexpected results")
            }
            DispatchQueue.main.async { [weak self] in
                print("Identifier Received //: ===> \(topResult.identifier)")
                text.string = topResult.identifier
                if topResult.identifier != "James" {
                    print("**===Known User Detected**===")
                }
                self!.sceneView.scene.rootNode.addChildNode(textNode)
                self!.sceneView.autoenablesDefaultLighting = true
            }
        }

        guard let pixelBuffer = self.sceneView.session.currentFrame?.capturedImage else { return }
        let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:])
        DispatchQueue.global().async {
            do {
                try handler.perform([coreMlRequest])
            } catch {
                print(error)
            }
        }
    }
}
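
Without the trained model it is hard to say definitively, but two things are worth checking (this is a sketch of possible adjustments, not a confirmed diagnosis): the classifier was trained on cropped face photos, while the code classifies capturedImage, which is the full camera frame in sensor (landscape) orientation. Telling Vision how the buffer is oriented and how to crop it can make the input much closer to the training data:

// Hypothetical adjustments to the request and handler from the code above.
coreMlRequest.imageCropAndScaleOption = .centerCrop   // match how the training images were cropped

guard let pixelBuffer = sceneView.session.currentFrame?.capturedImage else { return }
let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer,
                                    orientation: .right,   // portrait UI; adjust for your orientation
                                    options: [:])
DispatchQueue.global().async {
    try? handler.perform([coreMlRequest])
}

It also helps to print topResult.confidence; with only three training images per class the top identifier can flip between the two labels at low confidence, which makes the comparison against "James" look random.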

Unable to save pdf after signature

I am able to sign a PDF, but when I try to save it, the updated PDF file is not saved.
Code
override func viewDidAppear(_ animated: Bool) {
    pdfContainerView.usePageViewController(true, withViewOptions: nil)
    guard let signatureImage = signatureImage, let page = pdfContainerView.currentPage else { return }
    let pageBounds = page.bounds(for: .cropBox)
    let imageBounds = CGRect(x: pageBounds.midX, y: pageBounds.midY, width: 200, height: 100)
    let imageStamp = ImageStampAnnotation(with: signatureImage, forBounds: imageBounds, withProperties: nil)
    page.addAnnotation(imageStamp)
    btnSaveSign.isHidden = false
}

func setupPdfView(url: String) {
    if let documentURL = URL(string: url),
       let data = try? Data(contentsOf: documentURL),
       let document = PDFDocument(data: data) {
        // Set document to the view, center it, and set background color
        //pdfContainerView.displayMode = .singlePageContinuous
        pdfContainerView.document = document
        pdfContainerView.autoScales = true
        pdfContainerView.displayDirection = .horizontal
        pdfContainerView.backgroundColor = UIColor.white
        btnNewSign.isHidden = false
        lblPlaceholder.isHidden = true
        let panAnnotationGesture = UIPanGestureRecognizer(target: self, action: #selector(didPanAnnotation(sender:)))
        pdfContainerView.addGestureRecognizer(panAnnotationGesture)
    }
}

@objc func didPanAnnotation(sender: UIPanGestureRecognizer) {
    let touchLocation = sender.location(in: pdfContainerView)
    guard let page = pdfContainerView.page(for: touchLocation, nearest: true) else {
        return
    }
    let locationOnPage = pdfContainerView.convert(touchLocation, to: page)
    switch sender.state {
    case .began:
        guard let annotation = page.annotation(at: locationOnPage) else {
            return
        }
        if annotation.isKind(of: ImageStampAnnotation.self) {
            currentlySelectedAnnotation = annotation
        }
    case .changed:
        guard let annotation = currentlySelectedAnnotation else {
            return
        }
        let initialBounds = annotation.bounds
        // Set the center of the annotation to the spot of our finger
        annotation.bounds = CGRect(x: locationOnPage.x - (initialBounds.width / 2), y: locationOnPage.y - (initialBounds.height / 2), width: initialBounds.width, height: initialBounds.height)
        //print("move to \(locationOnPage)")
    case .ended, .cancelled, .failed:
        currentlySelectedAnnotation = nil
    default:
        break
    }
}

@objc func clickButton() {
    let importMenu = UIDocumentPickerViewController(documentTypes: [String(kUTTypePDF)], in: .import)
    importMenu.delegate = self
    importMenu.modalPresentationStyle = .formSheet
    self.present(importMenu, animated: true, completion: nil)
}

public func documentPicker(_ controller: UIDocumentPickerViewController, didPickDocumentsAt urls: [URL]) {
    guard let myURL = urls.first else {
        return
    }
    print("import result : \(myURL)")
    self.pdfURLToSave = "\(myURL)"
    let pdfView = PDFView(frame: view.frame)
    title = "My PDF Viewer"
    setupPdfView(url: "\(myURL)")
}

func documentInteractionControllerViewControllerForPreview(_ controller: UIDocumentInteractionController) -> UIViewController {
    return self // or use return self.navigationController for fetching app navigation bar colour
}

public func documentMenu(_ documentMenu: UIDocumentPickerViewController, didPickDocumentPicker documentPicker: UIDocumentPickerViewController) {
    documentPicker.delegate = self
    present(documentPicker, animated: true, completion: nil)
}
Code snippet for saving the file:
@IBAction func btnSaveAction(_ sender: Any) {
    if let documentURL = URL(string: self.pdfURLToSave),
       let data = try? Data(contentsOf: documentURL),
       let document = PDFDocument(data: data) {
        if let data = document.dataRepresentation() {
            let paths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
            let documentsDirectory = paths[0] // Get documents folder
            let folderPathUrl = URL(fileURLWithPath: documentsDirectory).appendingPathComponent("SignApp.pdf")
            if FileManager.default.fileExists(atPath: folderPathUrl.path) {
                try? FileManager.default.removeItem(at: folderPathUrl)
            }
            pdfContainerView.document?.write(toFile: "\(folderPathUrl)")
        }
    }
}
ImageStampAnnotation Class
class ImageStampAnnotation: PDFAnnotation {
    var image: UIImage!

    // A custom init that sets the type to Stamp on default and assigns our Image variable
    init(with image: UIImage!, forBounds bounds: CGRect, withProperties properties: [AnyHashable: Any]?) {
        super.init(bounds: bounds, forType: PDFAnnotationSubtype.stamp, withProperties: properties)
        self.image = image
    }

    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override func draw(with box: PDFDisplayBox, in context: CGContext) {
        // Get the CGImage of our image
        guard let cgImage = self.image.cgImage else { return }
        // Draw our CGImage in the context of our PDFAnnotation bounds
        context.draw(cgImage, in: self.bounds)
    }
}
Error:
CGPDFContextCreate: failed to create PDF context delegate..
It's not possible for me to edit your code directly to show a proper solution, since your code isn't complete, but here is a short coordinator class, including your pan annotation code that saves an updated file each time a pan gesture is completed:
class Coordinator: NSObject {
    var pdfView: PDFView?
    var currentlySelectedAnnotation: PDFAnnotation?

    func getDocumentsDirectory() -> URL {
        let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
        let documentsDirectory = paths[0]
        return documentsDirectory
    }

    func save() {
        guard let pdfView = pdfView else {
            return
        }
        let url = getDocumentsDirectory().appendingPathComponent("SavedPDF.pdf")
        pdfView.document?.write(to: url)
        print("Wrote pdf to: ", url)
    }

    @objc func didPanAnnotation(sender: UIPanGestureRecognizer) {
        guard let pdfContainerView = pdfView else {
            fatalError()
        }
        let touchLocation = sender.location(in: pdfContainerView)
        guard let page = pdfContainerView.page(for: touchLocation, nearest: true) else {
            return
        }
        let locationOnPage = pdfContainerView.convert(touchLocation, to: page)
        switch sender.state {
        case .began:
            guard let annotation = page.annotation(at: locationOnPage) else {
                return
            }
            print("Set")
            if annotation.isKind(of: ImageStampAnnotation.self) {
                currentlySelectedAnnotation = annotation
            }
        case .changed:
            guard let annotation = currentlySelectedAnnotation else {
                return
            }
            let initialBounds = annotation.bounds
            // Set the center of the annotation to the spot of our finger
            annotation.bounds = CGRect(x: locationOnPage.x - (initialBounds.width / 2), y: locationOnPage.y - (initialBounds.height / 2), width: initialBounds.width, height: initialBounds.height)
            //print("move to \(locationOnPage)")
        case .ended, .cancelled, .failed:
            currentlySelectedAnnotation = nil
            save()
        default:
            break
        }
    }
}
For context, if it helps, this is how I set up the view controller and the PDFView initially:
let vc = UIViewController()
let pdfContainerView = PDFView()
let fileUrl = Bundle.main.url(forResource: "High Noon - SCORE", withExtension: "pdf")!
let pdfDocument = PDFDocument(url: fileUrl)
pdfContainerView.document = pdfDocument
pdfContainerView.autoScales = true
vc.view.addSubview(pdfContainerView)
let panAnnotationGesture = UIPanGestureRecognizer(target: context.coordinator, action: #selector(context.coordinator.didPanAnnotation(sender:)))
pdfContainerView.addGestureRecognizer(panAnnotationGesture)
let page = pdfContainerView.currentPage!
let pageBounds = page.bounds(for: .cropBox)
let imageBounds = CGRect(x: pageBounds.midX, y: pageBounds.midY, width: 200, height: 100)
let imageStamp = ImageStampAnnotation(with: UIImage(systemName: "pencil"), forBounds: imageBounds, withProperties: nil)
page.addAnnotation(imageStamp)
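
For the save button from the question, note that write(toFile: "\(folderPathUrl)") passes a string that begins with file://, which is a plausible cause of the CGPDFContextCreate failure. A minimal sketch of that button using a file URL directly, assuming the pdfContainerView outlet from the question:

@IBAction func btnSaveAction(_ sender: Any) {
    // Write the document currently shown in the PDFView (including any
    // annotations added to its pages) straight to a file URL.
    let documentsDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    let destinationUrl = documentsDirectory.appendingPathComponent("SignApp.pdf")

    if pdfContainerView.document?.write(to: destinationUrl) == true {
        print("Saved signed PDF to: \(destinationUrl.path)")
    } else {
        print("Failed to save signed PDF")
    }
}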

How to make a draggable UIView snap to the corners over the screen?

I have a draggable UIView and I am trying to make it snap to four corners of the screen. I tried a few things, but none of them have worked. Here's the code that I have:
import UIKit
import AVKit
import Vision

class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    @IBOutlet weak var crystalName: UILabel!
    @IBOutlet weak var crystalInfoContainer: UIView!
    @IBOutlet weak var accuracy: UILabel!

    var model = IdenticrystClassification().model

    override func viewDidLoad() {
        super.viewDidLoad()

        // This method starts the camera.
        let captureSession = AVCaptureSession()
        guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
        guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
        captureSession.addInput(input)
        captureSession.startRunning()

        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.addSublayer(previewLayer)
        previewLayer.frame = view.frame

        // This method defines the sub view and its properties.
        view.addSubview(crystalInfoContainer)
        crystalInfoContainer.clipsToBounds = true
        crystalInfoContainer.layer.cornerRadius = 10.0
        //crystalInfoContainer.layer.maskedCorners = [.layerMinXMinYCorner, .layerMaxXMinYCorner]

        // This method defines torch functionality.
        func toggleTorch(on: Bool) {
            guard let device = AVCaptureDevice.default(for: .video) else { return }
            if device.hasTorch {
                do {
                    try device.lockForConfiguration()
                    if on == true {
                        device.torchMode = .on
                    } else {
                        device.torchMode = .off
                    }
                    device.unlockForConfiguration()
                } catch {
                    print("Torch could not be used")
                }
            } else {
                print("Torch is not available")
            }
        }

        // This is the code that I am trying to work out.
        func relativeVelocity(forVelocity velocity: CGFloat, from currentValue: CGFloat, to targetValue: CGFloat) -> CGFloat {
            guard currentValue - targetValue != 0 else { return 0 }
            return velocity / (targetValue - currentValue)
        }

        func nearestCorner(to point: CGPoint) -> CGPoint {
            var minDistance = CGFloat.greatestFiniteMagnitude
            var closestPosition = CGPoint.zero
            for position in crystalInfoContainer { // **Error1**
                let distance = point.distance(to: position)
                if distance < minDistance {
                    closestPosition = position
                    minDistance = distance
                }
            }
            return closestPosition

            let decelerationRate = UIScrollView.DecelerationRate.normal.rawValue
            let velocity = UIPanGestureRecognizer.velocity(in: view) // **Error2**
            let projectedPosition = CGPoint(
                x: crystalInfoContainer.center.x + project(initialVelocity: velocity.x, decelerationRate: decelerationRate),
                y: crystalInfoContainer.center.y + project(initialVelocity: velocity.y, decelerationRate: decelerationRate)
            )
            let nearestCornerPosition = nearestCorner(to: projectedPosition)
            let relativeInitialVelocity = CGVector(
                dx: relativeVelocity(forVelocity: velocity.x, from: crystalInfoContainer.center.x, to: nearestCornerPosition.x),
                dy: relativeVelocity(forVelocity: velocity.y, from: crystalInfoContainer.center.y, to: nearestCornerPosition.y)
            )
            let params = UISpringTimingParameters(damping: 1, response: 0.4, initialVelocity: relativeInitialVelocity)
            let animator = UIViewPropertyAnimator(duration: 0, timingParameters: params)
            animator.addAnimations {
                self.crystalInfoContainer.center = nearestCornerPosition
            }
            animator.startAnimation()
        }

        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "video"))
        captureSession.addOutput(dataOutput)

        toggleTorch(on: true)
    }

    // Handles Vision output.
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        guard let model = try? VNCoreMLModel(for: model) else { return }
        let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
            guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
            guard let firstObservation = results.first else { return }
            let name: String = firstObservation.identifier
            let acc: Int = Int(firstObservation.confidence * 100)
            DispatchQueue.main.async {
                self.crystalName.text = name
                self.accuracy.text = "Confidence: \(acc)%"
            }
        }
        try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
    }

    override var prefersStatusBarHidden: Bool {
        return true
    }
}
Error1: For-in loop requires 'UIView?' to conform to 'Sequence'; did you mean to unwrap optional?
Error2: Instance member 'velocity' cannot be used on type 'UIPanGestureRecognizer'; did you mean to use a value of this type instead?
The problem is that your panView method is wrong. You need to switch on the gesture recognizer's state: began, changed, or ended. Pan only while the gesture is changing. When the gesture ends, then and only then, animate the view into the nearest corner, as sketched below.
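
A minimal sketch of that approach, assuming crystalInfoContainer is the draggable view and a UIPanGestureRecognizer with this handler is attached to it (the 16-point inset and the spring parameters are illustrative choices, not part of the original code):

@objc func handlePan(_ recognizer: UIPanGestureRecognizer) {
    let draggedView: UIView = crystalInfoContainer

    switch recognizer.state {
    case .changed:
        // Follow the finger while the gesture is in progress.
        let translation = recognizer.translation(in: view)
        draggedView.center = CGPoint(x: draggedView.center.x + translation.x,
                                     y: draggedView.center.y + translation.y)
        recognizer.setTranslation(.zero, in: view)

    case .ended, .cancelled:
        // Only when the gesture ends: pick the nearest corner and animate to it.
        let inset: CGFloat = 16
        let snapArea = view.bounds.insetBy(dx: inset + draggedView.bounds.width / 2,
                                           dy: inset + draggedView.bounds.height / 2)
        let corners = [CGPoint(x: snapArea.minX, y: snapArea.minY),
                       CGPoint(x: snapArea.maxX, y: snapArea.minY),
                       CGPoint(x: snapArea.minX, y: snapArea.maxY),
                       CGPoint(x: snapArea.maxX, y: snapArea.maxY)]
        let target = corners.min(by: {
            hypot($0.x - draggedView.center.x, $0.y - draggedView.center.y) <
            hypot($1.x - draggedView.center.x, $1.y - draggedView.center.y)
        })!

        let params = UISpringTimingParameters(dampingRatio: 0.8)
        let animator = UIViewPropertyAnimator(duration: 0.4, timingParameters: params)
        animator.addAnimations { draggedView.center = target }
        animator.startAnimation()

    default:
        break
    }
}

You would register the recognizer once, for example in viewDidLoad, with crystalInfoContainer.addGestureRecognizer(UIPanGestureRecognizer(target: self, action: #selector(handlePan(_:)))).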

How to get around 'unarchiveObject(with:)' was deprecated in iOS 12.0 when setting an HKQueryAnchor?

I was using the code below to set an HKQueryAnchor when making an HKAnchoredObjectQuery; however, 'unarchiveObject(with:)' has been deprecated and I can't figure out how to write it with the new API.
private func getAnchor() -> HKQueryAnchor? {
    let encoded = UserDefaults.standard.data(forKey: AnchorKey)
    if encoded == nil {
        return nil
    }
    let anchor = NSKeyedUnarchiver.unarchiveObject(with: encoded!) as? HKQueryAnchor
    return anchor
}

private func saveAnchor(anchor: HKQueryAnchor) {
    let encoded = NSKeyedArchiver.archivedData(withRootObject: anchor)
    defaults.setValue(encoded, forKey: AnchorKey)
    defaults.synchronize()
}
This is what I came up with based on Martin R's link; does it look OK?
private func getAnchor() -> HKQueryAnchor? {
    let encoded = UserDefaults.standard.data(forKey: AnchorKey)
    guard let unwrappedEncoded = encoded else { return nil }
    guard let anchor = try? NSKeyedUnarchiver.unarchiveTopLevelObjectWithData(unwrappedEncoded as Data) as? HKQueryAnchor else {
        return nil
    }
    return anchor
}

private func saveAnchor(anchor: HKQueryAnchor) {
    do {
        let encoded = try NSKeyedArchiver.archivedData(withRootObject: anchor, requiringSecureCoding: false)
        defaults.setValue(encoded, forKey: AnchorKey)
        defaults.synchronize()
    } catch {
        return
    }
}
Try using JSONDecoder and JSONEncoder to get the data to and from the HKQueryAnchor instance, i.e.
private func getAnchor() -> HKQueryAnchor? {
    guard let encoded = UserDefaults.standard.data(forKey: AnchorKey) else {
        return nil
    }
    let anchor = try? JSONDecoder().decode(HKQueryAnchor.self, from: encoded)
    return anchor
}

private func saveAnchor(anchor: HKQueryAnchor) {
    if let encoded = try? JSONEncoder().encode(anchor) {
        defaults.setValue(encoded, forKey: AnchorKey)
        defaults.synchronize()
    }
}
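
One caveat worth flagging: HKQueryAnchor conforms to NSSecureCoding rather than Codable, so if the JSON route does not compile, the non-deprecated keyed-archiver API is the other option. A minimal sketch, assuming the same AnchorKey and defaults used above:

private func getAnchor() -> HKQueryAnchor? {
    guard let encoded = UserDefaults.standard.data(forKey: AnchorKey) else { return nil }
    // Secure-coding replacement for the deprecated unarchiveObject(with:).
    return try? NSKeyedUnarchiver.unarchivedObject(ofClass: HKQueryAnchor.self, from: encoded)
}

private func saveAnchor(anchor: HKQueryAnchor) {
    guard let encoded = try? NSKeyedArchiver.archivedData(withRootObject: anchor, requiringSecureCoding: true) else { return }
    defaults.set(encoded, forKey: AnchorKey)
}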
