Using ARKit, loaded 3D object is not anchored to the specific latitude/longitude location - iOS

I tried Apple's ARGeoTracking to load a 3D model of a house. I provided the latitude and longitude so the object would load at that specific location, but the object is not anchored to that lat/long location. The object also tends to move as the user moves.
Code for horizontal surface detection:
// Create a new AR Scene View.
sceneView = ARSCNView(frame: view.bounds)
sceneView.delegate = self
// Show statistics such as fps and timing information
sceneView.showsStatistics = true
view.addSubview(sceneView)
//configure scene view session to detect horizontal planes
let configuration = ARWorldTrackingConfiguration()
configuration.planeDetection = .horizontal
sceneView.session.run(configuration)
// Show feature points for debugging
sceneView.debugOptions = [ARSCNDebugOptions.showFeaturePoints]
// Responds to a user tap on the AR view.
@objc
func handleTapOnARView(_ sender: UITapGestureRecognizer) {
addGeoAnchor(at: projectLocation2D)
}
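Worth noting: ARGeoAnchor only resolves when the session is running ARGeoTrackingConfiguration, while the snippet above runs ARWorldTrackingConfiguration. A minimal sketch of a geo-tracking setup, assuming the RealityKit arView used in the later snippets:
// Sketch only: run a geo-tracking session (required for ARGeoAnchor),
// not a world-tracking one. `arView` is assumed from the snippets below.
func startGeoTrackingSession() {
    guard ARGeoTrackingConfiguration.isSupported else {
        print("Geo tracking is not supported on this device.")
        return
    }
    ARGeoTrackingConfiguration.checkAvailability { available, error in
        guard available else {
            print("Geo tracking unavailable here: \(error?.localizedDescription ?? "unknown reason")")
            return
        }
        DispatchQueue.main.async {
            let geoConfig = ARGeoTrackingConfiguration()
            self.arView.session.run(geoConfig)
        }
    }
}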
Adding an ARGeoAnchor and providing the latitude/longitude location in the params:
//MARK:- ADD GEOANCHOR
func addGeoAnchor(at location: CLLocationCoordinate2D, altitude: CLLocationDistance? = nil) {
var geoAnchor: ARGeoAnchor!
if let altitude = altitude {
geoAnchor = ARGeoAnchor(coordinate: location, altitude: altitude)
} else {
geoAnchor = ARGeoAnchor(coordinate: location)
}
addGeoAnchor(geoAnchor)
}
func addGeoAnchor(_ geoAnchor: ARGeoAnchor) {
// Don't add a geo anchor if Core Location isn't sure yet where the user is.
guard isGeoTrackingLocalized else {
alertUser(withTitle: "Cannot add geo anchor", message: "Unable to add geo anchor because geo tracking has not yet localized.")
return
}
arView.session.add(anchor: geoAnchor)
}
var isGeoTrackingLocalized: Bool {
if let status = arView.session.currentFrame?.geoTrackingStatus, status.state == .localized {
return true
}
return false
}
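For completeness, a hedged sketch of how the session reports when geo tracking actually localizes (ARSessionObserver, iOS 14+); the guard above only passes once this state reaches .localized:
// Sketch: react to geo-tracking state changes instead of polling currentFrame.
func session(_ session: ARSession, didChange geoTrackingStatus: ARGeoTrackingStatus) {
    switch geoTrackingStatus.state {
    case .localized:
        print("Geo tracking localized; safe to add geo anchors now.")
    case .initializing, .localizing:
        print("Still localizing: \(geoTrackingStatus.stateReason)")
    case .notAvailable:
        print("Geo tracking not available at this location.")
    @unknown default:
        break
    }
}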
func distanceFromDevice(_ coordinate: CLLocationCoordinate2D) -> Double {
if let devicePosition = locationManager.location?.coordinate {
return MKMapPoint(coordinate).distance(to: MKMapPoint(devicePosition))
} else {
return 0
}
}
// MARK: - ARSessionDelegate
func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {
for geoAnchor in anchors.compactMap({ $0 as? ARGeoAnchor }) {
// Effect a spatial-based delay to avoid blocking the main thread.
DispatchQueue.main.asyncAfter(deadline: .now() + (distanceFromDevice(geoAnchor.coordinate) / 10)) {
// Add an AR placemark visualization for the geo anchor.
self.arView.scene.addAnchor(Entity.placemarkEntity(for: geoAnchor))
}
// Add a visualization for the geo anchor in the map view.
let anchorIndicator = AnchorIndicator(center: geoAnchor.coordinate)
// Remember the geo anchor we just added
let anchorInfo = GeoAnchorWithAssociatedData(geoAnchor: geoAnchor, mapOverlay: anchorIndicator)
self.geoAnchors.append(anchorInfo)
}
}
The didAdd anchors delegate method is called, and here I provide the URL to load the model, since I am using the EchoAR SDK to load models into our app. The model loads, but it is not anchored to the specific geographic location. The value of modelURL comes from the EchoAR SDK that I have used; it returns the model in .usdz format.
extension Entity {
static func placemarkEntity(for arAnchor: ARAnchor) -> AnchorEntity {
let placemarkAnchor = AnchorEntity(anchor: arAnchor)
if let modelURL = modelURL, let entity = try? Entity.load(contentsOf: modelURL) {
placemarkAnchor.addChild(entity)
} else {
Utilities.sharedInstance.showError(message: "8C. ===Unable to render AR Model===")
}
return placemarkAnchor
}
}
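Not related to the anchoring itself, but loading a large house model synchronously inside the anchor callback can also stall the session. A sketch of asynchronous loading with RealityKit's Combine-based loader (the names here are assumptions, not part of the EchoAR SDK):
import Combine
import RealityKit

var modelLoadCancellable: AnyCancellable?

// Sketch: load the .usdz off the main thread, then attach it to the geo anchor's entity.
func attachModelAsync(from url: URL, to placemarkAnchor: AnchorEntity) {
    modelLoadCancellable = Entity.loadAsync(contentsOf: url)
        .sink(receiveCompletion: { completion in
            if case .failure(let error) = completion {
                print("Unable to load AR model: \(error)")
            }
        }, receiveValue: { entity in
            placemarkAnchor.addChild(entity)
        })
}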

Related

ARKit: Tracking a VisionCoreML-detected object

I'm new to iOS and I am currently refactoring code I got from a tutorial on Vision CoreML and ARKit that adds a node to the detected object.
Currently, if I move the object, the node does not move and follow the object. I can see from Apple's sample code for Recognizing Objects in Live Capture that they use layers and reposition them each time Vision detects the object at a new position, which is what I was hoping to replicate with an ARObject.
Is there a way I can achieve this with ARKit?
Any help around this would be greatly appreciated.
Thanks.
EDIT: Working code with solution
@IBOutlet var sceneView: ARSCNView!
private var viewportSize: CGSize!
private var previousAnchor: ARAnchor?
private var trackingNode: SCNNode!
lazy var objectDetectionRequest: VNCoreMLRequest = {
do {
let model = try VNCoreMLModel(for: yolov5s(configuration: MLModelConfiguration()).model)
let request = VNCoreMLRequest(model: model) { [weak self] request, error in
self?.processDetections(for: request, error: error)
}
return request
} catch {
fatalError("Failed to load Vision ML model.")
}
}()
func renderer(_ renderer: SCNSceneRenderer, willRenderScene scene: SCNScene, atTime time: TimeInterval) {
guard let capturedImage = sceneView.session.currentFrame?.capturedImage
else { return }
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: capturedImage, orientation: .leftMirrored, options: [:])
do {
try imageRequestHandler.perform([objectDetectionRequest])
} catch {
print("Failed to perform image request.")
}
}
func processDetections(for request: VNRequest, error: Error?) {
guard error == nil else {
print("Object detection error: \(error!.localizedDescription)")
return
}
guard let results = request.results else { return }
for observation in results where observation is VNRecognizedObjectObservation {
let objectObservation = observation as! VNRecognizedObjectObservation
let topLabelObservation = objectObservation.labels.first
print(topLabelObservation!.identifier + " " + "\(Int(topLabelObservation!.confidence * 100))%")
guard recognisedObject(topLabelObservation!.identifier) && topLabelObservation!.confidence > 0.9
else { continue }
let rect = VNImageRectForNormalizedRect(
objectObservation.boundingBox,
Int(self.sceneView.bounds.width),
Int(self.sceneView.bounds.height))
let midPoint = CGPoint(x: rect.midX, y: rect.midY)
let raycastQuery = self.sceneView.raycastQuery(from: midPoint,
allowing: .estimatedPlane,
alignment: .any)
let raycastArray = self.sceneView.session.raycast(raycastQuery!)
guard let raycastResult = raycastArray.first else { return }
let position = SCNVector3(raycastResult.worldTransform.columns.3.x,
raycastResult.worldTransform.columns.3.y,
raycastResult.worldTransform.columns.3.z)
if let _ = trackingNode {
trackingNode!.worldPosition = position
} else {
trackingNode = createNode()
trackingNode!.worldPosition = position
self.sceneView.scene.rootNode.addChildNode(trackingNode!)
}
}
}
private func recognisedObject(_ identifier: String) -> Bool {
return identifier == "remote" || identifier == "mouse"
}
private func createNode() -> SCNNode {
let sphereNode = SCNNode(geometry: SCNSphere(radius: 0.01))
sphereNode.geometry?.firstMaterial?.diffuse.contents = UIColor.purple
return sphereNode
}
private func loadSession() {
let configuration = ARWorldTrackingConfiguration()
configuration.planeDetection = []
sceneView.session.run(configuration)
}
override func viewDidLoad() {
super.viewDidLoad()
sceneView.delegate = self
viewportSize = sceneView.frame.size
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
loadSession()
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
sceneView.session.pause()
}
To be honest, the technologies you're using here cannot do that out of the box. YOLO (and any other object detection model you swap in for it) has no built-in concept of tracking the same object across a video. It looks for objects in a 2D bitmap and returns 2D bounding boxes for them. As either the camera or the object moves and you pass in the next capturedImage buffer, it will give you a new bounding box in the correct position, but it has no way of knowing whether it's the same instance of the object detected in a previous frame.
To make this work, you'll need to do some post-processing of those Vision results to determine whether or not it's the same object, and if so, manually move the anchor/mesh to match the new position. If you're confident there should only be one object in view at any given time, then it's pretty straightforward. If there will be multiple objects, you're venturing into complex (but still achievable) territory.
You could try to incorporate Vision Tracking, which might work though would depend on the nature and behavior of the tracked object.
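A hedged sketch of that Vision Tracking idea: seed a VNTrackObjectRequest from one detection, then update it per frame with a VNSequenceRequestHandler instead of re-matching detections (the names below are illustrative, not from the code above):
import Vision

let sequenceHandler = VNSequenceRequestHandler()
var trackingRequest: VNTrackObjectRequest?

// Seed the tracker once from a Vision detection result.
func startTracking(_ observation: VNDetectedObjectObservation) {
    let request = VNTrackObjectRequest(detectedObjectObservation: observation)
    request.trackingLevel = .accurate
    trackingRequest = request
}

// Call this per frame with the new capturedImage buffer.
func updateTracking(on pixelBuffer: CVPixelBuffer) {
    guard let request = trackingRequest else { return }
    try? sequenceHandler.perform([request], on: pixelBuffer)
    if let result = request.results?.first as? VNDetectedObjectObservation {
        let newBox = result.boundingBox
        print("Tracked object moved to normalized rect: \(newBox)")
        // Raycast from newBox's midpoint (as in the code above) and move trackingNode there.
    }
}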
Also, sceneView.hitTest() is deprecated; you should probably port that over to use ARSession.raycast().

Why is my programmatic screenshot capturing an out-of-date view?

I have a map view that allows users to draw perimeter lines, and I need to capture a screenshot when they are done, to save for record-keeping purposes. For some reason, my code captures the view state before the new overlay is added, even though I add the overlay before attempting the screenshot.
I use a separate view to capture the gesture, then convert the points to an overlay and add it to the map here, where points are the points gathered from the gesture tracking:
func convertFragments() {
var coordinates: [CLLocationCoordinate2D] = []
for point in points {
let coordinate = mapView.convert(point, toCoordinateFrom: drawingView)
coordinates.append(coordinate)
}
let polyline = MKPolyline(coordinates: coordinates, count: coordinates.count)
polyline.title = selectedTool.name
removeLines(for: selectedTool)
points = []
mapView.addOverlay(polyline)
incidentManager.update(for: selectedTool, value: polyline)
}
Then incidentManager.update(for:value:) makes a network call to save the information and calls a second method, log(eventType:), to capture the screenshot and update a log:
func update(for tool: MapTool, value: Any) {
func saveLine(_ line: MKPolyline, for key: String) {
incidentReference.setData([
key: IncidentManager.convertCLPoints(line.coordinates)
], merge: true)
}
switch tool {
case .hotZone, .innerPerimeter, .outerPerimeter:
let line = value as! MKPolyline
saveLine(line, for: tool.rawValue)
case .commandPost, .stagingArea:
let point = value as! CLLocationCoordinate2D
let geoPoint = GeoPoint(latitude: point.latitude, longitude: point.longitude)
incidentReference.setData([
tool.rawValue: geoPoint
], merge: true)
case .poi:
let annotation = value as! PerimeterMapAnnotation
let point = annotation.coordinate
let geoPoint = GeoPoint(latitude: point.latitude, longitude: point.longitude)
incidentReference.setData([
"pointsOfInterest": [annotation.title: geoPoint]
], merge: true)
default:
return
}
updateAddress(defaultValue: value)
let eventType = tool.eventType(didSet: true)
log(eventType: eventType)
}
I have an extension on UIView that captures the view, but for some reason it's not the most up-to-date version of the map view.
private func log(eventType: LogEventType) {
guard let mapImage = mapView.asImage() else { return }
let storageRef = Storage.storage().reference()
let imageRef = storageRef.child("\(incident.id)/\(UUID().uuidString).jpg")
let eventTime = Date()
let eventTimeString = eventTime.apiDateString
incidentReference.collection("eventLog").document(eventTimeString).setData([
"logEventType": eventType.rawValue,
"imageReference": imageRef.fullPath,
"date": Timestamp(date: eventTime)
])
upload(image: mapImage, at: imageRef)
if shouldUpdateCover {
shouldUpdateCover = false
incidentReference.setData([
"coverPhotoRef": imageRef.fullPath
], merge: true)
}
}
func asImage() -> UIImage? {
UIGraphicsBeginImageContextWithOptions(self.bounds.size, self.isOpaque, 0.0)
defer { UIGraphicsEndImageContext() }
if let context = UIGraphicsGetCurrentContext() {
self.layer.render(in: context)
let image = UIGraphicsGetImageFromCurrentImageContext()
return image
}
return nil
}
I have tried many variations of this "screenshot" method that I have found online, with no luck. When I debug, I can check the map and its overlays and see that they are updated beforehand.
Any ideas why the image captured here does not capture the changes made?
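No accepted answer is shown here, but one variant worth trying (an assumption, not confirmed in the thread) is drawHierarchy(in:afterScreenUpdates:), which commits pending screen updates before capturing and can pick up overlays that layer.render(in:) misses:
extension UIView {
    // Sketch: snapshot the view after pending updates have been rendered.
    func asImageAfterScreenUpdates() -> UIImage {
        let renderer = UIGraphicsImageRenderer(bounds: bounds)
        return renderer.image { _ in
            _ = drawHierarchy(in: bounds, afterScreenUpdates: true)
        }
    }
}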

Using Google Places API to find nearby places BY TYPE (parks) for iOS SwiftUI

I'm pretty new to Swift/iOS app development, so I'm struggling to figure this out. Basically, I'm trying to make it so that when the app opens (the first screen is the map), it automatically finds nearby places that are only parks around the user's current location and annotates those locations with markers on a map (Google Maps), using the Google Places API for iOS with up-to-date SwiftUI/Swift 5.0 and no storyboards. Table 1 types, in this case parks: https://developers.google.com/places/web-service/supported_types
So far, this is the code I have. It uses GMSPlaceLikelihood for places near the user's location. This is more an example of what I want to achieve; I want to use the Google Places API nearby search instead so I can show only parks. Image of the app running:
[Screenshot of the running app]
(The places found nearby are then listed in a table, as shown in the image. This list is just for show.)
Thanks in advance for any advice/help.
GoogleMapsView.swift:
@ObservedObject var locationManager = LocationManager()
@ObservedObject var place = PlacesManager()
func makeUIView(context: Self.Context) -> GMSMapView {
let camera = GMSCameraPosition.camera(withLatitude: locationManager.latitude, longitude: locationManager.longitude, zoom: 14)
let mapView = GMSMapView.map(withFrame: CGRect.zero, camera: camera)
mapView.isMyLocationEnabled = true
mapView.settings.rotateGestures = false
mapView.settings.tiltGestures = false
mapView.isIndoorEnabled = false
mapView.isTrafficEnabled = false
mapView.isBuildingsEnabled = false
mapView.settings.myLocationButton = true
place.currentPlacesList(completion: { placeLikelihoodList in
if let placeLikelihoodList = placeLikelihoodList {
print("total places: \(placeLikelihoodList.count)")
for likelihood in placeLikelihoodList {
let place = likelihood.place
let position = CLLocationCoordinate2D(latitude: place.coordinate.latitude, longitude: place.coordinate.longitude)
let marker = GMSMarker(position: position)
marker.title = place.name
marker.map = mapView
}
}
})
return mapView
}
func updateUIView(_ mapView: GMSMapView, context: Context) {
// let camera = GMSCameraPosition.camera(withLatitude: locationManager.latitude, longitude: locationManager.longitude, zoom: zoom)
// mapView.camera = camera
mapView.animate(toLocation: CLLocationCoordinate2D(latitude: locationManager.latitude, longitude: locationManager.longitude))
}
PlacesManager.swift:
class PlacesManager: NSObject, ObservableObject {
private var placesClient = GMSPlacesClient.shared()
@Published var places = [GMSPlaceLikelihood]()
override init() {
super.init()
currentPlacesList { (places) in
guard let places = places else {
return
}
self.places = places
}
}
func currentPlacesList(completion: @escaping (([GMSPlaceLikelihood]?) -> Void)) {
// Specify the place data types to return.
let fields: GMSPlaceField = GMSPlaceField(rawValue: UInt(GMSPlaceField.name.rawValue) |
UInt(GMSPlaceField.placeID.rawValue) | UInt(GMSPlaceField.types.rawValue) | UInt(GMSPlaceField.coordinate.rawValue))!
placesClient.findPlaceLikelihoodsFromCurrentLocation(withPlaceFields: fields, callback: {
(placeLikelihoodList: Array<GMSPlaceLikelihood>?, error: Error?) in
if let error = error {
print("An error occurred: \(error.localizedDescription)")
return
}
if let placeLikelihoodList = placeLikelihoodList {
for likelihood in placeLikelihoodList {
let place = likelihood.place
}
completion(placeLikelihoodList)
}
})
}
ContentView.swift:
var body: some View {
VStack {
GoogleMapsView()
.edgesIgnoringSafeArea(.top)
.frame(height: 400)
PlacesList()
}
.offset(y: 100)
}
https://developers.google.com/places/web-service/search
I suggest you visit this page; it describes the API in detail and shows examples of how to call it with different parameters.
After you get the response (I suggest getting it in JSON format and using the SwiftyJSON CocoaPod to parse it), populate the table.
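For illustration, here is a hedged sketch of that suggestion: a Nearby Search request with type=park, parsed with plain JSONSerialization (swap in SwiftyJSON if you prefer). YOUR_API_KEY and the 1500 m radius are placeholders, and each result is dropped onto the GMSMapView as a marker.
import Foundation
import CoreLocation
import GoogleMaps

// Sketch only: fetch parks near a coordinate via the Places Nearby Search web service
// and add a GMSMarker for each result.
func fetchNearbyParks(around coordinate: CLLocationCoordinate2D, on mapView: GMSMapView) {
    var components = URLComponents(string: "https://maps.googleapis.com/maps/api/place/nearbysearch/json")!
    components.queryItems = [
        URLQueryItem(name: "location", value: "\(coordinate.latitude),\(coordinate.longitude)"),
        URLQueryItem(name: "radius", value: "1500"),      // metres; placeholder
        URLQueryItem(name: "type", value: "park"),
        URLQueryItem(name: "key", value: "YOUR_API_KEY")  // placeholder
    ]
    URLSession.shared.dataTask(with: components.url!) { data, _, error in
        guard let data = data, error == nil,
              let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any],
              let results = json["results"] as? [[String: Any]] else { return }
        DispatchQueue.main.async {
            for result in results {
                guard let geometry = result["geometry"] as? [String: Any],
                      let location = geometry["location"] as? [String: Any],
                      let lat = location["lat"] as? Double,
                      let lng = location["lng"] as? Double else { continue }
                let marker = GMSMarker(position: CLLocationCoordinate2D(latitude: lat, longitude: lng))
                marker.title = result["name"] as? String
                marker.map = mapView
            }
        }
    }.resume()
}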

Mapbox navigation in iOS within my own mapView controller

I want to integrate Mapbox navigation in iOS. I can easily get the direction/route between two coordinates, and to get the navigation path from Mapbox we can use the code below:
let options = NavigationOptions(styles: nil)
let viewController = NavigationViewController(for: self.directionsRoute!)
viewController.delegate=self
self.present(viewController, animated: true, completion: nil)
But the problem is that I want to display the navigation in my map view, which is part of another view controller. I can do that by getting a direction/route and instructions, but I can't find any method that is called every second so that I can update the route instructions, as well as the route itself, in case the user leaves the path.
Let me know if I am missing anything or if any changes are needed.
Thanks in advance.
Here is my approach:
First, I get only the directions instructions from the Mapbox API, taking advantage of its free API-call quota, and draw the instructions on GMSMapView or MapKit, taking advantage of their good performance and memory management.
Podfile:
pod 'MapboxDirections.swift'
import MapboxDirections
This is done through the code below.
Have the properties for the Mapbox Directions client:
@IBOutlet weak var googleMapView: GMSMapView!
let locationManager = CLLocationManager()
let mapBoxDirections = Directions(accessToken: osmToken)
var path: GMSMutablePath?
Then make the actual API call:
private func drawRouteBetween(source: StopModel, destination: StopModel) {
guard let name = source.name, let lat = source.latitude, let lng = source.longitude else { return }
guard let nameDest = destination.name, let latDest = destination.latitude, let lngDest = destination.longitude else { return }
let waypoints = [
Waypoint(coordinate: CLLocationCoordinate2D(latitude: lat, longitude: lng), name: name),
Waypoint(coordinate: CLLocationCoordinate2D(latitude: latDest, longitude: lngDest), name: nameDest),
]
let options = RouteOptions(waypoints: waypoints, profileIdentifier: .automobile)
options.includesSteps = true
options.distanceMeasurementSystem = .metric
mapBoxDirections.calculate(options) { (waypoints, routes, error) in
guard error == nil else {
print("Error calculating directions: \(error!)")
return
}
if let route = routes?.first, let leg = route.legs.first {
for step in leg.steps {
if let coordinates = step.coordinates {
for (index, point) in coordinates.enumerated() {
let source = point
if index <= coordinates.count - 2 {
let destination = coordinates[index + 1]
self.drawPolyLine(source: source, destination: destination)
}
}
}
}
}
}
}
Note that StopModel is my custom-made CLLocation, so feel free to replace it with your own as long as it has the latitude and longitude.
Create the method that draws the polyline in your CLLocationManagerDelegate, as below:
private func drawPolyLine(source: CLLocationCoordinate2D, destination: CLLocationCoordinate2D){
path?.add(source)
path?.add(destination)
let polyLine = GMSPolyline(path: path)
polyLine.strokeWidth = 4 // width of your choice
polyLine.strokeColor = .red // color of your choice
polyLine.map = googleMapView
}
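One assumption to double-check in this sketch: path is declared as an optional and never assigned above, so path?.add(_:) would silently do nothing until the path is created, for example before drawing the first segment:
// Hypothetical guard: the optional path must exist before points can be appended.
if path == nil {
    path = GMSMutablePath()
}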
Then take a look at the MapboxDirections.Route model and explore its properties; you will find very useful info inside it.
Then take advantage of the location manager's delegate callback that notifies you of location updates, instead of having a timer and calling it every second; this is the more efficient way:
func locationManager(_ manager: CLLocationManager, didUpdateLocations locations: [CLLocation]) {
/* do your business here */
}
Do not forget to set the location manager's delegate to self (or the class of your choice).
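A minimal sketch of that wiring, assuming the containing view controller conforms to CLLocationManagerDelegate:
override func viewDidLoad() {
    super.viewDidLoad()
    path = GMSMutablePath()                     // see the note above about initializing the path
    locationManager.delegate = self             // so didUpdateLocations fires
    locationManager.desiredAccuracy = kCLLocationAccuracyBest
    locationManager.requestWhenInUseAuthorization()
    locationManager.startUpdatingLocation()
}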
Maybe this helps a bit: you can easily add an observer for route-progress changes:
NotificationCenter.default.addObserver(self,
selector: #selector(progressDidChange(notification:)),
name: .routeControllerProgressDidChange,
object: navigationService.router)
You need a navigation service with your route, created like this:
let navigationService = MapboxNavigationService(route: route)
The function progressDidChange can do something like:
@objc func progressDidChange(notification: NSNotification) {
guard let routeProgress = notification.userInfo?[RouteControllerNotificationUserInfoKey.routeProgressKey] as? RouteProgress,
let location = notification.userInfo?[RouteControllerNotificationUserInfoKey.locationKey] as? CLLocation else {
return
}
// you have all the information you probably need in routeProgress, e.g.
let secondsRemaining = routeProgress.currentLegProgress.currentStepProgress.durationRemaining
...
}

MapKit: how to change annotation coordinates to the nearest address?

I have a navigation application I am working on, and one use of it is that it can calculate the average of all the annotation coordinates placed by the user (through a search table; each annotation is placed when they press a result) and find what you might call a middle point between all the annotations. This midpoint, however, only goes by coordinates at the moment, meaning that depending on where the user's current annotations are, the midpoint could wind up in the middle of a lake or a forest, which is not helpful. I want to find the nearest address to the coordinates of my middle point and redirect the annotation there instead. Here's how the annotation is created:
@IBAction func middleFinderButton(_ sender: Any) {
let totalLatitude = mapView.annotations.reduce(0) { $0 + $1.coordinate.latitude }
let totalLongitude = mapView.annotations.reduce(0) { $0 + $1.coordinate.longitude }
let averageLatitude = totalLatitude/Double(mapView.annotations.count)
let averageLongitude = totalLongitude/Double(mapView.annotations.count)
let centerPoint = MKPointAnnotation()
centerPoint.coordinate.latitude = averageLatitude
centerPoint.coordinate.longitude = averageLongitude
mapView.addAnnotation(centerPoint)
}
How can I get this annotation 'centerPoint' to adjust to the nearest address? Thanks.
I would just use a reverse geocode here, returning an MKPlacemark. The documentation suggests that normally just one placemark will be returned by the completion handler, on the main thread, so you can use the result straight away to update the UI. MKPlacemark conforms to the annotation protocol, so you can put it directly on the map:
func resolveAddress(for averageCoordinate: CLLocationCoordinate2D, completion: @escaping (MKPlacemark?) -> () ) {
let geocoder = CLGeocoder()
let averageLocation = CLLocation(latitude: averageCoordinate.latitude, longitude: averageCoordinate.longitude)
geocoder.reverseGeocodeLocation(averageLocation) { (placemarks, error) in
guard error == nil,
let placemark = placemarks?.first
else {
completion(nil)
return
}
completion(MKPlacemark(placemark: placemark))
}
}
@IBAction func middleFinderButton(_ sender: Any) {
    // your code to find the center annotation (centerPoint, as above)
    resolveAddress(for: centerPoint.coordinate) { placemark in
        if let placemark = placemark {
            self.mapView.addAnnotation(placemark)
        } else {
            self.mapView.addAnnotation(centerPoint)
        }
    }
}
