In my project I'm using ARKit to detect a specific image; when it is detected, the app shows its information. If I've already scanned the image and want to rescan it to see the information again, it doesn't work. This is the code I use for the image recognition:
override func viewDidLoad() {
    super.viewDidLoad()

    sceneView.delegate = self
    sceneView.showsFPS = true
    sceneView.showsNodeCount = true

    if let scene = SKScene(fileNamed: "Scene") {
        sceneView.presentScene(scene)
    }

    // Load the reference images from the "image" group in the asset catalog
    guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "image", bundle: nil) else {
        fatalError("Missing expected asset catalog resources.")
    }

    let configuration = ARWorldTrackingConfiguration()
    configuration.detectionImages = referenceImages
    sceneView.session.run(configuration, options: [.resetTracking, .removeExistingAnchors])
}
// MARK: - ARSKViewDelegate
func view(_ view: ARSKView, nodeFor anchor: ARAnchor) -> SKNode? {
    if let imageAnchor = anchor as? ARImageAnchor,
       let referenceImageName = imageAnchor.referenceImage.name,
       let scannedImage = self.images[referenceImageName] {
        self.selectedImage = scannedImage
        self.performSegue(withIdentifier: "showImageInformation", sender: self)
    }
    return nil
}
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
    if segue.identifier == "showImageInformation" {
        if let imageInformationVC = segue.destination as? ImageInformationViewController,
           let actualSelectedImage = selectedImage {
            imageInformationVC.imageInformation = actualSelectedImage
        }
    }
}
The only way to get the same image detected again is to reset your current session.
Example:
func resetExperience(session: ARSession, configuration: ARWorldTrackingConfiguration) {
    guard let referenceImages = ARReferenceImage.referenceImages(inGroupNamed: "image", bundle: nil) else {
        fatalError("Missing expected asset catalog resources.")
    }
    configuration.detectionImages = referenceImages
    session.run(configuration, options: [.resetTracking, .removeExistingAnchors])
}
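For example, a sketch of one place you might call it, assuming you return to the AR screen after dismissing the information view:
// Sketch: re-arm image detection whenever the AR screen reappears.
override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
    resetExperience(session: sceneView.session, configuration: ARWorldTrackingConfiguration())
}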
And some general info: ARWorldTrackingConfiguration.
Hope it helps!
Related
I'm new to Core ML model implementation. I'm working on an example of ARKit with Core ML. I have created a Core ML model with a set of images. There are two folders: one (named 'James') in which I have 3 images of my friend from the front, left, and right angles, and another (named 'Unknown') where I have some random faces. I set the maximum iterations to 20, and from the augmented-data options I selected only crop.
I have the following code where I'm integrating that model. My requirement is that when I scan my friend's face it should display his name, as specified in the Core ML model, and when any other random face is scanned it should display "Unknown" or some text like "This is not me".
With this code, when I scan my friend's face it doesn't show his name as James, and when I scan a random face it doesn't show Unknown either. Where exactly am I going wrong? What is the issue?
let sceneView = ARSCNView(frame: UIScreen.main.bounds)

override func viewDidLoad() {
    super.viewDidLoad()
    sceneView.delegate = self
    sceneView.showsStatistics = true
    guard ARFaceTrackingConfiguration.isSupported else { return }
    let configuration = ARFaceTrackingConfiguration()
    configuration.isLightEstimationEnabled = true
    sceneView.session.run(configuration, options: [.resetTracking, .removeExistingAnchors])
    view.addSubview(sceneView)
}
}
extension ViewController: ARSCNViewDelegate {

    func renderer(_ renderer: SCNSceneRenderer, nodeFor anchor: ARAnchor) -> SCNNode? {
        guard let device = sceneView.device else {
            return nil
        }
        let faceGeometry = ARSCNFaceGeometry(device: device)
        let node = SCNNode(geometry: faceGeometry)
        node.geometry?.firstMaterial?.fillMode = .lines
        return node
    }

    func renderer(_ renderer: SCNSceneRenderer, didUpdate node: SCNNode, for anchor: ARAnchor) {
        guard let faceAnchor = anchor as? ARFaceAnchor,
              let faceGeometry = node.geometry as? ARSCNFaceGeometry else {
            return
        }
        faceGeometry.update(from: faceAnchor.geometry)

        let text = SCNText(string: "", extrusionDepth: 2)
        let font = UIFont(name: "Avenir-Heavy", size: 20)
        text.font = font
        let material = SCNMaterial()
        material.diffuse.contents = UIColor.green
        text.materials = [material]
        text.firstMaterial?.isDoubleSided = true

        let textNode = SCNNode(geometry: faceGeometry)
        textNode.position = SCNVector3(-0.1, -0.1, -0.5)
        print(textNode.position)
        textNode.scale = SCNVector3(0.002, 0.002, 0.002)
        textNode.geometry = text

        guard let model = try? VNCoreMLModel(for: FaceRecognitionPerson_1().model) else {
            fatalError("Unable to load model")
        }
        let coreMlRequest = VNCoreMLRequest(model: model) { [weak self] request, error in
            guard let results = request.results as? [VNClassificationObservation],
                  let topResult = results.first
            else {
                fatalError("Unexpected results")
            }
            DispatchQueue.main.async { [weak self] in
                print("Identifier Received //: ===> \(topResult.identifier)")
                text.string = topResult.identifier
                if topResult.identifier != "James" {
                    print("**===Known User Detected**===")
                }
                self!.sceneView.scene.rootNode.addChildNode(textNode)
                self!.sceneView.autoenablesDefaultLighting = true
            }
        }

        guard let pixelBuffer = self.sceneView.session.currentFrame?.capturedImage else { return }
        let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:])
        DispatchQueue.global().async {
            do {
                try handler.perform([coreMlRequest])
            } catch {
                print(error)
            }
        }
    }
}
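One detail worth flagging in the snippet above, separate from the classification question: the VNCoreMLModel is rebuilt on every didUpdate callback, which fires many times per second. A minimal sketch of loading it once instead, as a property on the view controller (assuming the same generated FaceRecognitionPerson_1 class):
// Sketch: create the Vision model once, instead of on every frame update.
lazy var faceModel: VNCoreMLModel? = try? VNCoreMLModel(for: FaceRecognitionPerson_1().model)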
I am using SwiftUI and RealityKit to save my world map, along with one entity (a box) that I place in the world.
Inside my Coordinator I have the following code to create a box and add it to the anchor entity.
@objc func onTap(_ recognizer: UITapGestureRecognizer) {
    guard let arView = arView else {
        return
    }
    let location = recognizer.location(in: arView)
    let results = arView.raycast(from: location, allowing: .estimatedPlane, alignment: .horizontal)
    if let result = results.first {
        let arAnchor = ARAnchor(name: "boxAnchor", transform: result.worldTransform)
        let anchorEntity = AnchorEntity(anchor: arAnchor)
        let box = ModelEntity(mesh: MeshResource.generateBox(size: 0.3), materials: [SimpleMaterial(color: .green, isMetallic: true)])
        arView.session.add(anchor: arAnchor)
        anchorEntity.addChild(box)
        arView.scene.addAnchor(anchorEntity)
    }
}
When I click the Save button, it saves the world map:
func saveWorldMap() {
    guard let arView = arView else {
        return
    }
    arView.session.getCurrentWorldMap { worldMap, error in
        if let error = error {
            print(error)
            return
        }
        if let worldMap = worldMap {
            guard let data = try? NSKeyedArchiver.archivedData(withRootObject: worldMap, requiringSecureCoding: true) else {
                return
            }
            // Save the data into user defaults
            let userDefaults = UserDefaults.standard
            userDefaults.set(data, forKey: "worldMap")
            userDefaults.synchronize()
        }
    }
}
And finally, the loadWorldMap function is supposed to load the map with the anchors and the entities attached to the anchors. Unfortunately, it does load the ARAnchors, but there are no entities attached to them; the main reason is that I attached the entity to the AnchorEntity and not to the ARAnchor. How can I save an entity, like the box, attached to the ARAnchor?
func loadWorldMap() {
    guard let arView = arView else {
        return
    }
    let userDefaults = UserDefaults.standard
    if let data = userDefaults.data(forKey: "worldMap") {
        print(data)
        print("loading world map")
        guard let worldMap = try? NSKeyedUnarchiver.unarchivedObject(ofClass: ARWorldMap.self, from: data) else {
            return
        }
        for anchor in worldMap.anchors {
            print(anchor.name)
        }
        let configuration = ARWorldTrackingConfiguration()
        configuration.initialWorldMap = worldMap
        configuration.planeDetection = .horizontal
        arView.session.run(configuration)
    }
}
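A common way to handle this, sketched below rather than confirmed against your project: an ARWorldMap persists only the ARAnchors, so the RealityKit entities have to be rebuilt when the restored anchors are delivered to the session delegate. Assuming the Coordinator is set as the ARSession delegate (arView.session.delegate), something like:
// Sketch: rebuild the box for every restored "boxAnchor" ARAnchor.
extension Coordinator: ARSessionDelegate {
    func session(_ session: ARSession, didAdd anchors: [ARAnchor]) {
        guard let arView = arView else { return }
        for anchor in anchors where anchor.name == "boxAnchor" {
            let anchorEntity = AnchorEntity(anchor: anchor)
            let box = ModelEntity(mesh: MeshResource.generateBox(size: 0.3), materials: [SimpleMaterial(color: .green, isMetallic: true)])
            anchorEntity.addChild(box)
            arView.scene.addAnchor(anchorEntity)
        }
    }
}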
I am able to sign a PDF, but when I try to save it, the updated PDF file is not saved.
Code:
override func viewDidAppear(_ animated: Bool) {
    super.viewDidAppear(animated)
    pdfContainerView.usePageViewController(true, withViewOptions: nil)
    guard let signatureImage = signatureImage, let page = pdfContainerView.currentPage else { return }
    let pageBounds = page.bounds(for: .cropBox)
    let imageBounds = CGRect(x: pageBounds.midX, y: pageBounds.midY, width: 200, height: 100)
    let imageStamp = ImageStampAnnotation(with: signatureImage, forBounds: imageBounds, withProperties: nil)
    page.addAnnotation(imageStamp)
    btnSaveSign.isHidden = false
}
func setupPdfView(url: String) {
    if let documentURL = URL(string: url),
       let data = try? Data(contentsOf: documentURL),
       let document = PDFDocument(data: data) {
        // Set document to the view, center it, and set background color
        //pdfContainerView.displayMode = .singlePageContinuous
        pdfContainerView.document = document
        pdfContainerView.autoScales = true
        pdfContainerView.displayDirection = .horizontal
        pdfContainerView.backgroundColor = UIColor.white
        btnNewSign.isHidden = false
        lblPlaceholder.isHidden = true
        let panAnnotationGesture = UIPanGestureRecognizer(target: self, action: #selector(didPanAnnotation(sender:)))
        pdfContainerView.addGestureRecognizer(panAnnotationGesture)
    }
}
@objc func didPanAnnotation(sender: UIPanGestureRecognizer) {
    let touchLocation = sender.location(in: pdfContainerView)
    guard let page = pdfContainerView.page(for: touchLocation, nearest: true)
    else {
        return
    }
    let locationOnPage = pdfContainerView.convert(touchLocation, to: page)
    switch sender.state {
    case .began:
        guard let annotation = page.annotation(at: locationOnPage) else {
            return
        }
        if annotation.isKind(of: ImageStampAnnotation.self) {
            currentlySelectedAnnotation = annotation
        }
    case .changed:
        guard let annotation = currentlySelectedAnnotation else {
            return
        }
        let initialBounds = annotation.bounds
        // Set the center of the annotation to the spot of our finger
        annotation.bounds = CGRect(x: locationOnPage.x - (initialBounds.width / 2), y: locationOnPage.y - (initialBounds.height / 2), width: initialBounds.width, height: initialBounds.height)
        //print("move to \(locationOnPage)")
    case .ended, .cancelled, .failed:
        currentlySelectedAnnotation = nil
    default:
        break
    }
}
@objc func clickButton() {
    let importMenu = UIDocumentPickerViewController(documentTypes: [String(kUTTypePDF)], in: .import)
    importMenu.delegate = self
    importMenu.modalPresentationStyle = .formSheet
    self.present(importMenu, animated: true, completion: nil)
}
public func documentPicker(_ controller: UIDocumentPickerViewController, didPickDocumentsAt urls: [URL]) {
    guard let myURL = urls.first else {
        return
    }
    print("import result : \(myURL)")
    self.pdfURLToSave = "\(myURL)"
    let pdfView = PDFView(frame: view.frame)
    title = "My PDF Viewer"
    setupPdfView(url: "\(myURL)")
}

func documentInteractionControllerViewControllerForPreview(_ controller: UIDocumentInteractionController) -> UIViewController {
    return self // or use return self.navigationController for fetching app navigation bar colour
}

public func documentMenu(_ documentMenu: UIDocumentPickerViewController, didPickDocumentPicker documentPicker: UIDocumentPickerViewController) {
    documentPicker.delegate = self
    present(documentPicker, animated: true, completion: nil)
}
Code snippet for saving the file:
@IBAction func btnSaveAction(_ sender: Any) {
    if let documentURL = URL(string: self.pdfURLToSave),
       let data = try? Data(contentsOf: documentURL),
       let document = PDFDocument(data: data) {
        if let data = document.dataRepresentation() {
            let paths = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)
            let documentsDirectory = paths[0] // Get documents folder
            let folderPathUrl = URL(fileURLWithPath: documentsDirectory).appendingPathComponent("SignApp.pdf")
            if FileManager.default.fileExists(atPath: folderPathUrl.path) {
                try? FileManager.default.removeItem(at: folderPathUrl)
            }
            pdfContainerView.document?.write(toFile: "\(folderPathUrl)")
        }
    }
}
ImageStampAnnotation Class
class ImageStampAnnotation: PDFAnnotation {
    var image: UIImage!

    // A custom init that sets the type to Stamp on default and assigns our Image variable
    init(with image: UIImage!, forBounds bounds: CGRect, withProperties properties: [AnyHashable: Any]?) {
        super.init(bounds: bounds, forType: PDFAnnotationSubtype.stamp, withProperties: properties)
        self.image = image
    }

    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override func draw(with box: PDFDisplayBox, in context: CGContext) {
        // Get the CGImage of our image
        guard let cgImage = self.image.cgImage else { return }
        // Draw our CGImage in the context of our PDFAnnotation bounds
        context.draw(cgImage, in: self.bounds)
    }
}
Error:
CGPDFContextCreate: failed to create PDF context delegate..
It's not possible for me to edit your code directly to show a proper solution, since your code isn't complete, but here is a short coordinator class, including your pan-annotation code, that saves an updated file each time a pan gesture is completed:
class Coordinator: NSObject {
    var pdfView: PDFView?
    var currentlySelectedAnnotation: PDFAnnotation?

    func getDocumentsDirectory() -> URL {
        let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
        let documentsDirectory = paths[0]
        return documentsDirectory
    }

    func save() {
        guard let pdfView = pdfView else {
            return
        }
        let url = getDocumentsDirectory().appendingPathComponent("SavedPDF.pdf")
        pdfView.document?.write(to: url)
        print("Wrote pdf to: ", url)
    }

    @objc func didPanAnnotation(sender: UIPanGestureRecognizer) {
        guard let pdfContainerView = pdfView else {
            fatalError()
        }
        let touchLocation = sender.location(in: pdfContainerView)
        guard let page = pdfContainerView.page(for: touchLocation, nearest: true)
        else {
            return
        }
        let locationOnPage = pdfContainerView.convert(touchLocation, to: page)
        switch sender.state {
        case .began:
            guard let annotation = page.annotation(at: locationOnPage) else {
                return
            }
            print("Set")
            if annotation.isKind(of: ImageStampAnnotation.self) {
                currentlySelectedAnnotation = annotation
            }
        case .changed:
            guard let annotation = currentlySelectedAnnotation else {
                return
            }
            let initialBounds = annotation.bounds
            // Set the center of the annotation to the spot of our finger
            annotation.bounds = CGRect(x: locationOnPage.x - (initialBounds.width / 2), y: locationOnPage.y - (initialBounds.height / 2), width: initialBounds.width, height: initialBounds.height)
            //print("move to \(locationOnPage)")
        case .ended, .cancelled, .failed:
            currentlySelectedAnnotation = nil
            save()
        default:
            break
        }
    }
}
For context, if it helps, this is how I set up the view controller and the PDFView initially:
let vc = UIViewController()
let pdfContainerView = PDFView()
let fileUrl = Bundle.main.url(forResource: "High Noon - SCORE", withExtension: "pdf")!
let pdfDocument = PDFDocument(url: fileUrl)
pdfContainerView.document = pdfDocument
pdfContainerView.autoScales = true
vc.view.addSubview(pdfContainerView)
let panAnnotationGesture = UIPanGestureRecognizer(target: context.coordinator, action: #selector(context.coordinator.didPanAnnotation(sender:)))
pdfContainerView.addGestureRecognizer(panAnnotationGesture)
let page = pdfContainerView.currentPage!
let pageBounds = page.bounds(for: .cropBox)
let imageBounds = CGRect(x: pageBounds.midX, y: pageBounds.midY, width: 200, height: 100)
let imageStamp = ImageStampAnnotation(with: UIImage(systemName: "pencil"), forBounds: imageBounds, withProperties: nil)
page.addAnnotation(imageStamp)
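If it helps to confirm the write succeeded, you can reload the file from the same location. A quick sketch, reusing the SavedPDF.pdf name from save() above:
// Sketch: read the saved PDF back and print its page count.
let documentsDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let savedURL = documentsDirectory.appendingPathComponent("SavedPDF.pdf")
if let reloaded = PDFDocument(url: savedURL) {
    print("Reloaded saved PDF with \(reloaded.pageCount) page(s)")
}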
I am trying to write a function that allows a user to set a new profile image: the new image is uploaded and the old image is removed from Firebase Storage.
I have two functions that do this, and they work individually. However, if I run the upload after the delete function, the new image does not upload: even though I get a success message in the console, nothing appears in Storage. Ideally I would like to remove the old image first and then set the new one, and I have tried doing this multiple ways: completion handlers, adding delays, but nothing has worked. I now even have two buttons, one controlling each function, to test this, and it is still not working. What am I missing? Any help would be great, as I've spent hours racking my brains over this!
Here is my complete code for the VC:
//
// LandingVC.swift
// Login
//
// Created by George Woolley on 07/11/2017.
// Copyright © 2017 George Woolley. All rights reserved.
//
import UIKit
import FBSDKLoginKit
import SwiftKeychainWrapper
import Firebase
class MyAccountVC: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    @IBOutlet weak var profilePictureImg: UIImageView!
    @IBOutlet weak var usernameField: UILabel!
    @IBOutlet weak var saveButton: UIButton!
    @IBOutlet weak var changeProfilePicButton: UIButton!

    let picker = UIImagePickerController()
    let myUID = KeychainWrapper.standard.string(forKey: "uid")

    override func viewDidLoad() {
        super.viewDidLoad()
        picker.delegate = self
        if myUID == nil {
            print("You are not logged in")
        } else {
            let ref = DataService.ds.DBCurrentUser
            ref.child("MyDetails").observe(.value, with: { (snapshot) in
                if let snapshots = snapshot.children.allObjects as? [DataSnapshot] {
                    for snap in snapshots {
                        if snap.key == "username" {
                            self.usernameField.text = snap.value as? String
                        }
                        if snap.key == "profileImageURL" {
                            if let url = snap.value as? String {
                                let ref = Storage.storage().reference(forURL: url)
                                ref.getData(maxSize: 2 * 1024 * 1024, completion: { (data, error) in
                                    if error != nil {
                                        print("An error has occurred downloading image")
                                    } else {
                                        print("Image downloaded")
                                        if let imageData = data {
                                            if let img = UIImage(data: imageData) {
                                                self.profilePictureImg.image = img
                                            }
                                        }
                                    }
                                })
                            }
                        }
                    }
                }
            })
        }
    }

    func removeImgFromFirebaseStorage() {
        let ref = DataService.ds.DBCurrentUser.child("MyDetails")
        ref.observe(.value) { (snapshot) in
            if let snapshots = snapshot.children.allObjects as? [DataSnapshot] {
                for snap in snapshots {
                    if snap.key == "profileImageURL" {
                        if let url = snap.value as? String {
                            let img = Storage.storage().reference(forURL: url)
                            img.delete(completion: { (error) in
                                if error != nil {
                                    print("Error is \(String(describing: error))")
                                } else {
                                    print("Success")
                                }
                            })
                        }
                    }
                }
            }
        }
        saveButton.isHidden = false
        changeProfilePicButton.isHidden = true
    }

    func uploadImageToFirebase() {
        if let imageToUpload = profilePictureImg.image {
            if let imageData = UIImageJPEGRepresentation(imageToUpload, 0.2) {
                let metaData = StorageMetadata()
                metaData.contentType = "image/jpeg"
                let imageUID = UUID().uuidString
                DataService.ds.StorageProfile.child(imageUID).putData(imageData, metadata: metaData, completion: { (metadata, error) in
                    if error != nil {
                        print("Error occurred uploading profile image")
                    } else {
                        print("Success")
                        if let downloadURL = metadata?.downloadURL()?.absoluteString {
                            DataService.ds.DBCurrentUser.child("MyDetails").child("profileImageURL").setValue(downloadURL)
                        }
                    }
                })
            }
        }
        saveButton.isHidden = true
        changeProfilePicButton.isHidden = false
    }

    @IBAction func saveButonPressed(_ sender: Any) {
        uploadImageToFirebase()
    }

    @IBAction func changeProfilePicturePressed(_ sender: Any) {
        picker.allowsEditing = true
        picker.sourceType = .photoLibrary
        picker.mediaTypes = UIImagePickerController.availableMediaTypes(for: .photoLibrary)!
        present(picker, animated: true, completion: nil)
    }

    func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
        let chosenImage = info[UIImagePickerControllerOriginalImage] as! UIImage
        profilePictureImg.contentMode = .scaleAspectFill
        profilePictureImg.image = chosenImage
        dismiss(animated: true, completion: removeImgFromFirebaseStorage)
    }

    func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
        dismiss(animated: true, completion: nil)
    }

    @IBAction func logOffPressed(_ sender: Any) {
        KeychainWrapper.standard.removeObject(forKey: "uid")
        performSegue(withIdentifier: "loginVC", sender: nil)
        let fbLogin = FBSDKLoginManager()
        fbLogin.logOut()
        try! Auth.auth().signOut()
    }
}
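One pattern worth trying for the remove-then-upload ordering described above is to start the upload only from inside the delete completion handler, so the two operations cannot race. This is a minimal sketch, not a confirmed fix; it reuses the question's own DataService names, and replaceProfileImage is a hypothetical helper:
// Sketch: chain delete -> upload via the delete completion handler.
// `oldImageURL` is the URL currently stored under "profileImageURL".
func replaceProfileImage(oldImageURL: String, imageData: Data) {
    Storage.storage().reference(forURL: oldImageURL).delete { error in
        if let error = error {
            print("Delete failed: \(error)")
        }
        // The upload starts only after the delete round-trip has finished.
        let metaData = StorageMetadata()
        metaData.contentType = "image/jpeg"
        let newRef = DataService.ds.StorageProfile.child(UUID().uuidString)
        newRef.putData(imageData, metadata: metaData) { metadata, error in
            if error != nil {
                print("Error occurred uploading profile image")
            } else if let downloadURL = metadata?.downloadURL()?.absoluteString {
                DataService.ds.DBCurrentUser.child("MyDetails").child("profileImageURL").setValue(downloadURL)
            }
        }
    }
}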
I'm using the GPUImage framework for image filters.
I have applied ToonFilter() on the camera for a cartoon filter effect.
My issue is capturing an image from the camera while the ToonFilter effect is applied with GPUImage.
Below is the code that shows the ToonFilter with the GPUImage camera and tries to capture a new image with the same effect.
Please guide me on how I can capture an image with the same filter.
Note: I can successfully open the camera with the ToonFilter effect; the issue is only with capturing a new image with the filter effect.
Code:
import UIKit
import GPUImage
import AVFoundation

class SelfieFilterVC: UIViewController, UISplitViewControllerDelegate {

    @IBOutlet var filterSlider: UISlider?
    @IBOutlet var filterView: RenderView?

    let videoCamera: Camera?
    var blendImage: PictureInput?

    override func viewDidLoad() {
        super.viewDidLoad()
        self.filterOperation = FilterOperation(
            filter: { ToonFilter() },
            listName: "Toon",
            titleName: "Toon",
            sliderConfiguration: .disabled,
            sliderUpdateCallback: nil,
            filterOperationType: .singleInput
        )
        self.configureView()
    }

    required init(coder aDecoder: NSCoder) {
        do {
            videoCamera = try Camera(sessionPreset: AVCaptureSessionPreset640x480, location: .backFacing)
            videoCamera!.runBenchmark = true
        } catch {
            videoCamera = nil
            print("Couldn't initialize camera with error: \(error)")
        }
        super.init(coder: aDecoder)!
    }

    var filterOperation: FilterOperationInterface?

    @IBAction func btnCapture(_ sender: Any) {
        videoCamera?.startCapture()
        let pictureOutput = PictureOutput()
        pictureOutput.encodedImageFormat = .jpeg
        pictureOutput.encodedImageAvailableCallback = { imageData in
            if imageData != nil {
                let captureDetailVC = self.storyboard?.instantiateViewController(withIdentifier: "CaptureDetailVC") as! CaptureDetailVC
                captureDetailVC.aCaptureSelectedData = imageData
                self.show(captureDetailVC, sender: true)
            }
        }
    }

    @IBAction func btnBackAction(_ sender: Any) {
        self.dismiss(animated: true, completion: nil)
    }

    func configureView() {
        guard let videoCamera = videoCamera else {
            let errorAlertController = UIAlertController(title: NSLocalizedString("Error", comment: "Error"), message: "Couldn't initialize camera", preferredStyle: .alert)
            errorAlertController.addAction(UIAlertAction(title: NSLocalizedString("OK", comment: "OK"), style: .default, handler: nil))
            self.present(errorAlertController, animated: true, completion: nil)
            return
        }
        if let currentFilterConfiguration = self.filterOperation {
            self.title = currentFilterConfiguration.titleName

            // Configure the filter chain, ending with the view
            if let view = self.filterView {
                switch currentFilterConfiguration.filterOperationType {
                case .singleInput:
                    videoCamera.addTarget(currentFilterConfiguration.filter)
                    currentFilterConfiguration.filter.addTarget(view)
                case .blend:
                    videoCamera.addTarget(currentFilterConfiguration.filter)
                    self.blendImage = PictureInput(imageName: blendImageName)
                    self.blendImage?.addTarget(currentFilterConfiguration.filter)
                    self.blendImage?.processImage()
                    currentFilterConfiguration.filter.addTarget(view)
                case let .custom(filterSetupFunction: setupFunction):
                    currentFilterConfiguration.configureCustomFilter(setupFunction(videoCamera, currentFilterConfiguration.filter, view))
                }
                videoCamera.startCapture()
            }

            // Hide or display the slider, based on whether the filter needs it
            if let slider = self.filterSlider {
                switch currentFilterConfiguration.sliderConfiguration {
                case .disabled:
                    slider.isHidden = true
                // case let .Enabled(minimumValue, initialValue, maximumValue, filterSliderCallback):
                case let .enabled(minimumValue, maximumValue, initialValue):
                    slider.minimumValue = minimumValue
                    slider.maximumValue = maximumValue
                    slider.value = initialValue
                    slider.isHidden = false
                    self.updateSliderValue()
                }
            }
        }
    }

    @IBAction func updateSliderValue() {
        if let currentFilterConfiguration = self.filterOperation {
            switch (currentFilterConfiguration.sliderConfiguration) {
            case .enabled(_, _, _): currentFilterConfiguration.updateBasedOnSliderValue(Float(self.filterSlider!.value))
            case .disabled: break
            }
        }
    }

    override func viewWillDisappear(_ animated: Bool) {
        if let videoCamera = videoCamera {
            videoCamera.stopCapture()
            videoCamera.removeAllTargets()
            blendImage?.removeAllTargets()
        }
        super.viewWillDisappear(animated)
    }
}
The way you capture an image with GPUImage is with the method imageFromCurrentFramebuffer, which you can use on your filter.
It returns a UIImage, which you can then convert to whatever image format you need and save.
let image: UIImage = filter.imageFromCurrentFramebuffer()
if let data = UIImageJPEGRepresentation(image, 0.8) {
    // Write the JPEG into the app's Documents directory
    let documentsDirectory = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    let filename = documentsDirectory.appendingPathComponent("image.jpg")
    try? data.write(to: filename)
}
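As a side note on the question's btnCapture: in GPUImage2-style code, a PictureOutput only receives frames once it is added as a target of the filter chain; creating it alone is not enough. A rough sketch, with the wiring details treated as assumptions rather than a confirmed fix:
// Sketch: wire the PictureOutput into the existing chain so it actually
// receives a frame; onlyCaptureNextFrame grabs a single still.
let pictureOutput = PictureOutput()
pictureOutput.encodedImageFormat = .jpeg
pictureOutput.onlyCaptureNextFrame = true
pictureOutput.encodedImageAvailableCallback = { imageData in
    // Hand the JPEG data to the detail screen here.
}
filterOperation?.filter.addTarget(pictureOutput)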