GPUImageVideoCamera Live Preview with Custom LUT using GPUImagePicture - iOS

I'm using Swift 3 with GPUImage. I have a LUT image file named lut_lookup.png, which I have successfully used with GPUImage on a still image: the lookup filter is applied and the result is displayed.
I'm now trying to apply the same LUT filter to the live camera view, but I cannot get it to work; it doesn't even display the camera.
I've supplied my code below; any help would be greatly appreciated. It's probably something simple I've missed or done wrong, but I can't seem to spot it.
import UIKit
import GPUImage

class LiveCameraVC: UIViewController {

    // MARK: - Variables
    let videoCamera: GPUImageVideoCamera? = {
        if let videoCamera = GPUImageVideoCamera(sessionPreset: AVCaptureSessionPreset640x480, cameraPosition: .back) {
            videoCamera.outputImageOrientation = .portrait
            videoCamera.horizontallyMirrorFrontFacingCamera = false
            videoCamera.horizontallyMirrorRearFacingCamera = false
            return videoCamera
        } else {
            print("GPUImageVideoCamera Nil")
            return nil
        }
    }()

    let filter: GPUImageLookupFilter = {
        let filter = GPUImageLookupFilter()
        filter.intensity = 1.0
        return filter
    }()

    // MARK: - UI
    let modifiedImageView: GPUImageView = {
        let imageView = GPUImageView.newAutoLayout()
        imageView.fillMode = kGPUImageFillModePreserveAspectRatioAndFill
        imageView.isUserInteractionEnabled = true
        return imageView
    }()

    // MARK: - Life Cycle
    override func viewDidLoad() {
        super.viewDidLoad()
        let tapGesture: UITapGestureRecognizer = UITapGestureRecognizer().bk_init { [unowned self] (sender: UIGestureRecognizer?, state: UIGestureRecognizerState, location: CGPoint) in
            print("Tapped")
            self.performLut()
        } as! UITapGestureRecognizer
        modifiedImageView.addGestureRecognizer(tapGesture)
    }

    override func loadView() {
        super.loadView()
        self.view.addSubview(modifiedImageView)
        modifiedImageView.autoPinEdgesToSuperviewEdges()
    }

    // MARK: - Private
    private func performLut() {
        print("performLut")
        if let videoCamera = videoCamera {
            if let lookupImageSource = GPUImagePicture(image: UIImage(named: "lut_lookup")) {
                videoCamera.addTarget(filter, atTextureLocation: 0)
                lookupImageSource.addTarget(filter, atTextureLocation: 1)
                filter.addTarget(modifiedImageView)
                videoCamera.startCapture()
            } else {
                print("GPUImagePicture Nil")
            }
        } else {
            print("GPUImageVideoCamera Nil")
        }
    }
}
Thanks

Solved it. I changed the code to this:
// Keeping the GPUImagePicture in a property also keeps it alive; the original
// local instance was deallocated as soon as performLut() returned.
let imagePicture: GPUImagePicture? = {
    if let imagePicture = GPUImagePicture(image: UIImage(named: "lut_lookup")) {
        return imagePicture
    } else {
        print("GPUImagePicture Nil")
        return nil
    }
}()

private func performLut() {
    if let videoCamera = videoCamera {
        if let imagePicture = imagePicture {
            videoCamera.addTarget(filter)
            imagePicture.addTarget(filter)
            // The missing piece: processImage() pushes the lookup texture into
            // the filter chain; without it the lookup filter never receives its
            // second input and nothing is rendered.
            imagePicture.processImage()
            filter.addTarget(modifiedImageView)
            videoCamera.startCapture()
        }
    }
}
Hopefully that helps someone!
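The same two rules (call processImage() and keep the source alive) apply if you swap LUTs at runtime. A minimal sketch, assuming imagePicture is changed to a var and a second bundled image named lut_lookup2 exists (both are assumptions, not part of the original post):
private func swapLut() {
    imagePicture?.removeAllTargets()
    guard let newLut = GPUImagePicture(image: UIImage(named: "lut_lookup2")) else { return }
    newLut.addTarget(filter, atTextureLocation: 1)
    newLut.processImage() // push the new lookup texture into the filter
    imagePicture = newLut // retain it, or the lookup texture is lost
}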

Data Sharing Between My App and App Extensions

I transfer data from the sharing extension to my main application with UserDefaults and open the application (goToApp()) after hitting the "post" button. However, the view of my app is not redrawn and the text remains "Share Extension Example". Here's how I'm trying to do it:
class ShareViewController: SLComposeServiceViewController {

    private var textString: String?

    override func isContentValid() -> Bool {
        if let currentMessage = contentText {
            self.textString = currentMessage
        }
        return true
    }

    override func viewDidLoad() {
        super.viewDidLoad()
    }

    override func didSelectPost() {
        UserDefaults.standard.set(self.textString!, forKey: "text")
        gotoApp()
        self.extensionContext!.completeRequest(returningItems: [], completionHandler: nil)
    }

    func gotoApp() {
        guard let url = URL(string: "example://") else { return }
        let selectorOpenURL = sel_registerName("openURL:")
        var responder: UIResponder? = self
        while responder != nil {
            if responder?.responds(to: selectorOpenURL) == true {
                responder?.perform(selectorOpenURL, with: url)
            }
            responder = responder?.next
        }
    }
}
And here is the app I am trying to transfer the data to:
class ViewController: UIViewController {

    private let mainVStack = UIStackView()
    private let backgroundView = UIImageView()
    private let titleLabel = UILabel()

    override func viewDidLoad() {
        super.viewDidLoad()
        configureMainStack()
        configureTitleLabel()
    }
}

// MARK: - UI Elements
private extension ViewController {
    func configureMainStack() {
        mainVStack.distribution = .fillProportionally
        mainVStack.embed(asSubviewTo: view, inset: 40)
    }

    func configureTitleLabel() {
        titleLabel.textAlignment = .center
        titleLabel.textColor = .blue
        if let text = UserDefaults.standard.object(forKey: "text") as? String {
            titleLabel.text = text
        } else {
            titleLabel.text = "Share Extension Example"
        }
        let titleContainerView = UIView()
        titleLabel.embedIn(titleContainerView, hInset: 0, vInset: 100)
        mainVStack.addArrangedSubview(titleContainerView)
    }
}
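Two things commonly bite in this setup, and both are worth ruling out (a hedged sketch, not a confirmed fix): UserDefaults.standard is per-process, so the extension and the app do not see each other's values unless both targets share an App Group; and the label is only read in viewDidLoad, so nothing refreshes it when the app returns to the foreground. The sketch below assumes an App Group identifier of "group.example.shared" (a placeholder that must match the capability on both targets) and that it lives in the same file as ViewController:
import UIKit

// Shared defaults via an App Group; each process's UserDefaults.standard
// is private to it, so the suite is what actually crosses the boundary.
let sharedDefaults = UserDefaults(suiteName: "group.example.shared")

// In ShareViewController.didSelectPost():
//     sharedDefaults?.set(textString, forKey: "text")

// In the main app, re-read the value whenever the app becomes active:
extension ViewController {
    func observeSharedText() {
        NotificationCenter.default.addObserver(
            forName: UIApplication.didBecomeActiveNotification,
            object: nil,
            queue: .main
        ) { [weak self] _ in
            if let text = sharedDefaults?.string(forKey: "text") {
                self?.titleLabel.text = text
            }
        }
    }
}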

How do I stop detecting an object in Swift?

I'm currently working on an iOS mobile project where objects are detected in a frame and then translated to speech to aid the visually impaired. My application already detects objects in a frame, but once it does, it doesn't stop detecting. When I try to convert the object name to speech, it keeps iterating over the same name over and over again.
For clarification: when I point my camera at a "chair", it logs over 100 entries for chairs, and the text-to-speech has to say those 100 "chair"s before moving on to the next object.
This is my viewController code:
import UIKit
import Vision
import CoreMedia
import AVFoundation

class ViewController: UIViewController {

    @IBOutlet weak var videoPreview: UIView!
    @IBOutlet weak var boxesView: DrawingBoundingBoxView!
    @IBOutlet weak var labelsTableView: UITableView!
    @IBOutlet weak var inferenceLabel: UILabel!
    @IBOutlet weak var etimeLabel: UILabel!
    @IBOutlet weak var fpsLabel: UILabel!

    let objectDectectionModel = MobileNetV2_SSDLite()

    // MARK: - Vision Properties
    var request: VNCoreMLRequest?
    var visionModel: VNCoreMLModel?
    var isInferencing = false

    // MARK: - AV Property
    var videoCapture: VideoCapture!
    let semaphore = DispatchSemaphore(value: 1)
    var lastExecution = Date()

    // MARK: - TableView Data
    var predictions: [VNRecognizedObjectObservation] = []

    // MARK: - Performance Measurement Property
    private let measure = Measure()
    let maf1 = MovingAverageFilter()
    let maf2 = MovingAverageFilter()
    let maf3 = MovingAverageFilter()

    // MARK: - View Controller Life Cycle
    override func viewDidLoad() {
        super.viewDidLoad()
        // setup the model
        setUpModel()
        // setup camera
        setUpCamera()
        // setup delegate for performance measurement
        measure.delegate = self
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        self.videoCapture.start()
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        self.videoCapture.stop()
    }

    // MARK: - Setup Core ML
    func setUpModel() {
        if let visionModel = try? VNCoreMLModel(for: objectDectectionModel.model) {
            self.visionModel = visionModel
            request = VNCoreMLRequest(model: visionModel, completionHandler: visionRequestDidComplete)
            request?.imageCropAndScaleOption = .scaleFill
        } else {
            fatalError("fail to create vision model")
        }
    }

    // MARK: - SetUp Video
    func setUpCamera() {
        videoCapture = VideoCapture()
        videoCapture.delegate = self
        videoCapture.fps = 30
        videoCapture.setUp(sessionPreset: .vga640x480) { success in
            if success {
                // add preview view on the layer
                if let previewLayer = self.videoCapture.previewLayer {
                    self.videoPreview.layer.addSublayer(previewLayer)
                    self.resizePreviewLayer()
                }
                // start video preview when setup is done
                self.videoCapture.start()
            }
        }
    }

    override func viewDidLayoutSubviews() {
        super.viewDidLayoutSubviews()
        resizePreviewLayer()
    }

    func resizePreviewLayer() {
        videoCapture.previewLayer?.frame = videoPreview.bounds
    }
}
// MARK: - VideoCaptureDelegate
extension ViewController: VideoCaptureDelegate {
    func videoCapture(_ capture: VideoCapture, didCaptureVideoFrame pixelBuffer: CVPixelBuffer?, timestamp: CMTime) {
        // the captured image from camera is contained on pixelBuffer
        if !self.isInferencing, let pixelBuffer = pixelBuffer {
            self.isInferencing = true
            // start of measure
            self.measure.start()
            // predict!
            self.predictUsingVision(pixelBuffer: pixelBuffer)
        }
    }
}

extension ViewController {
    func predictUsingVision(pixelBuffer: CVPixelBuffer) {
        guard let request = request else { fatalError() }
        // vision framework configures the input size of image following our model's input configuration automatically
        self.semaphore.wait()
        let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer)
        try? handler.perform([request])
    }

    // MARK: - Post-processing
    func visionRequestDidComplete(request: VNRequest, error: Error?) {
        self.measure.labell(with: "endInference")
        if let predictions = request.results as? [VNRecognizedObjectObservation] {
            self.predictions = predictions
            DispatchQueue.main.async {
                self.boxesView.predictedObjects = predictions
                self.labelsTableView.reloadData()
                // end of measure
                self.measure.end()
                self.isInferencing = false
            }
        } else {
            // end of measure
            self.measure.end()
            self.isInferencing = false
        }
        self.semaphore.signal()
    }
}

extension ViewController: UITableViewDataSource {
    func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int {
        return predictions.count
    }

    func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell {
        guard let cell = tableView.dequeueReusableCell(withIdentifier: "InfoCell") else {
            return UITableViewCell()
        }
        // Getting the detected object and translating it to speech.
        // This is where I face the problem: the objects keep iterating
        // over themselves.
        let result = predictions[indexPath.row].label ?? "N/A"
        // When I print(result) I get all the labels detected, but it does not stop.
        let utterance = AVSpeechUtterance(string: result)
        utterance.voice = AVSpeechSynthesisVoice(language: "en-GB")
        utterance.rate = 0.5
        let synthesizer = AVSpeechSynthesizer()
        synthesizer.speak(utterance)
        let rectString = predictions[indexPath.row].boundingBox.toString(digit: 2)
        let confidence = predictions[indexPath.row].labels.first?.confidence ?? -1
        let confidenceString = String(format: "%.3f", confidence)
        cell.textLabel?.text = predictions[indexPath.row].label ?? "N/A"
        cell.detailTextLabel?.text = "\(rectString), \(confidenceString)"
        return cell
    }
}

// MARK: - 📏(Performance Measurement) Delegate
extension ViewController: MeasureDelegate {
    func updateMeasure(inferenceTime: Double, executionTime: Double, fps: Int) {
        DispatchQueue.main.async {
            self.maf1.append(element: Int(inferenceTime * 1000.0))
            self.maf2.append(element: Int(executionTime * 1000.0))
            self.maf3.append(element: fps)
            self.inferenceLabel.text = "inference: \(self.maf1.averageValue) ms"
            self.etimeLabel.text = "execution: \(self.maf2.averageValue) ms"
            self.fpsLabel.text = "fps: \(self.maf3.averageValue)"
        }
    }
}

class MovingAverageFilter {
    private var arr: [Int] = []
    private let maxCount = 10

    public func append(element: Int) {
        arr.append(element)
        if arr.count > maxCount {
            arr.removeFirst()
        }
    }

    public var averageValue: Int {
        guard !arr.isEmpty else { return 0 }
        let sum = arr.reduce(0) { $0 + $1 }
        return Int(Double(sum) / Double(arr.count))
    }
}
It seems you call tableView.reloadData() on every frame, because visionRequestDidComplete is called for each frame. Thus cellForRowAt (and the AVSpeechSynthesizer inside it) gets called over and over, which produces the repeated speech.
You should re-evaluate whether you need to update your table view that often. Maybe you only need to update it when there are new observations? You could check for that using the predictions array in visionRequestDidComplete.
You might also want to use Apple's own VoiceOver system to read out UI elements. That's the standard approach for supporting visually impaired users, and it has the added benefit that the user can navigate within the table view and have the text of each cell read out accordingly.
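One way to act on the "only when there are new observations" suggestion, sketched against the poster's code (lastSpokenLabels and the shared synthesizer are assumed additions, and speech moves out of cellForRowAt):
// Assumed new properties on ViewController:
private var lastSpokenLabels: [String] = []
private let speechSynthesizer = AVSpeechSynthesizer()

func visionRequestDidComplete(request: VNRequest, error: Error?) {
    self.measure.labell(with: "endInference")
    if let predictions = request.results as? [VNRecognizedObjectObservation] {
        self.predictions = predictions
        let labels = predictions.compactMap { $0.labels.first?.identifier }
        DispatchQueue.main.async {
            self.boxesView.predictedObjects = predictions
            // Reload and speak only when the detected set actually changed;
            // AVSpeechSynthesizer queues utterances, so each label is spoken once.
            if labels != self.lastSpokenLabels {
                self.lastSpokenLabels = labels
                self.labelsTableView.reloadData()
                for label in labels {
                    let utterance = AVSpeechUtterance(string: label)
                    utterance.voice = AVSpeechSynthesisVoice(language: "en-GB")
                    utterance.rate = 0.5
                    self.speechSynthesizer.speak(utterance)
                }
            }
            self.measure.end()
            self.isInferencing = false
        }
    } else {
        self.measure.end()
        self.isInferencing = false
    }
    self.semaphore.signal()
}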

Crash on UICollectionViewCell with JWVideoView - Swift

A ViewController has a UICollectionView. One of the cells contains a JWVideoView. The app frequently crashes in prepareForReuse in this cell.
There is no useful info in the log, so I am having trouble figuring out the reason for the crash.
I've created an example project that demonstrates the crash; you can find it at https://github.com/fuxlud/JWExample
If the link between the cell and the videoView is removed, the crash does not happen.
import UIKit

class VideoArticleElementCollectionViewCell: UICollectionViewCell {

    // MARK: - Properties
    public var imageURL: String? { didSet { videoView?.imageURL = imageURL } }
    public var videoId: String? { didSet { videoView?.videoId = videoId } }

    @IBOutlet private var videoView: JWVideoView?

    // MARK: - Reuse
    override func prepareForReuse() {
        super.prepareForReuse() // Crashing here! (Thread 1: EXC_BAD_ACCESS (code=1, address=0x7e8))
        videoView?.stopPlayingVideo()
    }

    deinit {
        videoView?.stopPlayingVideo()
    }
}
import UIKit

class JWVideoView: UIView, JWPlayerDelegate {

    // MARK: - Properties
    public var imageURL: String?
    public var videoId: String? { didSet { setupPlayer() } }

    private var jwPlayer: JWPlayerController?
    private let jwPlayerURL = "https://content.jwplatform.com/manifests/"
    private var didPause = false

    // MARK: - Initialization
    override init(frame: CGRect) {
        super.init(frame: frame)
        setup()
    }

    convenience init() {
        self.init(frame: CGRect.zero)
    }

    required init?(coder aDecoder: NSCoder) {
        super.init(coder: aDecoder)
        setup()
    }

    // MARK: - Setup
    private func setup() {}

    private func setupPlayer() {
        guard let videoId = self.videoId else { return }
        let playerURL = jwPlayerURL + videoId + ".m3u8"
        let configuration: JWConfig = JWConfig(contentURL: playerURL)
        configuration.controls = true
        configuration.autostart = true
        // configuration.premiumSkin = JWPremiumSkinGlow
        configuration.image = imageURL
        jwPlayer = JWPlayerController(config: configuration)
        if let player = jwPlayer {
            player.forceFullScreenOnLandscape = true
            player.forceLandscapeOnFullScreen = true
            player.view?.autoresizingMask = [.flexibleHeight, .flexibleWidth]
            player.view?.frame = bounds
            player.delegate = self
            player.volume = 0.0
            if let view = player.view { addSubview(view) }
        }
    }

    // MARK: - Orientation
    private func enableAllOrientation(enable: Bool) {
        if let delegate = UIApplication.shared.delegate as? AppDelegate {
            // delegate.shouldEnableLandscape = enable
        }
    }

    // MARK: - API
    public func stopPlayingVideo() {
        enableAllOrientation(enable: false)
        if jwPlayer != nil {
            jwPlayer!.stop()
        }
    }

    // MARK: - JWPlayerDelegate
    internal func onFullscreen(_ status: Bool) {
        if status == false {
            let value = UIInterfaceOrientation.portrait.rawValue
            UIDevice.current.setValue(value, forKey: "orientation")
        }
    }

    internal func onPlayAttempt() {
        if jwPlayer != nil {
            enableAllOrientation(enable: true)
        }
    }

    internal func onPlay(_ oldValue: String) {
        if didPause {
            didPause = false
        }
    }

    internal func onPause(_ oldValue: String) {
        didPause = true
    }

    internal func onComplete() {
    }
}
Based on your example project, I saw the following issue inside your JWVideoView class: every time you set the videoId property, it initializes jwPlayer again and also re-adds the player view to the view hierarchy.
1. Solution (remove the player view and set the player to nil):
private func setupPlayer() {
    jwPlayer?.view?.removeFromSuperview()
    jwPlayer = nil

    guard let videoId = self.videoId else { return }
    let playerURL = jwPlayerURL + videoId + ".m3u8"
    let configuration: JWConfig = JWConfig(contentURL: playerURL)
    configuration.controls = true
    configuration.autostart = true
    configuration.image = imageURL
    jwPlayer = JWPlayerController(config: configuration)
    jwPlayer?.forceFullScreenOnLandscape = true
    jwPlayer?.forceLandscapeOnFullScreen = true
    jwPlayer?.view?.autoresizingMask = [.flexibleHeight, .flexibleWidth]
    jwPlayer?.view?.frame = bounds
    jwPlayer?.delegate = self
    jwPlayer?.volume = 0.0
    if let view = jwPlayer?.view {
        addSubview(view)
    }
}
2. Solution (keep the player and the view instance and reset the player's configuration):
private func setupPlayer() {
    guard let videoId = self.videoId else { return }
    let playerURL = jwPlayerURL + videoId + ".m3u8"
    let configuration: JWConfig = JWConfig(contentURL: playerURL)
    configuration.controls = true
    configuration.autostart = true
    configuration.image = imageURL
    if jwPlayer == nil {
        jwPlayer = JWPlayerController(config: configuration)
        jwPlayer?.forceFullScreenOnLandscape = true
        jwPlayer?.forceLandscapeOnFullScreen = true
        jwPlayer?.view?.autoresizingMask = [.flexibleHeight, .flexibleWidth]
        jwPlayer?.view?.frame = bounds
        jwPlayer?.delegate = self
        jwPlayer?.volume = 0.0
        if let view = jwPlayer?.view {
            addSubview(view)
        }
    } else {
        // Reset the configuration of the player here,
        // but I don't know how this is possible with jwPlayer.
    }
}
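Whichever variant is chosen, it can also help to tear the player down explicitly when the cell is recycled, so the JWPlayerController's view never outlives its cell. A sketch built only from the calls already shown above (tearDownPlayer is a hypothetical name, not a JW Player API):
// In JWVideoView: undo everything setupPlayer() did.
public func tearDownPlayer() {
    jwPlayer?.stop()
    jwPlayer?.delegate = nil
    jwPlayer?.view?.removeFromSuperview()
    jwPlayer = nil
}

// In VideoArticleElementCollectionViewCell:
override func prepareForReuse() {
    super.prepareForReuse()
    videoView?.tearDownPlayer()
}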

NSPopoverTouchBarItems in NSScrollView (NSTouchBar)

Is there a way to add an array of NSPopoverTouchBarItems into an NSScrollView?
Currently, my view hierarchy resembles the below list.
NSTouchBar
  NSCustomTouchBarItem
    NSScrollView
      NSStackView
        Array of NSButtons
The above hierarchy produces a scrollable row of buttons in the Touch Bar (screenshot omitted).
In sum, the end goal is to replace the array of NSButtons with NSPopoverTouchBarItems.
I believe what you need is an NSScrubber, which lets you scroll through (or fix the position of) multiple items, including NSPopoverTouchBarItems:
https://developer.apple.com/documentation/appkit/nsscrubber
Check out this repository for more information and sample code that might help:
https://github.com/loretoparisi/touchbar
import Cocoa

fileprivate extension NSTouchBar.CustomizationIdentifier {
    static let popoverBar = NSTouchBar.CustomizationIdentifier("com.TouchBarCatalog.popoverBar")
}

fileprivate extension NSTouchBarItem.Identifier {
    static let scrubberPopover = NSTouchBarItem.Identifier("com.TouchBarCatalog.TouchBarItem.scrubberPopover")
}

class PopoverScrubber: NSScrubber {
    var presentingItem: NSPopoverTouchBarItem?
}

class PopoverScrubberViewController: NSViewController {

    // MARK: - NSTouchBar
    override func makeTouchBar() -> NSTouchBar? {
        let touchBar = NSTouchBar()
        touchBar.delegate = self
        touchBar.customizationIdentifier = .popoverBar
        touchBar.defaultItemIdentifiers = [.scrubberPopover]
        touchBar.customizationAllowedItemIdentifiers = [.scrubberPopover]
        return touchBar
    }
}

// MARK: - NSTouchBarDelegate
extension PopoverScrubberViewController: NSTouchBarDelegate {
    func touchBar(_ touchBar: NSTouchBar, makeItemForIdentifier identifier: NSTouchBarItem.Identifier) -> NSTouchBarItem? {
        guard identifier == NSTouchBarItem.Identifier.scrubberPopover else { return nil }

        let popoverItem = NSPopoverTouchBarItem(identifier: identifier)
        popoverItem.collapsedRepresentationLabel = "Scrubber Popover"
        popoverItem.customizationLabel = "Scrubber Popover"

        let scrubber = PopoverScrubber()
        scrubber.register(NSScrubberTextItemView.self, forItemIdentifier: NSUserInterfaceItemIdentifier(rawValue: "TextScrubberItemIdentifier"))
        scrubber.mode = .free
        scrubber.selectionBackgroundStyle = .roundedBackground
        scrubber.delegate = self
        scrubber.dataSource = self
        scrubber.presentingItem = popoverItem

        popoverItem.collapsedRepresentation = scrubber
        popoverItem.popoverTouchBar = PopoverTouchBarSample(presentingItem: popoverItem)
        return popoverItem
    }
}

// MARK: - NSScrubber Data Source and Delegate
extension PopoverScrubberViewController: NSScrubberDataSource, NSScrubberDelegate {
    func numberOfItems(for scrubber: NSScrubber) -> Int {
        return 20
    }

    func scrubber(_ scrubber: NSScrubber, viewForItemAt index: Int) -> NSScrubberItemView {
        let itemView = scrubber.makeItem(withIdentifier: NSUserInterfaceItemIdentifier(rawValue: "TextScrubberItemIdentifier"), owner: nil) as! NSScrubberTextItemView
        itemView.textField.stringValue = String(index)
        return itemView
    }

    func scrubber(_ scrubber: NSScrubber, didSelectItemAt index: Int) {
        print("\(#function) at index \(index)")
        if let popoverScrubber = scrubber as? PopoverScrubber,
           let popoverItem = popoverScrubber.presentingItem {
            popoverItem.showPopover(nil)
        }
    }
}
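Note that PopoverTouchBarSample above comes from Apple's TouchBar Catalog sample project and isn't defined in the snippet. A minimal hypothetical stand-in, just enough to make the example self-contained, could look like this:
class PopoverTouchBarSample: NSTouchBar, NSTouchBarDelegate {
    private weak var presentingItem: NSPopoverTouchBarItem?
    private static let buttonIdentifier = NSTouchBarItem.Identifier("com.TouchBarCatalog.TouchBarItem.popoverButton")

    init(presentingItem: NSPopoverTouchBarItem? = nil) {
        self.presentingItem = presentingItem
        super.init()
        delegate = self
        defaultItemIdentifiers = [PopoverTouchBarSample.buttonIdentifier]
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    func touchBar(_ touchBar: NSTouchBar, makeItemForIdentifier identifier: NSTouchBarItem.Identifier) -> NSTouchBarItem? {
        // A single button that dismisses the popover again.
        let item = NSCustomTouchBarItem(identifier: identifier)
        item.view = NSButton(title: "Close", target: self, action: #selector(closePopover))
        return item
    }

    @objc private func closePopover() {
        presentingItem?.dismissPopover(nil)
    }
}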

Variable in renderer(_:didAdd:for:) not updating

I am working on an iPhone app that uses Core ML and ARKit simultaneously. Core ML is supposed to recognize a number, and ARKit should detect a vertical plane (i.e., a wall) and add some planes over that wall, with the content displayed on those planes depending on the recognized number.
Core ML is working 100%: every time I "change" the number, the topPrediction variable updates automatically (so far so good). The problem is that my variable in func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) does not update! The first number recognized by Core ML is correctly passed to the renderer function and works like a charm, but if I turn the camera to another number it still assumes it's the first one. As you can see in the code, I even tried making a func getGabNum() -> Int and calling it in the renderer (var num = getGabNum()), but I keep getting the warning "Variable 'num' was never mutated; consider changing to 'let' constant", which tells me something is not right. So here is my code; I hope you can help me, and thank you!
struct Room: Decodable {
    let id: Int?
    let num: Int?
    // Add schedules
    var horario = [Schedule]()
}

struct Schedule: Decodable {
    let id: Int?
    let hora_ini: Date?
    let hora_fim: Date?
    let descr: String?

    private enum CodingKeys: String, CodingKey {
        case id
        case hora_ini
        case hora_fim
        case descr
    }
}
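// Note: Schedule declares Date properties, but JSONDecoder's default
// .deferredToDate strategy only decodes numeric timestamps. If the API
// returns date/time strings, the decode further below will throw unless a
// strategy is configured, e.g. (the format string is an assumption about
// the API, not from the original post):
//
//     let decoder = JSONDecoder()
//     let formatter = DateFormatter()
//     formatter.dateFormat = "HH:mm:ss"
//     decoder.dateDecodingStrategy = .formatted(formatter)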
class ViewController: UIViewController, ARSCNViewDelegate {

    @IBOutlet weak var debugLabel: UILabel!
    @IBOutlet weak var debugTextView: UITextView!
    @IBOutlet weak var sceneView: ARSCNView!

    let dispatchQueueML = DispatchQueue(label: "com.hw.dispatchqueueml") // A Serial Queue
    var visionRequests = [VNRequest]()
    var room: Room?
    var room_array: [[Int]] = [[17, 0], [43, 0], [120, 0]]
    var teste = 0
    var num = -1

    override func viewDidLoad() {
        super.viewDidLoad()
        sceneView.delegate = self
        sceneView.showsStatistics = true
        let scene = SCNScene()
        sceneView.scene = scene
        configureLighting()
        guard let selectedModel = try? VNCoreMLModel(for: SalasMLv6().model) else {
            fatalError("Could not load model.")
        }
        let classificationRequest = VNCoreMLRequest(model: selectedModel, completionHandler: classificationCompleteHandler)
        classificationRequest.imageCropAndScaleOption = VNImageCropAndScaleOption.centerCrop // Crop from centre of images and scale to appropriate size.
        visionRequests = [classificationRequest]
        loopCoreMLUpdate()
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        setUpSceneView()
    }

    func setUpSceneView() {
        let configuration = ARWorldTrackingConfiguration()
        configuration.planeDetection = .vertical
        sceneView.session.run(configuration)
        sceneView.debugOptions = [ARSCNDebugOptions.showFeaturePoints]
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        sceneView.session.pause()
    }

    func configureLighting() {
        sceneView.automaticallyUpdatesLighting = true
        sceneView.autoenablesDefaultLighting = true
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }

    // MARK: - ARSCNViewDelegate
    func renderer(_ renderer: SCNSceneRenderer, updateAtTime time: TimeInterval) {
        DispatchQueue.main.async {
            // Do any desired updates to SceneKit here.
        }
    }

    func loopCoreMLUpdate() {
        dispatchQueueML.async {
            self.updateCoreML()
            self.loopCoreMLUpdate()
        }
    }

    func updateCoreML() {
        // Get Camera Image as RGB
        let pixbuff: CVPixelBuffer? = (sceneView.session.currentFrame?.capturedImage)
        if pixbuff == nil { return }
        let ciImage = CIImage(cvPixelBuffer: pixbuff!)
        // Prepare CoreML/Vision Request
        let imageRequestHandler = VNImageRequestHandler(ciImage: ciImage, options: [:])
        // Run Vision Image Request
        do {
            try imageRequestHandler.perform(self.visionRequests)
        } catch {
            print(error)
        }
    }

    func classificationCompleteHandler(request: VNRequest, error: Error?) {
        // Catch Errors
        if error != nil {
            print("Error: " + (error?.localizedDescription)!)
            return
        }
        guard let observations = request.results else {
            print("No results")
            return
        }
        // Get Classifications
        let classifications = observations[0...2] // top 3 results
            .compactMap({ $0 as? VNClassificationObservation })
            .map({ "\($0.identifier) \(String(format: " : %.2f", $0.confidence))" })
            .joined(separator: "\n")
        // Render Classifications
        DispatchQueue.main.async {
            // Display Debug Text on screen
            self.debugTextView.text = "TOP 3 PROBABILITIES: \n" + classifications
            // Display Top Symbol
            var symbol = "❌"
            var gabNum: Int?
            let topPrediction = classifications.components(separatedBy: "\n")[0]
            let topPredictionName = topPrediction.components(separatedBy: ":")[0].trimmingCharacters(in: .whitespaces)
            // Only display a prediction if confidence is above the threshold (5% here)
            let topPredictionScore: Float? = Float(topPrediction.components(separatedBy: ":")[1].trimmingCharacters(in: .whitespaces))
            if topPredictionScore != nil && topPredictionScore! > 0.05 {
                if topPredictionName == "120" {
                    symbol = "1️⃣2️⃣0️⃣"
                    gabNum = 120
                    self.teste = gabNum!
                }
                if topPredictionName == "43" {
                    symbol = "4️⃣3️⃣"
                    gabNum = 43
                    self.teste = gabNum!
                }
                if topPredictionName == "17" {
                    symbol = "1️⃣7️⃣"
                    gabNum = 17
                    self.teste = gabNum!
                }
            }
            if let gn = gabNum {
                // get room from REST
                let jsonURL = "someURL\(gn)"
                guard let url = URL(string: jsonURL) else {
                    return
                }
                URLSession.shared.dataTask(with: url) { (data, response, error) in
                    if error != nil {
                        print("error")
                        return
                    }
                    do {
                        self.room = try JSONDecoder().decode(Room.self, from: data!)
                    } catch {
                        print("Decoder Error")
                    }
                }.resume()
            }
            self.debugLabel.text = symbol
        }
    }

    // MARK: - HIDE STATUS BAR
    override var prefersStatusBarHidden: Bool { return true }

    func getGabNum() -> Int {
        return self.teste
    }

    func renderer(_ renderer: SCNSceneRenderer, didAdd node: SCNNode, for anchor: ARAnchor) {
        guard room != nil else {
            print("room == nil")
            return
        }
        guard let planeAnchor = anchor as? ARPlaneAnchor else {
            return
        }
        num = getGabNum()
        if num == room_array[0][0] && room_array[0][1] == 1 {
            return
        } else if num == room_array[1][0] && room_array[1][1] == 1 {
            return
        } else if num == room_array[2][0] && room_array[2][1] == 1 {
            return
        } else {
            var i = 0
            for horario in (self.room?.horario)! {
                // Planes and Nodes Stuff Right Here
            }
            switch self.room?.num {
            case 17: room_array[0][1] = 1
            case 43: room_array[1][1] = 1
            case 120: room_array[2][1] = 1
            default: break
            }
        }
    }
}
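Two hedged observations, consistent with the code above but not a confirmed diagnosis: renderer(_:didAdd:for:) fires only once, when ARKit first adds an anchor, so planes detected before the camera moved to a new number keep whatever was stored at that moment; and it runs on SceneKit's render thread while classificationCompleteHandler writes teste on the main queue. A sketch of both fixes inside ViewController (classificationCompleteHandler would call setGabNum(_:) instead of assigning self.teste directly):
// Serialize access to the shared number (replaces the plain `teste` property).
private let numQueue = DispatchQueue(label: "com.hw.numqueue")
private var _teste = 0

func setGabNum(_ value: Int) {
    numQueue.sync { self._teste = value }
}

func getGabNum() -> Int {
    return numQueue.sync { _teste }
}

// didAdd fires once per anchor; didUpdate fires again as the plane grows,
// giving a chance to re-read the current number for existing planes too.
func renderer(_ renderer: SCNSceneRenderer, didUpdate node: SCNNode, for anchor: ARAnchor) {
    guard anchor is ARPlaneAnchor else { return }
    // Re-read the current number and refresh this plane's content
    // here if it changed since didAdd ran.
    num = getGabNum()
}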
