Record and play video based on TensorFlow example (Swift, iOS)

UPDATE: I realised I had forgotten to ask for recording permission. That has now been fixed. However, when I press the Record button I get the error "Cannot create file", so when I start the recording something is fishy with the path, maybe?
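For reference, requesting camera and microphone access (in addition to the NSCameraUsageDescription and NSMicrophoneUsageDescription keys in Info.plist) can be done with AVCaptureDevice.requestAccess; a minimal sketch, with requestRecordingPermissions being just an illustrative name:
import AVFoundation
// Illustrative helper, not part of the project below: asks for camera and microphone
// access and reports whether both were granted.
func requestRecordingPermissions(completion: @escaping (Bool) -> Void) {
    AVCaptureDevice.requestAccess(for: .video) { videoGranted in
        AVCaptureDevice.requestAccess(for: .audio) { audioGranted in
            // The callbacks arrive on an arbitrary queue, so hop to main before touching UI.
            DispatchQueue.main.async {
                completion(videoGranted && audioGranted)
            }
        }
    }
}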
I am working on an app where I want to have my own neural network with the functionality to start recording a video. Thereafter I want to play the video and use information from the neural network.
I have a working version on Android; now I am trying to make something similar for iPhone. As a start, I have used an ImageClassifierExample from TensorFlow Lite. The first task is to add a Record button which starts recording a video and a Play button which plays the video.
I have implemented the two features, but when I try to play the video, it just keeps loading. Either the recording is not working, or the video player is not working (or both). I have checked that the paths are the same.
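One quick way to sanity-check this is to ask FileManager whether the recorded file actually exists on disk before handing the URL to AVPlayer, for example:
// url is assumed to be the same URL the recording was started with.
if FileManager.default.fileExists(atPath: url.path) {
    print("Recorded file found at \(url.path)")
} else {
    print("No file at \(url.path), so the recording never reached disk")
}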
I am not so familiar with iOS development so some help would be nice.
This is the base I am starting from.
Here is my slightly adapted ViewController:
// Copyright 2019 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import AVFoundation
import AVKit
import UIKit
class ViewController: UIViewController {
// MARK: Storyboards Connections
@IBOutlet weak var previewView: PreviewView!
@IBOutlet weak var cameraUnavailableLabel: UILabel!
@IBOutlet weak var resumeButton: UIButton!
@IBOutlet weak var bottomSheetView: CurvedView!
@IBOutlet weak var bottomSheetViewBottomSpace: NSLayoutConstraint!
@IBOutlet weak var bottomSheetStateImageView: UIImageView!
// MARK: Constants
private let animationDuration = 0.5
private let collapseTransitionThreshold: CGFloat = -40.0
private let expandThransitionThreshold: CGFloat = 40.0
private let delayBetweenInferencesMs: Double = 1000
// MARK: Instance Variables
// Holds the results at any time
private var result: Result?
private var initialBottomSpace: CGFloat = 0.0
private var previousInferenceTimeMs: TimeInterval = Date.distantPast.timeIntervalSince1970 * 1000
// MARK: Controllers that manage functionality
// Handles all the camera related functionality
private lazy var cameraCapture = CameraFeedManager(previewView: previewView)
private var isRecording = false // <<<----- Mine
private let captureSession: AVCaptureSession = AVCaptureSession()
// Handles all data preprocessing and makes calls to run inference through the `Interpreter`.
private var modelDataHandler: ModelDataHandler? =
ModelDataHandler(modelFileInfo: MobileNet.modelInfo, labelsFileInfo: MobileNet.labelsInfo)
@IBAction func startRecording(_ sender: Any) { // <<<----- Mine
print("Recording pressed")
if (!isRecording) {
cameraCapture.startRecording()
} else {
cameraCapture.stopRecording()
}
isRecording = !isRecording
}
// Handles the presenting of results on the screen
private var inferenceViewController: InferenceViewController?
// MARK: View Handling Methods
override func viewDidLoad() {
super.viewDidLoad()
guard modelDataHandler != nil else {
fatalError("Model set up failed")
}
#if targetEnvironment(simulator)
previewView.shouldUseClipboardImage = true
NotificationCenter.default.addObserver(self,
selector: #selector(classifyPasteboardImage),
name: UIApplication.didBecomeActiveNotification,
object: nil)
#endif
cameraCapture.delegate = self
addPanGesture()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
changeBottomViewState()
#if !targetEnvironment(simulator)
cameraCapture.checkCameraConfigurationAndStartSession()
#endif
}
#if !targetEnvironment(simulator)
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
cameraCapture.stopSession()
}
#endif
override var preferredStatusBarStyle: UIStatusBarStyle {
return .lightContent
}
func presentUnableToResumeSessionAlert() {
let alert = UIAlertController(
title: "Unable to Resume Session",
message: "There was an error while attempting to resume session.",
preferredStyle: .alert
)
alert.addAction(UIAlertAction(title: "OK", style: .default, handler: nil))
self.present(alert, animated: true)
}
// MARK: Storyboard Segue Handlers
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
super.prepare(for: segue, sender: sender)
if segue.identifier == "EMBED" {
guard let tempModelDataHandler = modelDataHandler else {
return
}
inferenceViewController = segue.destination as? InferenceViewController
inferenceViewController?.wantedInputHeight = tempModelDataHandler.inputHeight
inferenceViewController?.wantedInputWidth = tempModelDataHandler.inputWidth
inferenceViewController?.maxResults = tempModelDataHandler.resultCount
inferenceViewController?.threadCountLimit = tempModelDataHandler.threadCountLimit
inferenceViewController?.delegate = self
}
}
@objc func classifyPasteboardImage() {
guard let image = UIPasteboard.general.images?.first else {
return
}
guard let buffer = CVImageBuffer.buffer(from: image) else {
return
}
previewView.image = image
DispatchQueue.global().async {
self.didOutput(pixelBuffer: buffer)
}
}
deinit {
NotificationCenter.default.removeObserver(self)
}
}
// MARK: InferenceViewControllerDelegate Methods
extension ViewController: InferenceViewControllerDelegate {
func didChangeThreadCount(to count: Int) {
if modelDataHandler?.threadCount == count { return }
modelDataHandler = ModelDataHandler(
modelFileInfo: MobileNet.modelInfo,
labelsFileInfo: MobileNet.labelsInfo,
threadCount: count
)
}
}
// MARK: CameraFeedManagerDelegate Methods
extension ViewController: CameraFeedManagerDelegate {
func didOutput(pixelBuffer: CVPixelBuffer) {
let currentTimeMs = Date().timeIntervalSince1970 * 1000
guard (currentTimeMs - previousInferenceTimeMs) >= delayBetweenInferencesMs else { return }
previousInferenceTimeMs = currentTimeMs
// Pass the pixel buffer to TensorFlow Lite to perform inference.
result = modelDataHandler?.runModel(onFrame: pixelBuffer)
// Display results by handing off to the InferenceViewController.
DispatchQueue.main.async {
let resolution = CGSize(width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer))
self.inferenceViewController?.inferenceResult = self.result
self.inferenceViewController?.resolution = resolution
self.inferenceViewController?.tableView.reloadData()
}
}
// MARK: Session Handling Alerts
func sessionWasInterrupted(canResumeManually resumeManually: Bool) {
// Updates the UI when session is interrupted.
if resumeManually {
self.resumeButton.isHidden = false
} else {
self.cameraUnavailableLabel.isHidden = false
}
}
func sessionInterruptionEnded() {
// Updates UI once session interruption has ended.
if !self.cameraUnavailableLabel.isHidden {
self.cameraUnavailableLabel.isHidden = true
}
if !self.resumeButton.isHidden {
self.resumeButton.isHidden = true
}
}
func sessionRunTimeErrorOccured() {
// Handles session run time error by updating the UI and providing a button if session can be manually resumed.
self.resumeButton.isHidden = false
previewView.shouldUseClipboardImage = true
}
func presentCameraPermissionsDeniedAlert() {
let alertController = UIAlertController(title: "Camera Permissions Denied", message: "Camera permissions have been denied for this app. You can change this by going to Settings", preferredStyle: .alert)
let cancelAction = UIAlertAction(title: "Cancel", style: .cancel, handler: nil)
let settingsAction = UIAlertAction(title: "Settings", style: .default) { (action) in
UIApplication.shared.open(URL(string: UIApplication.openSettingsURLString)!, options: [:], completionHandler: nil)
}
alertController.addAction(cancelAction)
alertController.addAction(settingsAction)
present(alertController, animated: true, completion: nil)
previewView.shouldUseClipboardImage = true
}
func presentVideoConfigurationErrorAlert() {
let alert = UIAlertController(title: "Camera Configuration Failed", message: "There was an error while configuring camera.", preferredStyle: .alert)
alert.addAction(UIAlertAction(title: "OK", style: .default, handler: nil))
self.present(alert, animated: true)
previewView.shouldUseClipboardImage = true
}
}
// MARK: Bottom Sheet Interaction Methods
extension ViewController {
// MARK: Bottom Sheet Interaction Methods
/**
This method adds a pan gesture to make the bottom sheet interactive.
*/
private func addPanGesture() {
let panGesture = UIPanGestureRecognizer(target: self, action: #selector(ViewController.didPan(panGesture:)))
bottomSheetView.addGestureRecognizer(panGesture)
}
/** Change whether bottom sheet should be in expanded or collapsed state.
*/
private func changeBottomViewState() {
guard let inferenceVC = inferenceViewController else {
return
}
if bottomSheetViewBottomSpace.constant == inferenceVC.collapsedHeight - bottomSheetView.bounds.size.height {
bottomSheetViewBottomSpace.constant = 0.0
}
else {
bottomSheetViewBottomSpace.constant = inferenceVC.collapsedHeight - bottomSheetView.bounds.size.height
}
setImageBasedOnBottomViewState()
}
/**
Set image of the bottom sheet icon based on whether it is expanded or collapsed
*/
private func setImageBasedOnBottomViewState() {
if bottomSheetViewBottomSpace.constant == 0.0 {
bottomSheetStateImageView.image = UIImage(named: "down_icon")
}
else {
bottomSheetStateImageView.image = UIImage(named: "up_icon")
}
}
/**
This method responds to the user panning on the bottom sheet.
*/
@objc func didPan(panGesture: UIPanGestureRecognizer) {
// Opens or closes the bottom sheet based on the user's interaction with the bottom sheet.
let translation = panGesture.translation(in: view)
switch panGesture.state {
case .began:
initialBottomSpace = bottomSheetViewBottomSpace.constant
translateBottomSheet(withVerticalTranslation: translation.y)
case .changed:
translateBottomSheet(withVerticalTranslation: translation.y)
case .cancelled:
setBottomSheetLayout(withBottomSpace: initialBottomSpace)
case .ended:
translateBottomSheetAtEndOfPan(withVerticalTranslation: translation.y)
setImageBasedOnBottomViewState()
initialBottomSpace = 0.0
default:
break
}
}
/**
This method sets bottom sheet translation while pan gesture state is continuously changing.
*/
private func translateBottomSheet(withVerticalTranslation verticalTranslation: CGFloat) {
let bottomSpace = initialBottomSpace - verticalTranslation
guard bottomSpace <= 0.0 && bottomSpace >= inferenceViewController!.collapsedHeight - bottomSheetView.bounds.size.height else {
return
}
setBottomSheetLayout(withBottomSpace: bottomSpace)
}
/**
This method changes bottom sheet state to either fully expanded or closed at the end of pan.
*/
private func translateBottomSheetAtEndOfPan(withVerticalTranslation verticalTranslation: CGFloat) {
// Changes bottom sheet state to either fully open or closed at the end of pan.
let bottomSpace = bottomSpaceAtEndOfPan(withVerticalTranslation: verticalTranslation)
setBottomSheetLayout(withBottomSpace: bottomSpace)
}
/**
Return the final state of the bottom sheet view (whether fully collapsed or expanded) that is to be retained.
*/
private func bottomSpaceAtEndOfPan(withVerticalTranslation verticalTranslation: CGFloat) -> CGFloat {
// Calculates whether to fully expand or collapse bottom sheet when pan gesture ends.
var bottomSpace = initialBottomSpace - verticalTranslation
var height: CGFloat = 0.0
if initialBottomSpace == 0.0 {
height = bottomSheetView.bounds.size.height
}
else {
height = inferenceViewController!.collapsedHeight
}
let currentHeight = bottomSheetView.bounds.size.height + bottomSpace
if currentHeight - height <= collapseTransitionThreshold {
bottomSpace = inferenceViewController!.collapsedHeight - bottomSheetView.bounds.size.height
}
else if currentHeight - height >= expandThransitionThreshold {
bottomSpace = 0.0
}
else {
bottomSpace = initialBottomSpace
}
return bottomSpace
}
/**
This method layouts the change of the bottom space of bottom sheet with respect to the view managed by this controller.
*/
func setBottomSheetLayout(withBottomSpace bottomSpace: CGFloat) {
view.setNeedsLayout()
bottomSheetViewBottomSpace.constant = bottomSpace
view.setNeedsLayout()
}
}
CameraFeedManager:
// Copyright 2019 The TensorFlow Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import UIKit
import AVFoundation
// MARK: CameraFeedManagerDelegate Declaration
protocol CameraFeedManagerDelegate: AnyObject {
/**
This method delivers the pixel buffer of the current frame seen by the device's camera.
*/
func didOutput(pixelBuffer: CVPixelBuffer)
/**
This method indicates that the camera permissions have been denied.
*/
func presentCameraPermissionsDeniedAlert()
/**
This method indicates that there was an error in video configuration.
*/
func presentVideoConfigurationErrorAlert()
/**
This method indicates that a session runtime error occurred.
*/
func sessionRunTimeErrorOccured()
/**
This method indicates that the session was interrupted.
*/
func sessionWasInterrupted(canResumeManually resumeManually: Bool)
/**
This method indicates that the session interruption has ended.
*/
func sessionInterruptionEnded()
}
/**
This enum holds the state of the camera initialization.
*/
enum CameraConfiguration {
case success
case failed
case permissionDenied
}
/**
This class manages all camera related functionality
*/
class CameraFeedManager: NSObject, AVCaptureFileOutputRecordingDelegate {
func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) { // << --- Mine
print("Video recorded to: " + outputFileURL.absoluteString)
}
// MARK: Camera Related Instance Variables
private let session: AVCaptureSession = AVCaptureSession()
private let previewView: PreviewView
private let sessionQueue = DispatchQueue(label: "sessionQueue")
private var cameraConfiguration: CameraConfiguration = .failed
private lazy var videoDataOutput = AVCaptureVideoDataOutput()
private var movieDataOutput = AVCaptureMovieFileOutput() // << --- Mine
private var isSessionRunning = false
// MARK: CameraFeedManagerDelegate
weak var delegate: CameraFeedManagerDelegate?
// MARK: Initializer
init(previewView: PreviewView) {
self.previewView = previewView
super.init()
// Initializes the session
session.sessionPreset = .high
self.previewView.session = session
self.previewView.previewLayer.connection?.videoOrientation = .portrait
self.previewView.previewLayer.videoGravity = .resizeAspectFill
self.attemptToConfigureSession()
}
// MARK: Session Start and End methods
/**
This method starts an AVCaptureSession based on whether the camera configuration was successful.
*/
func checkCameraConfigurationAndStartSession() {
sessionQueue.async {
switch self.cameraConfiguration {
case .success:
self.addObservers()
self.startSession()
case .failed:
DispatchQueue.main.async {
self.delegate?.presentVideoConfigurationErrorAlert()
}
case .permissionDenied:
DispatchQueue.main.async {
self.delegate?.presentCameraPermissionsDeniedAlert()
}
}
}
}
/**
This method stops a running AVCaptureSession.
*/
func stopSession() {
self.removeObservers()
sessionQueue.async {
if self.session.isRunning {
self.session.stopRunning()
self.isSessionRunning = self.session.isRunning
}
}
}
/**
This method resumes an interrupted AVCaptureSession.
*/
func resumeInterruptedSession(withCompletion completion: @escaping (Bool) -> ()) {
sessionQueue.async {
self.startSession()
DispatchQueue.main.async {
completion(self.isSessionRunning)
}
}
}
/**
This method starts the AVCaptureSession
**/
private func startSession() {
self.session.startRunning()
self.isSessionRunning = self.session.isRunning
}
// MARK: Session Configuration Methods.
/**
This method requests camera permissions, handles the configuration of the session, and stores the result of the configuration.
*/
private func attemptToConfigureSession() {
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized:
self.cameraConfiguration = .success
case .notDetermined:
self.sessionQueue.suspend()
self.requestCameraAccess(completion: { (granted) in
self.sessionQueue.resume()
})
case .denied:
self.cameraConfiguration = .permissionDenied
default:
break
}
self.sessionQueue.async {
self.configureSession()
}
}
/**
This method requests camera permissions.
*/
private func requestCameraAccess(completion: @escaping (Bool) -> ()) {
AVCaptureDevice.requestAccess(for: .video) { (granted) in
if !granted {
self.cameraConfiguration = .permissionDenied
}
else {
self.cameraConfiguration = .success
}
completion(granted)
}
}
/**
This method handles all the steps to configure an AVCaptureSession.
*/
private func configureSession() {
guard cameraConfiguration == .success else {
return
}
session.beginConfiguration()
// Tries to add an AVCaptureDeviceInput.
guard addVideoDeviceInput() == true else {
self.session.commitConfiguration()
self.cameraConfiguration = .failed
return
}
// Tries to add an AVCaptureVideoDataOutput.
guard addVideoDataOutput() else {
self.session.commitConfiguration()
self.cameraConfiguration = .failed
return
}
session.commitConfiguration()
self.cameraConfiguration = .success
}
func startRecording() { // << --- Mine
self.session.addOutput(movieDataOutput)
guard let homeDirectory = FileManager.default.urls(for: .desktopDirectory, in: .userDomainMask).first else { return }
let url = URL(fileURLWithPath: homeDirectory.absoluteString + "/mymovie.mov")
movieDataOutput.startRecording(to: url , recordingDelegate: self)
}
func stopRecording() { // <<< -- Mine
self.movieDataOutput.stopRecording()
self.session.removeOutput(movieDataOutput)
}
/**
This method tries to add an AVCaptureDeviceInput to the current AVCaptureSession.
*/
private func addVideoDeviceInput() -> Bool {
/**Tries to get the default back camera.
*/
guard let camera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back) else {
return false
}
do {
let videoDeviceInput = try AVCaptureDeviceInput(device: camera)
if session.canAddInput(videoDeviceInput) {
session.addInput(videoDeviceInput)
return true
}
else {
return false
}
}
catch {
fatalError("Cannot create video device input")
}
}
/**
This method tries to add an AVCaptureVideoDataOutput to the current AVCaptureSession.
*/
private func addVideoDataOutput() -> Bool {
let sampleBufferQueue = DispatchQueue(label: "sampleBufferQueue")
videoDataOutput.setSampleBufferDelegate(self, queue: sampleBufferQueue)
videoDataOutput.alwaysDiscardsLateVideoFrames = true
videoDataOutput.videoSettings = [ String(kCVPixelBufferPixelFormatTypeKey) : kCMPixelFormat_32BGRA]
if session.canAddOutput(videoDataOutput) {
session.addOutput(videoDataOutput)
videoDataOutput.connection(with: .video)?.videoOrientation = .portrait
return true
}
return false
}
// MARK: Notification Observer Handling
private func addObservers() {
NotificationCenter.default.addObserver(self, selector: #selector(CameraFeedManager.sessionRuntimeErrorOccured(notification:)), name: NSNotification.Name.AVCaptureSessionRuntimeError, object: session)
NotificationCenter.default.addObserver(self, selector: #selector(CameraFeedManager.sessionWasInterrupted(notification:)), name: NSNotification.Name.AVCaptureSessionWasInterrupted, object: session)
NotificationCenter.default.addObserver(self, selector: #selector(CameraFeedManager.sessionInterruptionEnded), name: NSNotification.Name.AVCaptureSessionInterruptionEnded, object: session)
}
private func removeObservers() {
NotificationCenter.default.removeObserver(self, name: NSNotification.Name.AVCaptureSessionRuntimeError, object: session)
NotificationCenter.default.removeObserver(self, name: NSNotification.Name.AVCaptureSessionWasInterrupted, object: session)
NotificationCenter.default.removeObserver(self, name: NSNotification.Name.AVCaptureSessionInterruptionEnded, object: session)
}
// MARK: Notification Observers
@objc func sessionWasInterrupted(notification: Notification) {
if let userInfoValue = notification.userInfo?[AVCaptureSessionInterruptionReasonKey] as AnyObject?,
let reasonIntegerValue = userInfoValue.integerValue,
let reason = AVCaptureSession.InterruptionReason(rawValue: reasonIntegerValue) {
print("Capture session was interrupted with reason \(reason)")
var canResumeManually = false
if reason == .videoDeviceInUseByAnotherClient {
canResumeManually = true
} else if reason == .videoDeviceNotAvailableWithMultipleForegroundApps {
canResumeManually = false
}
self.delegate?.sessionWasInterrupted(canResumeManually: canResumeManually)
}
}
@objc func sessionInterruptionEnded(notification: Notification) {
self.delegate?.sessionInterruptionEnded()
}
@objc func sessionRuntimeErrorOccured(notification: Notification) {
guard let error = notification.userInfo?[AVCaptureSessionErrorKey] as? AVError else {
return
}
print("Capture session runtime error: \(error)")
if error.code == .mediaServicesWereReset {
sessionQueue.async {
if self.isSessionRunning {
self.startSession()
} else {
DispatchQueue.main.async {
self.delegate?.sessionRunTimeErrorOccured()
}
}
}
} else {
self.delegate?.sessionRunTimeErrorOccured()
}
}
}
/**
AVCaptureVideoDataOutputSampleBufferDelegate
*/
extension CameraFeedManager: AVCaptureVideoDataOutputSampleBufferDelegate {
/** This method delegates the CVPixelBuffer of the frame seen by the camera currently.
*/
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// Converts the CMSampleBuffer to a CVPixelBuffer.
let pixelBuffer: CVPixelBuffer? = CMSampleBufferGetImageBuffer(sampleBuffer)
guard let imagePixelBuffer = pixelBuffer else {
return
}
// Delegates the pixel buffer to the ViewController.
delegate?.didOutput(pixelBuffer: imagePixelBuffer)
}
}
PlayerController:
import Foundation
import UIKit
import AVFoundation
import AVKit
class PlayerController : UIViewController {
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
guard let homeDirectory = FileManager.default.urls(for: .desktopDirectory, in: .userDomainMask).first else { return }
let url = URL(fileURLWithPath: homeDirectory.absoluteString + "/mymovie.mov")
print(url.absoluteString)
let player = AVPlayer(url: url) // video path coming from above function
let playerViewController = AVPlayerViewController()
playerViewController.player = player
self.present(playerViewController, animated: true) {
playerViewController.player!.play()
}
}
}

The solution was to create the path using:
private func documentDirectory() -> String {
let documentDirectory = NSSearchPathForDirectoriesInDomains(.documentDirectory,
.userDomainMask,
true)
return documentDirectory[0]
}
private func append(toPath path: String,
withPathComponent pathComponent: String) -> String? {
if var pathURL = URL(string: path) {
pathURL.appendPathComponent(pathComponent)
return pathURL.absoluteString
}
return nil
}
and
guard let path = append(toPath: documentDirectory(), withPathComponent: "movie_test.mov") else {return}
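For context, a sketch of how that path could then be used on both sides; the movie_test.mov name comes from the snippet above, and wrapping the returned string in URL(fileURLWithPath:) is my assumption:
// In CameraFeedManager.startRecording(), after the movie output has been added to the session:
guard let path = append(toPath: documentDirectory(), withPathComponent: "movie_test.mov") else { return }
movieDataOutput.startRecording(to: URL(fileURLWithPath: path), recordingDelegate: self)
// In PlayerController.viewDidAppear(_:), rebuild the same path and hand it to AVPlayer:
guard let playbackPath = append(toPath: documentDirectory(), withPathComponent: "movie_test.mov") else { return }
let player = AVPlayer(url: URL(fileURLWithPath: playbackPath))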

Related

How to use SCNView as a SubView of ARView?

I'm using the ARKit-CoreLocation library to present POIs in the AR world, on iOS 14. The problem is that I am not able to use ARCL's SceneLocationView, because it is an SCNView: when I add it as a subview, it overlaps my ARView contents and creates a new ARSession, leaving my ARView in the background.
Code:
extension RealityKitViewController {
typealias Context = UIViewControllerRepresentableContext<RealityKitViewControllerRepresentable>
}
class RealityKitViewController: UIViewController {
let sceneLocationView = SceneLocationView()
let arView = ARView(frame: .zero)
let context : Context
let pins: [Pin]
var currentLocation : CLLocation? {
return sceneLocationView.sceneLocationManager.currentLocation
}
init (_ context : Context, pins: [Pin]) {
self.context = context
self.pins = pins
super.init(nibName: nil, bundle: nil)
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
func makeArView()
{
// Start AR session
let session = arView.session
let config = ARWorldTrackingConfiguration()
config.planeDetection = [.horizontal, .vertical]
session.run(config)
// Add coaching overlay
let coachingOverlay = ARCoachingOverlayView()
coachingOverlay.session = session
coachingOverlay.goal = .horizontalPlane
coachingOverlay.delegate = context.coordinator
arView.addSubview(coachingOverlay)
arView.debugOptions = [.showFeaturePoints, .showAnchorOrigins, .showAnchorGeometry]
// Handle ARSession events via delegate
context.coordinator.view = arView
session.delegate = context.coordinator
}
override func viewDidLoad() {
super.viewDidLoad()
// probably problem here
sceneLocationView.frame = view.bounds
arView.frame = sceneLocationView.bounds
sceneLocationView.addSubview(arView)
view.addSubview(sceneLocationView)
addPins()
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
sceneLocationView.frame = view.bounds
}
override func viewWillAppear(_ animated: Bool) {
DispatchQueue.main.asyncAfter(deadline: .now() + 0.2) {
super.viewWillAppear(animated)
self.sceneLocationView.run()
}
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
sceneLocationView.pause()
}
func addPins() {
guard let currentLocation = currentLocation, currentLocation.horizontalAccuracy < 16 else {
return DispatchQueue.main.asyncAfter(deadline: .now() + 0.5) { [weak self] in
self?.addPins()
}
}
self.pins.forEach { pin in
guard pin.isLocation else { return }
guard let location = pin.location else { return assertionFailure() }
guard let image = UIImage(named: pin.image) else { return assertionFailure() }
let node = LocationAnnotationNode(location : location, image: image)
node.scaleRelativeToDistance = true
sceneLocationView.addLocationNodeWithConfirmedLocation(locationNode: node)
}
}
}
// if you want to test it, you can try to place these pins at a location whose coordinates you can easily get from Google Earth.
struct RealityKitViewControllerRepresentable : UIViewControllerRepresentable {
let pins = [Pin(image: "test", location: CLLocation(coordinate: CLLocationCoordinate2D(latitude: 0.03275742958, longitude: 0.32827424), altitude: 772.1489524841309), isLocation: true)]
@Binding var arActivate : Bool
func makeUIViewController(context: Context) -> RealityKitViewController {
let viewController = RealityKitViewController(context, pins: pins)
return viewController
}
func updateUIViewController(_ uiViewController: RealityKitViewController, context: Context) {
}
func makeCoordinator() -> Coordinator {
Coordinator()
}
}
class Coordinator: NSObject, ARSessionDelegate {
weak var view: ARView?
}

How to live stream an application screen with a camera view and some other UIViews over the camera view

Actually, I want to broadcast a live match with some overlays on it, like sponsor images in the top corners of the screen and a scorecard at the bottom of the screen. Can someone help me or guide me on a way to implement this? I use this pod (HaishinKit), but it is not serving the purpose. I use the rtmpStream.attachScreen function for broadcasting my UIView, but this function is not picking up my camera view (AVCaptureVideoPreviewLayer); only the scorecard and sponsor images are broadcast. I want to broadcast my camera screen along with the scorecard and other images, as well as the audio.
import UIKit
import HaishinKit
import AVFoundation
import VideoToolbox
import Loaf
import WebKit
class BroadcastViewController: UIViewController, RTMPStreamDelegate {
// Camera Preview View
@IBOutlet private weak var previewView: UIView!
@IBOutlet weak var videoView: UIView!
// Camera Selector
@IBOutlet weak var cameraSelector: UISegmentedControl!
@IBOutlet weak var webview: WKWebView!
// Go Live Button
@IBOutlet weak var startStopButton: UIButton!
// FPS and Bitrate Labels
@IBOutlet weak var fpsLabel: UILabel!
@IBOutlet weak var bitrateLabel: UILabel!
// RTMP Connection & RTMP Stream
private var rtmpConnection = RTMPConnection()
private var rtmpStream: RTMPStream!
// Default Camera
private var defaultCamera: AVCaptureDevice.Position = .back
// Flag indicates if we should be attempting to go live
private var liveDesired = false
// Reconnect attempt tracker
private var reconnectAttempt = 0
// The RTMP Stream key to broadcast to.
public var streamKey: String!
// The Preset to use
public var preset: Preset!
// A tracker of the last time we changed the bitrate in ABR
private var lastBwChange = 0
// The RTMP endpoint
let rtmpEndpoint = "rtmps://live-api-s.facebook.com:443/rtmp/"
// Camera Capture required properties
var videoDataOutput: AVCaptureVideoDataOutput!
var videoDataOutputQueue: DispatchQueue!
var previewLayer:AVCaptureVideoPreviewLayer!
var captureDevice : AVCaptureDevice!
let session = AVCaptureSession()
var isPublic = false
// Some basic presets for live streaming
enum Preset {
case hd_1080p_30fps_5mbps
case hd_720p_30fps_3mbps
case sd_540p_30fps_2mbps
case sd_360p_30fps_1mbps
}
// An encoding profile - width, height, framerate, video bitrate
private class Profile {
public var width : Int = 0
public var height : Int = 0
public var frameRate : Int = 0
public var bitrate : Int = 0
init(width: Int, height: Int, frameRate: Int, bitrate: Int) {
self.width = width
self.height = height
self.frameRate = frameRate
self.bitrate = bitrate
}
}
// Converts a Preset to a Profile
private func presetToProfile(preset: Preset) -> Profile {
switch preset {
case .hd_1080p_30fps_5mbps:
return Profile(width: 1920, height: 1080, frameRate: 30, bitrate: 5000000)
case .hd_720p_30fps_3mbps:
return Profile(width: 1280, height: 720, frameRate: 30, bitrate: 3000000)
case .sd_540p_30fps_2mbps:
return Profile(width: 960, height: 540, frameRate: 30, bitrate: 2000000)
case .sd_360p_30fps_1mbps:
return Profile(width: 640, height: 360, frameRate: 30, bitrate: 1000000)
}
}
// Configures the live stream
private func configureStream(preset: Preset) {
let profile = presetToProfile(preset: preset)
// Configure the capture settings from the camera
rtmpStream.captureSettings = [
.sessionPreset: AVCaptureSession.Preset.hd1920x1080,
.continuousAutofocus: true,
.continuousExposure: true,
.fps: profile.frameRate
]
// Get the orientation of the app, and set the video orientation appropriately
if #available(iOS 13.0, *) {
if let orientation = UIApplication.shared.windows.first?.windowScene?.interfaceOrientation {
// let videoOrientation = DeviceUtil.videoOrientation(by: orientation)
rtmpStream.orientation = .landscapeRight
rtmpStream.videoSettings = [
.width: (orientation.isPortrait) ? profile.height : profile.width,
.height: (orientation.isPortrait) ? profile.width : profile.height,
.bitrate: profile.bitrate,
.profileLevel: kVTProfileLevel_H264_Main_AutoLevel,
.maxKeyFrameIntervalDuration: 2, // 2 seconds
]
}
} else {
// Fallback on earlier versions
}
// Configure the RTMP audio stream
// rtmpStream.audioSettings = [
// .bitrate: 128000 // Always use 128kbps
// ]
}
// Publishes the live stream
private func publishStream() {
print("Calling publish()")
rtmpStream.attachScreen(ScreenCaptureSession(viewToCapture: previewView))
rtmpStream.publish("minestreamkey")
DispatchQueue.main.async {
self.startStopButton.setTitle("Stop Streaming!", for: .normal)
}
}
// Triggers an attempt to connect to an RTMP hostname
private func connectRTMP() {
print("Calling connect()")
rtmpConnection.connect(rtmpEndpoint)
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
// videoView.startSession()
}
override func viewDidLoad() {
super.viewDidLoad()
self.setupAVCapture()
previewView.bringSubviewToFront(webview)
webview.load(NSURLRequest(url: NSURL(string: "https://graphics.crickslab.com/scorecard/0865e840-f147-11eb-95cb-65228ef0512c/Blitzz-vs-Crickslab-Officials-Fri30Jul2021-1201AM-")! as URL) as URLRequest)
print("Broadcast View Controller Init")
print("Stream Key: " + "FB-3940543509404805-0-AbxeU6r48NpFcasH")
// Work out the orientation of the device, and set this on the RTMP Stream
rtmpStream = RTMPStream(connection: rtmpConnection)
// Get the orientation of the app, and set the video orientation appropriately
if #available(iOS 13.0, *) {
if let orientation = UIApplication.shared.windows.first?.windowScene?.interfaceOrientation {
let videoOrientation = DeviceUtil.videoOrientation(by: orientation)
rtmpStream.orientation = videoOrientation!
}
} else {
// Fallback on earlier versions
}
// And a listener for orientation changes
// Note: Changing the orientation once the stream has been started will not change the orientation of the live stream, only the preview.
NotificationCenter.default.addObserver(self, selector: #selector(on(_:)), name: UIDevice.orientationDidChangeNotification, object: nil)
// Configure the encoder profile
configureStream(preset: self.preset)
// Attach to the default audio device
// rtmpStream.attachAudio(AVCaptureDevice.default(for: .audio)) { error in
// print(error.description)
// }
//
// // Attach to the default camera
// rtmpStream.attachCamera(DeviceUtil.device(withPosition: defaultCamera)) { error in
// print(error.description)
// }
// Register a tap gesture recogniser so we can use tap to focus
let tap = UITapGestureRecognizer(target: self, action: #selector(self.handleTap(_:)))
previewView.addGestureRecognizer(tap)
previewView.isUserInteractionEnabled = true
// Attach the preview view
// previewView?.attachStream(rtmpStream)
// Add event listeners for RTMP status changes and IO Errors
rtmpConnection.addEventListener(.rtmpStatus, selector: #selector(rtmpStatusHandler), observer: self)
rtmpConnection.addEventListener(.ioError, selector: #selector(rtmpErrorHandler), observer: self)
rtmpStream.delegate = self
startStopButton.setTitle("Go Live!", for: .normal)
}
// 👉📱 Tap to focus / exposure
@objc func handleTap(_ sender: UITapGestureRecognizer) {
if sender.state == UIGestureRecognizer.State.ended {
let point = sender.location(in: previewView)
let pointOfInterest = CGPoint(x: point.x / previewView.bounds.size.width, y: point.y / previewView.bounds.size.height)
rtmpStream.setPointOfInterest(pointOfInterest, exposure: pointOfInterest)
}
}
// Triggered when the user tries to change camera
@IBAction func changeCameraToggle(_ sender: UISegmentedControl) {
switch cameraSelector.selectedSegmentIndex
{
case 0:
rtmpStream.attachCamera(DeviceUtil.device(withPosition: AVCaptureDevice.Position.back))
case 1:
rtmpStream.attachCamera(DeviceUtil.device(withPosition: AVCaptureDevice.Position.front))
default:
rtmpStream.attachCamera(DeviceUtil.device(withPosition: defaultCamera))
}
}
// Triggered when the user taps the go live button
@IBAction func goLiveButton(_ sender: UIButton) {
print("Go Live Button tapped!")
if !liveDesired {
if rtmpConnection.connected {
// If we're already connected to the RTMP server, we can just call publish() to start the stream
publishStream()
} else {
// Otherwise, we need to set up the RTMP connection and wait for a callback before we can safely
// call publish() to start the stream
connectRTMP()
}
// Modify application state to streaming
liveDesired = true
startStopButton.setTitle("Connecting...", for: .normal)
} else {
// Unpublish the live stream
rtmpStream.close()
// Modify application state to idle
liveDesired = false
startStopButton.setTitle("Go Live!", for: .normal)
}
}
// Called when the RTMPStream or RTMPConnection changes status
@objc
private func rtmpStatusHandler(_ notification: Notification) {
print("RTMP Status Handler called.")
let e = Event.from(notification)
guard let data: ASObject = e.data as? ASObject, let code: String = data["code"] as? String else {
return
}
// Send a nicely styled notification about the RTMP Status
var loafStyle = Loaf.State.info
switch code {
case RTMPConnection.Code.connectSuccess.rawValue, RTMPStream.Code.publishStart.rawValue, RTMPStream.Code.unpublishSuccess.rawValue:
loafStyle = Loaf.State.success
case RTMPConnection.Code.connectFailed.rawValue:
loafStyle = Loaf.State.error
case RTMPConnection.Code.connectClosed.rawValue:
loafStyle = Loaf.State.warning
default:
break
}
DispatchQueue.main.async {
Loaf("RTMP Status: " + code, state: loafStyle, location: .top, sender: self).show(.short)
}
switch code {
case RTMPConnection.Code.connectSuccess.rawValue:
reconnectAttempt = 0
if liveDesired {
// Publish our stream to our stream key
publishStream()
}
case RTMPConnection.Code.connectFailed.rawValue, RTMPConnection.Code.connectClosed.rawValue:
print("RTMP Connection was not successful.")
// Retry the connection if "live" is still the desired state
if liveDesired {
reconnectAttempt += 1
DispatchQueue.main.async {
self.startStopButton.setTitle("Reconnect attempt " + String(self.reconnectAttempt) + " (Cancel)" , for: .normal)
}
// Retries the RTMP connection every 5 seconds
DispatchQueue.main.asyncAfter(deadline: .now() + 5) {
self.connectRTMP()
}
}
default:
break
}
}
// Called when there's an RTMP Error
@objc
private func rtmpErrorHandler(_ notification: Notification) {
print("RTMP Error Handler called.")
}
// Called when the device changes rotation
@objc
private func on(_ notification: Notification) {
if #available(iOS 13.0, *) {
if let orientation = UIApplication.shared.windows.first?.windowScene?.interfaceOrientation {
let videoOrientation = DeviceUtil.videoOrientation(by: orientation)
rtmpStream.orientation = videoOrientation!
// Do not change the output rotation if the stream has already started.
if liveDesired == false {
let profile = presetToProfile(preset: self.preset)
rtmpStream.videoSettings = [
.width: (orientation.isPortrait) ? profile.height : profile.width,
.height: (orientation.isPortrait) ? profile.width : profile.height
]
}
}
} else {
// Fallback on earlier versions
}
}
// Button tapped to return to the configuration screen
@IBAction func closeButton(_ sender: Any) {
self.dismiss(animated: true, completion: nil)
}
// RTMPStreamDelegate callbacks
func rtmpStreamDidClear(_ stream: RTMPStream) {
}
// Statistics callback
func rtmpStream(_ stream: RTMPStream, didStatics connection: RTMPConnection) {
DispatchQueue.main.async {
self.fpsLabel.text = String(stream.currentFPS) + " fps"
self.bitrateLabel.text = String((connection.currentBytesOutPerSecond / 125)) + " kbps"
}
}
// Insufficient bandwidth callback
func rtmpStream(_ stream: RTMPStream, didPublishInsufficientBW connection: RTMPConnection) {
print("ABR: didPublishInsufficientBW")
// If we last changed bandwidth over 5 seconds ago
if (Int(NSDate().timeIntervalSince1970) - lastBwChange) > 5 {
print("ABR: Will try to change bitrate")
// Reduce bitrate by 30%, at most once every 5 seconds
let b = Double(stream.videoSettings[.bitrate] as! UInt32) * Double(0.7)
print("ABR: Proposed bandwidth: " + String(b))
stream.videoSettings[.bitrate] = b
lastBwChange = Int(NSDate().timeIntervalSince1970)
DispatchQueue.main.async {
Loaf("Insuffient Bandwidth, changing video bandwidth to: " + String(b), state: Loaf.State.warning, location: .top, sender: self).show(.short)
}
} else {
print("ABR: Still giving grace time for last bandwidth change")
}
}
// Today this example doesn't attempt to increase bandwidth to find a sweet spot.
// An implementation might be to gently increase bandwidth by a few percent, but that's hard without getting into an aggressive cycle.
func rtmpStream(_ stream: RTMPStream, didPublishSufficientBW connection: RTMPConnection) {
}
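// A possible (untested) sketch of the gentle ramp-up described above: inside
// didPublishSufficientBW one could, for example, reuse the 5-second grace period from
// didPublishInsufficientBW and cap the increase at the profile's target bitrate.
// This is only an illustration, not part of the original code:
//
// if (Int(NSDate().timeIntervalSince1970) - lastBwChange) > 5 {
//     let current = Double(stream.videoSettings[.bitrate] as! UInt32)
//     let target = Double(presetToProfile(preset: self.preset).bitrate)
//     stream.videoSettings[.bitrate] = min(current * 1.1, target)
//     lastBwChange = Int(NSDate().timeIntervalSince1970)
// }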
}
// AVCaptureVideoDataOutputSampleBufferDelegate protocol and related methods
extension BroadcastViewController: AVCaptureVideoDataOutputSampleBufferDelegate{
func setupAVCapture(){
session.sessionPreset = AVCaptureSession.Preset.vga640x480
guard let device = AVCaptureDevice
.default(AVCaptureDevice.DeviceType.builtInWideAngleCamera,
for: .video,
position: AVCaptureDevice.Position.back) else {
return
}
captureDevice = device
beginSession()
}
func beginSession(){
var deviceInput: AVCaptureDeviceInput!
do {
deviceInput = try AVCaptureDeviceInput(device: captureDevice)
guard deviceInput != nil else {
print("error: cant get deviceInput")
return
}
if self.session.canAddInput(deviceInput){
self.session.addInput(deviceInput)
}
videoDataOutput = AVCaptureVideoDataOutput()
videoDataOutput.alwaysDiscardsLateVideoFrames=true
videoDataOutputQueue = DispatchQueue(label: "VideoDataOutputQueue")
videoDataOutput.setSampleBufferDelegate(self, queue:self.videoDataOutputQueue)
if session.canAddOutput(self.videoDataOutput){
session.addOutput(self.videoDataOutput)
}
videoDataOutput.connection(with: .video)?.isEnabled = true
previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
previewLayer.videoGravity = AVLayerVideoGravity.resizeAspect
// let rootLayer :CALayer = self.previewView.layer
self.videoView.layer.masksToBounds=true
previewLayer.frame = videoView.bounds
videoView.layer.addSublayer(self.previewLayer)
session.startRunning()
} catch let error as NSError {
deviceInput = nil
print("error: \(error.localizedDescription)")
}
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// do stuff here
if let description = CMSampleBufferGetFormatDescription(sampleBuffer) {
let dimensions = CMVideoFormatDescriptionGetDimensions(description)
rtmpStream.videoSettings = [
.width: dimensions.width,
.height: dimensions.height ,
.profileLevel: kVTProfileLevel_H264_Baseline_AutoLevel
]
}
rtmpStream.appendSampleBuffer(sampleBuffer, withType: .video)
}
// clean up AVCapture
func stopCamera(){
session.stopRunning()
}
}
I have found a way to live stream the camera view with overlays on it by creating 2 RTMPStream objects: one for attaching the camera, and the second one for attachScreen. The following is the code.
import AVFoundation
import HaishinKit
import Photos
import UIKit
import VideoToolbox
import WebKit
final class ExampleRecorderDelegate: DefaultAVRecorderDelegate {
static let `default` = ExampleRecorderDelegate()
override func didFinishWriting(_ recorder: AVRecorder) {
guard let writer: AVAssetWriter = recorder.writer else {
return
}
PHPhotoLibrary.shared().performChanges({() -> Void in
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: writer.outputURL)
}, completionHandler: { _, error -> Void in
do {
try FileManager.default.removeItem(at: writer.outputURL)
} catch {
print(error)
}
})
}
}
final class LiveViewController: UIViewController {
private static let maxRetryCount: Int = 5
@IBOutlet private weak var lfView: MTHKView!
@IBOutlet private weak var currentFPSLabel: UILabel!
@IBOutlet private weak var publishButton: UIButton!
@IBOutlet private weak var pauseButton: UIButton!
@IBOutlet private weak var videoBitrateLabel: UILabel!
@IBOutlet private weak var videoBitrateSlider: UISlider!
@IBOutlet private weak var audioBitrateLabel: UILabel!
@IBOutlet private weak var zoomSlider: UISlider!
@IBOutlet private weak var audioBitrateSlider: UISlider!
@IBOutlet private weak var fpsControl: UISegmentedControl!
@IBOutlet private weak var effectSegmentControl: UISegmentedControl!
@IBOutlet weak var webview: WKWebView!
private var rtmpConnection = RTMPConnection()
private var rtmpStream: RTMPStream!
private var rtmpStreamLayer: RTMPStream!
private var sharedObject: RTMPSharedObject!
private var currentEffect: VideoEffect?
private var currentPosition: AVCaptureDevice.Position = .back
private var retryCount: Int = 0
override func viewDidLoad() {
super.viewDidLoad()
rtmpStream = RTMPStream(connection: rtmpConnection)
rtmpStreamLayer = RTMPStream(connection: rtmpConnection)
if let orientation = DeviceUtil.videoOrientation(by: UIApplication.shared.statusBarOrientation) {
rtmpStream.orientation = orientation
}
rtmpStream.captureSettings = [
.sessionPreset: AVCaptureSession.Preset.hd1280x720,
.continuousAutofocus: true,
.continuousExposure: true
// .preferredVideoStabilizationMode: AVCaptureVideoStabilizationMode.auto
]
rtmpStreamLayer.captureSettings = [
.sessionPreset: AVCaptureSession.Preset.hd1280x720,
.continuousAutofocus: true,
.continuousExposure: true
// .preferredVideoStabilizationMode: AVCaptureVideoStabilizationMode.auto
]
rtmpStream.videoSettings = [
.width: 720,
.height: 1280
]
rtmpStream.mixer.recorder.delegate = ExampleRecorderDelegate.shared
rtmpStreamLayer.videoSettings = [
.width: 720,
.height: 1280
]
rtmpStream.mixer.recorder.delegate = ExampleRecorderDelegate.shared
videoBitrateSlider?.value = Float(RTMPStream.defaultVideoBitrate) / 1000
audioBitrateSlider?.value = Float(RTMPStream.defaultAudioBitrate) / 1000
NotificationCenter.default.addObserver(self, selector: #selector(on(_:)), name: UIDevice.orientationDidChangeNotification, object: nil)
NotificationCenter.default.addObserver(self, selector: #selector(didEnterBackground(_:)), name: UIApplication.didEnterBackgroundNotification, object: nil)
NotificationCenter.default.addObserver(self, selector: #selector(didBecomeActive(_:)), name: UIApplication.didBecomeActiveNotification, object: nil)
}
override func viewWillAppear(_ animated: Bool) {
logger.info("viewWillAppear")
super.viewWillAppear(animated)
rtmpStream.attachAudio(AVCaptureDevice.default(for: .audio)) { error in
logger.warn(error.description)
}
rtmpStream.attachScreen(ScreenCaptureSession(viewToCapture: view))
rtmpStream.attachCamera(DeviceUtil.device(withPosition: currentPosition)) { error in
logger.warn(error.description)
}
rtmpStreamLayer.attachScreen(ScreenCaptureSession(viewToCapture: view))
rtmpStreamLayer.receiveAudio = false
rtmpStream.addObserver(self, forKeyPath: "currentFPS", options: .new, context: nil)
lfView?.attachStream(rtmpStream)
lfView?.attachStream(rtmpStreamLayer)
}
override func viewWillDisappear(_ animated: Bool) {
logger.info("viewWillDisappear")
super.viewWillDisappear(animated)
rtmpStream.removeObserver(self, forKeyPath: "currentFPS")
rtmpStream.close()
rtmpStream.dispose()
}
@IBAction func rotateCamera(_ sender: UIButton) {
logger.info("rotateCamera")
let position: AVCaptureDevice.Position = currentPosition == .back ? .front : .back
rtmpStream.captureSettings[.isVideoMirrored] = position == .front
rtmpStream.attachCamera(DeviceUtil.device(withPosition: position)) { error in
logger.warn(error.description)
}
currentPosition = position
}
@IBAction func toggleTorch(_ sender: UIButton) {
rtmpStream.torch.toggle()
}
@IBAction func on(slider: UISlider) {
if slider == audioBitrateSlider {
audioBitrateLabel?.text = "audio \(Int(slider.value))/kbps"
rtmpStream.audioSettings[.bitrate] = slider.value * 1000
}
if slider == videoBitrateSlider {
videoBitrateLabel?.text = "video \(Int(slider.value))/kbps"
rtmpStream.videoSettings[.bitrate] = slider.value * 1000
}
if slider == zoomSlider {
rtmpStream.setZoomFactor(CGFloat(slider.value), ramping: true, withRate: 5.0)
}
}
@IBAction func on(pause: UIButton) {
rtmpStream.paused.toggle()
}
@IBAction func on(close: UIButton) {
self.dismiss(animated: true, completion: nil)
}
@IBAction func on(publish: UIButton) {
if publish.isSelected {
UIApplication.shared.isIdleTimerDisabled = false
rtmpConnection.close()
rtmpConnection.removeEventListener(.rtmpStatus, selector: #selector(rtmpStatusHandler), observer: self)
rtmpConnection.removeEventListener(.ioError, selector: #selector(rtmpErrorHandler), observer: self)
publish.setTitle("●", for: [])
} else {
UIApplication.shared.isIdleTimerDisabled = true
rtmpConnection.addEventListener(.rtmpStatus, selector: #selector(rtmpStatusHandler), observer: self)
rtmpConnection.addEventListener(.ioError, selector: #selector(rtmpErrorHandler), observer: self)
rtmpConnection.connect(Preference.defaultInstance.uri!)
publish.setTitle("■", for: [])
}
publish.isSelected.toggle()
}
@objc
private func rtmpStatusHandler(_ notification: Notification) {
let e = Event.from(notification)
guard let data: ASObject = e.data as? ASObject, let code: String = data["code"] as? String else {
return
}
logger.info(code)
switch code {
case RTMPConnection.Code.connectSuccess.rawValue:
retryCount = 0
rtmpStream!.publish("yourstreamkey")
DispatchQueue.main.asyncAfter(deadline: .now() + 0.2)
{
self.rtmpStreamLayer!.publish("yourstreamkey")
}
// sharedObject!.connect(rtmpConnection)
case RTMPConnection.Code.connectFailed.rawValue, RTMPConnection.Code.connectClosed.rawValue:
guard retryCount <= LiveViewController.maxRetryCount else {
return
}
Thread.sleep(forTimeInterval: pow(2.0, Double(retryCount)))
rtmpConnection.connect(Preference.defaultInstance.uri!)
retryCount += 1
default:
break
}
}
@objc
private func rtmpErrorHandler(_ notification: Notification) {
logger.error(notification)
rtmpConnection.connect(Preference.defaultInstance.uri!)
}
func tapScreen(_ gesture: UIGestureRecognizer) {
if let gestureView = gesture.view, gesture.state == .ended {
let touchPoint: CGPoint = gesture.location(in: gestureView)
let pointOfInterest = CGPoint(x: touchPoint.x / gestureView.bounds.size.width, y: touchPoint.y / gestureView.bounds.size.height)
print("pointOfInterest: \(pointOfInterest)")
rtmpStream.setPointOfInterest(pointOfInterest, exposure: pointOfInterest)
}
}
@IBAction private func onFPSValueChanged(_ segment: UISegmentedControl) {
switch segment.selectedSegmentIndex {
case 0:
rtmpStream.captureSettings[.fps] = 15.0
case 1:
rtmpStream.captureSettings[.fps] = 30.0
case 2:
rtmpStream.captureSettings[.fps] = 60.0
default:
break
}
}
@IBAction private func onEffectValueChanged(_ segment: UISegmentedControl) {
if let currentEffect: VideoEffect = currentEffect {
_ = rtmpStream.unregisterVideoEffect(currentEffect)
}
switch segment.selectedSegmentIndex {
case 1:
currentEffect = MonochromeEffect()
_ = rtmpStream.registerVideoEffect(currentEffect!)
case 2:
currentEffect = PronamaEffect()
_ = rtmpStream.registerVideoEffect(currentEffect!)
default:
break
}
}
@objc
private func on(_ notification: Notification) {
guard let orientation = DeviceUtil.videoOrientation(by: UIApplication.shared.statusBarOrientation) else {
return
}
rtmpStream.orientation = orientation
}
@objc
private func didEnterBackground(_ notification: Notification) {
// rtmpStream.receiveVideo = false
}
@objc
private func didBecomeActive(_ notification: Notification) {
// rtmpStream.receiveVideo = true
}
override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey: Any]?, context: UnsafeMutableRawPointer?) {
if Thread.isMainThread {
currentFPSLabel?.text = "\(rtmpStream.currentFPS)"
}
}
}
extension LiveViewController : UIWebViewDelegate
{
func webViewDidFinishLoad(_ webView: UIWebView) {
webview.scrollView.zoomScale = 10
}
}

Swift: Instance member cannot be used on type in ARKitVision example

The Apple ARKitVision example has the following declaration in the ViewController.swift file:
// The view controller that displays the status and "restart experience" UI.
private lazy var statusViewController: StatusViewController = {
return children.lazy.compactMap({ $0 as? StatusViewController }).first!
}()
However, if I copy the same views and source files and incorporate them into another test storyboard/project I get the error message "Instance member 'children' cannot be used on type 'StatusViewController'"
So why does this work in the ARKitVision example but not if I set it up myself from scratch? What else is the ARKitVision example doing to get this working? Thanks 😊
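For reference, children is an instance property of the parent view controller, and the sample relies on StatusViewController being embedded as a child of the main ViewController (typically via a container view and an embed segue in the storyboard), which is why children can find it. A rough programmatic sketch of that parent/child relationship, assuming a plain StatusViewController() instance (in the sample the child actually comes from the storyboard, so its outlets are already connected), looks like:
// Inside the parent ViewController, e.g. in viewDidLoad():
let statusVC = StatusViewController()
addChild(statusVC)                 // after this call, statusVC shows up in `children`
view.addSubview(statusVC.view)
statusVC.view.frame = view.bounds
statusVC.didMove(toParent: self)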
The complete class definition for StatusViewController is:
/*
See LICENSE folder for this sample’s licensing information.
Abstract:
Utility class for showing messages above the AR view.
*/
import Foundation
import ARKit
/**
Displayed at the top of the main interface of the app that allows users to see
the status of the AR experience, as well as the ability to control restarting
the experience altogether.
- Tag: StatusViewController
*/
class StatusViewController: UIViewController {
// MARK: - Types
enum MessageType {
case trackingStateEscalation
case planeEstimation
case contentPlacement
case focusSquare
static var all: [MessageType] = [
.trackingStateEscalation,
.planeEstimation,
.contentPlacement,
.focusSquare
]
}
// MARK: - IBOutlets
@IBOutlet weak private var messagePanel: UIVisualEffectView!
@IBOutlet weak private var messageLabel: UILabel!
@IBOutlet weak private var restartExperienceButton: UIButton!
// MARK: - Properties
/// Triggered when the "Restart Experience" button is tapped.
var restartExperienceHandler: () -> Void = {}
/// Seconds before the timer message should fade out. Adjust if the app needs longer transient messages.
private let displayDuration: TimeInterval = 6
// Timer for hiding messages.
private var messageHideTimer: Timer?
private var timers: [MessageType: Timer] = [:]
// MARK: - Message Handling
func showMessage(_ text: String, autoHide: Bool = true) {
// Cancel any previous hide timer.
messageHideTimer?.invalidate()
messageLabel.text = text
// Make sure status is showing.
setMessageHidden(false, animated: true)
if autoHide {
messageHideTimer = Timer.scheduledTimer(withTimeInterval: displayDuration, repeats: false, block: { [weak self] _ in
self?.setMessageHidden(true, animated: true)
})
}
}
func scheduleMessage(_ text: String, inSeconds seconds: TimeInterval, messageType: MessageType) {
cancelScheduledMessage(for: messageType)
let timer = Timer.scheduledTimer(withTimeInterval: seconds, repeats: false, block: { [weak self] timer in
self?.showMessage(text)
timer.invalidate()
})
timers[messageType] = timer
}
func cancelScheduledMessage(`for` messageType: MessageType) {
timers[messageType]?.invalidate()
timers[messageType] = nil
}
func cancelAllScheduledMessages() {
for messageType in MessageType.all {
cancelScheduledMessage(for: messageType)
}
}
// MARK: - ARKit
func showTrackingQualityInfo(for trackingState: ARCamera.TrackingState, autoHide: Bool) {
showMessage(trackingState.presentationString, autoHide: autoHide)
}
func escalateFeedback(for trackingState: ARCamera.TrackingState, inSeconds seconds: TimeInterval) {
cancelScheduledMessage(for: .trackingStateEscalation)
let timer = Timer.scheduledTimer(withTimeInterval: seconds, repeats: false, block: { [unowned self] _ in
self.cancelScheduledMessage(for: .trackingStateEscalation)
var message = trackingState.presentationString
if let recommendation = trackingState.recommendation {
message.append(": \(recommendation)")
}
self.showMessage(message, autoHide: false)
})
timers[.trackingStateEscalation] = timer
}
// MARK: - IBActions
@IBAction private func restartExperience(_ sender: UIButton) {
restartExperienceHandler()
}
// MARK: - Panel Visibility
private func setMessageHidden(_ hide: Bool, animated: Bool) {
// The panel starts out hidden, so show it before animating opacity.
messagePanel.isHidden = false
guard animated else {
messagePanel.alpha = hide ? 0 : 1
return
}
UIView.animate(withDuration: 0.2, delay: 0, options: [.beginFromCurrentState], animations: {
self.messagePanel.alpha = hide ? 0 : 1
}, completion: nil)
}
}
extension ARCamera.TrackingState {
var presentationString: String {
switch self {
case .notAvailable:
return "TRACKING UNAVAILABLE"
case .normal:
return "TRACKING NORMAL"
case .limited(.excessiveMotion):
return "TRACKING LIMITED\nExcessive motion"
case .limited(.insufficientFeatures):
return "TRACKING LIMITED\nLow detail"
case .limited(.initializing):
return "Initializing"
case .limited(.relocalizing):
return "Recovering from interruption"
}
}
var recommendation: String? {
switch self {
case .limited(.excessiveMotion):
return "Try slowing down your movement, or reset the session."
case .limited(.insufficientFeatures):
return "Try pointing at a flat surface, or reset the session."
case .limited(.relocalizing):
return "Return to the location where you left off or try resetting the session."
default:
return nil
}
}
}
The definition of the ViewController class is:
/*
See LICENSE folder for this sample’s licensing information.
Abstract:
Main view controller for the ARKitVision sample.
*/
import UIKit
import SpriteKit
import ARKit
import Vision
class ViewController: UIViewController, UIGestureRecognizerDelegate, ARSKViewDelegate, ARSessionDelegate {
@IBOutlet weak var sceneView: ARSKView!
// The view controller that displays the status and "restart experience" UI.
private lazy var statusViewController: StatusViewController = {
return children.lazy.compactMap({ $0 as? StatusViewController }).first!
}()
// MARK: - View controller lifecycle
override func viewDidLoad() {
super.viewDidLoad()
// Configure and present the SpriteKit scene that draws overlay content.
let overlayScene = SKScene()
overlayScene.scaleMode = .aspectFill
sceneView.delegate = self
sceneView.presentScene(overlayScene)
sceneView.session.delegate = self
// Hook up status view controller callback.
statusViewController.restartExperienceHandler = { [unowned self] in
self.restartSession()
}
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
// Create a session configuration
let configuration = ARWorldTrackingConfiguration()
// Run the view's session
sceneView.session.run(configuration)
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
// Pause the view's session
sceneView.session.pause()
}
// MARK: - ARSessionDelegate
// Pass camera frames received from ARKit to Vision (when not already processing one)
/// - Tag: ConsumeARFrames
func session(_ session: ARSession, didUpdate frame: ARFrame) {
// Do not enqueue other buffers for processing while another Vision task is still running.
// The camera stream has only a finite amount of buffers available; holding too many buffers for analysis would starve the camera.
guard currentBuffer == nil, case .normal = frame.camera.trackingState else {
return
}
// Retain the image buffer for Vision processing.
self.currentBuffer = frame.capturedImage
classifyCurrentImage()
}
// MARK: - Vision classification
// Vision classification request and model
/// - Tag: ClassificationRequest
private lazy var classificationRequest: VNCoreMLRequest = {
do {
// Instantiate the model from its generated Swift class.
let model = try VNCoreMLModel(for: Inceptionv3().model)
let request = VNCoreMLRequest(model: model, completionHandler: { [weak self] request, error in
self?.processClassifications(for: request, error: error)
})
// Crop input images to square area at center, matching the way the ML model was trained.
request.imageCropAndScaleOption = .centerCrop
// Use CPU for Vision processing to ensure that there are adequate GPU resources for rendering.
request.usesCPUOnly = true
return request
} catch {
fatalError("Failed to load Vision ML model: \(error)")
}
}()
// The pixel buffer being held for analysis; used to serialize Vision requests.
private var currentBuffer: CVPixelBuffer?
// Queue for dispatching vision classification requests
private let visionQueue = DispatchQueue(label: "com.example.apple-samplecode.ARKitVision.serialVisionQueue")
// Run the Vision+ML classifier on the current image buffer.
/// - Tag: ClassifyCurrentImage
private func classifyCurrentImage() {
// Most computer vision tasks are not rotation agnostic so it is important to pass in the orientation of the image with respect to device.
let orientation = CGImagePropertyOrientation(UIDevice.current.orientation)
let requestHandler = VNImageRequestHandler(cvPixelBuffer: currentBuffer!, orientation: orientation)
visionQueue.async {
do {
// Release the pixel buffer when done, allowing the next buffer to be processed.
defer { self.currentBuffer = nil }
try requestHandler.perform([self.classificationRequest])
} catch {
print("Error: Vision request failed with error \"\(error)\"")
}
}
}
// Classification results
private var identifierString = ""
private var confidence: VNConfidence = 0.0
// Handle completion of the Vision request and choose results to display.
/// - Tag: ProcessClassifications
func processClassifications(for request: VNRequest, error: Error?) {
guard let results = request.results else {
print("Unable to classify image.\n\(error!.localizedDescription)")
return
}
// The `results` will always be `VNClassificationObservation`s, as specified by the Core ML model in this project.
let classifications = results as! [VNClassificationObservation]
// Show a label for the highest-confidence result (but only above a minimum confidence threshold).
if let bestResult = classifications.first(where: { result in result.confidence > 0.5 }),
let label = bestResult.identifier.split(separator: ",").first {
identifierString = String(label)
confidence = bestResult.confidence
} else {
identifierString = ""
confidence = 0
}
DispatchQueue.main.async { [weak self] in
self?.displayClassifierResults()
}
}
// Show the classification results in the UI.
private func displayClassifierResults() {
guard !self.identifierString.isEmpty else {
return // No object was classified.
}
let message = String(format: "Detected \(self.identifierString) with %.2f", self.confidence * 100) + "% confidence"
statusViewController.showMessage(message)
}
// MARK: - Tap gesture handler & ARSKViewDelegate
// Labels for classified objects by ARAnchor UUID
private var anchorLabels = [UUID: String]()
// When the user taps, add an anchor associated with the current classification result.
/// - Tag: PlaceLabelAtLocation
#IBAction func placeLabelAtLocation(sender: UITapGestureRecognizer) {
let hitLocationInView = sender.location(in: sceneView)
let hitTestResults = sceneView.hitTest(hitLocationInView, types: [.featurePoint, .estimatedHorizontalPlane])
if let result = hitTestResults.first {
// Add a new anchor at the tap location.
let anchor = ARAnchor(transform: result.worldTransform)
sceneView.session.add(anchor: anchor)
// Track anchor ID to associate text with the anchor after ARKit creates a corresponding SKNode.
anchorLabels[anchor.identifier] = identifierString
}
}
// When an anchor is added, provide a SpriteKit node for it and set its text to the classification label.
/// - Tag: UpdateARContent
func view(_ view: ARSKView, didAdd node: SKNode, for anchor: ARAnchor) {
guard let labelText = anchorLabels[anchor.identifier] else {
fatalError("missing expected associated label for anchor")
}
let label = TemplateLabelNode(text: labelText)
node.addChild(label)
}
// MARK: - AR Session Handling
func session(_ session: ARSession, cameraDidChangeTrackingState camera: ARCamera) {
statusViewController.showTrackingQualityInfo(for: camera.trackingState, autoHide: true)
switch camera.trackingState {
case .notAvailable, .limited:
statusViewController.escalateFeedback(for: camera.trackingState, inSeconds: 3.0)
case .normal:
statusViewController.cancelScheduledMessage(for: .trackingStateEscalation)
// Unhide content after successful relocalization.
setOverlaysHidden(false)
}
}
func session(_ session: ARSession, didFailWithError error: Error) {
guard error is ARError else { return }
let errorWithInfo = error as NSError
let messages = [
errorWithInfo.localizedDescription,
errorWithInfo.localizedFailureReason,
errorWithInfo.localizedRecoverySuggestion
]
// Filter out optional error messages.
let errorMessage = messages.compactMap({ $0 }).joined(separator: "\n")
DispatchQueue.main.async {
self.displayErrorMessage(title: "The AR session failed.", message: errorMessage)
}
}
func sessionWasInterrupted(_ session: ARSession) {
setOverlaysHidden(true)
}
func sessionShouldAttemptRelocalization(_ session: ARSession) -> Bool {
/*
Allow the session to attempt to resume after an interruption.
This process may not succeed, so the app must be prepared
to reset the session if the relocalizing status continues
for a long time -- see `escalateFeedback` in `StatusViewController`.
*/
return true
}
private func setOverlaysHidden(_ shouldHide: Bool) {
sceneView.scene!.children.forEach { node in
if shouldHide {
// Hide overlay content immediately during relocalization.
node.alpha = 0
} else {
// Fade overlay content in after relocalization succeeds.
node.run(.fadeIn(withDuration: 0.5))
}
}
}
private func restartSession() {
statusViewController.cancelAllScheduledMessages()
statusViewController.showMessage("RESTARTING SESSION")
anchorLabels = [UUID: String]()
let configuration = ARWorldTrackingConfiguration()
sceneView.session.run(configuration, options: [.resetTracking, .removeExistingAnchors])
}
// MARK: - Error handling
private func displayErrorMessage(title: String, message: String) {
// Present an alert informing about the error that has occurred.
let alertController = UIAlertController(title: title, message: message, preferredStyle: .alert)
let restartAction = UIAlertAction(title: "Restart Session", style: .default) { _ in
alertController.dismiss(animated: true, completion: nil)
self.restartSession()
}
alertController.addAction(restartAction)
present(alertController, animated: true, completion: nil)
}
}
This would indicate that your StatusViewController class doesn't inherit from UIViewController; the children property has been available on UIViewController subclasses for quite some time.
Are you able to share how you have composed your StatusViewController?
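For reference, here is a minimal sketch of the declaration the compiler expects: the class has to subclass UIViewController for the children lookup in ViewController to compile. Only the handler property is shown; everything else stays as in the code posted above.
import UIKit

// Sketch only: `children` (formerly `childViewControllers`) is declared on
// UIViewController, so StatusViewController must inherit from it for
// `children.lazy.compactMap { $0 as? StatusViewController }` to find it.
class StatusViewController: UIViewController {
    // Set by the parent ViewController; called when the user taps "Restart Experience".
    var restartExperienceHandler: () -> Void = {}
}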

QR Code Scanning not working

So I have just taken over an iOS project as their first in-house dev; the app was previously developed by an agency.
One of the features of the app is that it needs to scan QR codes. By the look of the code, the previous developers followed this tutorial on AppCoda to implement the QR scan. Everything looks fine and I can't see anything wrong with the code, yet it isn't working.
I also downloaded the completed tutorial project, and that worked when I tried the QR scan. I also tried copying and pasting every single line so that it was identical to the working tutorial, yet no luck.
I'm tearing my hair out trying to figure out why it isn't working.
Any help is much appreciated!
enum ScanState : Int {
case newDevice = 1
case resetDevice = 2
case replaceDevice = 3
}
class QRScannerViewController: BaseViewController,AVCaptureMetadataOutputObjectsDelegate {
#IBOutlet var scanZoneView: UIView!
#IBOutlet var scannerVIew: UIView!
#IBOutlet var scanInfoLabel: UILabel!
var capturedQR: String? = nil
var captureSession:AVCaptureSession?
var videoPreviewLayer:AVCaptureVideoPreviewLayer?
var qrCodeFrameView:UIView?
let supportedBarCodes = [AVMetadataObject.ObjectType.qr, AVMetadataObject.ObjectType.code128, AVMetadataObject.ObjectType.code39, AVMetadataObject.ObjectType.code93, AVMetadataObject.ObjectType.upce, AVMetadataObject.ObjectType.pdf417, AVMetadataObject.ObjectType.ean13, AVMetadataObject.ObjectType.aztec]
var type = "leg scan"
var device:Device?
var state:ScanState = .newDevice
override func viewDidLoad() {
super.viewDidLoad()
scanInfoLabel.text = "Scan your existing\nleg QR code"
self.navigationController?.navigationBar.dark()
//self.navigationItem.backBarButtonItem = UIBarButtonItem(title: "", style: .Plain, target: nil, action: nil)
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
self.navigationController?.setNavigationBarHidden(false, animated: true)
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
#if NOHARDWARE
moveToNextViewController()
#else
initiateCapture()
#endif
}
func initiateCapture() {
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
// Get an instance of the AVCaptureDeviceInput class using the previous device object.
var error:NSError?
let input: AnyObject!
do {
input = try AVCaptureDeviceInput(device: captureDevice!) as AVCaptureDeviceInput
} catch let error1 as NSError {
error = error1
input = nil
} catch _ {
input = nil
}
if (error != nil) {
// If any error occurs, simply log the description of it and don't continue any more.
print("\(error?.localizedDescription)")
return
}
// Initialize the captureSession object.
captureSession = AVCaptureSession()
// Set the input device on the capture session.
captureSession?.addInput(input as! AVCaptureInput)
// Initialize a AVCaptureMetadataOutput object and set it as the output device to the capture session.
let captureMetadataOutput = AVCaptureMetadataOutput()
captureSession?.addOutput(captureMetadataOutput)
// Set delegate and use the default dispatch queue to execute the call back
captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
captureMetadataOutput.metadataObjectTypes = supportedBarCodes
// Initialize the video preview layer and add it as a sublayer to the viewPreview view's layer.
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer?.frame = scannerVIew.layer.bounds
scannerVIew.layer.addSublayer(videoPreviewLayer!)
// Start video capture.
captureSession?.startRunning()
// Initialize QR Code Frame to highlight the QR code
qrCodeFrameView = UIView()
qrCodeFrameView?.layer.borderColor = UIColor.green.cgColor
qrCodeFrameView?.layer.borderWidth = 2
scannerVIew.addSubview(qrCodeFrameView!)
scannerVIew.bringSubview(toFront: qrCodeFrameView!)
//qrCapturedLabel.text = "No QR code is detected"
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
func metadataOutput(captureOutput: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
// Make sure the metadataObjects array contains at least one object.
if metadataObjects.isEmpty {
qrCodeFrameView?.frame = CGRect.zero
return
}
// Get the metadata object.
let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
// Here we use filter method to check if the type of metadataObj is supported
// Instead of hardcoding the AVMetadataObjectTypeQRCode, we check if the type
// can be found in the array of supported bar codes.
if supportedBarCodes.filter({ $0 == metadataObj.type }).count > 0 {
// If the found metadata is equal to the QR code metadata then update the status label's text and set the bounds
let barCodeObject = videoPreviewLayer?.transformedMetadataObject(for: metadataObj as AVMetadataMachineReadableCodeObject) as! AVMetadataMachineReadableCodeObject
let intersectionRect = barCodeObject.bounds.intersection(self.scanZoneView.frame)
if !intersectionRect.isNull &&
(intersectionRect.size.width * intersectionRect.size.height) > self.scanZoneView.bounds.width * self.scanZoneView.bounds.height / 7 {
qrCodeFrameView?.frame = barCodeObject.bounds
if process(metadataObj.stringValue!) {
captureSession?.stopRunning()
}
}
}
}
#IBAction func didTapCancel(_ sender: AnyObject) {
self.dismiss(animated: true, completion: nil)
}
}
extension QRScannerViewController {
func process(_ scanText : String) -> Bool {
var legCode : String
let codeComponents = scanText.components(separatedBy: ";")
if codeComponents.count > 0 {
legCode = codeComponents[0]
} else {
// Invalid number of parameters separated by a ;
return false
}
// TODO Validate the LEG to LEG-XXXXX
if legCode.hasPrefix("LEG-") {
let delta: Int64 = 1 * Int64(NSEC_PER_SEC)
let time = DispatchTime.now() + Double(delta) / Double(NSEC_PER_SEC)
DispatchQueue.main.asyncAfter(deadline: time, execute: {
switch self.state {
case .resetDevice:
let realm = try! Realm()
let deviceList = realm.objects(Device.self)
let lc = legCode
self.device = deviceList.filter("legCode = %@", lc).first
if self.device == nil {
// TODO Error message: Device not associated with LEG
let vc = ErrorViewController.createErrorViewController(.DeviceNotFound)
self.present(vc, animated: true, completion: nil)
return
}
self.moveToNextViewController()
default:
self.presentingViewController?.dismiss(animated: true, completion: nil)
}
});
return true
}
return false
}
func moveToNextViewController() {
let inspectionStoryboard = UIStoryboard(name: "Impact", bundle: nil)
if let resetVC = inspectionStoryboard.instantiateViewController(withIdentifier: ImpactDetectionViewController.storyboardID) as? ImpactDetectionViewController {
resetVC.device = device
// TODO Pass the impact type across too when the G2 API is set
self.navigationController?.pushViewController(resetVC, animated: false)
}
}
#IBAction func cancelToVC(_ segue: UIStoryboardSegue) { }
}
EDIT
By "not working" I mean the AVCaptureMetadataOutputObjectsDelegate callback is never being called, so it never seems to detect a QR code. In the AppCoda tutorial a green square is overlaid where a QR code is detected, but this never happens when I put that code into this app.
The camera is actually running, but QR codes are never detected.
Turns out the answer was deceptively (and annoyingly) simple! In Swift 4 the delegate methods have been renamed slightly. To fix it, all I had to do was change
func metadataOutput(captureOutput: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
to
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
You're not connecting your class to AVCaptureMetadataOutput's delegate; that's why the AVCaptureMetadataOutputObjectsDelegate methods are never invoked. More info: https://developer.apple.com/documentation/avfoundation/avcapturemetadataoutputobjectsdelegate
Try this:
// Initialize a AVCaptureMetadataOutput object and set it as the output device to the capture session.
let captureMetadataOutput = AVCaptureMetadataOutput()
captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main) // register self as the delegate and the queue for its callbacks.
captureSession?.addOutput(captureMetadataOutput)
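Putting the two fixes together, a minimal sketch of a working setup in Swift 4+ might look like the following. Class and method names here are illustrative, not taken from your project; note that the delegate is registered via setMetadataObjectsDelegate(_:queue:), since metadataObjectsDelegate itself is read-only.
import AVFoundation
import UIKit

// Sketch combining both fixes: register the delegate with
// setMetadataObjectsDelegate(_:queue:) and use the Swift 4 callback signature.
final class ScannerViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {

    private let captureSession = AVCaptureSession()

    func configureScanning() {
        guard let camera = AVCaptureDevice.default(for: .video),
              let input = try? AVCaptureDeviceInput(device: camera),
              captureSession.canAddInput(input) else { return }
        captureSession.addInput(input)

        let output = AVCaptureMetadataOutput()
        guard captureSession.canAddOutput(output) else { return }
        captureSession.addOutput(output)

        // The delegate is registered together with the queue its callbacks run on;
        // metadataObjectsDelegate itself cannot be assigned directly.
        output.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        // Set the types *after* the output is attached to the session,
        // otherwise they are rejected as unsupported.
        output.metadataObjectTypes = [.qr]

        captureSession.startRunning()
    }

    // Swift 4 signature: the first parameter is unnamed (`_ output`).
    func metadataOutput(_ output: AVCaptureMetadataOutput,
                        didOutput metadataObjects: [AVMetadataObject],
                        from connection: AVCaptureConnection) {
        guard let code = metadataObjects.first as? AVMetadataMachineReadableCodeObject,
              code.type == .qr else { return }
        print("Scanned: \(code.stringValue ?? "")")
    }
}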

How to take a picture using the proximity sensor?

I am having trouble getting the device to take an image with the rear camera when the proximity sensor is triggered. I don't want the camera preview to show up; I just want the device to take the photo and present it in the imageView. I have the proximity sensor working, and I am using imagePicker.takePicture() to take the image when the proximity sensor fires, but that doesn't seem to work. What method/function can I use to take the picture programmatically, without user input?
This is my code so far:
class ViewController: UIViewController, UINavigationControllerDelegate, UIImagePickerControllerDelegate {
#IBOutlet var imageView: UIImageView!
var imagePicker: UIImagePickerController!
//*The function in question*
func proximityChanged(notification: NSNotification) {
let device = notification.object as? UIDevice
if device?.proximityState == true {
print("\(device) detected!")
If you have trouble capturing photos with UIImagePickerController, I suggest using AVFoundation.
Below is a working example. Photo capture is triggered by the proximity sensor.
You can add a preview if you need it.
import UIKit
import AVFoundation
final class CaptureViewController: UIViewController {
#IBOutlet weak var imageView: UIImageView!
private static let captureSessionPreset = AVCaptureSessionPresetPhoto
private var captureSession: AVCaptureSession!
private var photoOutput: AVCaptureStillImageOutput!
private var initialized = false
override func viewDidLoad() {
super.viewDidLoad()
initialized = setupCaptureSession()
}
override func viewWillAppear(animated: Bool) {
super.viewWillAppear(animated)
if initialized {
captureSession.startRunning()
UIDevice.currentDevice().proximityMonitoringEnabled = true
NSNotificationCenter.defaultCenter().addObserver(self, selector: #selector(proximityStateDidChange), name: UIDeviceProximityStateDidChangeNotification, object: nil)
}
}
override func viewDidDisappear(animated: Bool) {
super.viewDidDisappear(animated)
if initialized {
NSNotificationCenter.defaultCenter().removeObserver(self, name: UIDeviceProximityStateDidChangeNotification, object: nil)
UIDevice.currentDevice().proximityMonitoringEnabled = false
captureSession.stopRunning()
}
}
dynamic func proximityStateDidChange(notification: NSNotification) {
if UIDevice.currentDevice().proximityState {
captureImage()
}
}
// MARK: - Capture Image
private func captureImage() {
if let c = findConnection() {
photoOutput.captureStillImageAsynchronouslyFromConnection(c) { sampleBuffer, error in
if let jpeg = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer),
let image = UIImage(data: jpeg)
{
dispatch_async(dispatch_get_main_queue()) { [weak self] in
self?.imageView.image = image
}
}
}
}
}
private func findConnection() -> AVCaptureConnection? {
for c in photoOutput.connections {
let c = c as? AVCaptureConnection
for p in c?.inputPorts ?? [] {
if p.mediaType == AVMediaTypeVideo {
return c
}
}
}
return nil
}
// MARK: - Setup Capture Session
private func setupCaptureSession() -> Bool {
captureSession = AVCaptureSession()
if captureSession.canSetSessionPreset(CaptureViewController.captureSessionPreset) {
captureSession.sessionPreset = CaptureViewController.captureSessionPreset
if setupCaptureSessionInput() && setupCaptureSessionOutput() {
return true
}
}
return false
}
private func setupCaptureSessionInput() -> Bool {
if let captureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo),
let captureDeviceInput = try? AVCaptureDeviceInput.init(device: captureDevice)
{
if captureSession.canAddInput(captureDeviceInput) {
captureSession.addInput(captureDeviceInput)
return true
}
}
return false
}
private func setupCaptureSessionOutput() -> Bool {
photoOutput = AVCaptureStillImageOutput()
photoOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
if captureSession.canAddOutput(photoOutput) {
captureSession.addOutput(photoOutput)
return true
}
return false
}
}
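If you are targeting iOS 10 or later, note that AVCaptureStillImageOutput (used above) has since been deprecated in favor of AVCapturePhotoOutput. Below is a rough sketch of the same proximity-triggered capture with the current APIs; it assumes a storyboard-connected imageView as in the answer above, and the class name and preset choice are illustrative.
import UIKit
import AVFoundation

// Sketch using current APIs: AVCapturePhotoOutput instead of the
// deprecated AVCaptureStillImageOutput, modern NotificationCenter syntax.
final class ProximityCaptureViewController: UIViewController, AVCapturePhotoCaptureDelegate {
    @IBOutlet weak var imageView: UIImageView!

    private let session = AVCaptureSession()
    private let photoOutput = AVCapturePhotoOutput()

    override func viewDidLoad() {
        super.viewDidLoad()
        session.sessionPreset = .photo
        if let camera = AVCaptureDevice.default(for: .video),
           let input = try? AVCaptureDeviceInput(device: camera),
           session.canAddInput(input), session.canAddOutput(photoOutput) {
            session.addInput(input)
            session.addOutput(photoOutput)
        }
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        session.startRunning()
        // Enable proximity monitoring and listen for state changes.
        UIDevice.current.isProximityMonitoringEnabled = true
        NotificationCenter.default.addObserver(self,
                                               selector: #selector(proximityChanged),
                                               name: UIDevice.proximityStateDidChangeNotification,
                                               object: nil)
    }

    override func viewDidDisappear(_ animated: Bool) {
        super.viewDidDisappear(animated)
        NotificationCenter.default.removeObserver(self)
        UIDevice.current.isProximityMonitoringEnabled = false
        session.stopRunning()
    }

    @objc private func proximityChanged(_ notification: Notification) {
        // Capture only when something is close to the sensor.
        guard UIDevice.current.proximityState else { return }
        photoOutput.capturePhoto(with: AVCapturePhotoSettings(), delegate: self)
    }

    // AVCapturePhotoCaptureDelegate
    func photoOutput(_ output: AVCapturePhotoOutput,
                     didFinishProcessingPhoto photo: AVCapturePhoto,
                     error: Error?) {
        guard error == nil,
              let data = photo.fileDataRepresentation(),
              let image = UIImage(data: data) else { return }
        DispatchQueue.main.async { self.imageView.image = image }
    }
}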

Resources