SwiftUI AVCaptureSession freezes other views in a VStack or ScrollView - iOS

I have a view which contains a few text views, a button, and a camera view inside a scroll view:
VStack {
    ZStack {
        ScrollView {
            HStack(alignment: .top) {
                LogoMainView()
            }
            .frame(maxWidth: .infinity, maxHeight: 44, alignment: .leading)
            .padding(.horizontal, 5)

            VStack {
                HeaderText(title: "Some header")
                ParagraphText(text: "Some random text")
                CameraDropDown(
                    toggleFront: {
                        self.captureSession.changeOrientation(orientation: .front)
                    }, toggleBack: {
                        self.captureSession.changeOrientation(orientation: .back)
                    })
                CameraView()
                    .onAppear {
                        self.appDelegate.captureSession.setup()
                        self.appDelegate.captureSession.start()
                    }
                    .onDisappear {
                        self.appDelegate.captureSession.stop()
                    }
                    .onChange(of: self.appDelegate.captureSession.orientation) { newValue in
                        self.appDelegate.captureSession.orientation = newValue
                        self.appDelegate.captureSession.setup()
                        self.appDelegate.captureSession.start()
                    }
            }
            .padding(.horizontal, 10)
        }
    }
}
.clipped()
}
Tapping CameraDropDown should expand it and change the camera orientation once I choose the desired one. But while CameraView() is running (i.e. the camera preview is showing), the dropdown simply does not respond to touches; when I switch the CameraView() off, it responds to touches again and the orientation changes take effect.
What I have tried is profiling for hangs, but I could not catch a hang.
Here is my camera class:
//
// CaptureSession.swift
import Foundation
import AVFoundation
import SwiftUI

class CaptureSession: NSObject, ObservableObject {
    @Published var sampleBuffer: CMSampleBuffer?
    @Published var orientation: AVCaptureDevice.Position = .back
    var captureSession: AVCaptureSession?

    func setup() {
        var allowedAccess = false
        let blocker = DispatchGroup()
        blocker.enter()
        AVCaptureDevice.requestAccess(for: .video) { flag in
            allowedAccess = flag
            blocker.leave()
        }
        blocker.wait() // blocks the calling thread until the permission callback fires
        if !allowedAccess {
            print("Camera access is not allowed.")
            return
        }
        let session = AVCaptureSession()
        session.beginConfiguration()
        let videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: orientation)
        guard videoDevice != nil,
              let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice!),
              session.canAddInput(videoDeviceInput) else {
            print("Unable to detect camera.")
            return
        }
        session.addInput(videoDeviceInput)
        session.commitConfiguration()
        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "SampleBuffer"))
        if session.canAddOutput(videoOutput) {
            session.addOutput(videoOutput)
        }
        self.captureSession = session
    }

    func start() {
        guard let captureSession = self.captureSession else { return }
        if !captureSession.isRunning {
            DispatchQueue.global(qos: .default).async {
                captureSession.startRunning()
            }
        }
    }

    func stop() {
        guard let captureSession = self.captureSession else { return }
        if captureSession.isRunning {
            captureSession.stopRunning()
        }
    }

    func changeOrientation(orientation: AVCaptureDevice.Position) {
        guard self.captureSession != nil else { return }
        // Both branches of the original if/else were identical, so one path suffices.
        DispatchQueue.main.async {
            self.orientation = orientation
            self.setup()
            self.start()
        }
    }
}

extension CaptureSession: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        DispatchQueue.main.async {
            self.sampleBuffer = sampleBuffer
        }
    }
}
How can I make the camera run while the other views still respond to touches, or what am I doing wrong here? Please help.
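For what it's worth, two details in the class above keep the main thread busy: setup() parks it on blocker.wait() until the permission callback fires, and every captured frame is republished to the @Published sampleBuffer on the main queue, which forces SwiftUI to recompute the view tree at camera frame rate and can starve touch handling. Below is a minimal sketch of a leaner variant; this is an assumption about the cause, not a confirmed fix, and the names LeanCaptureSession, sessionQueue and frameHandler are illustrative:

// Sketch: session work on a serial background queue, frames delivered
// through a callback instead of a per-frame @Published update.
import AVFoundation
import SwiftUI

final class LeanCaptureSession: NSObject, ObservableObject {
    // Publish only what the UI actually renders.
    @Published var orientation: AVCaptureDevice.Position = .back

    let session = AVCaptureSession()
    private let sessionQueue = DispatchQueue(label: "capture.session.queue")

    // Consumers read frames on the video queue; no main-queue hop per frame.
    var frameHandler: ((CMSampleBuffer) -> Void)?

    func start() {
        sessionQueue.async { // never block the main thread on startRunning()
            // (device/input/output configuration omitted; see setup() above)
            guard !self.session.isRunning else { return }
            self.session.startRunning()
        }
    }

    func stop() {
        sessionQueue.async {
            guard self.session.isRunning else { return }
            self.session.stopRunning()
        }
    }
}

extension LeanCaptureSession: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        frameHandler?(sampleBuffer) // stays on the sample-buffer queue
    }
}

This way SwiftUI only re-renders when orientation changes, and touches elsewhere in the ScrollView keep working while the camera runs.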

Related

captureOutput from AVCaptureVideoDataOutputSampleBufferDelegate is not being called

I am seeking to leverage the device camera as a light sensor, as described in this post. Unfortunately, the captureOutput function is never called by the AVCaptureVideoDataOutputSampleBufferDelegate. It may be relevant that I am attempting this inside a SwiftUI app; I have not seen this problem posted about or resolved in the context of a SwiftUI app.
class VideoStream: NSObject, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    @Published var luminosityReading: Double = 0.0
    var session: AVCaptureSession!

    override init() {
        super.init()
        authorizeCapture()
    }

    func authorizeCapture() {
        // request camera permissions and call beginCapture()
        ...
    }

    func beginCapture() {
        print("beginCapture entered") // prints
        session = AVCaptureSession()
        session.beginConfiguration()
        let videoDevice = bestDevice() // func def omitted for readability
        print("Device: \(videoDevice)") // prints a valid device
        guard
            let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
            session.canAddInput(videoDeviceInput)
        else {
            print("Camera selection failed")
            return
        }
        let videoOutput = AVCaptureVideoDataOutput()
        guard
            session.canAddOutput(videoOutput)
        else {
            print("Error creating video output")
            return
        }
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "VideoQueue"))
        session.addOutput(videoOutput)
        session.sessionPreset = .medium
        session.commitConfiguration()
        session.startRunning()
    }

    // From: https://stackoverflow.com/questions/41921326/how-to-get-light-value-from-avfoundation/46842115#46842115
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        print("captureOutput entered") // never printed
        // Retrieving EXIF data of camera frame buffer
        let rawMetadata = CMCopyDictionaryOfAttachments(allocator: nil, target: sampleBuffer, attachmentMode: CMAttachmentMode(kCMAttachmentMode_ShouldPropagate))
        let metadata = CFDictionaryCreateMutableCopy(nil, 0, rawMetadata) as NSMutableDictionary
        let exifData = metadata.value(forKey: "{Exif}") as? NSMutableDictionary
        let FNumber: Double = exifData?["FNumber"] as! Double
        let ExposureTime: Double = exifData?["ExposureTime"] as! Double
        let ISOSpeedRatingsArray = exifData!["ISOSpeedRatings"] as? NSArray
        let ISOSpeedRatings: Double = ISOSpeedRatingsArray![0] as! Double
        let CalibrationConstant: Double = 50
        // Calculating the luminosity
        let luminosity: Double = (CalibrationConstant * FNumber * FNumber) / (ExposureTime * ISOSpeedRatings)
        luminosityReading = luminosity
    }
}
Lastly, I instantiate VideoStream as a StateObject in my ContentView and attempt to read the updated luminosityReading:
struct ContentView: View {
    @StateObject var videoStream = VideoStream()

    var body: some View {
        Text(String(format: "%.2f Lux", videoStream.luminosityReading))
            .padding()
    }
}
I have read and implemented the solutions described in these similar posts:
Using AVCaptureVideoDataOutputSampleBufferDelegate without a preview window
captureOutput not being called
captureOutput not being called from delegate
captureOutput not being called by AVCaptureAudioDataOutputSampleBufferDelegate
In Swift, adapted AVCaptureVideoDataOutputSampleBufferDelegate, but captureOutput never getting called
AVCaptureVideoDataOutput captureOutput not being called
Swift - captureOutput is not being executed
Why AVCaptureVideoDataOutputSampleBufferDelegate method is not called
Why captureOutput is never called?
func captureOutput is never called
captureOutput() function is never called swift4
Minimal Reproducible Example:
import SwiftUI
import AVKit

struct ContentView: View {
    @StateObject var videoStream = VideoStream()

    var body: some View {
        Text(String(format: "%.2f Lux", videoStream.luminosityReading))
    }
}

class VideoStream: NSObject, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    @Published var luminosityReading: Double = 0.0
    var session: AVCaptureSession!

    override init() {
        super.init()
        authorizeCapture()
    }

    func authorizeCapture() {
        switch AVCaptureDevice.authorizationStatus(for: .video) {
        case .authorized: // The user has previously granted access to the camera.
            beginCapture()
        case .notDetermined: // The user has not yet been asked for camera access.
            AVCaptureDevice.requestAccess(for: .video) { granted in
                if granted {
                    self.beginCapture()
                }
            }
        case .denied: // The user has previously denied access.
            return
        case .restricted: // The user can't grant access due to restrictions.
            return
        }
    }

    func beginCapture() {
        print("beginCapture entered")
        let testDevice = AVCaptureDevice.default(for: .video)
        print("Image Capture Device: \(testDevice)")
        guard
            let videoDeviceInput = try? AVCaptureDeviceInput(device: testDevice!),
            session.canAddInput(videoDeviceInput)
        else {
            print("Camera selection failed")
            return
        }
        let videoOutput = AVCaptureVideoDataOutput()
        guard
            session.canAddOutput(videoOutput)
        else {
            print("Error creating video output")
            return
        }
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "VideoQueue"))
        session.addOutput(videoOutput)
        session.sessionPreset = .medium
        session.commitConfiguration()
        session.startRunning()
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        print("captureOutput entered") // never printed
        // light meter logic to update luminosityReading
    }
}
You are missing adding the input:
if session.canAddInput(videoDeviceInput) {
    session.addInput(videoDeviceInput)
}
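One further observation on the minimal example, separate from the accepted fix (this is an inference from the posted code): session is declared as AVCaptureSession! but never assigned before beginCapture() uses it, so session.canAddInput(...) would crash on the implicitly unwrapped optional. A sketch of the opening of beginCapture() with both corrections applied:

func beginCapture() {
    print("beginCapture entered")
    session = AVCaptureSession()        // never initialized in the MRE
    session.beginConfiguration()
    guard let device = AVCaptureDevice.default(for: .video),
          let videoDeviceInput = try? AVCaptureDeviceInput(device: device),
          session.canAddInput(videoDeviceInput)
    else {
        print("Camera selection failed")
        return
    }
    session.addInput(videoDeviceInput)  // the missing call from the answer
    // ...then add the AVCaptureVideoDataOutput, commit, and start as posted
}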

Why is my view not updating when a value is updated using @Published and @ObservedObject?

I am creating an object recognition app that takes frames from the camera and outputs a description of the image into a text view on the screen. I can print the output to the console, but cannot get the view to update with the text using the ObservableObject protocol. I have looked for solutions but none have seemed to work.
Camera View:
import SwiftUI
import UIKit
import AVFoundation
import Vision

struct Analysis {
    var description: String = "No description"
    var confidence: Double = 0.0
}

final class CameraView: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, ObservableObject {
    var captureSession: AVCaptureSession!
    var previewLayer: AVCaptureVideoPreviewLayer!
    let dataOutput = AVCaptureVideoDataOutput()
    var frameCounter = 0
    @Published var analysis = Analysis()

    override func viewDidLoad() {
        super.viewDidLoad()
        // note: the rotated() selector target is not shown in the post
        NotificationCenter.default.addObserver(self, selector: #selector(CameraView.rotated), name: UIDevice.orientationDidChangeNotification, object: nil)
        captureSession = AVCaptureSession()
        guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
        let videoInput: AVCaptureDeviceInput
        do {
            videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
        } catch {
            return
        }
        if captureSession.canAddInput(videoInput) {
            captureSession.addInput(videoInput)
        } else {
            failed()
            return
        }
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.frame = view.layer.bounds
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.addSublayer(previewLayer)
        captureSession.startRunning()
        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        captureSession.addOutput(dataOutput)
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        frameCounter += 1
        if frameCounter == 15 {
            frameCounter = 0
            guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
            guard let model = try? VNCoreMLModel(for: Resnet50().model) else { return }
            let request = VNCoreMLRequest(model: model) { finishedReq, err in
                guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
                guard let firstObservation = results.first else { return }
                self.analysis = Analysis(description: firstObservation.identifier, confidence: Double(firstObservation.confidence))
            }
            try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
            print(self.analysis)
        }
    }
}

extension CameraView: UIViewControllerRepresentable {
    public typealias UIViewControllerType = CameraView

    func makeUIViewController(context: UIViewControllerRepresentableContext<CameraView>) -> CameraView {
        return CameraView()
    }

    func updateUIViewController(_ uiViewController: CameraView, context: UIViewControllerRepresentableContext<CameraView>) {
    }
}
Content View:
struct ContentView: View {
    @ObservedObject var camera = CameraView()

    var body: some View {
        ZStack {
            camera
                .ignoresSafeArea()
            VStack {
                Spacer()
                ZStack {
                    Rectangle()
                        .frame(height: 75)
                        .background(.regularMaterial)
                        .cornerRadius(20)
                        .padding()
                    Text(camera.analysis.description)
                        .font(.largeTitle)
                }
            }
        }
    }
}
Please let me know if you need more information.
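One likely cause (an assumption from reading the code, not a verified fix): makeUIViewController returns CameraView(), a brand-new instance, so the controller SwiftUI puts on screen is not the @ObservedObject instance whose analysis the Text is reading; the Vision completion also assigns analysis from a background queue. A sketch of a wrapper that reuses the observed instance (CameraViewRepresentable is an illustrative name):

import SwiftUI

// Sketch: wrap the observed controller instead of instantiating a new one.
struct CameraViewRepresentable: UIViewControllerRepresentable {
    let controller: CameraView // the same instance ContentView observes

    func makeUIViewController(context: Context) -> CameraView { controller }
    func updateUIViewController(_ uiViewController: CameraView, context: Context) {}
}

// In ContentView, replace `camera` in the body with:
//     CameraViewRepresentable(controller: camera).ignoresSafeArea()
// and publish from the main thread inside the Vision completion:
//     DispatchQueue.main.async {
//         self.analysis = Analysis(description: firstObservation.identifier,
//                                  confidence: Double(firstObservation.confidence))
//     }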

iOS Swift AVFoundation Video Recording AVCaptureMovieFileOutput isRecording value false every time

I'm trying to record and save video using the AVFoundation framework with both the front and rear cameras. I'm able to start the session but unable to save the video recording to the documents directory.
I check movieOutput.isRecording and it returns false every time; hence the delegate's output method is never called. Even the start delegate is not called when recording begins.
import UIKit
import Foundation
import AVKit
import AVFoundation

class AppVideoRecorder: NSObject {
    private var session = AVCaptureSession()
    private var movieOutput = AVCaptureMovieFileOutput()
    private var camera: AVCaptureDevice?
    private var activeInput: AVCaptureDeviceInput?
    private var previewLayer = AVCaptureVideoPreviewLayer()
    private var renderView: UIView!

    var isFrontCamera: Bool = false

    init(for view: UIView) {
        self.renderView = view
    }

    deinit {
        print("Called")
    }

    func setupSession() {
        self.session.sessionPreset = .high
        // Setup Camera
        self.camera = AVCaptureDevice.default(
            .builtInWideAngleCamera,
            for: .video,
            position: self.isFrontCamera ? .front : .back
        )
        if let camera = self.camera {
            do {
                let input = try AVCaptureDeviceInput(device: camera)
                if self.session.canAddInput(input) {
                    self.session.addInput(input)
                    self.activeInput = input
                }
            } catch {
                print(error)
            }
        }
        // Setup Microphone
        if let microphone = AVCaptureDevice.default(for: .audio) {
            do {
                let micInput = try AVCaptureDeviceInput(device: microphone)
                if self.session.canAddInput(micInput) {
                    self.session.addInput(micInput)
                }
            } catch {
                print(error)
            }
        }
        // Movie output
        if self.session.canAddOutput(self.movieOutput) {
            self.session.addOutput(self.movieOutput)
        }
    }

    func setupPreview() {
        // Configure previewLayer
        self.previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
        self.previewLayer.frame = self.renderView.bounds
        self.previewLayer.videoGravity = .resizeAspectFill
        self.renderView.layer.addSublayer(self.previewLayer)
    }

    func startSession() {
        if self.session.isRunning { return }
        DispatchQueue.main.async {
            self.session.startRunning()
        }
    }

    func stopSession() {
        if self.session.isRunning {
            DispatchQueue.main.async {
                self.session.stopRunning()
            }
        }
    }

    func removeInput() {
        guard let input = self.activeInput else { return }
        self.session.removeInput(input)
    }

    func isCameraOn(completion: @escaping (Bool) -> Void) {
        if AVCaptureDevice.authorizationStatus(for: .video) == .authorized {
            completion(true)
        } else {
            AVCaptureDevice.requestAccess(for: .video,
                                          completionHandler: { granted in
                completion(granted)
            })
        }
    }

    func toggleCamera() {
        self.session.beginConfiguration()
        for input in self.session.inputs {
            if let inputObj = input as? AVCaptureDeviceInput {
                self.session.removeInput(inputObj)
            }
        }
        self.camera = AVCaptureDevice.default(
            .builtInWideAngleCamera,
            for: .video,
            position: self.isFrontCamera ? .front : .back
        )
        if let camera = self.camera {
            do {
                let input = try AVCaptureDeviceInput(device: camera)
                if self.session.canAddInput(input) {
                    self.session.addInput(input)
                    self.activeInput = input
                }
            } catch {
                print(error)
            }
        }
        self.session.commitConfiguration()
    }
}
extension AppVideoRecorder: AVCaptureFileOutputRecordingDelegate {
    private var currentVideoOrientation: AVCaptureVideoOrientation {
        var orientation: AVCaptureVideoOrientation
        switch UIDevice.current.orientation {
        case .portrait:
            orientation = .portrait
        case .landscapeRight:
            orientation = .landscapeLeft
        case .portraitUpsideDown:
            orientation = .portraitUpsideDown
        default:
            orientation = .landscapeRight
        }
        return orientation
    }

    func recordVideo() {
        if self.movieOutput.isRecording { // FALSE EVERY TIME
            self.stopRecording()
        } else {
            self.startRecording()
        }
    }

    private func startRecording() {
        guard let connection = self.movieOutput.connection(with: .video),
              let device = self.activeInput?.device else { return }
        // handle return error
        if connection.isVideoOrientationSupported {
            connection.videoOrientation = self.currentVideoOrientation
        }
        if connection.isVideoStabilizationSupported {
            connection.preferredVideoStabilizationMode = .auto
        }
        if device.isSmoothAutoFocusSupported {
            do {
                try device.lockForConfiguration()
                device.isSmoothAutoFocusEnabled = false
                device.unlockForConfiguration()
            } catch {
                print("Error setting configuration: \(error)")
            }
        }
        let paths = FileManager.default.urls(for: .documentDirectory,
                                             in: .userDomainMask)
        guard let path = paths.first else { return }
        let fileUrl = path.appendingPathComponent("celeb_video.mp4")
        try? FileManager.default.removeItem(at: fileUrl)
        self.movieOutput.startRecording(to: fileUrl, recordingDelegate: self)
    }

    private func stopRecording() {
        self.movieOutput.stopRecording()
    }

    func fileOutput(_ output: AVCaptureFileOutput,
                    didFinishRecordingTo outputFileURL: URL,
                    from connections: [AVCaptureConnection],
                    error: Error?) {
        print("DELEGATE CALL BACK")
        if let error = error {
            // do something
            print(error)
        } else {
            // do something
            print(outputFileURL.path)
            // UISaveVideoAtPathToSavedPhotosAlbum(outputFileURL.path, nil, nil, nil)
        }
    }

    func fileOutput(_ output: AVCaptureFileOutput,
                    didStartRecordingTo fileURL: URL,
                    from connections: [AVCaptureConnection]) {
        print("didStartRecordingTo CALL BACK:", fileURL.path)
    }
}
Here is my calling code in the view controller; recordingView is a UIView.
private lazy var recorder: AppVideoRecorder = {
    return AppVideoRecorder(for: self.recordingView)
}()

@IBAction func recordingAction(_ sender: UIButton) {
    sender.isSelected.toggle()
    if sender.isSelected {
        self.recorder.setupSession()
        self.recorder.setupPreview()
        self.recorder.startSession()
        self.recorder.recordVideo()
    } else {
        self.recorder.recordVideo()
        self.recorder.removeInput()
        self.recorder.stopSession()
    }
}

@IBAction func swapCameraAction(_ sender: UIButton) {
    sender.isSelected.toggle()
    self.recorder.isFrontCamera = sender.isSelected
    self.recorder.toggleCamera()
}
Please let me know what I missed.
Update: following the link Starting video recording immediately with AVCaptureMovieFileOutput, I added the notifications below, and now it works; the session simply takes time to start.
private func setupNotifications() {
    NotificationCenter.default.addObserver(self,
                                           selector: #selector(sessionDidStartRunning(_:)),
                                           name: .AVCaptureSessionDidStartRunning,
                                           object: nil)
    NotificationCenter.default.addObserver(self,
                                           selector: #selector(sessionDidStopRunning(_:)),
                                           name: .AVCaptureSessionDidStopRunning,
                                           object: nil)
}

@objc
private func sessionDidStartRunning(_ notification: NSNotification) {
    self.startRecording()
}

@objc
private func sessionDidStopRunning(_ notification: NSNotification) {
}
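As a side note, startRunning() is a blocking call that returns once the session is running (or has failed), so an alternative to the notification pair is to start the session on a background queue and kick off recording when the call returns. A sketch under that assumption, added to AppVideoRecorder (sessionQueue and startSessionThenRecord are illustrative names):

private let sessionQueue = DispatchQueue(label: "recorder.session.queue")

func startSessionThenRecord() {
    sessionQueue.async {
        if !self.session.isRunning {
            self.session.startRunning() // synchronous; returns once running
        }
        DispatchQueue.main.async {
            self.recordVideo() // safe now that the session is live
        }
    }
}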

How to get camera frames to process using SwiftUI?

I'm trying to get frames, like VideoCapture in OpenCV, using SwiftUI.
Here is my code, which is not working. I expected "Got a frame!" to be printed to the console whenever a frame from the camera is available; at least, I can see the preview on the screen.
How can I solve this, or is there any other idea? If you have a reference for doing the same thing, it would be helpful.
// For camera streaming
struct DemoVideoStreaming: View {
    var body: some View {
        VStack {
            PreviewHolder()
        }.frame(minWidth: 0, maxWidth: .infinity, minHeight: 0, maxHeight: .infinity, alignment: .center)
    }
}

struct PreviewHolder: UIViewRepresentable {
    func makeUIView(context: UIViewRepresentableContext<PreviewHolder>) -> PreviewView {
        PreviewView()
    }

    func updateUIView(_ uiView: PreviewView, context: UIViewRepresentableContext<PreviewHolder>) {
    }

    typealias UIViewType = PreviewView
}

class PreviewView: UIView, AVCaptureVideoDataOutputSampleBufferDelegate {
    private var captureSession: AVCaptureSession?

    init() {
        super.init(frame: .zero)
        var allowedAccess = false
        let blocker = DispatchGroup()
        blocker.enter()
        AVCaptureDevice.requestAccess(for: .video) { flag in
            allowedAccess = flag
            blocker.leave()
        }
        blocker.wait()
        if !allowedAccess {
            print("!!! NO ACCESS TO CAMERA")
            return
        }
        // setup session
        // Step 3. AVCaptureSession
        let session = AVCaptureSession()
        session.beginConfiguration()
        let videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera,
                                                  for: .video, position: .front) // alternate AVCaptureDevice.default(for: .video)
        guard videoDevice != nil, let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice!), session.canAddInput(videoDeviceInput) else {
            print("!!! NO CAMERA DETECTED")
            return
        }
        session.addInput(videoDeviceInput)
        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "sample buffer"))
        guard session.canAddOutput(videoOutput) else { return }
        session.addOutput(videoOutput)
        session.commitConfiguration()
        self.captureSession = session
    }

    override class var layerClass: AnyClass {
        AVCaptureVideoPreviewLayer.self
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    var videoPreviewLayer: AVCaptureVideoPreviewLayer {
        return layer as! AVCaptureVideoPreviewLayer
    }

    // wrapping dispatch
    override func didMoveToSuperview() {
        super.didMoveToSuperview()
        if nil != self.superview {
            self.videoPreviewLayer.session = self.captureSession
            self.videoPreviewLayer.videoGravity = .resizeAspect
            self.captureSession?.startRunning()
        } else {
            self.captureSession?.stopRunning()
        }
    }

    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        print("Got a frame!")
    }
}
That is because your captureOutput method uses the outdated delegate signature (didOutputSampleBuffer), so AVCaptureVideoDataOutputSampleBufferDelegate never calls into your code.
Here is how it should look:
import UIKit
import AVFoundation

final class PreviewView: UIView, AVCaptureVideoDataOutputSampleBufferDelegate {
    private var captureSession: AVCaptureSession?
    var imageBufferHandler: ImageBufferHandler?
    private var videoConnection: AVCaptureConnection!

    init() {
        super.init(frame: .zero)
        var allowedAccess = false
        let blocker = DispatchGroup()
        blocker.enter()
        AVCaptureDevice.requestAccess(for: .video) { flag in
            allowedAccess = flag
            blocker.leave()
        }
        blocker.wait()
        if !allowedAccess {
            print("!!! NO ACCESS TO CAMERA")
            return
        }
        // setup session
        let session = AVCaptureSession()
        session.beginConfiguration()
        let videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera,
                                                  for: .video, position: .unspecified) // alternate AVCaptureDevice.default(for: .video)
        guard videoDevice != nil, let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice!), session.canAddInput(videoDeviceInput) else {
            print("!!! NO CAMERA DETECTED")
            return
        }
        session.addInput(videoDeviceInput)
        session.commitConfiguration()
        self.captureSession = session

        // setup video output
        let videoDataOutput = AVCaptureVideoDataOutput()
        videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey: NSNumber(value: kCVPixelFormatType_32BGRA)] as [String: Any]
        videoDataOutput.alwaysDiscardsLateVideoFrames = true
        let queue = DispatchQueue(label: "com.queue.videosamplequeue")
        videoDataOutput.setSampleBufferDelegate(self, queue: queue)
        guard captureSession!.canAddOutput(videoDataOutput) else {
            fatalError()
        }
        captureSession!.addOutput(videoDataOutput)
        videoConnection = videoDataOutput.connection(with: .video)
    }

    override class var layerClass: AnyClass {
        AVCaptureVideoPreviewLayer.self
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    var videoPreviewLayer: AVCaptureVideoPreviewLayer {
        return layer as! AVCaptureVideoPreviewLayer
    }

    override func didMoveToSuperview() {
        super.didMoveToSuperview()
        if nil != self.superview {
            self.videoPreviewLayer.session = self.captureSession
            self.videoPreviewLayer.videoGravity = .resize
            self.captureSession?.startRunning()
        } else {
            self.captureSession?.stopRunning()
        }
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        print("Got a frame!")
        if connection.videoOrientation != .portrait {
            connection.videoOrientation = .portrait
            return
        }
        if let imageBufferHandler = imageBufferHandler {
            imageBufferHandler(sampleBuffer)
        }
    }
}
Don't forget to add the image buffer handler:
typealias ImageBufferHandler = ((_ imageBuffer: CMSampleBuffer) -> ())
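For completeness, a short usage sketch (the call site is hypothetical): the handler fires on the delegate's sample-buffer queue, so dispatch to the main queue before touching any UI:

let preview = PreviewView()
preview.imageBufferHandler = { sampleBuffer in
    // Runs on "com.queue.videosamplequeue", not the main thread.
    if let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
        print("Frame:", CVPixelBufferGetWidth(pixelBuffer),
              "x", CVPixelBufferGetHeight(pixelBuffer))
    }
}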

No Video Output and [MC] Reading from public effective user settings. Error in Swift/iOS 11

Using Xcode 9 Beta for iOS 11:
I've followed a walkthrough on how to extract frames from an AVCaptureSession, but have not been able to get the capture to appear. While I have included the camera permissions in the info.plist file, the app seems to stall after opening and I get the following errors:
[App Name] does not have sandbox access for frZQaeyWLUvLjeuEK43hmg and IS NOT appropriately entitled
[MC] System group container for systemgroup.com.apple.configurationprofiles path is /private/var/containers/Shared/SystemGroup/systemgroup.com.apple.configurationprofiles
[MC] Reading from public effective user settings.
Here is the code for FrameExtractor.swift for reference:
import UIKit
import AVFoundation

protocol FrameExtractorDelegate: class {
    func captured(image: UIImage)
}

class FrameExtractor: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    private let position = AVCaptureDevice.Position.front
    private let quality = AVCaptureSession.Preset.medium
    private var permissionGranted = false
    private let sessionQueue = DispatchQueue(label: "session queue")
    private let captureSession = AVCaptureSession()
    private let context = CIContext()

    weak var delegate: FrameExtractorDelegate?

    override init() {
        super.init()
        checkPermission()
        sessionQueue.async { [unowned self] in
            self.configureSession()
            self.captureSession.startRunning()
        }
    }

    // MARK: AVSession configuration
    private func checkPermission() {
        switch AVCaptureDevice.authorizationStatus(for: AVMediaType.video) {
        case .authorized:
            permissionGranted = true
        case .notDetermined:
            requestPermission()
        default:
            permissionGranted = false
        }
    }

    private func requestPermission() {
        sessionQueue.suspend()
        AVCaptureDevice.requestAccess(for: AVMediaType.video) { [unowned self] granted in
            self.permissionGranted = granted
            self.sessionQueue.resume()
        }
    }

    private func configureSession() {
        guard permissionGranted else { return }
        captureSession.sessionPreset = quality
        guard let captureDevice = selectCaptureDevice() else { return }
        guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else { return }
        guard captureSession.canAddInput(captureDeviceInput) else { return }
        captureSession.addInput(captureDeviceInput)
        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "sample buffer"))
        guard captureSession.canAddOutput(videoOutput) else { return }
        captureSession.addOutput(videoOutput)
        guard let connection = videoOutput.connection(with: AVFoundation.AVMediaType.video) else { return }
        guard connection.isVideoOrientationSupported else { return }
        guard connection.isVideoMirroringSupported else { return }
        connection.videoOrientation = .portrait
        connection.isVideoMirrored = position == .front
    }

    private func selectCaptureDevice() -> AVCaptureDevice? {
        return AVCaptureDevice.default(for: AVMediaType.video)
        // return AVCaptureDevice.devices().filter {
        //     ($0 as AnyObject).hasMediaType(AVMediaType.video) &&
        //     ($0 as AnyObject).position == position
        // }.first
    }

    // MARK: Sample buffer to UIImage conversion
    private func imageFromSampleBuffer(sampleBuffer: CMSampleBuffer) -> UIImage? {
        guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
        let ciImage = CIImage(cvPixelBuffer: imageBuffer)
        guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return nil }
        return UIImage(cgImage: cgImage)
    }

    // MARK: AVCaptureVideoDataOutputSampleBufferDelegate
    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        print("Got a Frame!")
        guard let uiImage = imageFromSampleBuffer(sampleBuffer: sampleBuffer) else { return }
        DispatchQueue.main.async { [unowned self] in
            self.delegate?.captured(image: uiImage)
        }
    }
}
And for ViewController.swift:
import UIKit

class ViewController: UIViewController, FrameExtractorDelegate {
    @IBOutlet var imageView: UIImageView!
    var frameExtractor: FrameExtractor!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
        frameExtractor = FrameExtractor()
        frameExtractor.delegate = self
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    func captured(image: UIImage) {
        imageView.image = image
    }
}
The issue is a changed function signature for captureOutput. This is the new signature in iOS 11 for captureOutput in AVCaptureVideoDataOutputSampleBufferDelegate:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let uiImage = imageFromSampleBuffer(sampleBuffer: sampleBuffer) else { return }
    DispatchQueue.main.async { [unowned self] in
        self.delegate?.captured(image: uiImage)
    }
}
Notice the change between "didOutput sampleBuffer:" and "didOutputSampleBuffer sampleBuffer:"
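For reference, a minimal standalone conformance with the modern signature (FrameDelegate is an illustrative name). The old didOutputSampleBuffer variant still compiles as an ordinary method, which is why the compiler raises no error while AVFoundation silently never calls it:

import AVFoundation

final class FrameDelegate: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    // Matches the Swift 4 / iOS 11 protocol requirement, so it actually gets called.
    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        print("Got a Frame!")
    }
}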
