How to integrate AVFoundation in a Flutter project? - ios

I am trying to integrate AVFoundation in my Flutter project to scan QR codes. I have followed the docs and written the following code:
class CealScanQrView extends StatelessWidget {
  const CealScanQrView({super.key});

  @override
  Widget build(BuildContext context) {
    final Map<String, dynamic> creationParams = <String, dynamic>{};
    return Platform.isAndroid
        ? AndroidView(
            viewType: cealScanQrView,
            layoutDirection: TextDirection.ltr,
            creationParams: creationParams,
            creationParamsCodec: const StandardMessageCodec(),
          )
        : UiKitView(
            viewType: cealScanQrView,
            layoutDirection: TextDirection.ltr,
            creationParams: creationParams,
            creationParamsCodec: const StandardMessageCodec(),
          );
  }
}
I have created CealQrViewNativeViewFactory and CealScanViewNativeView, and registered the factory in my AppDelegate using the code below:
let registrar = self.registrar(forPlugin: "ceal-views")!
let cealQrViewFactory = CealQrViewNativeViewFactory(messenger: registrar.messenger())
registrar.register(
    cealQrViewFactory,
    withId: "cealQrView")
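The factory itself is not shown in the post; for completeness, here is a minimal sketch of what CealQrViewNativeViewFactory typically looks like (standard Flutter platform-view boilerplate; the exact initializer is an assumption):

import Flutter
import UIKit

class CealQrViewNativeViewFactory: NSObject, FlutterPlatformViewFactory {
    private var messenger: FlutterBinaryMessenger

    init(messenger: FlutterBinaryMessenger) {
        self.messenger = messenger
        super.init()
    }

    func create(
        withFrame frame: CGRect,
        viewIdentifier viewId: Int64,
        arguments args: Any?
    ) -> FlutterPlatformView {
        return CealScanViewNativeView(
            frame: frame,
            viewIdentifier: viewId,
            arguments: args,
            binaryMessenger: messenger)
    }

    // Required so the creationParams sent from the Dart side can be decoded.
    func createArgsCodec() -> FlutterMessageCodec & NSObjectProtocol {
        return FlutterStandardMessageCodec.sharedInstance()
    }
}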
Below is my CealScanViewNativeView code
import Foundation
import UIKit
import AVFoundation
import AudioToolbox // for AudioServicesPlaySystemSound
import Flutter

class CealScanViewNativeView: NSObject, FlutterPlatformView, AVCaptureMetadataOutputObjectsDelegate {
    private var _view: UIView
    var captureSession: AVCaptureSession!
    var previewLayer: AVCaptureVideoPreviewLayer!

    init(
        frame: CGRect,
        viewIdentifier viewId: Int64,
        arguments args: Any?,
        binaryMessenger messenger: FlutterBinaryMessenger?
    ) {
        _view = UIView()
        super.init()
        setUpView()
    }

    func view() -> UIView {
        return _view
    }

    private func setUpView() {
        _view.backgroundColor = UIColor.clear
        captureSession = AVCaptureSession()
        guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
        let videoInput: AVCaptureDeviceInput
        do {
            videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
        } catch {
            return
        }
        if captureSession.canAddInput(videoInput) {
            captureSession.addInput(videoInput)
        } else {
            failed()
            return
        }
        let metadataOutput = AVCaptureMetadataOutput()
        if captureSession.canAddOutput(metadataOutput) {
            captureSession.addOutput(metadataOutput)
            metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
            metadataOutput.metadataObjectTypes = [.qr]
        } else {
            failed()
            return
        }
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.frame = _view.layer.bounds
        previewLayer.videoGravity = .resizeAspectFill
        _view.layer.addSublayer(previewLayer)
        self.captureSession.startRunning()
    }

    func failed() {
        let ac = UIAlertController(title: "Scanning not supported", message: "Device does not support scanning", preferredStyle: .alert)
        ac.addAction(UIAlertAction(title: "Ok", style: .default))
        UIApplication.shared.connectedScenes.flatMap { ($0 as? UIWindowScene)?.windows ?? [] }
            .first { $0.isKeyWindow }?.rootViewController?.present(ac, animated: true)
        captureSession = nil
    }

    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        captureSession.stopRunning()
        if let metadataObject = metadataObjects.first {
            guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
            guard let stringValue = readableObject.stringValue else { return }
            AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
            found(code: stringValue)
        }
        UIApplication.shared.connectedScenes.flatMap { ($0 as? UIWindowScene)?.windows ?? [] }
            .first { $0.isKeyWindow }?.rootViewController?.dismiss(animated: true)
    }

    func found(code: String) {
        debugPrint("Code is \(code)")
    }
}
I have granted camera permission as well, but as soon as I open the view I don't see the camera. I tried changing _view.backgroundColor to red and it is visible, so the view is set up correctly, yet no camera preview appears. In my Xcode logs I see the warning below:
Thread Performance Checker: -[AVCaptureSession startRunning] should be called from background thread. Calling it on the main thread can lead to UI unresponsiveness
PID: 1107, TID: 125465

I don't see where you are adding the view holding the preview layer to the hierarchy. Make sure you add it too.
There are a couple of things that you can do to make this work.
Session configuration takes time, and it needs to be done on a separate serial queue, like so:
private let sessionQueue = DispatchQueue(label: "session queue")
...
sessionQueue.async {
    self.setUpView()
}
Make variables class properties, like so:
private let metadataOutput = AVCaptureMetadataOutput()
private let metadataObjectsQueue = DispatchQueue(label: "metadata objects queue", attributes: [], target: nil)
Process the output on a separate queue:
metadataOutput.setMetadataObjectsDelegate(self, queue: metadataObjectsQueue)
metadataOutput.metadataObjectTypes = metadataOutput.availableMetadataObjectTypes
Start the session on the session queue:
sessionQueue.async {
    if !self.isSessionRunning {
        self.session.startRunning()
        self.isSessionRunning = self.session.isRunning
    }
}
You can find a complete sample at https://developer.apple.com/documentation/avfoundation/capture_setup/avcambarcode_detecting_barcodes_and_faces
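Putting that advice together, here is a minimal sketch of a view that configures and starts the capture session off the main thread (class and queue names are ours, not from the Apple sample):

import UIKit
import AVFoundation

final class CameraPreviewView: UIView {
    private let session = AVCaptureSession()
    private let sessionQueue = DispatchQueue(label: "session queue")
    private var isSessionRunning = false

    override init(frame: CGRect) {
        super.init(frame: frame)
        sessionQueue.async {
            self.configureSession()
            self.session.startRunning() // blocking call, now off the main thread
            self.isSessionRunning = self.session.isRunning
        }
    }

    required init?(coder: NSCoder) { fatalError("init(coder:) has not been implemented") }

    private func configureSession() {
        session.beginConfiguration()
        defer { session.commitConfiguration() }
        guard let device = AVCaptureDevice.default(for: .video),
              let input = try? AVCaptureDeviceInput(device: device),
              session.canAddInput(input) else { return }
        session.addInput(input)
        DispatchQueue.main.async {
            // The preview layer is UIKit-facing, so attach it on the main thread.
            let previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
            previewLayer.videoGravity = .resizeAspectFill
            previewLayer.frame = self.bounds
            self.layer.addSublayer(previewLayer)
        }
    }
}

The same pattern maps onto CealScanViewNativeView above: run setUpView() on a serial queue and hop back to the main queue only for adding the preview layer.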

If the purpose is scanning QR codes, why not use
https://pub.dev/packages/qr_code_scanner

Instead of using
previewLayer.frame = _view.layer.bounds
use
previewLayer.frame = CGRect(x: 0, y: 0, width: 200, height: 200)
Change the width and height according to your needs. At the point where setUpView() runs, _view has just been created with a zero frame, so _view.layer.bounds is .zero and the preview layer ends up invisible.
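A sturdier alternative to hard-coding a size is to let the preview layer track the view's bounds automatically by making it the view's backing layer, as the later answers on this page also do. A sketch (the class name is ours):

import UIKit
import AVFoundation

final class PreviewContainerView: UIView {
    // The view's backing layer is the preview layer, so it always
    // matches the view's bounds with no manual frame bookkeeping.
    override class var layerClass: AnyClass { AVCaptureVideoPreviewLayer.self }

    var previewLayer: AVCaptureVideoPreviewLayer {
        return layer as! AVCaptureVideoPreviewLayer
    }
}

Returning such a view from view() and setting previewLayer.session keeps the preview sized correctly however Flutter lays out the platform view.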

Related

Why is my view not updating when value is updated using @Published and @ObservedObject?

I am creating an object recognition app that takes frames from the camera and outputs a description of the image into a text view on the screen. I can print the output to the console, but cannot get the view to update with the text using the ObservableObject protocol. I have looked for solutions but none have seemed to work.
Camera View:
struct Analysis {
    var description: String = "No description"
    var confidence: Double = 0.0
}

final class CameraView: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, ObservableObject {
    var captureSession: AVCaptureSession!
    var previewLayer: AVCaptureVideoPreviewLayer!
    let dataOutput = AVCaptureVideoDataOutput()
    var frameCounter = 0

    @Published var analysis = Analysis()

    override func viewDidLoad() {
        super.viewDidLoad()
        NotificationCenter.default.addObserver(self, selector: #selector(CameraView.rotated), name: UIDevice.orientationDidChangeNotification, object: nil)
        captureSession = AVCaptureSession()
        guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
        let videoInput: AVCaptureDeviceInput
        do {
            videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
        } catch {
            return
        }
        if captureSession.canAddInput(videoInput) {
            captureSession.addInput(videoInput)
        } else {
            failed()
            return
        }
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.frame = view.layer.bounds
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.addSublayer(previewLayer)
        captureSession.startRunning()
        dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
        captureSession.addOutput(dataOutput)
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        frameCounter += 1
        if frameCounter == 15 {
            frameCounter = 0
            guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
            guard let model = try? VNCoreMLModel(for: Resnet50().model) else { return }
            let request = VNCoreMLRequest(model: model) { finishedReq, err in
                guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
                guard let firstObservation = results.first else { return }
                self.analysis = Analysis(description: firstObservation.identifier, confidence: Double(firstObservation.confidence))
            }
            try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
            print(self.analysis)
        }
    }
}

extension CameraView: UIViewControllerRepresentable {
    public typealias UIViewControllerType = CameraView

    func makeUIViewController(context: UIViewControllerRepresentableContext<CameraView>) -> CameraView {
        return CameraView()
    }

    func updateUIViewController(_ uiViewController: CameraView, context: UIViewControllerRepresentableContext<CameraView>) {
    }
}
Content View:
struct ContentView: View {
    @ObservedObject var camera = CameraView()

    var body: some View {
        ZStack {
            camera
                .ignoresSafeArea()
            VStack {
                Spacer()
                ZStack {
                    Rectangle()
                        .frame(height: 75)
                        .background(.regularMaterial)
                        .cornerRadius(20)
                        .padding()
                    Text(camera.analysis.description)
                        .font(.largeTitle)
                }
            }
        }
    }
}
Please let me know if you need more information.
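No answer is shown for this one, but two things stand out in the posted code. First, the Vision completion handler mutates analysis on a background queue, and SwiftUI only reliably picks up @Published changes made on the main thread. Second, makeUIViewController returns a fresh CameraView() rather than self, so the instance being observed is not the instance running the session. A sketch of the first fix, as a drop-in replacement for the request closure above (our suggestion, not from the thread):

let request = VNCoreMLRequest(model: model) { finishedReq, _ in
    guard let results = finishedReq.results as? [VNClassificationObservation],
          let firstObservation = results.first else { return }
    DispatchQueue.main.async {
        // @Published mutations must happen on the main thread for
        // SwiftUI to refresh the observing views.
        self.analysis = Analysis(description: firstObservation.identifier,
                                 confidence: Double(firstObservation.confidence))
    }
}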

How to get camera frames to process using SwiftUI?

I'm trying to get frames, like VideoCapture in OpenCV, using SwiftUI.
Here is my code, which is not working.
I expected "Got a frame!" to print to the console when each frame from the camera is available;
at least I can see the preview on the screen.
How can I solve it, or is there any other approach?
If you have a reference for doing the same thing, it would be helpful.
// For camera streaming
struct DemoVideoStreaming: View {
    var body: some View {
        VStack {
            PreviewHolder()
        }.frame(minWidth: 0, maxWidth: .infinity, minHeight: 0, maxHeight: .infinity, alignment: .center)
    }
}

struct PreviewHolder: UIViewRepresentable {
    func makeUIView(context: UIViewRepresentableContext<PreviewHolder>) -> PreviewView {
        PreviewView()
    }

    func updateUIView(_ uiView: PreviewView, context: UIViewRepresentableContext<PreviewHolder>) {
    }

    typealias UIViewType = PreviewView
}

class PreviewView: UIView, AVCaptureVideoDataOutputSampleBufferDelegate {
    private var captureSession: AVCaptureSession?

    init() {
        super.init(frame: .zero)
        var allowedAccess = false
        let blocker = DispatchGroup()
        blocker.enter()
        AVCaptureDevice.requestAccess(for: .video) { flag in
            allowedAccess = flag
            blocker.leave()
        }
        blocker.wait()
        if !allowedAccess {
            print("!!! NO ACCESS TO CAMERA")
            return
        }
        // setup session
        // Step 3. AVCaptureSession
        let session = AVCaptureSession()
        session.beginConfiguration()
        let videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera,
                                                  for: .video, position: .front) // alternate: AVCaptureDevice.default(for: .video)
        guard videoDevice != nil, let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice!), session.canAddInput(videoDeviceInput) else {
            print("!!! NO CAMERA DETECTED")
            return
        }
        session.addInput(videoDeviceInput)
        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "sample buffer"))
        guard session.canAddOutput(videoOutput) else { return }
        session.addOutput(videoOutput)
        session.commitConfiguration()
        self.captureSession = session
    }

    override class var layerClass: AnyClass {
        AVCaptureVideoPreviewLayer.self
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    var videoPreviewLayer: AVCaptureVideoPreviewLayer {
        return layer as! AVCaptureVideoPreviewLayer
    }

    // wrapping dispatch
    override func didMoveToSuperview() {
        super.didMoveToSuperview()
        if nil != self.superview {
            self.videoPreviewLayer.session = self.captureSession
            self.videoPreviewLayer.videoGravity = .resizeAspect
            self.captureSession?.startRunning()
        } else {
            self.captureSession?.stopRunning()
        }
    }

    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        print("Got a frame!")
    }
}
That is because your captureOutput uses the outdated Swift 3 delegate signature (didOutputSampleBuffer), so AVFoundation never calls it. Use the current AVCaptureVideoDataOutputSampleBufferDelegate method, captureOutput(_:didOutput:from:).
Here is how it should look:
import UIKit
import AVFoundation

final class PreviewView: UIView, AVCaptureVideoDataOutputSampleBufferDelegate {
    private var captureSession: AVCaptureSession?
    var imageBufferHandler: ImageBufferHandler?
    private var videoConnection: AVCaptureConnection!

    init() {
        super.init(frame: .zero)
        var allowedAccess = false
        let blocker = DispatchGroup()
        blocker.enter()
        AVCaptureDevice.requestAccess(for: .video) { flag in
            allowedAccess = flag
            blocker.leave()
        }
        blocker.wait()
        if !allowedAccess {
            print("!!! NO ACCESS TO CAMERA")
            return
        }
        // setup session
        let session = AVCaptureSession()
        session.beginConfiguration()
        let videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera,
                                                  for: .video, position: .unspecified) // alternate: AVCaptureDevice.default(for: .video)
        guard videoDevice != nil, let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice!), session.canAddInput(videoDeviceInput) else {
            print("!!! NO CAMERA DETECTED")
            return
        }
        session.addInput(videoDeviceInput)
        session.commitConfiguration()
        self.captureSession = session

        // setup video output
        let videoDataOutput = AVCaptureVideoDataOutput()
        videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey: NSNumber(value: kCVPixelFormatType_32BGRA)] as [String: Any]
        videoDataOutput.alwaysDiscardsLateVideoFrames = true
        let queue = DispatchQueue(label: "com.queue.videosamplequeue")
        videoDataOutput.setSampleBufferDelegate(self, queue: queue)
        guard captureSession!.canAddOutput(videoDataOutput) else {
            fatalError()
        }
        captureSession!.addOutput(videoDataOutput)
        videoConnection = videoDataOutput.connection(with: .video)
    }

    override class var layerClass: AnyClass {
        AVCaptureVideoPreviewLayer.self
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    var videoPreviewLayer: AVCaptureVideoPreviewLayer {
        return layer as! AVCaptureVideoPreviewLayer
    }

    override func didMoveToSuperview() {
        super.didMoveToSuperview()
        if nil != self.superview {
            self.videoPreviewLayer.session = self.captureSession
            self.videoPreviewLayer.videoGravity = .resize
            self.captureSession?.startRunning()
        } else {
            self.captureSession?.stopRunning()
        }
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        print("Got a frame!")
        if connection.videoOrientation != .portrait {
            connection.videoOrientation = .portrait
            return
        }
        if let imageBufferHandler = imageBufferHandler {
            imageBufferHandler(sampleBuffer)
        }
    }
}
Don't forget to add the image buffer handler:
typealias ImageBufferHandler = ((_ imageBuffer: CMSampleBuffer) -> ())
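The hosting code can then pull frames like this (a hypothetical usage sketch, not from the original answer; it assumes the PreviewView above):

let preview = PreviewView()
preview.imageBufferHandler = { sampleBuffer in
    // Grab the pixel buffer for downstream processing (OpenCV, Vision, ...).
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    print("Got a frame: \(CVPixelBufferGetWidth(pixelBuffer))x\(CVPixelBufferGetHeight(pixelBuffer))")
}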

How to scan only certain areas through the camera with Swift 5?

I'd like to scan a QR code through the camera. There is no problem scanning the QR code,
but I want to scan only certain areas. How can I do this?
Currently the QR code is recognized anywhere in the entire camera area.
import Foundation
import UIKit
import AVFoundation

class ScannerViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    @IBOutlet weak var qrcodeView: UIView!
    @IBOutlet weak var mainText: UITextView!
    @IBOutlet weak var headerBar: UINavigationBar!

    var captureSession: AVCaptureSession!
    var previewLayer: AVCaptureVideoPreviewLayer!

    override func viewDidLoad() {
        super.viewDidLoad()
        view.backgroundColor = UIColor.black
        self.qrcodeView.backgroundColor = UIColor.black.withAlphaComponent(0.5)
        captureSession = AVCaptureSession()
        guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
        let videoInput: AVCaptureDeviceInput
        do {
            videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
        } catch {
            return
        }
        if captureSession.canAddInput(videoInput) {
            captureSession.addInput(videoInput)
        } else {
            failed()
            return
        }
        let metadataOutput = AVCaptureMetadataOutput()
        if captureSession.canAddOutput(metadataOutput) {
            captureSession.addOutput(metadataOutput)
            metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
            metadataOutput.metadataObjectTypes = [.qr]
        } else {
            failed()
            return
        }
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.frame = view.layer.bounds
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.insertSublayer(previewLayer, at: 0)
        captureSession.startRunning()
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        if captureSession?.isRunning == false {
            captureSession.startRunning()
        }
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        if captureSession?.isRunning == true {
            captureSession.stopRunning()
        }
    }

    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        // let scanRect = CGRect(x: 0, y: 0, width: 200, height: 200)
        // let rectOfInterest = layer.metadataOutputRectConverted(fromLayerRect: scanRect)
        // metadataObjects.rectOfInterest = rectOfInterest
        captureSession.stopRunning()
        if let metadataObject = metadataObjects.first {
            guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
            guard let stringValue = readableObject.stringValue else { return }
            AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
            found(code: stringValue)
        } else {
            print("not support")
        }
    }

    func found(code: String) {
        print(code)
        self.dismiss(animated: true, completion: nil)
    }

    func failed() {
        captureSession = nil
    }
}
Like the picture above, I would like to scan only within the square area.
I desperately need this.
Thanks in advance.
You can use the rectOfInterest property to achieve this.
Add the following code after captureSession.startRunning().
First you need to convert the rect using:
let rectOfInterest = videoPreviewLayer?.metadataOutputRectConverted(fromLayerRect: self.viewAreaOfScan.frame) // videoPreviewLayer is an AVCaptureVideoPreviewLayer
After that you can assign it to rectOfInterest of metadataOutput:
metadataOutput.rectOfInterest = rectOfInterest ?? CGRect(x: 0, y: 0, width: 1, height: 1)
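Mapped onto the ScannerViewController above, the end of viewDidLoad would look roughly like this (a sketch; qrcodeView is the square overlay from the question):

captureSession.startRunning()
// The conversion only returns correct values once the session is running,
// which is why it comes after startRunning().
let rectOfInterest = previewLayer.metadataOutputRectConverted(fromLayerRect: qrcodeView.frame)
metadataOutput.rectOfInterest = rectOfInterest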

How to take UIImage of AVCaptureVideoPreviewLayer instead of AVCapturePhotoOutput capture

I want to "stream" the preview layer to my server; however, I only want specific frames to be sent. Basically, I want to take a snapshot of the AVCaptureVideoPreviewLayer, scale it down to 28x28, turn it into an intensity array, and send it to my socket layer, where my Python backend handles the rest.
The problem here is that AVCapturePhotoOutput's capture function is insanely slow; I can't call it repeatedly. Not to mention it always makes a camera shutter sound, haha.
The other problem is that taking a snapshot of AVCaptureVideoPreviewLayer is really difficult. Using UIGraphicsBeginImageContext almost always returns a blank/clear image.
Help a brother out, thanks!
Basically, instead of using AVCaptureVideoPreviewLayer for grabbing frames, you should use AVCaptureVideoDataOutputSampleBufferDelegate.
Here is an example:
import Foundation
import UIKit
import AVFoundation

protocol CaptureManagerDelegate: class {
    func processCapturedImage(image: UIImage)
}

class CaptureManager: NSObject {
    internal static let shared = CaptureManager()
    weak var delegate: CaptureManagerDelegate?
    var session: AVCaptureSession?

    override init() {
        super.init()
        session = AVCaptureSession()

        // setup input
        let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
        let input = try! AVCaptureDeviceInput(device: device)
        session?.addInput(input)

        // setup output
        let output = AVCaptureVideoDataOutput()
        output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as AnyHashable: kCVPixelFormatType_32BGRA]
        output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
        session?.addOutput(output)
    }

    func startSession() {
        session?.startRunning()
    }

    func stopSession() {
        session?.stopRunning()
    }

    func getImageFromSampleBuffer(sampleBuffer: CMSampleBuffer) -> UIImage? {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            return nil
        }
        CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
        let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
        let width = CVPixelBufferGetWidth(pixelBuffer)
        let height = CVPixelBufferGetHeight(pixelBuffer)
        let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
        guard let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else {
            return nil
        }
        guard let cgImage = context.makeImage() else {
            return nil
        }
        let image = UIImage(cgImage: cgImage, scale: 1, orientation: .right)
        CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
        return image
    }
}

extension CaptureManager: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else {
            return
        }
        delegate?.processCapturedImage(image: outputImage)
    }
}
Update: To process images you should implement the processCapturedImage method of the CaptureManagerDelegate protocol in any other class where you want, like:

import UIKit

class ViewController: UIViewController {
    @IBOutlet weak var imageView: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        CaptureManager.shared.startSession()
        CaptureManager.shared.delegate = self
    }
}

extension ViewController: CaptureManagerDelegate {
    func processCapturedImage(image: UIImage) {
        self.imageView.image = image
    }
}
@ninjaproger's answer was great! Simply writing this as a Swift 4 version of the answer for future reference.
import UIKit
import AVFoundation

var customPreviewLayer: AVCaptureVideoPreviewLayer?

class ViewController: UIViewController {
    @IBOutlet weak var imageView: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        CaptureManager.shared.startSession()
        CaptureManager.shared.delegate = self
    }
}

extension ViewController: CaptureManagerDelegate {
    func processCapturedImage(image: UIImage) {
        self.imageView.image = image
    }
}

protocol CaptureManagerDelegate: class {
    func processCapturedImage(image: UIImage)
}

class CaptureManager: NSObject {
    internal static let shared = CaptureManager()
    weak var delegate: CaptureManagerDelegate?
    var session: AVCaptureSession?

    override init() {
        super.init()
        session = AVCaptureSession()

        // setup input
        let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
        let input = try! AVCaptureDeviceInput(device: device!)
        session?.addInput(input)

        // setup output
        let output = AVCaptureVideoDataOutput()
        output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
        output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
        session?.addOutput(output)
    }

    func startSession() {
        session?.startRunning()
    }

    func stopSession() {
        session?.stopRunning()
    }

    func getImageFromSampleBuffer(sampleBuffer: CMSampleBuffer) -> UIImage? {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            return nil
        }
        CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
        let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
        let width = CVPixelBufferGetWidth(pixelBuffer)
        let height = CVPixelBufferGetHeight(pixelBuffer)
        let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
        guard let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else {
            return nil
        }
        guard let cgImage = context.makeImage() else {
            return nil
        }
        let image = UIImage(cgImage: cgImage, scale: 1, orientation: .right)
        CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
        return image
    }
}

extension CaptureManager: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else {
            return
        }
        delegate?.processCapturedImage(image: outputImage)
    }
}
Details
Xcode 10.2.1 (10E1001), Swift 5
Features
This solution allows you:
to check camera access
to select the front or back camera
to show an alert with a link to the app settings page if there is no camera access
to take a photo
to play the standard capture-photo sound
Solution
CameraService
import UIKit
import AVFoundation
import AudioToolbox // for AudioServicesPlayAlertSound
import Vision

class CameraService: NSObject {
    private weak var previewView: UIView?
    private(set) var cameraIsReadyToUse = false
    private let session = AVCaptureSession()
    private weak var previewLayer: AVCaptureVideoPreviewLayer?
    private lazy var sequenceHandler = VNSequenceRequestHandler()
    private lazy var capturePhotoOutput = AVCapturePhotoOutput()
    private lazy var dataOutputQueue = DispatchQueue(label: "FaceDetectionService",
                                                     qos: .userInitiated, attributes: [],
                                                     autoreleaseFrequency: .workItem)
    private var captureCompletionBlock: ((UIImage) -> Void)?
    private var preparingCompletionHandler: ((Bool) -> Void)?
    private var snapshotImageOrientation = UIImage.Orientation.upMirrored
    private var cameraPosition = AVCaptureDevice.Position.front {
        didSet {
            switch cameraPosition {
            case .front: snapshotImageOrientation = .upMirrored
            case .unspecified, .back: fallthrough
            @unknown default: snapshotImageOrientation = .up
            }
        }
    }

    func prepare(previewView: UIView,
                 cameraPosition: AVCaptureDevice.Position,
                 completion: ((Bool) -> Void)?) {
        self.previewView = previewView
        self.preparingCompletionHandler = completion
        self.cameraPosition = cameraPosition
        checkCameraAccess { allowed in
            if allowed { self.setup() }
            completion?(allowed)
            self.preparingCompletionHandler = nil
        }
    }

    private func setup() { configureCaptureSession() }
    func start() { if cameraIsReadyToUse { session.startRunning() } }
    func stop() { session.stopRunning() }
}

extension CameraService {
    private func askUserForCameraPermission(_ completion: ((Bool) -> Void)?) {
        AVCaptureDevice.requestAccess(for: AVMediaType.video) { (allowedAccess) -> Void in
            DispatchQueue.main.async { completion?(allowedAccess) }
        }
    }

    private func checkCameraAccess(completion: ((Bool) -> Void)?) {
        askUserForCameraPermission { [weak self] allowed in
            guard let self = self, let completion = completion else { return }
            self.cameraIsReadyToUse = allowed
            if allowed {
                completion(true)
            } else {
                self.showDisabledCameraAlert(completion: completion)
            }
        }
    }

    private func configureCaptureSession() {
        guard let previewView = previewView else { return }
        // Define the capture device we want to use
        guard let camera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: cameraPosition) else {
            let error = NSError(domain: "", code: 0, userInfo: [NSLocalizedDescriptionKey: "No front camera available"])
            show(error: error)
            return
        }
        // Connect the camera to the capture session input
        do {
            try camera.lockForConfiguration()
            defer { camera.unlockForConfiguration() }
            if camera.isFocusModeSupported(.continuousAutoFocus) {
                camera.focusMode = .continuousAutoFocus
            }
            if camera.isExposureModeSupported(.continuousAutoExposure) {
                camera.exposureMode = .continuousAutoExposure
            }
            let cameraInput = try AVCaptureDeviceInput(device: camera)
            session.addInput(cameraInput)
        } catch {
            show(error: error as NSError)
            return
        }
        // Create the video data output
        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
        videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
        // Add the video output to the capture session
        session.addOutput(videoOutput)
        let videoConnection = videoOutput.connection(with: .video)
        videoConnection?.videoOrientation = .portrait
        // Configure the preview layer
        let previewLayer = AVCaptureVideoPreviewLayer(session: session)
        previewLayer.videoGravity = .resizeAspectFill
        previewLayer.frame = previewView.bounds
        previewView.layer.insertSublayer(previewLayer, at: 0)
        self.previewLayer = previewLayer
    }
}

extension CameraService: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard captureCompletionBlock != nil,
              let outputImage = UIImage(sampleBuffer: sampleBuffer, orientation: snapshotImageOrientation) else { return }
        DispatchQueue.main.async { [weak self] in
            guard let self = self else { return }
            if let captureCompletionBlock = self.captureCompletionBlock {
                captureCompletionBlock(outputImage)
                AudioServicesPlayAlertSound(SystemSoundID(1108))
            }
            self.captureCompletionBlock = nil
        }
    }
}

// Navigation
extension CameraService {
    private func show(alert: UIAlertController) {
        DispatchQueue.main.async {
            UIApplication.topViewController?.present(alert, animated: true, completion: nil)
        }
    }

    private func showDisabledCameraAlert(completion: ((Bool) -> Void)?) {
        let alertVC = UIAlertController(title: "Enable Camera Access",
                                        message: "Please provide access to your camera",
                                        preferredStyle: .alert)
        alertVC.addAction(UIAlertAction(title: "Go to Settings", style: .default, handler: { action in
            guard let previewView = self.previewView,
                  let settingsUrl = URL(string: UIApplication.openSettingsURLString),
                  UIApplication.shared.canOpenURL(settingsUrl) else { return }
            UIApplication.shared.open(settingsUrl) { [weak self] _ in
                guard let self = self else { return }
                self.prepare(previewView: previewView,
                             cameraPosition: self.cameraPosition,
                             completion: self.preparingCompletionHandler)
            }
        }))
        alertVC.addAction(UIAlertAction(title: "Cancel", style: .cancel, handler: { _ in completion?(false) }))
        show(alert: alertVC)
    }

    private func show(error: NSError) {
        let alertVC = UIAlertController(title: "Error", message: error.localizedDescription, preferredStyle: .alert)
        alertVC.addAction(UIAlertAction(title: "Ok", style: .cancel, handler: nil))
        show(alert: alertVC)
    }
}

extension CameraService: AVCapturePhotoCaptureDelegate {
    func capturePhoto(completion: ((UIImage) -> Void)?) { captureCompletionBlock = completion }
}
Helpers
///////////////////////////////////////////////////////////////////////////
import UIKit
import AVFoundation
extension UIImage {
convenience init?(sampleBuffer: CMSampleBuffer, orientation: UIImage.Orientation = .upMirrored) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height,
bitsPerComponent: 8, bytesPerRow: bytesPerRow,
space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else { return nil }
guard let cgImage = context.makeImage() else { return nil }
self.init(cgImage: cgImage, scale: 1, orientation: orientation)
}
}
///////////////////////////////////////////////////////////////////////////
import UIKit
extension UIApplication {
private class func topViewController(controller: UIViewController? = UIApplication.shared.keyWindow?.rootViewController) -> UIViewController? {
if let navigationController = controller as? UINavigationController {
return topViewController(controller: navigationController.visibleViewController)
}
if let tabController = controller as? UITabBarController {
if let selected = tabController.selectedViewController {
return topViewController(controller: selected)
}
}
if let presented = controller?.presentedViewController {
return topViewController(controller: presented)
}
return controller
}
class var topViewController: UIViewController? { return topViewController() }
}
Usage
private lazy var cameraService = CameraService()
//...
cameraService.prepare(previewView: previewView, cameraPosition: .front) { [weak self] success in
    if success { self?.cameraService.start() }
}
//...
cameraService.capturePhoto { [weak self] image in
    //...
}
Full sample
import UIKit

class ViewController: UIViewController {
    private lazy var cameraService = CameraService()
    private weak var button: UIButton?
    private weak var imagePreviewView: UIImageView?
    private var cameraInited = false

    private enum ButtonState { case cancel, makeSnapshot }
    private var buttonState = ButtonState.makeSnapshot {
        didSet {
            switch buttonState {
            case .makeSnapshot: button?.setTitle("Make a photo", for: .normal)
            case .cancel: button?.setTitle("Cancel", for: .normal)
            }
        }
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        setupCameraPreviewView()
        setupButton()
        // Do any additional setup after loading the view.
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        cameraService.start()
    }

    override func viewDidDisappear(_ animated: Bool) {
        super.viewDidDisappear(animated)
        cameraService.stop()
    }

    // Ensure that the interface stays locked in Portrait.
    override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
        return .portrait
    }

    // Ensure that the interface stays locked in Portrait.
    override var preferredInterfaceOrientationForPresentation: UIInterfaceOrientation {
        return .portrait
    }
}

extension ViewController {
    private func setupCameraPreviewView() {
        let previewView = UIView(frame: .zero)
        view.addSubview(previewView)
        previewView.translatesAutoresizingMaskIntoConstraints = false
        previewView.topAnchor.constraint(equalTo: view.topAnchor).isActive = true
        previewView.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
        previewView.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
        previewView.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
        previewView.layoutIfNeeded()
        cameraService.prepare(previewView: previewView, cameraPosition: .front) { [weak self] success in
            if success { self?.cameraService.start() }
        }
    }

    private func setupButton() {
        let button = UIButton(frame: .zero)
        button.addTarget(self, action: #selector(buttonTouchedUpInside), for: .touchUpInside)
        view.addSubview(button)
        self.button = button
        buttonState = .makeSnapshot
        button.translatesAutoresizingMaskIntoConstraints = false
        button.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
        button.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
        button.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
        button.heightAnchor.constraint(equalToConstant: 44).isActive = true
        button.backgroundColor = UIColor.black.withAlphaComponent(0.4)
    }

    private func show(image: UIImage) {
        let imageView = UIImageView(frame: .zero)
        view.insertSubview(imageView, at: 1)
        imagePreviewView = imageView
        imageView.translatesAutoresizingMaskIntoConstraints = false
        imageView.topAnchor.constraint(equalTo: view.topAnchor).isActive = true
        imageView.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
        imageView.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
        imageView.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
        imageView.image = image
    }

    @objc func buttonTouchedUpInside() {
        switch buttonState {
        case .makeSnapshot:
            cameraService.capturePhoto { [weak self] image in
                guard let self = self else { return }
                self.cameraService.stop()
                self.buttonState = .cancel
                self.show(image: image)
            }
        case .cancel:
            buttonState = .makeSnapshot
            cameraService.start()
            imagePreviewView?.removeFromSuperview()
        }
    }
}

iOS Swift - How to use rectOfInterest correctly (barcode scanner)

I have an iOS app which scans a barcode.
Now I want to define a specific scan area. For this purpose I am using the rectOfInterest property and the metadataOutputRectOfInterest method.
The problem I face: when I use just the code below, nothing is scanned; if I remove barcodeAreaView, scanning works fine all over the display.
class BarcodeReaderViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    // Camera view
    var cameraView: AVCaptureVideoPreviewLayer?
    // AV capture session and dispatch queue
    let captureSession = AVCaptureSession()
    let sessionQueue = DispatchQueue(label: AVCaptureSession.self.description(), attributes: [], target: nil)
    var barcodeAreaView: UIView?
    let supportedCodeTypes = [
        AVMetadataObjectTypeEAN8Code,
        AVMetadataObjectTypeEAN13Code,
        AVMetadataObjectTypeUPCECode,
        AVMetadataObjectTypeCode39Code,
        AVMetadataObjectTypeCode93Code,
        AVMetadataObjectTypeCode128Code,
        AVMetadataObjectTypeCode39Mod43Code,
        AVMetadataObjectTypeInterleaved2of5Code
    ]
    let metadataOutput = AVCaptureMetadataOutput()
    var barcodeArea: CGRect!

    override func viewDidLoad() {
        super.viewDidLoad()
        let width = 250
        let height = 100
        print(self.view.frame.size.width)
        let xPos = (CGFloat(self.view.frame.size.width) / CGFloat(2)) - (CGFloat(width) / CGFloat(2))
        let yPos = (CGFloat(self.view.frame.size.height) / CGFloat(2)) - (CGFloat(height) / CGFloat(2))
        barcodeArea = CGRect(x: Int(xPos), y: Int(yPos), width: width, height: height)
        barcodeAreaView = UIView()
        barcodeAreaView?.layer.borderColor = UIColor.red.cgColor
        barcodeAreaView?.layer.borderWidth = 1
        barcodeAreaView?.frame = barcodeArea
        view.addSubview(barcodeAreaView!)
        captureSession.beginConfiguration()
        let videoDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
        if videoDevice != nil {
            let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice)
            if videoDeviceInput != nil {
                if captureSession.canAddInput(videoDeviceInput) {
                    captureSession.addInput(videoDeviceInput)
                }
            }
            if captureSession.canAddOutput(metadataOutput) {
                captureSession.addOutput(metadataOutput)
                metadataOutput.metadataObjectTypes = supportedCodeTypes
                metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
                cameraView = AVCaptureVideoPreviewLayer(session: captureSession)
                cameraView?.videoGravity = AVLayerVideoGravityResizeAspectFill
                cameraView?.frame = view.layer.bounds
                metadataOutput.rectOfInterest = cameraView!.metadataOutputRectOfInterest(for: barcodeArea)
                view.layer.addSublayer(cameraView!)
            }
        }
        captureSession.commitConfiguration()
        view.bringSubview(toFront: barcodeAreaView!)
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        // Start AV capture session
        sessionQueue.async {
            self.captureSession.startRunning()
        }
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        // Stop AV capture session
        sessionQueue.async {
            self.captureSession.stopRunning()
        }
    }

    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [Any]!, from connection: AVCaptureConnection!) {
        if let metadataObject = metadataObjects.first {
            let readableObject = metadataObject as! AVMetadataMachineReadableCodeObject
            let message = readableObject.stringValue
            print(message)
        }
    }
}
I already took a look at this question:
How do I use the metadataOutputRectOfInterestForRect method and rectOfInterest property to scan a specific area? (QR Code)
The problem I face there is that the scan is a bit laggy and my navigationController is not working (because the listener awaits a scan).
UPDATE:
I solved the issue, as suggested in this answer:
https://stackoverflow.com/a/37603743/2319900
I added the following line right after my self.captureSession.startRunning():
metadataOutput.rectOfInterest = cameraView!.metadataOutputRectOfInterest(for: barcodeArea)
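In the code above that means inside the sessionQueue block in viewWillAppear, since that is where startRunning() is called (a placement sketch, not from the original update):

sessionQueue.async {
    self.captureSession.startRunning()
    // metadataOutputRectOfInterest only converts correctly once the
    // session is running, so the assignment comes after startRunning().
    self.metadataOutput.rectOfInterest = self.cameraView!.metadataOutputRectOfInterest(for: self.barcodeArea)
}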
