Custom Barcode Scanner, tap to focus - iOS

Referencing these questions here:
How to implement tap to focus on barcode scanner app using swift?
Set Camera Focus On Tap Point With Swift
https://stackoverflow.com/a/41796603/8272698
The links above are quite old and out of date. I have tried the answers provided there, but to no avail; below is my attempt based on them.
I need to tap the screen while scanning barcodes to focus on the object in the view.
Here is my code attempt:
var captureDevice: AVCaptureDevice? // capture device - is this right?
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
let screenSize = videoPreviewLayer!.bounds.size
if let touchPoint = touches.first {
let x = touchPoint.location(in: self.view).y / screenSize.height
let y = 1.0 - touchPoint.location(in: self.view).x / screenSize.width
let focusPoint = CGPoint(x: x, y: y)
if let device = captureDevice {
do {
try device.lockForConfiguration()
device.focusPointOfInterest = focusPoint
//device.focusMode = .continuousAutoFocus
device.focusMode = .autoFocus
//device.focusMode = .locked
device.exposurePointOfInterest = focusPoint
device.exposureMode = AVCaptureDevice.ExposureMode.continuousAutoExposure
device.unlockForConfiguration()
}
catch {
// just ignore
}
}
}
}
This code doesn't work: when I tap, no focusing happens.
Here is the rest of my camera code:
import UIKit
import AVFoundation
class BarcodeScanVC: UIViewController {
struct GlobalVariable{
static var senderTags = 0
}
var captureSession = AVCaptureSession()
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var qrCodeFrameView: UIView?
var row = 0
var senderTag = 0
var waybillData: String = ""
var diagnosticErrorCodeData: String = ""
var hddSerialNumberData: String = ""
var scanRectView: UIView?
var delegate: BarcodeScanDelegate?
var captureDevice: AVCaptureDevice?
private let supportedCodeTypes = [AVMetadataObject.ObjectType.upce,
AVMetadataObject.ObjectType.code39,
AVMetadataObject.ObjectType.code39Mod43,
AVMetadataObject.ObjectType.code93,
AVMetadataObject.ObjectType.code128,
AVMetadataObject.ObjectType.ean8,
AVMetadataObject.ObjectType.ean13,
AVMetadataObject.ObjectType.aztec,
AVMetadataObject.ObjectType.pdf417,
AVMetadataObject.ObjectType.itf14,
AVMetadataObject.ObjectType.dataMatrix,
AVMetadataObject.ObjectType.interleaved2of5,
AVMetadataObject.ObjectType.qr]
override func viewDidLoad() {
super.viewDidLoad()
// Get the back-facing camera for capturing videos
//let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .back)
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back)
guard let captureDevice = deviceDiscoverySession.devices.first else {
print("Failed to get the camera device")
return
}
do {
// Get an instance of the AVCaptureDeviceInput class using the previous device object.
let input = try AVCaptureDeviceInput(device: captureDevice)
// Set the input device on the capture session.
captureSession.addInput(input)
// Initialize a AVCaptureMetadataOutput object and set it as the output device to the capture session.
let captureMetadataOutput = AVCaptureMetadataOutput()
captureSession.addOutput(captureMetadataOutput)
// Set delegate and use the default dispatch queue to execute the call back
captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
captureMetadataOutput.metadataObjectTypes = supportedCodeTypes
// captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.qr]
} catch {
// If any error occurs, simply print it out and don't continue any more.
print(error)
return
}
captureSession.commitConfiguration()
// Initialize the video preview layer and add it as a sublayer to the viewPreview view's layer.
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer?.frame = view.layer.bounds
//videoPreviewLayer?.frame
// let height: CGFloat = ((videoPreviewLayer?.frame.size.width)!)/2
// let width: CGFloat = ((videoPreviewLayer?.frame.size.width)!)/2
let height: CGFloat = (view.frame.size.height)/2
let width: CGFloat = (view.frame.size.width) - 200
let path = UIBezierPath()
//Corner1
path.move(to: CGPoint(x: 5, y: 50))
path.addLine(to: CGPoint(x: 5, y: 5))
path.addLine(to: CGPoint(x: 50, y: 5))
//Corner2
path.move(to: CGPoint(x: height - 55, y: 5))
path.addLine(to: CGPoint(x: height - 5, y: 5))
path.addLine(to: CGPoint(x: height - 5, y: 55))
//Corner3
path.move(to: CGPoint(x: 5, y: width - 55))
path.addLine(to: CGPoint(x: 5, y: width - 5))
path.addLine(to: CGPoint(x: 55, y: width - 5))
//Corner4 -bottom right
path.move(to: CGPoint(x: height - 5, y: width - 55))
path.addLine(to: CGPoint(x: height - 5, y: width - 5))
path.addLine(to: CGPoint(x: height - 55, y: width - 5))
let shape = CAShapeLayer()
shape.path = path.cgPath
shape.strokeColor = UIColor.white.cgColor
shape.lineWidth = 5
shape.frame.origin.x = 20
shape.frame.origin.y = 180
shape.fillColor = UIColor.clear.cgColor
videoPreviewLayer?.addSublayer(shape)
view.layer.addSublayer(videoPreviewLayer!)
//videoPreviewLayer?.anchorPoint.centerXAnchor.constraint(equalTo: view.centerXAnchor).isActive = true
//view.layer.addSublayer(scanRectView)
// Start video capture.
captureSession.startRunning()
// Move the message label and top bar to the front
//view.bringSubview(toFront: messageLabel)
//view.bringSubview(toFront: topbar)
// Initialize QR Code Frame to highlight the QR code
qrCodeFrameView = UIView()
if let qrCodeFrameView = qrCodeFrameView {
qrCodeFrameView.layer.borderColor = UIColor.green.cgColor
qrCodeFrameView.layer.borderWidth = 2
view.addSubview(qrCodeFrameView)
view.bringSubviewToFront(qrCodeFrameView)
}
}
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
let screenSize = videoPreviewLayer!.bounds.size
if let touchPoint = touches.first {
let x = touchPoint.location(in: self.view).y / screenSize.height
let y = 1.0 - touchPoint.location(in: self.view).x / screenSize.width
let focusPoint = CGPoint(x: x, y: y)
if let device = captureDevice {
do {
try device.lockForConfiguration()
device.focusPointOfInterest = focusPoint
//device.focusMode = .continuousAutoFocus
device.focusMode = .autoFocus
//device.focusMode = .locked
device.exposurePointOfInterest = focusPoint
device.exposureMode = AVCaptureDevice.ExposureMode.continuousAutoExposure
device.unlockForConfiguration()
}
catch {
// just ignore
}
}
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
func launchApp(barcodeScan: String) {
// if presentedViewController != nil {
// return
// }
guard presentedViewController == nil else {
return
}
let alertPrompt = UIAlertController(title: "Barcode Found", message: "\(barcodeScan)", preferredStyle: .actionSheet)
let confirmAction = UIAlertAction(title: "Confirm", style: UIAlertAction.Style.default, handler: { (action) -> Void in
if self.senderTag == 1 {
GlobalVariable.senderTags = 1
self.delegate?.didScan(barcodeData: barcodeScan)
self.navigationController?.popViewController(animated: true)
}
if self.senderTag == 2 {
GlobalVariable.senderTags = 2
self.delegate?.didScan(barcodeData: barcodeScan)
self.navigationController?.popViewController(animated: true)
}
if self.senderTag == 3 {
GlobalVariable.senderTags = 3
self.delegate?.didScan(barcodeData: barcodeScan)
self.navigationController?.popViewController(animated: true)
}
if self.senderTag != 1 && self.senderTag != 2 && self.senderTag != 3 {
let indexPath = IndexPath(row: self.row, section: 0)
let cell: PartsOrderRequestTableCell = globalPartsOrderRequestTableVC?.tableView.cellForRow(at: indexPath) as! PartsOrderRequestTableCell
cell.diagnosticCodeLabel.text = barcodeScan
cell.diagnosticCodeLabel.endEditing(true)
self.navigationController?.popViewController(animated: true)
//return
}
})
let cancelAction = UIAlertAction(title: "Cancel", style: UIAlertAction.Style.cancel, handler: nil)
alertPrompt.addAction(confirmAction)
alertPrompt.addAction(cancelAction)
present(alertPrompt, animated: true, completion: nil)
}
}
extension BarcodeScanVC: AVCaptureMetadataOutputObjectsDelegate {
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
// Check if the metadataObjects array is not nil and it contains at least one object.
if metadataObjects.count == 0 {
qrCodeFrameView?.frame = CGRect.zero
//messageLabel.text = "No QR code is detected"
return
}
// Get the metadata object.
let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
if supportedCodeTypes.contains(metadataObj.type) {
// If the found metadata is equal to the QR code metadata (or barcode) then update the status label's text and set the bounds
let barCodeObject = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
qrCodeFrameView?.frame = barCodeObject!.bounds
if metadataObj.stringValue != nil {
launchApp(barcodeScan: metadataObj.stringValue!)
//messageLabel.text = metadataObj.stringValue
}
}
}
private func updatePreviewLayer(layer: AVCaptureConnection, orientation: AVCaptureVideoOrientation) {
layer.videoOrientation = orientation
videoPreviewLayer?.frame = self.view.bounds
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
if let connection = self.videoPreviewLayer?.connection {
let currentDevice: UIDevice = UIDevice.current
let orientation: UIDeviceOrientation = currentDevice.orientation
let previewLayerConnection : AVCaptureConnection = connection
if previewLayerConnection.isVideoOrientationSupported {
switch (orientation) {
case .portrait: updatePreviewLayer(layer: previewLayerConnection, orientation: .portrait)
break
case .landscapeRight: updatePreviewLayer(layer: previewLayerConnection, orientation: .landscapeLeft)
break
case .landscapeLeft: updatePreviewLayer(layer: previewLayerConnection, orientation: .landscapeRight)
break
case .portraitUpsideDown: updatePreviewLayer(layer: previewLayerConnection, orientation: .portraitUpsideDown)
break
default: updatePreviewLayer(layer: previewLayerConnection, orientation: .portrait)
break
}
}
}
}
}
Am I missing something obvious?
Is there a simple answer to this without changing much of the code I already have?
Thanks!

You're on the right track. It's getting stuck at
if let device = captureDevice
captureDevice is always nil at that point. You assign the device to a local constant in viewDidLoad, so the touchesBegan method can never see it.
guard let captureDevice = deviceDiscoverySession.devices.first else {
print("Failed to get the camera device")
return
}
Change the guard let captureDevice statement to something like:
captureDevice = deviceDiscoverySession.devices.first
and test captureDevice for nil wherever you use it.
EDIT:
override func viewDidLoad() {
super.viewDidLoad()
// Get the back-facing camera for capturing videos
//let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .back)
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back)
captureDevice = deviceDiscoverySession.devices.first
if let captureDevice = captureDevice {
do {
// Get an instance of the AVCaptureDeviceInput class using the previous device object.
let input = try AVCaptureDeviceInput(device: captureDevice)
// Set the input device on the capture session.
captureSession.addInput(input)
// Initialize a AVCaptureMetadataOutput object and set it as the output device to the capture session.
let captureMetadataOutput = AVCaptureMetadataOutput()
captureSession.addOutput(captureMetadataOutput)
// Set delegate and use the default dispatch queue to execute the call back
captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
captureMetadataOutput.metadataObjectTypes = supportedCodeTypes
// captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.qr]
} catch {
// If any error occurs, simply print it out and don't continue any more.
print(error)
return
}
}
... (method cut short; no other changes)
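With captureDevice stored in the property, the focus handler itself can also be hardened. Below is a minimal sketch, assuming the same property names as in the question. captureDevicePointConverted(fromLayerPoint:) replaces the manual x/y swap (it accounts for orientation and videoGravity), and the isSupported checks matter because setting a point of interest on a device that doesn't support it raises an exception:
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    guard let touchPoint = touches.first,
          let device = captureDevice,
          let previewLayer = videoPreviewLayer else { return }
    // Convert the tap from layer coordinates to the device's normalized focus space.
    let layerPoint = touchPoint.location(in: view)
    let focusPoint = previewLayer.captureDevicePointConverted(fromLayerPoint: layerPoint)
    do {
        try device.lockForConfiguration()
        defer { device.unlockForConfiguration() }
        if device.isFocusPointOfInterestSupported, device.isFocusModeSupported(.autoFocus) {
            device.focusPointOfInterest = focusPoint
            device.focusMode = .autoFocus
        }
        if device.isExposurePointOfInterestSupported, device.isExposureModeSupported(.continuousAutoExposure) {
            device.exposurePointOfInterest = focusPoint
            device.exposureMode = .continuousAutoExposure
        }
    } catch {
        print("Could not lock device for configuration: \(error)")
    }
}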

Related

Swift iOS Vision inaccurate rectangle detection result

Xcode: 12.5.1,
iOS: 14.7.1,
device: iPhone 12 Pro
Hello, help needed!
The iOS Vision framework returns inaccurate rectangle results:
[image: incorrect rect result]
The recognized rectangle (red) does not fit the real one (black): it is not square and is narrower than the real one.
I don't understand why this is happening, and I get the same inaccurate result if I draw the observation's bounding box instead.
Full code:
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
private let captureSession = AVCaptureSession()
private lazy var previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
private let videoDataOutput = AVCaptureVideoDataOutput()
private var maskLayer = CAShapeLayer()
override func viewDidLoad() {
super.viewDidLoad()
self.setCameraInput()
self.showCameraFeed()
self.setCameraOutput()
self.videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "camera_frame_processing_queue"))
self.captureSession.startRunning()
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
self.previewLayer.frame = self.view.frame
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let frame = CMSampleBufferGetImageBuffer(sampleBuffer) else {
debugPrint("unable to get image from sample buffer")
return
}
self.detectRectangle(in: frame)
}
private func setCameraInput() {
guard let device = AVCaptureDevice.DiscoverySession(
deviceTypes: [.builtInWideAngleCamera, .builtInDualCamera, .builtInTrueDepthCamera],
mediaType: .video,
position: .back).devices.first else {
fatalError("No back camera device found.")
}
let cameraInput = try! AVCaptureDeviceInput(device: device)
self.captureSession.addInput(cameraInput)
}
private func showCameraFeed() {
self.previewLayer.videoGravity = .resizeAspectFill
self.view.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.view.frame
}
private func setCameraOutput() {
self.videoDataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
self.videoDataOutput.alwaysDiscardsLateVideoFrames = true
self.videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "camera_frame_processing_queue"))
self.captureSession.addOutput(self.videoDataOutput)
guard let connection = self.videoDataOutput.connection(with: AVMediaType.video),
connection.isVideoOrientationSupported else { return }
connection.videoOrientation = .portrait
}
private func detectRectangle(in image: CVPixelBuffer) {
let request = VNDetectRectanglesRequest(completionHandler: { (request: VNRequest, error: Error?) in
DispatchQueue.main.async {
guard let results = request.results as? [VNRectangleObservation] else { return }
self.maskLayer.removeFromSuperlayer()
guard let rect = results.first else{return}
self.drawBoundingBox(rect: rect)
}
})
request.minimumAspectRatio = VNAspectRatio(1.3)
request.maximumAspectRatio = VNAspectRatio(1.6)
request.minimumSize = Float(0.5)
request.maximumObservations = 1
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: image, options: [:])
try? imageRequestHandler.perform([request])
}
func drawBoundingBox(rect : VNRectangleObservation) {
let transform = CGAffineTransform(scaleX: 1, y: -1).translatedBy(x: 0, y: -self.previewLayer.frame.height)
let scale = CGAffineTransform.identity.scaledBy(x: self.previewLayer.frame.width, y: self.previewLayer.frame.height)
let path = UIBezierPath()
path.move(to: CGPoint(x: rect.bottomLeft.x, y: rect.bottomLeft.y))
path.addLine(to: CGPoint(x: rect.bottomRight.x, y: rect.bottomRight.y))
path.addLine(to: CGPoint(x: rect.topRight.x, y: rect.topRight.y))
path.addLine(to: CGPoint(x: rect.topLeft.x, y: rect.topLeft.y))
path.addLine(to: CGPoint(x: rect.bottomLeft.x, y: rect.bottomLeft.y))
path.apply(scale)
path.apply(transform)
path.close()
maskLayer = CAShapeLayer()
maskLayer.fillColor = UIColor.clear.cgColor
maskLayer.lineWidth = 5
maskLayer.strokeColor = UIColor.red.cgColor
maskLayer.path = path.cgPath
previewLayer.insertSublayer(maskLayer, at: 1)
}
}
extension CGPoint {
func scaled(to size: CGSize) -> CGPoint {
return CGPoint(x: self.x * size.width,
y: self.y * size.height)
}
}
The code above is a modified version of the code from this tutorial: rectangle detection tutorial
Here is my code example.
///SET THE VALUE FOR THE DETECTED RECTANGLE
detectRectanglesRequest.minimumAspectRatio = VNAspectRatio(0.3)
detectRectanglesRequest.maximumAspectRatio = VNAspectRatio(0.9)
detectRectanglesRequest.minimumSize = Float(0.4)
detectRectanglesRequest.maximumObservations = 0
detectRectanglesRequest.minimumConfidence = 0.2
detectRectanglesRequest.quadratureTolerance = 2
detectRectanglesRequest.revision = VNDetectRectanglesRequestRevision1
detectRectanglesRequest.preferBackgroundProcessing = true
"try" use better like this:
///SEND THE REQUESTS TO THE REQUEST HANDLER
DispatchQueue.global(qos: .userInteractive).async {
do {
try imageRequestHandler.perform([detectRectanglesRequest])
} catch let error as NSError {
print("Failed to perform image request: \(error)")
// self.presentAlert("Image Request Failed", error: error)
return
}
}
...and lastly:
private func drawBoundingBox(rect: VNRectangleObservation) {
CATransaction.begin()
let transform = CGAffineTransform(scaleX: 1, y: -1)
.translatedBy(x: 0, y: -scanCam.videoPreviewLayer.bounds.height)
let scale = CGAffineTransform.identity
.scaledBy(x: scanCam.videoPreviewLayer.bounds.width,
y: scanCam.videoPreviewLayer.bounds.height)
let currentBounds = rect.boundingBox
.applying(scale).applying(transform)
createLayer(in: currentBounds)
CATransaction.commit()
//viewModel.cameraDetectRectFrame = currentBounds
}
private func createLayer(in rect: CGRect) {
maskLayer = CAShapeLayer()
maskLayer.frame = rect
maskLayer.opacity = 1
maskLayer.borderColor = UIColor.blue.cgColor ///for visual test
maskLayer.borderWidth = 2
scanCam.videoPreviewLayer.insertSublayer(maskLayer, at: 1)
}

How to detect barcode using Apple Vision in CGRect only?

I have an app that uses a CGRect(x: 0, y: 0, width: 335, height: 150) to show the camera for barcode scanning. However, a barcode presented off camera (not in the CGRect) still gets scanned. How can I limit the scanning area to the CGRect in my preview layer? This is using Vision.
let captureSession = AVCaptureSession()
var previewLayer: AVCaptureVideoPreviewLayer!
var activeInput: AVCaptureDeviceInput!
lazy var detectBarcodeRequest = VNDetectBarcodesRequest { request, error in
guard error == nil else {
print("Barcode Error: \(String(describing: error?.localizedDescription))")
return
}
self.processBarCode(request)
}
override func viewDidLoad() {
super.viewDidLoad()
setupSession()
setupPreview()
startSession()
}
func setupSession() {
guard let camera = AVCaptureDevice.default(for: .video) else {
return
}
do {
let videoInput = try AVCaptureDeviceInput(device: camera)
for input in [videoInput] {
if captureSession.canAddInput(input) {
captureSession.addInput(input)
}
}
activeInput = videoInput
} catch {
print("Error setting device input: \(error)")
return
}
let captureOutput = AVCaptureVideoDataOutput()
captureOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: DispatchQoS.QoSClass.default))
captureSession.addOutput(captureOutput)
}
func setupPreview() {
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.videoGravity = .resizeAspectFill
previewLayer.connection?.videoOrientation = .portrait
previewLayer.frame = CGRect(x: 0, y: 0, width: 335, height: 150)
view.layer.insertSublayer(previewLayer, at: 0)
}//setupPreview
func startSession() {
if !captureSession.isRunning {
DispatchQueue.global(qos: .default).async { [weak self] in
self?.captureSession.startRunning()
}
}
}
// MARK: - processBarCode
func processBarCode(_ request: VNRequest) {
DispatchQueue.main.async {
guard let results = request.results as? [VNBarcodeObservation] else {
return
}
if let payload = results.first?.payloadStringValue, let symbology = results.first?.symbology {
print("payload is \(payload) \(symbology) ")
}
}
}//processBarCode
Edit:
// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate
extension CameraViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
let imageRequestHandler = VNImageRequestHandler(
cvPixelBuffer: pixelBuffer,
orientation: .up)
do {
try imageRequestHandler.perform([detectBarcodeRequest])
} catch {
print(error)
}
}//captureOutput
}//extension
extension AVCaptureVideoPreviewLayer {
func rectOfInterestConverted(parentRect: CGRect, fromLayerRect: CGRect) -> CGRect {
let parentWidth = parentRect.width
let parentHeight = parentRect.height
let newX = (parentWidth - fromLayerRect.maxX)/parentWidth
let newY = 1 - (parentHeight - fromLayerRect.minY)/parentHeight
let width = 1 - (fromLayerRect.minX/parentWidth + newX)
let height = (fromLayerRect.maxY/parentHeight) - newY
return CGRect(x: newX, y: newY, width: width, height: height)
}
}
Usage:
if let rect = videoPreviewLayer?.rectOfInterestConverted(parentRect: self.view.frame, fromLayerRect: scanAreaView.frame) {
captureMetadataOutput.rectOfInterest = rect
}
In func captureOutput(_:didOutput:from:) you are most likely passing the whole image into VNImageRequestHandler.
Instead, you need to crop the image to your visible rect, as sketched below.
Another approach is to lock Vision to only the visible part of the image via regionOfInterest, as @HurricaneOnTheMoon proposed below.
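For the cropping approach, a minimal sketch, assuming the pixelBuffer and detectBarcodeRequest names from the question; cropRect is a hypothetical CGRect describing the visible region in the frame's pixel coordinates:
// cropRect is hypothetical: the visible part of the frame in pixel coordinates.
let cropped = CIImage(cvPixelBuffer: pixelBuffer)
    .cropped(to: cropRect)
    // Move the crop back to the origin so Vision's normalized coordinates
    // are relative to the cropped image, not the original frame.
    .transformed(by: CGAffineTransform(translationX: -cropRect.origin.x,
                                       y: -cropRect.origin.y))
let handler = VNImageRequestHandler(ciImage: cropped, orientation: .up, options: [:])
try? handler.perform([detectBarcodeRequest])
Remember that bounding boxes in the results are then relative to the cropped image, not the full frame.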
You can use the regionOfInterest property of VNDetectBarcodesRequest to set up a detection region.
The default value is { { 0, 0 }, { 1, 1 } }, according to the Apple documentation.
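For the scanner in the question, a minimal sketch of wiring that up, assuming the previewLayer and detectBarcodeRequest names from the question. regionOfInterest is normalized with a lower-left origin, while metadataOutputRectConverted(fromLayerRect:) returns a top-left-origin rect, hence the Y flip:
// Map the visible preview area into normalized capture coordinates.
let visibleRect = previewLayer.metadataOutputRectConverted(fromLayerRect: previewLayer.bounds)
// Flip to Vision's lower-left-origin coordinate space.
detectBarcodeRequest.regionOfInterest = CGRect(
    x: visibleRect.origin.x,
    y: 1 - visibleRect.origin.y - visibleRect.height,
    width: visibleRect.width,
    height: visibleRect.height)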

Camera Barcode Scanning Works on iPhone, But Not iPad

I'm using the AVCaptureDevice API for scanning barcodes. It works very well on iPhone, but very similar code doesn't work on iPad, and I'm not quite sure why (it doesn't detect any barcode at all). The main differences are the size, position, and orientation of the scan area. I tested using an iPhone 12 mini (iOS 15 beta) and the original iPad Pro 9.7" (iOS 14.6); not sure if that matters.
Below is the code for the scanner. Please let me know if you notice something that should be changed.
import Foundation
import AVFoundation
import UIKit
class ScannerViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!
var barcodeCallback: (String) -> Void
var cameraScanDismissedCallback: (Bool) -> Void
var scanned = false
var currentDevice: AVCaptureDevice!
var scanRectView: UIView!
init(barcodeCallback: @escaping (String) -> Void, cameraScanDismissedCallback: @escaping (Bool) -> Void) {
self.barcodeCallback = barcodeCallback;
self.cameraScanDismissedCallback = cameraScanDismissedCallback;
super.init(nibName: nil, bundle: nil)
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
override func viewDidLoad() {
super.viewDidLoad()
view.backgroundColor = UIColor.black
captureSession = AVCaptureSession()
guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
let videoInput: AVCaptureDeviceInput
do {
videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
} catch {
return
}
if (captureSession.canAddInput(videoInput)) {
captureSession.addInput(videoInput)
} else {
failed()
return
}
let metadataOutput = AVCaptureMetadataOutput()
if (captureSession.canAddOutput(metadataOutput)) {
captureSession.addOutput(metadataOutput)
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.metadataObjectTypes = [.qr, .ean8, .ean13, .pdf417, .code128, .aztec, .code39, .code39Mod43, .code93, .dataMatrix, .face, .interleaved2of5, .itf14, .upce]
} else {
failed()
return
}
let windowSize = UIScreen.main.bounds.size
var scanSize: CGSize!;
var scanRect: CGRect!;
if(UIDevice.current.userInterfaceIdiom == .pad){
scanSize = CGSize(width:windowSize.width*1/3, height:windowSize.width*1/7);
scanRect = CGRect(x: UIScreen.main.bounds.midX - scanSize.width/2,
y: UIScreen.main.bounds.midY - scanSize.height/2,
width:scanSize.width, height:scanSize.height);
}else{
scanSize = CGSize(width:windowSize.width*2/3, height:windowSize.width*1/3);
scanRect = CGRect(x: UIScreen.main.bounds.midX - scanSize.width/2,
y: UIScreen.main.bounds.midY - scanSize.height/2,
width:scanSize.width, height:scanSize.height);
}
scanRect = CGRect(x:scanRect.origin.y/windowSize.height,
y:scanRect.origin.x/windowSize.width,
width:scanRect.size.height/windowSize.height,
height:scanRect.size.width/windowSize.width);
metadataOutput.rectOfInterest = scanRect
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
if(UIDevice.current.userInterfaceIdiom == .pad){
let orientation: UIDeviceOrientation = UIDevice.current.orientation
previewLayer.connection?.videoOrientation = {
switch (orientation) {
case .faceUp:
return .landscapeLeft
case .portrait:
return .portrait
case .landscapeRight:
return .landscapeLeft
case .landscapeLeft:
return .landscapeRight
default:
return .portrait
}
}()
}
previewLayer.frame = view.layer.bounds
previewLayer.videoGravity = .resizeAspectFill
view.layer.addSublayer(previewLayer)
scanRectView = UIView();
view.addSubview(self.scanRectView)
scanRectView.frame = CGRect(x:0, y:0, width: scanSize.width,
height: scanSize.height);
if(UIDevice.current.userInterfaceIdiom == .pad){
scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX - scanSize.width/2,
y:UIScreen.main.bounds.midY - scanSize.height/2)
}else{
scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX,
y:UIScreen.main.bounds.midY)
}
scanRectView.layer.borderColor = UIColor.yellow.cgColor
scanRectView.layer.borderWidth = 5;
currentDevice = videoCaptureDevice
captureSession.startRunning()
toggleTorch(on: true)
}
func toggleTorch(on: Bool) {
guard let device = currentDevice else { return }
if device.hasTorch {
do {
try device.lockForConfiguration()
if(UIDevice.current.userInterfaceIdiom == .pad){
device.videoZoomFactor = 1.3
}else{
device.videoZoomFactor = 1.5
}
if on == true {
device.torchMode = .on
} else {
device.torchMode = .off
}
device.unlockForConfiguration()
} catch {
print("Torch could not be used")
}
} else {
print("Torch is not available")
}
}
func failed() {
let ac = UIAlertController(title: "Scanning not supported", message: "Your device does not support scanning a code from an item. Please use a device with a camera.", preferredStyle: .alert)
ac.addAction(UIAlertAction(title: "OK", style: .default))
present(ac, animated: true)
captureSession = nil
toggleTorch(on: false)
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
if (captureSession?.isRunning == false) {
captureSession.startRunning()
toggleTorch(on: true)
}
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
if (captureSession?.isRunning == true) {
captureSession.stopRunning()
toggleTorch(on: false)
}
cameraScanDismissedCallback(scanned)
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
captureSession.stopRunning()
if let metadataObject = metadataObjects.first {
guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
guard let stringValue = readableObject.stringValue else { return }
AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
found(code: stringValue)
scanned = true
}
dismiss(animated: true)
}
func found(code: String) {
print(code)
barcodeCallback(code)
}
override var prefersStatusBarHidden: Bool {
return true
}
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
if(UIDevice.current.userInterfaceIdiom == .pad){
return .landscape
}else{
return .portrait
}
}
}
Just in case anyone runs into a similar issue, check for the following:
Orientation of the video
The position of AVCaptureMetadataOutput.rectOfInterest
I'm still having an issue with the rectOfInterest not being positioned at the center, but it works. Once I can figure out how to center it, I will post the solution here.
It isn't centered, probably because of your navigation bar. Try setting rectOfInterest as below; remember that the camera uses a different coordinate system than UIView (posX and posY are in UIView coordinates):
let aimRect = CGRect(x: (posY - navBar.height) / UIScreen.main.bounds.height,
y: posX / UIScreen.main.bounds.width,
width: rectHeight / UIScreen.main.bounds.height,
height: rectWidth / UIScreen.main.bounds.width)
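As an alternative to the manual math, AVCaptureVideoPreviewLayer can do this conversion itself via metadataOutputRectConverted(fromLayerRect:), which also accounts for videoGravity and the connection's orientation. A sketch, assuming the previewLayer, scanRectView, and metadataOutput names from the question, run after the session is configured:
// scanRectView sits in the same coordinate space the preview layer fills,
// so its frame can be converted directly.
metadataOutput.rectOfInterest = previewLayer.metadataOutputRectConverted(fromLayerRect: scanRectView.frame)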

Why does scanning a 1D barcode only get a result in the middle of the view (Swift)?

I have a question about scanning 1D barcodes. I drew a yellow border around the scanning region view; why do I only get a result when the barcode is in the middle of the region?
It works fine when I put my 1D barcode in the green region in the following image. When I put it in the red region, nothing happens. My app supports only landscape-right and landscape-left orientations.
What's wrong with my code?
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate, UIAlertViewDelegate {
var scanRectView:UIView!
var device:AVCaptureDevice!
var input:AVCaptureDeviceInput!
var output:AVCaptureMetadataOutput!
var session:AVCaptureSession!
var preview:AVCaptureVideoPreviewLayer!
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewDidLayoutSubviews() {
self.configureVideoOrientation()
}
fileprivate func configureVideoOrientation() {
let previewLayer = self.preview
if let connection = previewLayer?.connection {
let orientation = UIDevice.current.orientation
if connection.isVideoOrientationSupported, let videoOrientation = AVCaptureVideoOrientation(rawValue: orientation.rawValue) {
previewLayer?.frame = self.view.bounds
connection.videoOrientation = videoOrientation
}
}
}
@IBAction func btnClicked(_ sender: Any) {
do{
self.device = AVCaptureDevice.default(for: AVMediaType.video)
self.input = try AVCaptureDeviceInput(device: device)
self.output = AVCaptureMetadataOutput()
output.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
self.session = AVCaptureSession()
if UIScreen.main.bounds.size.height < 500 {
self.session.sessionPreset = AVCaptureSession.Preset.vga640x480
}else{
self.session.sessionPreset = AVCaptureSession.Preset.high
}
self.session.addInput(self.input)
self.session.addOutput(self.output)
self.output.metadataObjectTypes = [.qr, .ean8, .ean13, .pdf417, .code128, .aztec, .code39, .code39Mod43, .code93, .dataMatrix, .face, .interleaved2of5, .itf14, .upce]
let windowSize = UIScreen.main.bounds.size
let scanSize = CGSize(width:windowSize.width*1/3, height:windowSize.width*1/3)
var scanRect = CGRect(x:(windowSize.width-scanSize.width)/2,
y:(windowSize.height-scanSize.height)/2,
width:scanSize.width, height:scanSize.height)
scanRect = CGRect(x:scanRect.origin.y/windowSize.height,
y:scanRect.origin.x/windowSize.width,
width:scanRect.size.height/windowSize.height,
height:scanRect.size.width/windowSize.width);
self.output.rectOfInterest = scanRect
self.preview = AVCaptureVideoPreviewLayer(session:self.session)
self.preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.preview.frame = UIScreen.main.bounds
self.view.layer.insertSublayer(self.preview, at:0)
self.scanRectView = UIView();
self.view.addSubview(self.scanRectView)
self.scanRectView.frame = CGRect(x:0, y:0, width:scanSize.width,
height:scanSize.height);
self.scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX,
y:UIScreen.main.bounds.midY)
self.scanRectView.layer.borderColor = UIColor.yellow.cgColor
self.scanRectView.layer.borderWidth = 5;
self.session.startRunning()
do {
try self.device!.lockForConfiguration()
} catch _ {
NSLog("Error: lockForConfiguration.");
}
self.device!.videoZoomFactor = 1.5
self.device!.unlockForConfiguration()
}catch _ {
}
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
var stringValue:String?
if metadataObjects.count > 0 {
let metadataObject = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
stringValue = metadataObject.stringValue
if stringValue != nil{
self.session.stopRunning()
}
}
self.session.stopRunning()
}
}

How to make a draggable UIView snap to the corners of the screen?

I have a draggable UIView and I am trying to make it snap to the four corners of the screen. I tried a few things, but none of them have worked. Here's the code that I have:
import UIKit
import AVKit
import Vision
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var crystalName: UILabel!
@IBOutlet weak var crystalInfoContainer: UIView!
@IBOutlet weak var accuracy: UILabel!
var model = IdenticrystClassification().model
override func viewDidLoad() {
super.viewDidLoad()
// This method starts the camera.
let captureSession = AVCaptureSession()
guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
captureSession.addInput(input)
captureSession.startRunning()
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.videoGravity = .resizeAspectFill
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
// This method defines the sub view and its properties.
view.addSubview(crystalInfoContainer)
crystalInfoContainer.clipsToBounds = true
crystalInfoContainer.layer.cornerRadius = 10.0
//crystalInfoContainer.layer.maskedCorners = [.layerMinXMinYCorner, .layerMaxXMinYCorner]
// This method defines torch functionality.
func toggleTorch(on: Bool) {
guard let device = AVCaptureDevice.default(for: .video) else { return }
if device.hasTorch {
do {
try device.lockForConfiguration()
if on == true {
device.torchMode = .on
} else {
device.torchMode = .off
}
device.unlockForConfiguration()
} catch {
print("Torch could not be used")
}
} else {
print("Torch is not available")
}
}
// This is the code that I am trying to work out.
func relativeVelocity(forVelocity velocity: CGFloat, from currentValue: CGFloat, to targetValue: CGFloat) -> CGFloat {
guard currentValue - targetValue != 0 else { return 0 }
return velocity / (targetValue - currentValue)
}
func nearestCorner(to point: CGPoint) -> CGPoint {
var minDistance = CGFloat.greatestFiniteMagnitude
var closestPosition = CGPoint.zero
for position in crystalInfoContainer { **Error1**
let distance = point.distance(to: position)
if distance < minDistance {
closestPosition = position
minDistance = distance
}
}
return closestPosition
let decelerationRate = UIScrollView.DecelerationRate.normal.rawValue
let velocity = UIPanGestureRecognizer.velocity(in: view)**Error2**
let projectedPosition = CGPoint(
x: crystalInfoContainer.center.x + project(initialVelocity: velocity.x, decelerationRate: decelerationRate),
y: crystalInfoContainer.center.y + project(initialVelocity: velocity.y, decelerationRate: decelerationRate)
)
let nearestCornerPosition = nearestCorner(to: projectedPosition)
let relativeInitialVelocity = CGVector(
dx: relativeVelocity(forVelocity: velocity.x, from: crystalInfoContainer.center.x, to: nearestCornerPosition.x),
dy: relativeVelocity(forVelocity: velocity.y, from: crystalInfoContainer.center.y, to: nearestCornerPosition.y)
)
let params = UISpringTimingParameters(damping: 1, response: 0.4, initialVelocity: relativeInitialVelocity)
let animator = UIViewPropertyAnimator(duration: 0, timingParameters: params)
animator.addAnimations {
self.crystalInfoContainer.center = nearestCornerPosition
}
animator.startAnimation()
}
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "video"))
captureSession.addOutput(dataOutput)
toggleTorch(on: true)
}
// Handles Vision output.
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
guard let model = try? VNCoreMLModel(for: model) else { return }
let request = VNCoreMLRequest(model: model)
{ (finishedReq, err) in
guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
guard let firstObservation = results.first else { return }
let name: String = firstObservation.identifier
let acc: Int = Int(firstObservation.confidence * 100)
DispatchQueue.main.async {
self.crystalName.text = name
self.accuracy.text = "Confidence: \(acc)%"
}
}
try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
override var prefersStatusBarHidden: Bool {
return true
}
}
Error1: For-in loop requires 'UIView?' to conform to 'Sequence'; did you mean to unwrap optional?
Error2: Instance member 'velocity' cannot be used on type 'UIPanGestureRecognizer'; did you mean to use a value of this type instead?
The problem is that your panView method is wrong. You need to switch on the gesture recognizer's state: began, changed, or ended. Pan only while the gesture is changing. When the gesture ends, then and only then, animate the view into the nearest corner, as in the sketch below.
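A minimal sketch of that shape, assuming the crystalInfoContainer from the question and a hypothetical cornerPositions array holding the four target center points; attach it with UIPanGestureRecognizer(target:action:):
@objc func handlePan(_ gesture: UIPanGestureRecognizer) {
    switch gesture.state {
    case .changed:
        // While the gesture changes, just follow the finger.
        let translation = gesture.translation(in: view)
        crystalInfoContainer.center.x += translation.x
        crystalInfoContainer.center.y += translation.y
        gesture.setTranslation(.zero, in: view)
    case .ended, .cancelled:
        // Only when the gesture ends, snap to the nearest corner.
        let current = crystalInfoContainer.center
        let nearest = cornerPositions.min(by: {
            hypot($0.x - current.x, $0.y - current.y) <
            hypot($1.x - current.x, $1.y - current.y)
        }) ?? current
        let timing = UISpringTimingParameters(dampingRatio: 1)
        let animator = UIViewPropertyAnimator(duration: 0.4, timingParameters: timing)
        animator.addAnimations { self.crystalInfoContainer.center = nearest }
        animator.startAnimation()
    default:
        break
    }
}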
