iOS Swift - How to use rectOfInterest correctly (barcode scanner)

I have an iOS app that scans a barcode.
Now I want to define a specific scan area. For this purpose I am using the rectOfInterest property and the metadataOutputRectOfInterest(for:) method.
The problem I face: when I use just the code below, nothing is scanned at all; if I remove the barcodeAreaView, scanning works fine all over the display.
class BarcodeReaderViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    // Camera view
    var cameraView: AVCaptureVideoPreviewLayer?
    // AV capture session and dispatch queue
    let captureSession = AVCaptureSession()
    let sessionQueue = DispatchQueue(label: AVCaptureSession.self.description(), attributes: [], target: nil)
    var barcodeAreaView: UIView?
    let supportedCodeTypes = [
        AVMetadataObjectTypeEAN8Code,
        AVMetadataObjectTypeEAN13Code,
        AVMetadataObjectTypeUPCECode,
        AVMetadataObjectTypeCode39Code,
        AVMetadataObjectTypeCode93Code,
        AVMetadataObjectTypeCode128Code,
        AVMetadataObjectTypeCode39Mod43Code,
        AVMetadataObjectTypeInterleaved2of5Code
    ]
    let metadataOutput = AVCaptureMetadataOutput()
    var barcodeArea: CGRect!

    override func viewDidLoad() {
        super.viewDidLoad()
        let width = 250
        let height = 100
        print(self.view.frame.size.width)
        let xPos = (CGFloat(self.view.frame.size.width) / CGFloat(2)) - (CGFloat(width) / CGFloat(2))
        let yPos = (CGFloat(self.view.frame.size.height) / CGFloat(2)) - (CGFloat(height) / CGFloat(2))
        barcodeArea = CGRect(x: Int(xPos), y: Int(yPos), width: width, height: height)
        barcodeAreaView = UIView()
        barcodeAreaView?.layer.borderColor = UIColor.red.cgColor
        barcodeAreaView?.layer.borderWidth = 1
        barcodeAreaView?.frame = barcodeArea
        view.addSubview(barcodeAreaView!)
        captureSession.beginConfiguration()
        let videoDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
        if videoDevice != nil {
            let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice)
            if videoDeviceInput != nil {
                if captureSession.canAddInput(videoDeviceInput) {
                    captureSession.addInput(videoDeviceInput)
                }
            }
            if captureSession.canAddOutput(metadataOutput) {
                captureSession.addOutput(metadataOutput)
                metadataOutput.metadataObjectTypes = supportedCodeTypes
                metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
                cameraView = AVCaptureVideoPreviewLayer(session: captureSession)
                cameraView?.videoGravity = AVLayerVideoGravityResizeAspectFill
                cameraView?.frame = view.layer.bounds
                metadataOutput.rectOfInterest = cameraView!.metadataOutputRectOfInterest(for: barcodeArea)
                view.layer.addSublayer(cameraView!)
            }
        }
        captureSession.commitConfiguration()
        view.bringSubview(toFront: barcodeAreaView!)
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        // Start AV capture session
        sessionQueue.async {
            self.captureSession.startRunning()
        }
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        // Stop AV capture session
        sessionQueue.async {
            self.captureSession.stopRunning()
        }
    }

    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [Any]!, from connection: AVCaptureConnection!) {
        if let metadataObject = metadataObjects.first {
            let readableObject = metadataObject as! AVMetadataMachineReadableCodeObject
            let message = readableObject.stringValue
            print(message)
        }
    }
}
I already took a look at this question:
How do I use the metadataOutputRectOfInterestForRect method and rectOfInterest property to scan a specific area? (QR Code)
The problem I face there is that the scan is a bit laggy and my navigationController stops working (because the listener awaits a scan).
UPDATE:
Solved the issue, as suggested in this answer:
https://stackoverflow.com/a/37603743/2319900
I added the following line right after my self.captureSession.startRunning():
metadataOutput.rectOfInterest = cameraView!.metadataOutputRectOfInterest(for: barcodeArea)
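In context, the fix looks roughly like the sketch below (a reconstruction, not verbatim from the linked answer). The conversion has to wait until the session is running: before that, the preview layer does not yet know the video dimensions, so metadataOutputRectOfInterest(for:) produces a rect that matches nothing on screen.
override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
    sessionQueue.async {
        self.captureSession.startRunning()
        // Only now can the preview layer map view coordinates into the
        // output's normalized coordinate space; converting earlier in
        // viewDidLoad is what broke scanning in the code above.
        DispatchQueue.main.async {
            self.metadataOutput.rectOfInterest =
                self.cameraView!.metadataOutputRectOfInterest(for: self.barcodeArea)
        }
    }
}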

Related

Why does scanning a 1D barcode only give a result in the middle of the view (Swift)?

I have a question about scanning 1D barcodes. I drew a yellow border around the scanning region view, but I only get a successful result when the barcode sits in the middle of that region.
It works fine when I put my 1D barcode in the green region shown in the following image; when I put it in the red region, nothing happens. My app supports only landscape left and right orientations.
What's wrong with my code?
import UIKit
import AVFoundation

class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate, UIAlertViewDelegate {
    var scanRectView: UIView!
    var device: AVCaptureDevice!
    var input: AVCaptureDeviceInput!
    var output: AVCaptureMetadataOutput!
    var session: AVCaptureSession!
    var preview: AVCaptureVideoPreviewLayer!

    override func viewDidLoad() {
        super.viewDidLoad()
    }

    override func viewDidLayoutSubviews() {
        self.configureVideoOrientation()
    }

    fileprivate func configureVideoOrientation() {
        let previewLayer = self.preview
        if let connection = previewLayer?.connection {
            let orientation = UIDevice.current.orientation
            if connection.isVideoOrientationSupported, let videoOrientation = AVCaptureVideoOrientation(rawValue: orientation.rawValue) {
                previewLayer?.frame = self.view.bounds
                connection.videoOrientation = videoOrientation
            }
        }
    }

    @IBAction func btnClicked(_ sender: Any) {
        do {
            self.device = AVCaptureDevice.default(for: AVMediaType.video)
            self.input = try AVCaptureDeviceInput(device: device)
            self.output = AVCaptureMetadataOutput()
            output.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
            self.session = AVCaptureSession()
            if UIScreen.main.bounds.size.height < 500 {
                self.session.sessionPreset = AVCaptureSession.Preset.vga640x480
            } else {
                self.session.sessionPreset = AVCaptureSession.Preset.high
            }
            self.session.addInput(self.input)
            self.session.addOutput(self.output)
            self.output.metadataObjectTypes = [.qr, .ean8, .ean13, .pdf417, .code128, .aztec, .code39, .code39Mod43, .code93, .dataMatrix, .face, .interleaved2of5, .itf14, .upce]
            let windowSize = UIScreen.main.bounds.size
            let scanSize = CGSize(width: windowSize.width * 1/3, height: windowSize.width * 1/3)
            var scanRect = CGRect(x: (windowSize.width - scanSize.width) / 2,
                                  y: (windowSize.height - scanSize.height) / 2,
                                  width: scanSize.width, height: scanSize.height)
            // Hand-rolled conversion into the metadata output's normalized,
            // rotated coordinate space (x/y and width/height are swapped)
            scanRect = CGRect(x: scanRect.origin.y / windowSize.height,
                              y: scanRect.origin.x / windowSize.width,
                              width: scanRect.size.height / windowSize.height,
                              height: scanRect.size.width / windowSize.width)
            self.output.rectOfInterest = scanRect
            self.preview = AVCaptureVideoPreviewLayer(session: self.session)
            self.preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
            self.preview.frame = UIScreen.main.bounds
            self.view.layer.insertSublayer(self.preview, at: 0)
            self.scanRectView = UIView()
            self.view.addSubview(self.scanRectView)
            self.scanRectView.frame = CGRect(x: 0, y: 0, width: scanSize.width, height: scanSize.height)
            self.scanRectView.center = CGPoint(x: UIScreen.main.bounds.midX,
                                               y: UIScreen.main.bounds.midY)
            self.scanRectView.layer.borderColor = UIColor.yellow.cgColor
            self.scanRectView.layer.borderWidth = 5
            self.session.startRunning()
            do {
                try self.device!.lockForConfiguration()
            } catch _ {
                NSLog("Error: lockForConfiguration.")
            }
            self.device!.videoZoomFactor = 1.5
            self.device!.unlockForConfiguration()
        } catch _ {
        }
    }

    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        var stringValue: String?
        if metadataObjects.count > 0 {
            let metadataObject = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
            stringValue = metadataObject.stringValue
            if stringValue != nil {
                self.session.stopRunning()
            }
        }
        self.session.stopRunning()
    }
}
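Worth noting about the snippet above: rectOfInterest is expressed in the metadata output's normalized, rotated coordinate space, and hand-rolled conversions like this only hold for one orientation and one videoGravity. A less fragile sketch, assuming the same self.preview and self.scanRectView as above and run after session.startRunning(), lets the preview layer do the conversion:
// Let AVFoundation convert the reticle's layer-space frame into the
// output's normalized space; this stays valid in both landscape modes.
self.output.rectOfInterest =
    self.preview.metadataOutputRectConverted(fromLayerRect: self.scanRectView.frame)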

How to scan only certain areas through the camera with Swift 5?

I'd like to scan a QR code through the camera. Scanning the QR code itself is no problem,
but I want to scan only a certain area. How can I do this?
Currently the QR code is recognized anywhere in the entire camera view.
import Foundation
import UIKit
import AVFoundation

class ScannerViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    @IBOutlet weak var qrcodeView: UIView!
    @IBOutlet weak var mainText: UITextView!
    @IBOutlet weak var headerBar: UINavigationBar!
    var captureSession: AVCaptureSession!
    var previewLayer: AVCaptureVideoPreviewLayer!

    override func viewDidLoad() {
        super.viewDidLoad()
        view.backgroundColor = UIColor.black
        self.qrcodeView.backgroundColor = UIColor.black.withAlphaComponent(0.5)
        captureSession = AVCaptureSession()
        guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
        let videoInput: AVCaptureDeviceInput
        do {
            videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
        } catch {
            return
        }
        if (captureSession.canAddInput(videoInput)) {
            captureSession.addInput(videoInput)
        } else {
            failed()
            return
        }
        let metadataOutput = AVCaptureMetadataOutput()
        if (captureSession.canAddOutput(metadataOutput)) {
            captureSession.addOutput(metadataOutput)
            metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
            metadataOutput.metadataObjectTypes = [.qr]
        } else {
            failed()
            return
        }
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.frame = view.layer.bounds
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.insertSublayer(previewLayer, at: 0)
        captureSession.startRunning()
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        if (captureSession?.isRunning == false) {
            captureSession.startRunning()
        }
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        if (captureSession?.isRunning == true) {
            captureSession.stopRunning()
        }
    }

    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        // let scanRect = CGRect(x: 0, y: 0, width: 200, height: 200)
        // let rectOfInterest = layer.metadataOutputRectConverted(fromLayerRect: scanRect)
        // metadataObjects.rectOfInterest = rectOfInterest
        captureSession.stopRunning()
        if let metadataObject = metadataObjects.first {
            guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
            guard let stringValue = readableObject.stringValue else { return }
            AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
            found(code: stringValue)
        } else {
            print("not support")
        }
    }

    func found(code: String) {
        print(code)
        self.dismiss(animated: true, completion: nil)
    }

    func failed() {
        captureSession = nil
    }
}
As in the picture above, I would like to scan only within the square area.
I desperately need this.
Thanks in advance.
You can use the rectOfInterest property to achieve this.
Add the following code after captureSession.startRunning().
First you need to convert the rect using metadataOutputRectConverted(fromLayerRect:):
let rectOfInterest = videoPreviewLayer?.metadataOutputRectConverted(fromLayerRect: self.viewAreaOfScan.frame) // videoPreviewLayer is AVCaptureVideoPreviewLayer
After that you can assign it to the metadata output's rectOfInterest:
metadataOutput.rectOfInterest = rectOfInterest ?? CGRect(x: 0, y: 0, width: 1, height: 1)
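Wired into the ScannerViewController from the question, that could look like the sketch below. Two assumptions here: qrcodeView is the square overlay you want to restrict scanning to, and metadataOutput has been promoted from a local in viewDidLoad to a property so it is still reachable at this point.
captureSession.startRunning()
// Convert only after the session is running and the preview layer is laid
// out; metadataOutputRectConverted(fromLayerRect:) maps view points into
// the output's normalized coordinate space.
metadataOutput.rectOfInterest =
    previewLayer.metadataOutputRectConverted(fromLayerRect: qrcodeView.frame)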

AVCapturePhotoOutput iOS Camera Super Dark

I have an app set up to use the camera for a photo (on a timer basis) to detect the presence of a face. The detection process works fairly well when I feed the app a photo that I have added to assets. However, when I attempt to use the output of the camera directly, or even after saving the image to a file, the resulting image is so dark that the face recognition is completely unreliable.
If I display the image as seen by the camera, it looks correct. I captured the following two images - one from the camera as seen live, the other of the same view after the image was created from AVCapturePhotoOutput. The same darkness happens if I simply display the captured image in an image view.
Note the comment: "I put the breakpoint here and took a screen shot". Then I took the second screen shot when the code completed. These were taken in HIGH light.
Here's the basic code:
import UIKit
import AVFoundation
import Vision

class CRSFaceRecognitionViewController: UIViewController, UIImagePickerControllerDelegate {
    var sentBy : String?
    //timers
    var faceTimer : Timer?
    var frvcTimer : Timer?
    //capture
    var captureSession = AVCaptureSession()
    var settings = AVCapturePhotoSettings()
    var backCamera : AVCaptureDevice?
    var frontCamera : AVCaptureDevice?
    var currentCamera : AVCaptureDevice?
    var photoOutput : AVCapturePhotoOutput?
    var cameraPreviewLayer : AVCaptureVideoPreviewLayer?
    var image : UIImage?
    var outputImage : UIImage?
    @IBOutlet weak var imageView: UIImageView!

    //MARK: - Setup
    override func viewDidLoad() {
        super.viewDidLoad()
    }//viewDidLoad

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(true)
    }//viewWillAppear

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(true)
        //check for camera
        if (UIImagePickerController.isSourceTypeAvailable(UIImagePickerControllerSourceType.camera)) {
            setupCaptureSession()
            setupDevices()
            setupInputOutput()
            setupPreviewLayer()
            startRunningCaptureSession()
            photoOutput?.capturePhoto(with: settings, delegate: self)
        } else {
            print("Camera not present")
        }
    }//viewDidAppear

    //MARK: - Video
    @objc func showFaceRecognitionViewController() {
        //all this does is present the image in a new ViewController imageView
        performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
    }//showThePhotoView

    func setupCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
    }//setupCaptureSession

    func setupDevices() {
        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
        let devices = deviceDiscoverySession.devices
        for device in devices {
            if device.position == AVCaptureDevice.Position.back {
                backCamera = device
            } else if device.position == AVCaptureDevice.Position.front {
                frontCamera = device
            }//if else
        }//for in
        currentCamera = frontCamera
    }//setupDevices

    func setupInputOutput() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
            captureSession.addInput(captureDeviceInput)
            photoOutput = AVCapturePhotoOutput()
            photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: { (success, error) in
                print("in photoOutput completion handler")
            })
            captureSession.addOutput(photoOutput!)
        } catch {
            print("Error creating AVCaptureDeviceInput:", error)
        }//do catch
    }//setupInputOutput

    func setupPreviewLayer() {
        cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        cameraPreviewLayer?.frame = view.frame
        view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
    }//setupPreviewLayer

    func startRunningCaptureSession() {
        captureSession.startRunning()
    }//startRunningCaptureSession

    //MARK: - Segue
    override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
        if segue.identifier == "showSavedCameraPhoto" {
            let controller = segue.destination as! JustToSeeThePhotoViewController
            controller.inImage = outputImage
        }//if segue
    }//prepare

    //MARK: - Look for Faces
    func findTheFaces() {
        let myView : UIView = self.view
        guard let outImage = outputImage else { return }
        let imageView = UIImageView(image: outImage)
        imageView.contentMode = .scaleAspectFit
        let scaledHeight = myView.frame.width / outImage.size.width * outImage.size.height
        imageView.frame = CGRect(x: 0, y: 0, width: myView.frame.width, height: myView.frame.height)
        imageView.backgroundColor = UIColor.blue
        myView.addSubview(imageView)
        let request = VNDetectFaceRectanglesRequest { (req, err) in
            if let err = err {
                print("VNDetectFaceRectanglesRequest failed to run:", err)
                return
            }//if let err
            print(req.results ?? "req.results is empty")
            req.results?.forEach({ (res) in
                DispatchQueue.main.async {
                    guard let faceObservation = res as? VNFaceObservation else { return }
                    let x = myView.frame.width * faceObservation.boundingBox.origin.x
                    let width = myView.frame.width * faceObservation.boundingBox.width
                    let height = scaledHeight * faceObservation.boundingBox.height
                    let y = scaledHeight * (1 - faceObservation.boundingBox.origin.y) - height
                    let redView = UIView()
                    redView.backgroundColor = .red
                    redView.alpha = 0.4
                    redView.frame = CGRect(x: x, y: y, width: width, height: height)
                    myView.addSubview(redView)
                    print("faceObservation bounding box:")
                    print(faceObservation.boundingBox)
                    //if you get here, then you have a face bounding box
                }//main
            })//forEach block
        }//let request
        guard let cgImage = outImage.cgImage else { return }
        DispatchQueue.global(qos: .utility).async {
            let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
            do {
                try handler.perform([request])
                print("handler request was successful")
                self.performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
            } catch let reqErr {
                print("Failed to perform request:", reqErr)
            }
        }//DispatchQueue
    }//findTheFaces

    //MARK: - Memory
    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }//didReceiveMemoryWarning
}//class

extension CRSFaceRecognitionViewController : AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation() {
            print(imageData)
            outputImage = UIImage(data: imageData)
            //
            //I put breakpoint here and took a screen shot
            //
            if let outImage = outputImage?.updateImageOrientionUpSide() {
                self.outputImage = outImage
            }
            DispatchQueue.main.async {
                self.findTheFaces()
            }
        }//if let imageData
    }//photoOutput
}//extension

extension UIImage {
    //you need to do this to ensure that the image is in portrait mode
    //the face recognition method will not work if the face is horizontal
    func updateImageOrientionUpSide() -> UIImage? {
        if self.imageOrientation == .up {
            return self
        }
        UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
        self.draw(in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
        if let normalizedImage: UIImage = UIGraphicsGetImageFromCurrentImageContext() {
            UIGraphicsEndImageContext()
            return normalizedImage
        }
        UIGraphicsEndImageContext()
        return nil
    }//updateImageOrientionUpSide
}//image
I must be doing something wrong with the camera capture. Any help would be appreciated. Swift 4, iOS 11.2.5, Xcode 9.2
I would try adding a delay between startRunningCaptureSession() and photoOutput?.capturePhoto(with: settings, delegate: self).
For example:
startRunningCaptureSession()
DispatchQueue.main.asyncAfter(deadline: .now() + .seconds(4), execute: {
    // take the photo once the camera has had a few seconds to adjust
    self.photoOutput?.capturePhoto(with: self.settings, delegate: self)
})
It appears as though I have too many async pieces. I broke the code into separate functions for each major piece, async or not, and put them all into a DispatchGroup. That seems to have solved the issue.
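For what it's worth, a fixed delay is only a guess at how long auto-exposure needs. An alternative worth trying (a suggestion, not from this thread) is to key-value observe the capture device's isAdjustingExposure flag, which is documented as KVO-observable, and take the photo once it settles:
private var exposureObservation: NSKeyValueObservation?

func capturePhotoOnceExposureSettles() {
    guard let camera = currentCamera else { return }
    exposureObservation = camera.observe(\.isAdjustingExposure, options: [.new]) { [weak self] device, _ in
        guard let strongSelf = self, !device.isAdjustingExposure else { return }
        strongSelf.exposureObservation = nil // one-shot: stop observing after the first settle
        DispatchQueue.main.async {
            // A fresh settings object per capture; AVCapturePhotoSettings
            // instances must not be reused across captures.
            strongSelf.photoOutput?.capturePhoto(with: AVCapturePhotoSettings(), delegate: strongSelf)
        }
    }
}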

Access data outside its method in Swift

I am having trouble with my Swift code. The problem is that my prepareForSegue lives outside the delegate method, so the data obtained inside that method cannot be used in prepareForSegue. How can I expose the data so it works in my prepareForSegue?
I am giving you all of my code, since all of it may be useful :-)
The error occurs in the prepareForSegue block at the bottom of the code. It says: use of unresolved identifier "metadataObj".
import UIKit
import AVFoundation

class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    @IBOutlet weak var messageLabel: UILabel!
    var captureSession: AVCaptureSession?
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    var qrCodeFrameView: UIView?
    let supportedBarCodes = [AVMetadataObjectTypeQRCode, AVMetadataObjectTypeCode128Code, AVMetadataObjectTypeCode39Code, AVMetadataObjectTypeCode93Code, AVMetadataObjectTypeUPCECode, AVMetadataObjectTypePDF417Code, AVMetadataObjectTypeEAN13Code, AVMetadataObjectTypeAztecCode]

    override func viewDidLoad() {
        super.viewDidLoad()
        let captureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
        do {
            let input = try AVCaptureDeviceInput(device: captureDevice)
            captureSession = AVCaptureSession()
            captureSession?.addInput(input)
            let captureMetadataOutput = AVCaptureMetadataOutput()
            captureSession?.addOutput(captureMetadataOutput)
            captureMetadataOutput.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
            captureMetadataOutput.metadataObjectTypes = supportedBarCodes
            videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
            videoPreviewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
            videoPreviewLayer?.frame = view.layer.bounds
            view.layer.addSublayer(videoPreviewLayer!)
            captureSession?.startRunning()
            view.bringSubviewToFront(messageLabel)
            qrCodeFrameView = UIView()
            if let qrCodeFrameView = qrCodeFrameView {
                qrCodeFrameView.layer.borderColor = UIColor.greenColor().CGColor
                qrCodeFrameView.layer.borderWidth = 2
                view.addSubview(qrCodeFrameView)
                view.bringSubviewToFront(qrCodeFrameView)
            }
        } catch {
            print(error)
            return
        }
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]?, fromConnection connection: AVCaptureConnection!) {
        if metadataObjects == nil || metadataObjects!.count == 0 {
            qrCodeFrameView?.frame = CGRectZero
            messageLabel.text = "No barcode/QR code is detected"
            return
        }
        let metadataObj = metadataObjects![0] as! AVMetadataMachineReadableCodeObject
        if supportedBarCodes.contains(metadataObj.type) {
            let barCodeObject = videoPreviewLayer?.transformedMetadataObjectForMetadataObject(metadataObj)
            qrCodeFrameView?.frame = barCodeObject!.bounds
            if metadataObj.stringValue != nil {
                dispatch_async(dispatch_get_main_queue()) {
                    self.performSegueWithIdentifier("SendDataSegue", sender: self)
                }
            }
        }
    }

    if let metadataObj = metadataObj {
        override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?) {
            if segue.identifier == "SendDataSegue" {
                if let sendToDetailViewController = segue.destinationViewController as? DetailViewController {
                    sendToDetailViewController.viaSegue = metadataObj.stringValue
                }
            }
        }
    }
}
Hope you guys can help me.
Your metadataObj is declared inside the captureOutput() function. You need to move the declaration outside, to the same place where you have your captureSession etc.
For example:
var metadataObj: AVMetadataMachineReadableCodeObject?
Then assign to it inside captureOutput(), and you can use it in both prepareForSegue and captureOutput(). Check for nil before using it in prepareForSegue:
if let metadataObj = metadataObj {
    // use metadataObj
}
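Putting the answer together with the question's code, a minimal sketch of the three pieces (sticking to the question's Swift 2 era API; the qrCodeFrameView handling is omitted here for brevity):
// 1. Property at class level, next to captureSession etc.
var metadataObj: AVMetadataMachineReadableCodeObject?

// 2. Store the scanned object in the delegate callback before segueing.
func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]?, fromConnection connection: AVCaptureConnection!) {
    guard let readable = metadataObjects?.first as? AVMetadataMachineReadableCodeObject else { return }
    self.metadataObj = readable // keep it so prepareForSegue can read it
    if readable.stringValue != nil {
        dispatch_async(dispatch_get_main_queue()) {
            self.performSegueWithIdentifier("SendDataSegue", sender: self)
        }
    }
}

// 3. Read it back in prepareForSegue (a normal override, not wrapped in if-let).
override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?) {
    if segue.identifier == "SendDataSegue" {
        if let sendToDetailViewController = segue.destinationViewController as? DetailViewController {
            if let metadataObj = metadataObj {
                sendToDetailViewController.viaSegue = metadataObj.stringValue
            }
        }
    }
}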

iOS Swift Barcode Reading

I have barcode scanning working in my app. After a barcode is detected, I stop the capture session to allow processing of the barcode. However, after the barcode is processed I want the scanning controller to stay up and scan the next barcode. I had assumed that starting the capture session again (startRunning()) would do it, but the image stays frozen. How can I restart the capture session?
To stop the session, use this code:
self.session.stopRunning()
To begin it again, use this code:
self.session.startRunning()
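One caveat the snippets above don't mention: startRunning() and stopRunning() are blocking calls, and Apple's documentation recommends keeping them off the main thread. A minimal sketch:
DispatchQueue.global(qos: .userInitiated).async {
    // Blocking call while the session spins up; dispatching it here
    // avoids stalling the UI.
    self.session.startRunning()
}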
Here is all the code to implement a barcode scanner...
import UIKit
import AVFoundation

class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    let session : AVCaptureSession = AVCaptureSession()
    var previewLayer : AVCaptureVideoPreviewLayer!
    var highlightView : UIView = UIView()

    override func viewDidLoad() {
        super.viewDidLoad()
        // Allow the view to resize freely
        self.highlightView.autoresizingMask =
            UIViewAutoresizing.FlexibleTopMargin |
            UIViewAutoresizing.FlexibleBottomMargin |
            UIViewAutoresizing.FlexibleLeftMargin |
            UIViewAutoresizing.FlexibleRightMargin
        // Select the color you want for the completed scan reticle
        self.highlightView.layer.borderColor = UIColor.greenColor().CGColor
        self.highlightView.layer.borderWidth = 3
        // Add it to our controller's view as a subview.
        self.view.addSubview(self.highlightView)
        // For the sake of discussion this is the camera
        let device = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
        // Create a nilable NSError to hand off to the next method.
        // Make sure to use the "var" keyword and not "let"
        var error : NSError? = nil
        let input : AVCaptureDeviceInput? =
            AVCaptureDeviceInput.deviceInputWithDevice(device, error: &error)
            as? AVCaptureDeviceInput
        // If our input is not nil then add it to the session, otherwise we're kind of done!
        if input != nil {
            session.addInput(input)
        } else {
            // This is fine for a demo, do something real with this in your app. :)
            println(error)
        }
        let output = AVCaptureMetadataOutput()
        output.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
        session.addOutput(output)
        output.metadataObjectTypes = output.availableMetadataObjectTypes
        previewLayer =
            AVCaptureVideoPreviewLayer.layerWithSession(session)
            as! AVCaptureVideoPreviewLayer
        previewLayer.frame = self.view.bounds
        previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
        self.view.layer.addSublayer(previewLayer)
        // Start the scanner. You'll have to end it yourself later.
        session.startRunning()
    }

    // This is called when we find a known barcode type with the camera.
    func captureOutput(
        captureOutput: AVCaptureOutput!,
        didOutputMetadataObjects metadataObjects: [AnyObject]!,
        fromConnection connection: AVCaptureConnection!) {
        var highlightViewRect = CGRectZero
        var barCodeObject : AVMetadataObject!
        var detectionString : String!
        let barCodeTypes = [AVMetadataObjectTypeUPCECode,
                            AVMetadataObjectTypeCode39Code,
                            AVMetadataObjectTypeCode39Mod43Code,
                            AVMetadataObjectTypeEAN13Code,
                            AVMetadataObjectTypeEAN8Code,
                            AVMetadataObjectTypeCode93Code,
                            AVMetadataObjectTypeCode128Code,
                            AVMetadataObjectTypePDF417Code,
                            AVMetadataObjectTypeQRCode,
                            AVMetadataObjectTypeAztecCode]
        // The scanner is capable of capturing multiple 2-dimensional barcodes in one scan.
        for metadata in metadataObjects {
            for barcodeType in barCodeTypes {
                if metadata.type == barcodeType {
                    barCodeObject = self.previewLayer.transformedMetadataObjectForMetadataObject(metadata as! AVMetadataMachineReadableCodeObject)
                    highlightViewRect = barCodeObject.bounds
                    detectionString = (metadata as! AVMetadataMachineReadableCodeObject).stringValue
                    self.session.stopRunning()
                    self.alert(detectionString)
                    break
                }
            }
        }
        println(detectionString)
        self.highlightView.frame = highlightViewRect
        self.view.bringSubviewToFront(self.highlightView)
    }

    func alert(Code: String) {
        let actionSheet: UIAlertController =
            UIAlertController(
                title: "Barcode",
                message: "\(Code)",
                preferredStyle: UIAlertControllerStyle.Alert)
        // for alert add .Alert instead of .ActionSheet
        // start copy
        let firstAlertAction: UIAlertAction =
            UIAlertAction(
                title: "OK",
                style: UIAlertActionStyle.Default,
                handler: { (alertAction: UIAlertAction!) in
                    // action when pressed
                    self.session.startRunning()
                })
        actionSheet.addAction(firstAlertAction)
        // end copy
        self.presentViewController(actionSheet, animated: true, completion: nil)
    }
}
Here is the above code edited for Swift 2.0:
import UIKit
import AVFoundation

class BarCodeViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    let session : AVCaptureSession = AVCaptureSession()
    var previewLayer : AVCaptureVideoPreviewLayer!
    @IBOutlet weak var highlightView: UIView!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Select the color you want for the completed scan reticle
        self.highlightView.layer.borderColor = UIColor.greenColor().CGColor
        self.highlightView.layer.borderWidth = 3
        // Add it to our controller's view as a subview.
        self.view.addSubview(self.highlightView)
        // For the sake of discussion this is the camera
        let device = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
        // Create a nilable NSError to hand off to the next method.
        // Make sure to use the "var" keyword and not "let"
        var error : NSError? = nil
        var input: AVCaptureDeviceInput = AVCaptureDeviceInput()
        do {
            input = try AVCaptureDeviceInput(device: device) as AVCaptureDeviceInput
        } catch let myJSONError {
            print(myJSONError)
        }
        // If our input is not nil then add it to the session, otherwise we're kind of done!
        if input != AVCaptureDeviceInput() {
            session.addInput(input)
        } else {
            // This is fine for a demo, do something real with this in your app. :)
            print(error)
        }
        let output = AVCaptureMetadataOutput()
        output.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
        session.addOutput(output)
        output.metadataObjectTypes = output.availableMetadataObjectTypes
        previewLayer = AVCaptureVideoPreviewLayer(session: session)
        previewLayer.frame = self.view.bounds
        previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
        self.view.layer.addSublayer(previewLayer)
        // Start the scanner. You'll have to end it yourself later.
        session.startRunning()
    }

    // This is called when we find a known barcode type with the camera.
    func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]!, fromConnection connection: AVCaptureConnection!) {
        var highlightViewRect = CGRectZero
        var barCodeObject : AVMetadataObject!
        var detectionString : String!
        let barCodeTypes = [AVMetadataObjectTypeUPCECode,
                            AVMetadataObjectTypeCode39Code,
                            AVMetadataObjectTypeCode39Mod43Code,
                            AVMetadataObjectTypeEAN13Code,
                            AVMetadataObjectTypeEAN8Code,
                            AVMetadataObjectTypeCode93Code,
                            AVMetadataObjectTypeCode128Code,
                            AVMetadataObjectTypePDF417Code,
                            AVMetadataObjectTypeQRCode,
                            AVMetadataObjectTypeAztecCode
        ]
        // The scanner is capable of capturing multiple 2-dimensional barcodes in one scan.
        for metadata in metadataObjects {
            for barcodeType in barCodeTypes {
                if metadata.type == barcodeType {
                    barCodeObject = self.previewLayer.transformedMetadataObjectForMetadataObject(metadata as! AVMetadataMachineReadableCodeObject)
                    highlightViewRect = barCodeObject.bounds
                    detectionString = (metadata as! AVMetadataMachineReadableCodeObject).stringValue
                    self.session.stopRunning()
                    self.alert(detectionString)
                    break
                }
            }
        }
        print(detectionString)
        self.highlightView.frame = highlightViewRect
        self.view.bringSubviewToFront(self.highlightView)
    }

    func alert(Code: String) {
        let actionSheet: UIAlertController = UIAlertController(title: "Barcode", message: "\(Code)", preferredStyle: UIAlertControllerStyle.Alert)
        // for alert add .Alert instead of .ActionSheet
        // start copy
        let firstAlertAction: UIAlertAction = UIAlertAction(title: "OK", style: UIAlertActionStyle.Default, handler: {
            (alertAction: UIAlertAction!) in
            // action when pressed
            self.session.startRunning()
        })
        actionSheet.addAction(firstAlertAction)
        // end copy
        self.presentViewController(actionSheet, animated: true, completion: nil)
    }
}
Swift 3.0 version:
import UIKit
import AVFoundation

class BarCodeViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    let session : AVCaptureSession = AVCaptureSession()
    var previewLayer : AVCaptureVideoPreviewLayer!
    @IBOutlet weak var highlightView: UIView!

    override func viewDidLoad() {
        super.viewDidLoad()
        self.highlightView.layer.borderColor = UIColor.green.cgColor
        self.highlightView.layer.borderWidth = 3
        // Add it to our controller's view as a subview.
        self.view.addSubview(self.highlightView)
        // For the sake of discussion this is the camera
        let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
        // Create a nilable NSError to hand off to the next method.
        // Make sure to use the "var" keyword and not "let"
        var error : NSError? = nil
        var input: AVCaptureDeviceInput = AVCaptureDeviceInput()
        do {
            input = try AVCaptureDeviceInput(device: device) as AVCaptureDeviceInput
        } catch let myJSONError {
            print(myJSONError)
        }
        // If our input is not nil then add it to the session, otherwise we're kind of done!
        if input != AVCaptureDeviceInput() {
            session.addInput(input)
        } else {
            // This is fine for a demo, do something real with this in your app. :)
            print(error!)
        }
        let output = AVCaptureMetadataOutput()
        output.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        session.addOutput(output)
        output.metadataObjectTypes = output.availableMetadataObjectTypes
        previewLayer = AVCaptureVideoPreviewLayer(session: session)
        previewLayer.frame = self.view.bounds
        previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
        self.view.layer.addSublayer(previewLayer)
        // Start the scanner. You'll have to end it yourself later.
        session.startRunning()
    }

    // This is called when we find a known barcode type with the camera.
    @nonobjc func captureOutput(
        captureOutput: AVCaptureOutput!,
        didOutputMetadataObjects metadataObjects: [AnyObject]!,
        fromConnection connection: AVCaptureConnection!) {
        var highlightViewRect = CGRect()
        var barCodeObject : AVMetadataObject!
        var detectionString : String!
        let barCodeTypes = [AVMetadataObjectTypeUPCECode,
                            AVMetadataObjectTypeCode39Code,
                            AVMetadataObjectTypeCode39Mod43Code,
                            AVMetadataObjectTypeEAN13Code,
                            AVMetadataObjectTypeEAN8Code,
                            AVMetadataObjectTypeCode93Code,
                            AVMetadataObjectTypeCode128Code,
                            AVMetadataObjectTypePDF417Code,
                            AVMetadataObjectTypeQRCode,
                            AVMetadataObjectTypeAztecCode]
        // The scanner is capable of capturing multiple 2-dimensional barcodes in one scan.
        for metadata in metadataObjects {
            for barcodeType in barCodeTypes {
                if metadata.type == barcodeType {
                    barCodeObject = self.previewLayer.transformedMetadataObject(for: metadata as! AVMetadataMachineReadableCodeObject)
                    highlightViewRect = barCodeObject.bounds
                    detectionString = (metadata as! AVMetadataMachineReadableCodeObject).stringValue
                    self.session.stopRunning()
                    self.alert(Code: detectionString)
                    break
                }
            }
        }
        print(detectionString)
        self.highlightView.frame = highlightViewRect
        self.view.bringSubview(toFront: self.highlightView)
    }

    func alert(Code: String) {
        let actionSheet: UIAlertController = UIAlertController(title: "Barcode", message: "\(Code)", preferredStyle: UIAlertControllerStyle.alert)
        // for alert add .Alert instead of .ActionSheet
        // start copy
        let firstAlertAction: UIAlertAction = UIAlertAction(title: "OK", style: UIAlertActionStyle.default, handler: { (alertAction: UIAlertAction!) in
            // action when pressed
            self.session.startRunning()
        })
        actionSheet.addAction(firstAlertAction)
        // end copy
        self.present(actionSheet, animated: true, completion: nil)
    }
}
Swift 5 update:
func captureOutput(
    captureOutput: AVCaptureOutput,
    didOutputMetadataObjects metadataObjects: [AnyObject],
    fromConnection connection: AVCaptureConnection,
    barCodeType: [AVMetadataObject.ObjectType]) -> String? {
    var detectionString: String?
    var highlightViewRect = CGRect()
    let barCodeTypes = barCodeType
    if let metadataObject = metadataObjects.first {
        for barcodeType in barCodeTypes {
            if metadataObject.type == barcodeType {
                guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return String() }
                guard let barCodeObject = self.previewLayer.transformedMetadataObject(for: readableObject) else { return String() }
                highlightViewRect = barCodeObject.bounds
                detectionString = readableObject.stringValue
                self.captureSession.stopRunning()
                break
            }
        }
    }
    self.qrView.frame = highlightViewRect
    self.view.bringSubviewToFront(self.qrView)
    return detectionString
}
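For completeness, one way the helper above might be called from the actual Swift 5 delegate method (a sketch: it assumes the surrounding class provides previewLayer, captureSession, and qrView as the helper expects, and the list of accepted types here is just an example):
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    // [AVMetadataObject] bridges to [AnyObject], so the helper can be called directly.
    if let code = captureOutput(captureOutput: output,
                                didOutputMetadataObjects: metadataObjects,
                                fromConnection: connection,
                                barCodeType: [.ean13, .code128, .qr]) {
        print("Scanned:", code)
    }
}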
