Access data outside its method in Swift - iOS

I am having trouble with my Swift code. My prepareForSegue lives outside the method that produces the data, so the data created inside that method cannot be reached from prepareForSegue. How can I make that data available outside the method so it works in my prepareForSegue?
I am including all of my code, since all of it may be useful :-)
The error is marked with ////-- --//// near the bottom of the code. It says: use of unresolved identifier "metadataObj".
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
@IBOutlet weak var messageLabel: UILabel!
var captureSession:AVCaptureSession?
var videoPreviewLayer:AVCaptureVideoPreviewLayer?
var qrCodeFrameView:UIView?
let supportedBarCodes = [AVMetadataObjectTypeQRCode, AVMetadataObjectTypeCode128Code, AVMetadataObjectTypeCode39Code, AVMetadataObjectTypeCode93Code, AVMetadataObjectTypeUPCECode, AVMetadataObjectTypePDF417Code, AVMetadataObjectTypeEAN13Code, AVMetadataObjectTypeAztecCode]
override func viewDidLoad() {
super.viewDidLoad()
let captureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
do {
let input = try AVCaptureDeviceInput(device: captureDevice)
captureSession = AVCaptureSession()
captureSession?.addInput(input)
let captureMetadataOutput = AVCaptureMetadataOutput()
captureSession?.addOutput(captureMetadataOutput)
captureMetadataOutput.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
captureMetadataOutput.metadataObjectTypes = supportedBarCodes
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
videoPreviewLayer?.frame = view.layer.bounds
view.layer.addSublayer(videoPreviewLayer!)
captureSession?.startRunning()
view.bringSubviewToFront(messageLabel)
qrCodeFrameView = UIView()
if let qrCodeFrameView = qrCodeFrameView {
qrCodeFrameView.layer.borderColor = UIColor.greenColor().CGColor
qrCodeFrameView.layer.borderWidth = 2
view.addSubview(qrCodeFrameView)
view.bringSubviewToFront(qrCodeFrameView)
}
} catch {
print(error)
return
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]?, fromConnection connection: AVCaptureConnection!) {
if metadataObjects == nil || metadataObjects!.count == 0 {
qrCodeFrameView?.frame = CGRectZero
messageLabel.text = "No barcode/QR code is detected"
return
}
let metadataObj = metadataObjects![0] as! AVMetadataMachineReadableCodeObject
if supportedBarCodes.contains(metadataObj.type) {
let barCodeObject = videoPreviewLayer?.transformedMetadataObjectForMetadataObject(metadataObj)
qrCodeFrameView?.frame = barCodeObject!.bounds
if metadataObj.stringValue != nil {
dispatch_async(dispatch_get_main_queue()){
self.performSegueWithIdentifier("SendDataSegue", sender: self)
}
}
}
}
override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?) {
if segue.identifier == "SendDataSegue" {
if let sendToDetailViewController = segue.destinationViewController as? DetailViewController {
sendToDetailViewController.viaSegue = metadataObj.stringValue ////-- use of unresolved identifier "metadataObj" --////
}
}
}
}
Hope you guys can help me.

Your metadataObj is declared inside the captureOutput() function. You need to move the declaration outside, to the same place where you declare captureSession etc.
For example:
var metadataObj: AVMetadataMachineReadableCodeObject?
Then you can use it in both prepareForSegue and captureOutput(). Check it for nil before using it in prepareForSegue:
if let metadataObj = metadataObj {
// use metadataObj
}
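Putting it together - a minimal sketch of just the parts that change (Swift 2-era API to match the question; DetailViewController and viaSegue come from the asker's project):
import UIKit
import AVFoundation

class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
    // Moved to class scope so both methods can see it
    var metadataObj: AVMetadataMachineReadableCodeObject?

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]?, fromConnection connection: AVCaptureConnection!) {
        // Assign to the property instead of declaring a local constant
        metadataObj = metadataObjects?.first as? AVMetadataMachineReadableCodeObject
        if metadataObj?.stringValue != nil {
            dispatch_async(dispatch_get_main_queue()) {
                self.performSegueWithIdentifier("SendDataSegue", sender: self)
            }
        }
    }

    override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject?) {
        if segue.identifier == "SendDataSegue" {
            if let sendToDetailViewController = segue.destinationViewController as? DetailViewController {
                // The property may still be nil if nothing has been scanned yet
                if let metadataObj = metadataObj {
                    sendToDetailViewController.viaSegue = metadataObj.stringValue
                }
            }
        }
    }
}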

Related

Why does scanning a 1D barcode only get a result in the middle of the view (Swift)?

I have a question about scanning 1D barcodes. I set a yellow border around the scanning-region view, but I only get a successful result when the barcode is in the middle of the region.
It works fine when I put my 1D barcode in the green region shown in the following image; when I put it in the red region, nothing happens. My app supports only the landscape-right and landscape-left orientations.
What's wrong with my code?
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate, UIAlertViewDelegate {
var scanRectView:UIView!
var device:AVCaptureDevice!
var input:AVCaptureDeviceInput!
var output:AVCaptureMetadataOutput!
var session:AVCaptureSession!
var preview:AVCaptureVideoPreviewLayer!
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewDidLayoutSubviews() {
self.configureVideoOrientation()
}
fileprivate func configureVideoOrientation() {
let previewLayer = self.preview
if let connection = previewLayer?.connection {
let orientation = UIDevice.current.orientation
if connection.isVideoOrientationSupported, let videoOrientation = AVCaptureVideoOrientation(rawValue: orientation.rawValue) {
previewLayer?.frame = self.view.bounds
connection.videoOrientation = videoOrientation
}
}
}
@IBAction func btnClicked(_ sender: Any) {
do{
self.device = AVCaptureDevice.default(for: AVMediaType.video)
self.input = try AVCaptureDeviceInput(device: device)
self.output = AVCaptureMetadataOutput()
output.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
self.session = AVCaptureSession()
if UIScreen.main.bounds.size.height < 500 {
self.session.sessionPreset = AVCaptureSession.Preset.vga640x480
}else{
self.session.sessionPreset = AVCaptureSession.Preset.high
}
self.session.addInput(self.input)
self.session.addOutput(self.output)
self.output.metadataObjectTypes = [.qr, .ean8, .ean13, .pdf417, .code128, .aztec, .code39, .code39Mod43, .code93, .dataMatrix, .face, .interleaved2of5, .itf14, .upce]
let windowSize = UIScreen.main.bounds.size
let scanSize = CGSize(width:windowSize.width*1/3, height:windowSize.width*1/3)
var scanRect = CGRect(x:(windowSize.width-scanSize.width)/2,
y:(windowSize.height-scanSize.height)/2,
width:scanSize.width, height:scanSize.height)
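// rectOfInterest is expressed in the metadata output's normalized coordinate
// space ((0,0) top-left to (1,1) bottom-right of the unrotated output),
// which is why x/y and width/height are swapped in the remapping below.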
scanRect = CGRect(x:scanRect.origin.y/windowSize.height,
y:scanRect.origin.x/windowSize.width,
width:scanRect.size.height/windowSize.height,
height:scanRect.size.width/windowSize.width);
self.output.rectOfInterest = scanRect
self.preview = AVCaptureVideoPreviewLayer(session:self.session)
self.preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.preview.frame = UIScreen.main.bounds
self.view.layer.insertSublayer(self.preview, at:0)
self.scanRectView = UIView();
self.view.addSubview(self.scanRectView)
self.scanRectView.frame = CGRect(x:0, y:0, width:scanSize.width,
height:scanSize.height);
self.scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX,
y:UIScreen.main.bounds.midY)
self.scanRectView.layer.borderColor = UIColor.yellow.cgColor
self.scanRectView.layer.borderWidth = 5;
self.session.startRunning()
do {
try self.device!.lockForConfiguration()
} catch _ {
NSLog("Error: lockForConfiguration.");
}
self.device!.videoZoomFactor = 1.5
self.device!.unlockForConfiguration()
}catch _ {
}
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
var stringValue:String?
if metadataObjects.count > 0 {
let metadataObject = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
stringValue = metadataObject.stringValue
if stringValue != nil{
self.session.stopRunning()
}
}
self.session.stopRunning()
}
}

iOS Swift - How to use rectOfInterest correctly (barcode scanner)

I have an iOS app which scans a barcode.
Now I want to define a specific scan area. For this purpose I am using the rectOfInterest property and the metadataOutputRectOfInterest method.
The problem I face:
When I use just the code below, nothing is scanned; if I remove the barcodeAreaView, scanning works fine all over the display.
class BarcodeReaderViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
// Camera view
var cameraView: AVCaptureVideoPreviewLayer?
// AV capture session and dispatch queue
let captureSession = AVCaptureSession()
let sessionQueue = DispatchQueue(label: AVCaptureSession.self.description(), attributes: [], target: nil)
var barcodeAreaView: UIView?
let supportedCodeTypes = [
AVMetadataObjectTypeEAN8Code,
AVMetadataObjectTypeEAN13Code,
AVMetadataObjectTypeUPCECode,
AVMetadataObjectTypeCode39Code,
AVMetadataObjectTypeCode93Code,
AVMetadataObjectTypeCode128Code,
AVMetadataObjectTypeCode39Mod43Code,
AVMetadataObjectTypeInterleaved2of5Code
]
let metadataOutput = AVCaptureMetadataOutput()
var barcodeArea:CGRect!
override func viewDidLoad() {
super.viewDidLoad()
let width = 250
let height = 100
print(self.view.frame.size.width)
let xPos = (CGFloat(self.view.frame.size.width) / CGFloat(2)) - (CGFloat(width) / CGFloat(2))
let yPos = (CGFloat(self.view.frame.size.height) / CGFloat(2)) - (CGFloat(height) / CGFloat(2))
barcodeArea = CGRect(x: Int(xPos), y: Int(yPos), width: width, height: height)
barcodeAreaView = UIView()
barcodeAreaView?.layer.borderColor = UIColor.red.cgColor
barcodeAreaView?.layer.borderWidth = 1
barcodeAreaView?.frame = barcodeArea
view.addSubview(barcodeAreaView!)
captureSession.beginConfiguration()
let videoDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
if videoDevice != nil {
let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice)
if videoDeviceInput != nil {
if captureSession.canAddInput(videoDeviceInput) {
captureSession.addInput(videoDeviceInput)
}
}
if captureSession.canAddOutput(metadataOutput) {
captureSession.addOutput(metadataOutput)
metadataOutput.metadataObjectTypes = supportedCodeTypes
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
cameraView = AVCaptureVideoPreviewLayer(session: captureSession)
cameraView?.videoGravity = AVLayerVideoGravityResizeAspectFill
cameraView?.frame = view.layer.bounds
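// NOTE: per the update at the end of this question, setting rectOfInterest
// on the next line - before the session is running - is what breaks scanning.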
metadataOutput.rectOfInterest = cameraView!.metadataOutputRectOfInterest(for: barcodeArea)
view.layer.addSublayer(cameraView!)
}
}
captureSession.commitConfiguration()
view.bringSubview(toFront: barcodeAreaView!)
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
// Start AV capture session
sessionQueue.async {
self.captureSession.startRunning()
}
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
// Stop AV capture session
sessionQueue.async {
self.captureSession.stopRunning()
}
}
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [Any]!, from connection: AVCaptureConnection!) {
if let metadataObject = metadataObjects.first {
let readableObject = metadataObject as! AVMetadataMachineReadableCodeObject
let message = readableObject.stringValue
print(message)
}
}
}
I already took a look at this question:
How do I use the metadataOutputRectOfInterestForRect method and rectOfInterest property to scan a specific area? (QR Code)
The problem I face there: the scan is a bit laggy, and my navigationController is not working (because the listener awaits a scan).
UPDATE:
I solved the issue, as suggested in this answer:
https://stackoverflow.com/a/37603743/2319900
I added the following line right after my self.captureSession.startRunning():
metadataOutput.rectOfInterest = cameraView!.metadataOutputRectOfInterest(for: barcodeArea)
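In context, that ordering looks roughly like this (a sketch using the same names as the question's code; the conversion runs on the main queue once the session is up):
override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
    sessionQueue.async {
        self.captureSession.startRunning()
        DispatchQueue.main.async {
            // Now that the session is running, the preview layer can map the
            // view-space rect into the output's normalized coordinate space.
            self.metadataOutput.rectOfInterest =
                self.cameraView!.metadataOutputRectOfInterest(for: self.barcodeArea)
        }
    }
}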

Swift iOS - can someone explain why the output is rotated 90 degrees?

I am following this link: How to apply filter to Video real-time using Swift, and for whatever reason the UIImageOrientation is rotated 90 degrees to the left. I have tried to rectify this by setting the orientation to Up, but it still appears the same. Does anyone have any idea why? I'm not sure if it is the image, the preview layer, or the image view that is causing this.
Here is the code:
import UIKit
import AVFoundation
import CoreMedia
let noirFilter = CIFilter(name: "CIPhotoEffectNoir")!
let sepiaFilter = CIFilter(name: "CISepiaTone")!
let vignetteEffect = CIFilter(name: "CIVignetteEffect")!
let Filters = [noirFilter,sepiaFilter,vignetteEffect]
class TestViewController:UIViewController,AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var imageView: UIImageView!
@IBOutlet weak var Photo: UIButton!
var sessionQueue: dispatch_queue_t!
var stillImageOutput: AVCaptureStillImageOutput?
var videoDeviceInput: AVCaptureDeviceInput?
var buttonTapped = false
override func viewDidLoad() {
super.viewDidLoad()
sessionQueue = dispatch_queue_create("com.bradleymackey.Backchat.sessionQueue",DISPATCH_QUEUE_SERIAL)
let captureSession = AVCaptureSession()
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
var videoDeviceInput: AVCaptureDeviceInput?
do
{
let input = try AVCaptureDeviceInput(device: backCamera)
captureSession.addInput(input)
self.videoDeviceInput = videoDeviceInput
}
catch
{
print("can't access camera")
return
}
// although we don't use this, it's required to get captureOutput invoked
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
imageView.contentMode = UIViewContentMode.ScaleAspectFill
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: dispatch_queue_create("sample buffer delegate", DISPATCH_QUEUE_SERIAL))
if captureSession.canAddOutput(videoOutput)
{
captureSession.addOutput(videoOutput)
}
captureSession.startRunning()
let stillImageOutput: AVCaptureStillImageOutput = AVCaptureStillImageOutput()
if captureSession.canAddOutput(stillImageOutput) {
stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
captureSession.addOutput(stillImageOutput)
self.stillImageOutput = stillImageOutput
}
}
override func viewWillLayoutSubviews() {
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
{
let filter = Filters[1]
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let cameraImage = CIImage(CVPixelBuffer: pixelBuffer!)
filter.setValue(cameraImage, forKey: kCIInputImageKey)
let filteredImage = UIImage(CIImage: filter.valueForKey(kCIOutputImageKey) as! CIImage!)
var newImage = UIImage(CIImage: filter.valueForKey(kCIOutputImageKey) as! CIImage!, scale:1.0, orientation: .Up)
dispatch_async(dispatch_get_main_queue())
{
self.imageView.image = newImage
}
}
@IBAction func snapStill(sender: UIButton) {
print("snapStillImage")
let previewController = PreviewViewController(nibName: "PreviewViewController", bundle: nil)
previewController.media = Media.Photo(image: imageView.image!)
previewController.isFrontCamera = false
self.presentViewController(previewController, animated: true, completion: nil)
}
override func viewDidLayoutSubviews()
{
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
This happens because the orientation is never set on the connection.
You need to set the orientation in captureOutput():
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
{
connection.videoOrientation = .portrait
...
This should solve your problem.
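Applied to the question's Swift 2-era code, the whole callback would look roughly like this (a sketch; .Portrait is the Swift 2 spelling of the answer's .portrait, and the filter setup is unchanged from the question):
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    // Fix the orientation before the buffer is turned into an image
    connection.videoOrientation = .Portrait
    let filter = Filters[1]
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    let cameraImage = CIImage(CVPixelBuffer: pixelBuffer)
    filter.setValue(cameraImage, forKey: kCIInputImageKey)
    let newImage = UIImage(CIImage: filter.valueForKey(kCIOutputImageKey) as! CIImage, scale: 1.0, orientation: .Up)
    dispatch_async(dispatch_get_main_queue()) {
        self.imageView.image = newImage
    }
}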

Double Tap or Use Button To Switch Camera From Back to Front (Xcode 8, Swift 3)

So lately I have been trying to implement switching the camera view from the back to the front camera in Swift 3, so far with no luck.
Currently, my default view is from the back camera - I can take pictures with it and then retake. But can anyone help me and show how to either double-tap the screen to switch cameras or simply use the assigned button to switch them? Thank you!
import UIKit
import AVFoundation
import FirebaseDatabase
class CameraView: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {
override var prefersStatusBarHidden: Bool {
return true
}
var captureSession : AVCaptureSession!
var stillImageOutput : AVCaptureStillImageOutput!
var previewLayer : AVCaptureVideoPreviewLayer!
@IBOutlet var cameraView: UIView!
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view.
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
previewLayer?.frame = cameraView.bounds
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
captureSession = AVCaptureSession()
captureSession?.sessionPreset = AVCaptureSessionPreset1920x1080
var backCamera = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
var error : NSError?
do {
var input = try! AVCaptureDeviceInput(device: backCamera)
if (error == nil && captureSession?.canAddInput(input) != nil) {
captureSession?.addInput(input)
stillImageOutput = AVCaptureStillImageOutput()
stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
if (captureSession?.canAddOutput(stillImageOutput) != nil) {
captureSession?.addOutput(stillImageOutput)
previewLayer = AVCaptureVideoPreviewLayer (session: captureSession)
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.portrait
cameraView.layer.addSublayer(previewLayer)
captureSession?.startRunning() }
}
} catch {
}
}
@IBOutlet var tempImageView: UIImageView!
@IBAction func didPressTakePhoto(_ sender: UIButton) {
if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
videoConnection.videoOrientation = AVCaptureVideoOrientation.portrait
stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: {
(sampleBuffer, error) in
if sampleBuffer != nil {
var imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
var dataProvider = CGDataProvider.init(data: imageData as! CFData)
var cgImageRef = CGImage.init(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
var image = UIImage (cgImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.right)
self.tempImageView.image = image
self.tempImageView.isHidden = false
}
})
}
}
var didTakePhoto = Bool()
@IBAction func didPressTakeAnother(_ sender: UIButton) {
if didTakePhoto == true {
tempImageView.isHidden = true
didTakePhoto = false
} else {
captureSession?.startRunning()
didTakePhoto = true
}
}
}
I don't see any problems here - here is a working solution:
import Foundation
import UIKit
import AVFoundation
class MainViewController: UIViewController {
var tempImage: UIImageView?
var captureSession: AVCaptureSession?
var stillImageOutput: AVCaptureStillImageOutput?
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var currentCaptureDevice: AVCaptureDevice?
var usingFrontCamera = false
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
loadCamera()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
videoPreviewLayer!.frame = self.cameraPreviewSurface.bounds
}
@IBAction func switchButtonAction(_ sender: Any) {
usingFrontCamera = !usingFrontCamera
loadCamera()
}
func getFrontCamera() -> AVCaptureDevice?{
let videoDevices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo)
for device in videoDevices!{
let device = device as! AVCaptureDevice
if device.position == AVCaptureDevicePosition.front {
return device
}
}
return nil
}
func getBackCamera() -> AVCaptureDevice{
return AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
}
func loadCamera() {
if(captureSession == nil){
captureSession = AVCaptureSession()
captureSession!.sessionPreset = AVCaptureSessionPresetPhoto
}
var error: NSError?
var input: AVCaptureDeviceInput!
currentCaptureDevice = (usingFrontCamera ? getFrontCamera() : getBackCamera())
do {
input = try AVCaptureDeviceInput(device: currentCaptureDevice)
} catch let error1 as NSError {
error = error1
input = nil
print(error!.localizedDescription)
}
for i : AVCaptureDeviceInput in (self.captureSession?.inputs as! [AVCaptureDeviceInput]){
self.captureSession?.removeInput(i)
}
if error == nil && captureSession!.canAddInput(input) {
captureSession!.addInput(input)
stillImageOutput = AVCaptureStillImageOutput()
stillImageOutput?.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
if captureSession!.canAddOutput(stillImageOutput) {
captureSession!.addOutput(stillImageOutput)
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer!.videoGravity = AVLayerVideoGravityResizeAspectFill
videoPreviewLayer!.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
//self.cameraPreviewSurface.layer.sublayers?.forEach { $0.removeFromSuperlayer() }
self.cameraPreviewSurface.layer.addSublayer(videoPreviewLayer!)
DispatchQueue.main.async {
self.captureSession!.startRunning()
}
}
}
}
}
Some notes:
cameraPreviewSurface - this is the UIView where the camera preview will show.
Don't reassign the session, and don't just add the new input - remove the existing inputs before adding the new one.
P.S. Code done with Swift 3.0.1 / Xcode 8.1.
Cheers )
Xcode version: 10.1 (10B61)
Swift version: 4.2
Change AVCaptureSession Capture Source
Building on Stepan Maksymov's solution, we can simplify it by replacing the captureSession.inputs handling.
First, create an IBAction and connect it in the view controller to change the camera view:
@IBAction private func changeCamera(_ cameraButton: UIButton) {
usingFrontCamera = !usingFrontCamera
do{
captureSession.removeInput(captureSession.inputs.first!)
if(usingFrontCamera){
captureDevice = getFrontCamera()
}else{
captureDevice = getBackCamera()
}
let captureDeviceInput1 = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput1)
}catch{
print(error.localizedDescription)
}
}
Second, copy the simplified AVCaptureDevice setup (again per Stepan Maksymov's answer):
func getFrontCamera() -> AVCaptureDevice? {
return AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .front).devices.first
}
func getBackCamera() -> AVCaptureDevice? {
return AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices.first
}
You can replace the @IBAction with a plain function, like so.
func changeCamera(){
usingFrontCamera = !usingFrontCamera
do{
captureSession.removeInput(captureSession.inputs.first!)
if(usingFrontCamera){
captureDevice = getFrontCamera()
}else{
captureDevice = getBackCamera()
}
let captureDeviceInput1 = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput1)
}catch{
print(error.localizedDescription)
}
}
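For the double-tap part of the original question (the posted answers only wire a button), one option is a plain UITapGestureRecognizer - a minimal sketch, assuming the changeCamera() function above:
// In viewDidLoad: wire a double tap to the camera switch.
let doubleTap = UITapGestureRecognizer(target: self, action: #selector(handleDoubleTap))
doubleTap.numberOfTapsRequired = 2
view.addGestureRecognizer(doubleTap)

// Elsewhere in the class:
@objc func handleDoubleTap() {
    changeCamera()
}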
In addition to Stepan Maksymov's post, I suggest adding this function:
func stopCaptureSession () {
self.captureSession.stopRunning()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
self.captureSession.removeInput(input)
}
}
}
And call it in place of these lines from his post:
for i : AVCaptureDeviceInput in (self.captureSession?.inputs as! [AVCaptureDeviceInput]){
self.captureSession?.removeInput(i)
}
This way the cameras will switch more quickly.

iOS Swift Barcode Reading

I have barcode scanning working in my app. After a barcode is detected I stop the capture session to allow processing of the barcode. However, after the barcode is processed I want the scanning controller to stay up and scan the next barcode. I had assumed that starting the capture session again (startRunning()) would do it, but the image stays frozen. How can I start the capture session again?
To stop the session, use this code:
self.session.stopRunning()
To begin it again, use this code:
self.session.startRunning()
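One caveat: startRunning() is a blocking call that can take some time, so if you restart the session from a UI callback, consider dispatching it onto a background queue - a minimal sketch (modern Swift; the queue label is illustrative):
let sessionQueue = DispatchQueue(label: "scanner.session")
sessionQueue.async {
    self.session.startRunning()
}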
Here is all the code to implement a barcode scanner...
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
let session : AVCaptureSession = AVCaptureSession()
var previewLayer : AVCaptureVideoPreviewLayer!
var highlightView : UIView = UIView()
override func viewDidLoad() {
super.viewDidLoad()
// Allow the view to resize freely
self.highlightView.autoresizingMask =
UIViewAutoresizing.FlexibleTopMargin |
UIViewAutoresizing.FlexibleBottomMargin |
UIViewAutoresizing.FlexibleLeftMargin |
UIViewAutoresizing.FlexibleRightMargin
// Select the color you want for the completed scan reticle
self.highlightView.layer.borderColor = UIColor.greenColor().CGColor
self.highlightView.layer.borderWidth = 3
// Add it to our controller's view as a subview.
self.view.addSubview(self.highlightView)
// For the sake of discussion this is the camera
let device = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
// Create a nilable NSError to hand off to the next method.
// Make sure to use the "var" keyword and not "let"
var error : NSError? = nil
let input : AVCaptureDeviceInput? =
AVCaptureDeviceInput.deviceInputWithDevice(device, error: &error)
as? AVCaptureDeviceInput
// If our input is not nil then add it to the session, otherwise we're kind of done!
if input != nil {
session.addInput(input)
} else {
// This is fine for a demo, do something real with this in your app. :)
println(error)
}
let output = AVCaptureMetadataOutput()
output.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
session.addOutput(output)
output.metadataObjectTypes = output.availableMetadataObjectTypes
previewLayer =
AVCaptureVideoPreviewLayer.layerWithSession(session)
as! AVCaptureVideoPreviewLayer
previewLayer.frame = self.view.bounds
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
self.view.layer.addSublayer(previewLayer)
// Start the scanner. You'll have to end it yourself later.
session.startRunning()
}
// This is called when we find a known barcode type with the camera.
func captureOutput(
captureOutput: AVCaptureOutput!,
didOutputMetadataObjects metadataObjects: [AnyObject]!,
fromConnection connection: AVCaptureConnection!) {
var highlightViewRect = CGRectZero
var barCodeObject : AVMetadataObject!
var detectionString : String!
let barCodeTypes = [AVMetadataObjectTypeUPCECode,
AVMetadataObjectTypeCode39Code,
AVMetadataObjectTypeCode39Mod43Code,
AVMetadataObjectTypeEAN13Code,
AVMetadataObjectTypeEAN8Code,
AVMetadataObjectTypeCode93Code,
AVMetadataObjectTypeCode128Code,
AVMetadataObjectTypePDF417Code,
AVMetadataObjectTypeQRCode,
AVMetadataObjectTypeAztecCode]
// The scanner is capable of capturing multiple 2-dimensional barcodes in one scan.
for metadata in metadataObjects {
for barcodeType in barCodeTypes {
if metadata.type == barcodeType {
barCodeObject = self.previewLayer.transformedMetadataObjectForMetadataObject(metadata as! AVMetadataMachineReadableCodeObject)
highlightViewRect = barCodeObject.bounds
detectionString = (metadata as! AVMetadataMachineReadableCodeObject).stringValue
self.session.stopRunning()
self.alert(detectionString)
break
}
}
}
println(detectionString)
self.highlightView.frame = highlightViewRect
self.view.bringSubviewToFront(self.highlightView)
}
func alert(Code: String){
let actionSheet:UIAlertController =
UIAlertController(
title: "Barcode",
message: "\(Code)",
preferredStyle: UIAlertControllerStyle.Alert)
// for alert add .Alert instead of .Action Sheet
// start copy
let firstAlertAction:UIAlertAction =
UIAlertAction(
title: "OK",
style: UIAlertActionStyle.Default,
handler: { (alertAction: UIAlertAction!) in
// action when pressed
self.session.startRunning()
})
actionSheet.addAction(firstAlertAction)
// end copy
self.presentViewController(actionSheet, animated: true, completion: nil)
}
}
The above code, edited for Swift 2.0:
import UIKit
import AVFoundation
class BarCodeViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
let session : AVCaptureSession = AVCaptureSession()
var previewLayer : AVCaptureVideoPreviewLayer!
@IBOutlet weak var highlightView: UIView!
override func viewDidLoad() {
super.viewDidLoad()
// Select the color you want for the completed scan reticle
self.highlightView.layer.borderColor = UIColor.greenColor().CGColor
self.highlightView.layer.borderWidth = 3
// Add it to our controller's view as a subview.
self.view.addSubview(self.highlightView)
// For the sake of discussion this is the camera
let device = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
// Create a nilable NSError to hand off to the next method.
// Make sure to use the "var" keyword and not "let"
var error : NSError? = nil
var input: AVCaptureDeviceInput = AVCaptureDeviceInput()
do {
input = try AVCaptureDeviceInput(device: device) as AVCaptureDeviceInput
} catch let myJSONError {
print(myJSONError)
}
// If our input is not nil then add it to the session, otherwise we're kind of done!
if input != AVCaptureDeviceInput() {
session.addInput(input)
}
else {
// This is fine for a demo, do something real with this in your app. :)
print(error)
}
let output = AVCaptureMetadataOutput()
output.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
session.addOutput(output)
output.metadataObjectTypes = output.availableMetadataObjectTypes
previewLayer = AVCaptureVideoPreviewLayer(session: session)
previewLayer.frame = self.view.bounds
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
self.view.layer.addSublayer(previewLayer)
// Start the scanner. You'll have to end it yourself later.
session.startRunning()
}
// This is called when we find a known barcode type with the camera.
func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]!, fromConnection connection: AVCaptureConnection!) {
var highlightViewRect = CGRectZero
var barCodeObject : AVMetadataObject!
var detectionString : String!
let barCodeTypes = [AVMetadataObjectTypeUPCECode,
AVMetadataObjectTypeCode39Code,
AVMetadataObjectTypeCode39Mod43Code,
AVMetadataObjectTypeEAN13Code,
AVMetadataObjectTypeEAN8Code,
AVMetadataObjectTypeCode93Code,
AVMetadataObjectTypeCode128Code,
AVMetadataObjectTypePDF417Code,
AVMetadataObjectTypeQRCode,
AVMetadataObjectTypeAztecCode
]
// The scanner is capable of capturing multiple 2-dimensional barcodes in one scan.
for metadata in metadataObjects {
for barcodeType in barCodeTypes {
if metadata.type == barcodeType {
barCodeObject = self.previewLayer.transformedMetadataObjectForMetadataObject(metadata as! AVMetadataMachineReadableCodeObject)
highlightViewRect = barCodeObject.bounds
detectionString = (metadata as! AVMetadataMachineReadableCodeObject).stringValue
self.session.stopRunning()
self.alert(detectionString)
break
}
}
}
print(detectionString)
self.highlightView.frame = highlightViewRect
self.view.bringSubviewToFront(self.highlightView)
}
func alert(Code: String){
let actionSheet:UIAlertController = UIAlertController(title: "Barcode", message: "\(Code)", preferredStyle: UIAlertControllerStyle.Alert)
// for alert add .Alert instead of .Action Sheet
// start copy
let firstAlertAction:UIAlertAction = UIAlertAction(title: "OK", style: UIAlertActionStyle.Default, handler: {
(alertAction:UIAlertAction!) in
// action when pressed
self.session.startRunning()
})
actionSheet.addAction(firstAlertAction)
// end copy
self.presentViewController(actionSheet, animated: true, completion: nil)
}
}
Swift 3.0 version:
import UIKit
import AVFoundation
class BarCodeViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
let session : AVCaptureSession = AVCaptureSession()
var previewLayer : AVCaptureVideoPreviewLayer!
@IBOutlet weak var highlightView: UIView!
override func viewDidLoad() {
super.viewDidLoad()
self.highlightView.layer.borderColor = UIColor.green.cgColor
self.highlightView.layer.borderWidth = 3
// Add it to our controller's view as a subview.
self.view.addSubview(self.highlightView)
// For the sake of discussion this is the camera
let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
// Create a nilable NSError to hand off to the next method.
// Make sure to use the "var" keyword and not "let"
var error : NSError? = nil
var input: AVCaptureDeviceInput = AVCaptureDeviceInput()
do {
input = try AVCaptureDeviceInput(device: device) as AVCaptureDeviceInput
} catch let myJSONError {
print(myJSONError)
}
// If our input is not nil then add it to the session, otherwise we're kind of done!
if input != AVCaptureDeviceInput() {
session.addInput(input)
}
else {
// This is fine for a demo, do something real with this in your app. :)
print(error!)
}
let output = AVCaptureMetadataOutput()
output.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
session.addOutput(output)
output.metadataObjectTypes = output.availableMetadataObjectTypes
previewLayer = AVCaptureVideoPreviewLayer(session: session)
previewLayer.frame = self.view.bounds
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
self.view.layer.addSublayer(previewLayer)
// Start the scanner. You'll have to end it yourself later.
session.startRunning()
}
// This is called when we find a known barcode type with the camera.
@nonobjc func captureOutput(
captureOutput: AVCaptureOutput!,
didOutputMetadataObjects metadataObjects: [AnyObject]!,
fromConnection connection: AVCaptureConnection!) {
var highlightViewRect = CGRect()
var barCodeObject : AVMetadataObject!
var detectionString : String!
let barCodeTypes = [AVMetadataObjectTypeUPCECode,
AVMetadataObjectTypeCode39Code,
AVMetadataObjectTypeCode39Mod43Code,
AVMetadataObjectTypeEAN13Code,
AVMetadataObjectTypeEAN8Code,
AVMetadataObjectTypeCode93Code,
AVMetadataObjectTypeCode128Code,
AVMetadataObjectTypePDF417Code,
AVMetadataObjectTypeQRCode,
AVMetadataObjectTypeAztecCode]
// The scanner is capable of capturing multiple 2-dimensional barcodes in one scan.
for metadata in metadataObjects {
for barcodeType in barCodeTypes {
if metadata.type == barcodeType {
barCodeObject = self.previewLayer.transformedMetadataObject(for: metadata as! AVMetadataMachineReadableCodeObject)
highlightViewRect = barCodeObject.bounds
detectionString = (metadata as! AVMetadataMachineReadableCodeObject).stringValue
self.session.stopRunning()
self.alert(Code: detectionString)
break
}
}
}
print(detectionString)
self.highlightView.frame = highlightViewRect
self.view.bringSubview(toFront: self.highlightView)
}
func alert(Code: String){
let actionSheet:UIAlertController = UIAlertController(title: "Barcode", message: "\(Code)", preferredStyle: UIAlertControllerStyle.alert)
// for alert add .Alert instead of .Action Sheet
// start copy
let firstAlertAction:UIAlertAction = UIAlertAction(title: "OK", style: UIAlertActionStyle.default, handler: { (alertAction: UIAlertAction!) in
// action when pressed
self.session.startRunning()
})
actionSheet.addAction(firstAlertAction)
// end copy
self.present(actionSheet, animated: true, completion: nil)
}
}
Swift 5 update
func captureOutput(
captureOutput: AVCaptureOutput,
didOutputMetadataObjects metadataObjects: [AnyObject],
fromConnection connection: AVCaptureConnection,
barCodeType: [AVMetadataObject.ObjectType]) -> String? {
var detectionString: String?
var highlightViewRect = CGRect()
let barCodeTypes = barCodeType
if let metadataObject = metadataObjects.first {
for barcodeType in barCodeTypes {
if metadataObject.type == barcodeType {
guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return String()}
guard let barCodeObject = self.previewLayer.transformedMetadataObject(for: readableObject) else { return String()}
highlightViewRect = barCodeObject.bounds
detectionString = readableObject.stringValue
self.captureSession.stopRunning()
break
}
}
}
self.qrView.frame = highlightViewRect
self.view.bringSubviewToFront(self.qrView)
return detectionString
}
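A minimal sketch of how that helper might be driven from the actual Swift 5 delegate callback (the barcode types passed here are illustrative):
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    // Forward to the helper above; .qr and .ean13 are illustrative choices.
    if let code = captureOutput(captureOutput: output,
                                didOutputMetadataObjects: metadataObjects,
                                fromConnection: connection,
                                barCodeType: [.qr, .ean13]) {
        print(code)
    }
}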
