I am currently working on a QR Scan View in my Swift application.
I want to center the VideoPreview in the middle of my view.
The view looks like this:
The view (white) is called ScanView and I want to make the image preview the same size as the ScanView and center it in it.
Code snippet:
Thanks for any help!
Here is a working solution:
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureMetadataOutputObjectsDelegate {

    @IBOutlet weak var innerView: UIView!

    var session: AVCaptureSession?
    var input: AVCaptureDeviceInput?
    var previewLayer: AVCaptureVideoPreviewLayer?

    override func viewDidLoad() {
        super.viewDidLoad()
        createSession()
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        self.previewLayer?.frame.size = self.innerView.frame.size
    }

    private func createSession() {
        do {
            self.session = AVCaptureSession()
            if let device = AVCaptureDevice.default(for: AVMediaType.video) {
                self.input = try AVCaptureDeviceInput(device: device)
                self.session?.addInput(self.input!)
                self.previewLayer = AVCaptureVideoPreviewLayer(session: self.session!)
                self.previewLayer?.frame.size = self.innerView.frame.size
                self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
                self.innerView.layer.addSublayer(self.previewLayer!)

                //______ 1. solution with Video camera ______//
                let videoOutput = AVCaptureVideoDataOutput()
                videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
                if self.session?.canAddOutput(videoOutput) == true {
                    self.session?.addOutput(videoOutput)
                }

                //______ 2. solution with QR code ______//
                let metadataOutput = AVCaptureMetadataOutput()
                if self.session?.canAddOutput(metadataOutput) == true {
                    self.session?.addOutput(metadataOutput)
                }
                metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
                // explanation here: https://stackoverflow.com/a/35642852/2450755
                metadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.qr]

                self.session?.startRunning()
            }
        } catch {
            print(error)
        }
    }

    //MARK: AVCaptureVideoDataOutputSampleBufferDelegate
    public func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        if let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
            let cameraImage = CIImage(cvPixelBuffer: pixelBuffer)
            // awesome stuff here
        }
    }

    //MARK: AVCaptureMetadataOutputObjectsDelegate
    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        // handle scanned QR codes here
    }
}
Requirements:
Set up the Privacy - Camera Usage Description key in Info.plist.
innerView must be initialized; I did this via Storyboard with the following constraints:
Here is the result:
I had the same problem as Philip Dz. I finally fixed the issue by moving the setupVideo() call from viewDidLoad to viewDidAppear:
Calling setupVideo() in viewDidLoad:
Calling setupVideo() in viewDidAppear:
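A minimal sketch of that change (assuming setupVideo() is the method that builds the session and preview layer; the session-nil guard is my own addition, to avoid re-running setup on every appearance):

override func viewDidAppear(_ animated: Bool) {
    super.viewDidAppear(animated)
    // Auto Layout has finished by this point, so the container view has
    // its final size and the preview layer picks up the correct frame.
    if session == nil {
        setupVideo()
    }
}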
Perhaps I am chiming in a bit late, but I have just implemented a QR scanner and, depending on the device running it, the video stream can be zoomed. This is done via the AVCaptureDevice.videoZoomFactor property. So, to improve the user experience for a small, square QR scanner, the code above can be slightly modified by setting device.videoZoomFactor = min(YOUR_ZOOM_FACTOR_VALUE, device.activeFormat.videoMaxZoomFactor) (inside a lockForConfiguration()/unlockForConfiguration() pair) before self.session?.startRunning().
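A hedged sketch of that change (the zoom value below is just an example; changing videoZoomFactor requires locking the device for configuration first):

let yourZoomFactorValue: CGFloat = 2.0 // example value; tune for your UI
if let device = AVCaptureDevice.default(for: .video) {
    do {
        try device.lockForConfiguration()
        // Clamp the requested zoom to what the device supports.
        device.videoZoomFactor = min(yourZoomFactorValue, device.activeFormat.videoMaxZoomFactor)
        device.unlockForConfiguration()
    } catch {
        print("Could not lock device for configuration: \(error)")
    }
}
self.session?.startRunning()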
Related
I have made a separate .swift file for my QR/Camera Controller. I found a tutorial online on how to make a QR Code Reader, and I typed in the code provided. Everything is fine except the camera view isn't appearing properly on the screen (using an iPhone 8). How can I adjust the video view?
I'm not too experienced with Swift or Xcode, so any help would be appreciated!
Code:
import UIKit
import AVFoundation
class CameraController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate, AVCapturePhotoCaptureDelegate, AVCaptureMetadataOutputObjectsDelegate {

    @IBOutlet weak var previewView: UIView!
    @IBOutlet weak var lblOutput: UILabel!

    var imageOrientation: AVCaptureVideoOrientation?
    var captureSession: AVCaptureSession?
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    var capturePhotoOutput: AVCapturePhotoOutput?

    override func viewDidLoad() {
        super.viewDidLoad()
        // Get an instance of the AVCaptureDevice class to initialize a
        // device object and provide the video as the media type parameter
        guard let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) else {
            fatalError("No video device found")
        }
        // Handler called when the orientation changes
        self.imageOrientation = AVCaptureVideoOrientation.portrait
        do {
            // Get an instance of the AVCaptureDeviceInput class using the previous device object
            let input = try AVCaptureDeviceInput(device: captureDevice)
            // Initialize the captureSession object
            captureSession = AVCaptureSession()
            // Set the input device on the capture session
            captureSession?.addInput(input)
            // Get an instance of AVCapturePhotoOutput class
            capturePhotoOutput = AVCapturePhotoOutput()
            capturePhotoOutput?.isHighResolutionCaptureEnabled = true
            // Set the output on the capture session
            captureSession?.addOutput(capturePhotoOutput!)
            captureSession?.sessionPreset = .high
            // Initialize an AVCaptureMetadataOutput object and add it as an output to the capture session
            let captureMetadataOutput = AVCaptureMetadataOutput()
            captureSession?.addOutput(captureMetadataOutput)
            // Set delegate and use the default dispatch queue to execute the call back
            captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
            captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.qr]
            // Initialize the video preview layer and add it as a sublayer to the previewView's layer
            videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
            videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
            videoPreviewLayer?.frame = view.layer.bounds
            previewView.layer.addSublayer(videoPreviewLayer!)
            // Start video capture
            captureSession?.startRunning()
        } catch {
            // If any error occurs, simply print it out
            print(error)
            return
        }
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        navigationController?.setNavigationBarHidden(true, animated: false)
        self.captureSession?.startRunning()
    }

    // Find a camera with the specified AVCaptureDevicePosition, returning nil if one is not found
    func cameraWithPosition(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
        let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .unspecified)
        for device in discoverySession.devices {
            if device.position == position {
                return device
            }
        }
        return nil
    }

    func metadataOutput(_ captureOutput: AVCaptureMetadataOutput,
                        didOutput metadataObjects: [AVMetadataObject],
                        from connection: AVCaptureConnection) {
        // Check if the metadataObjects array contains at least one object.
        if metadataObjects.count == 0 {
            return
        }
        //self.captureSession?.stopRunning()
        // Get the metadata object.
        let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
        if metadataObj.type == AVMetadataObject.ObjectType.qr {
            if let outputString = metadataObj.stringValue {
                DispatchQueue.main.async {
                    print(outputString)
                    self.lblOutput.text = outputString
                }
            }
        }
    }
}
Image of current view:
The highlighted white box is the UIView
The mistake is that you use the frame of view but add the videoPreviewLayer to previewView, which is smaller (as you showed in the storyboard). In the videoPreviewLayer frame configuration:
// Initialize the video preview layer and add it as a sublayer to the previewView's layer
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer?.frame = view.layer.bounds
previewView.layer.addSublayer(videoPreviewLayer!)
change this line:
videoPreviewLayer?.frame = view.layer.bounds
to
videoPreviewLayer?.frame = previewView.layer.bounds
You should set up NSLayoutConstraints in the storyboard.
Step #1
This is your current state.
Step #2
Add top, leading, trailing, and bottom constraints.
Step #3
Final result.
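If you would rather create the same constraints in code, a minimal sketch (assuming the outlet is named previewView, as in the question, and should fill its superview):

previewView.translatesAutoresizingMaskIntoConstraints = false
NSLayoutConstraint.activate([
    previewView.topAnchor.constraint(equalTo: view.topAnchor),
    previewView.leadingAnchor.constraint(equalTo: view.leadingAnchor),
    previewView.trailingAnchor.constraint(equalTo: view.trailingAnchor),
    previewView.bottomAnchor.constraint(equalTo: view.bottomAnchor)
])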
I would expect that one of the following is happening:
- You didn't setup your constraints properly
- Your view resizes
- You used incorrect view to set size of your layer
Setting up constraints is hard to explain in writing. There are many ways to set them up, so I made a very short video that explains one way (or two) of doing it.
The second and third can be explained in this snippet:
override func viewDidLoad() {
    super.viewDidLoad()
    ...
    videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
    videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    previewView.layer.addSublayer(videoPreviewLayer!)
    updatePreviewLayerFrame()
    ...
}

override func viewDidLayoutSubviews() {
    super.viewDidLayoutSubviews()
    updatePreviewLayerFrame()
}

private func updatePreviewLayerFrame() {
    videoPreviewLayer?.frame = previewView.bounds
}
Overriding viewDidLayoutSubviews should resize your layer, as this method is called whenever the view controller "resizes". It is also called shortly after viewDidLoad. Also note that previewView is used to determine the frame: videoPreviewLayer?.frame = previewView.bounds.
Layers do not automatically resize with their parent view. That means your videoPreviewLayer gets its frame from the original (not yet laid-out) previewView and never changes it. To update the layer, you can override this method:
override func viewDidLayoutSubviews() {
    super.viewDidLayoutSubviews()
    // you need to keep a reference for that
    self.videoPreviewLayer.frame = self.previewView.bounds
}
Alternatively, and I think that's better, you can check out how the preview view is implemented in Apple's AVCam example app. Resizing will be handled by Auto Layout when using their approach.
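The AVCam idea boils down to making the preview layer the view's backing layer, so Auto Layout sizes it for free. A sketch of that pattern (from memory, not verbatim from the sample):

import UIKit
import AVFoundation

class PreviewView: UIView {

    // Use AVCaptureVideoPreviewLayer as the view's backing layer,
    // so it always matches the view's bounds under Auto Layout.
    override class var layerClass: AnyClass {
        return AVCaptureVideoPreviewLayer.self
    }

    var videoPreviewLayer: AVCaptureVideoPreviewLayer {
        return layer as! AVCaptureVideoPreviewLayer
    }

    var session: AVCaptureSession? {
        get { return videoPreviewLayer.session }
        set { videoPreviewLayer.session = newValue }
    }
}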
I have looked through a lot of documentation but can't find an answer at all.
Every time I rotate my device, the UI rotates as expected, but the camera preview keeps the old rotation.
It's as if the coordinates are still local instead of facing global north.
What else can I look up?
import UIKit
import AVFoundation
class scannerViewController: UIViewController {

    var session: AVCaptureSession!
    var input: AVCaptureInput!
    var previewLayer: AVCaptureVideoPreviewLayer!
    var camera: AVCaptureDevice!

    @IBOutlet weak var cameraPreview: UIView!

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        session = AVCaptureSession()
        camera = AVCaptureDevice.default(for: AVMediaType.video)
        input = try? AVCaptureDeviceInput(device: camera!)
        session.addInput(input)
        previewLayer = AVCaptureVideoPreviewLayer(session: session)
        cameraPreview.layer.addSublayer(previewLayer)
        DispatchQueue.global(qos: .userInitiated).async {
            self.session.startRunning()
            DispatchQueue.main.async {
                self.previewLayer.frame = self.cameraPreview.bounds
            }
        }
    }
}
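The usual place to look is the preview layer's AVCaptureConnection: its videoOrientation does not follow the interface orientation automatically. A hedged sketch of one common fix (the device-to-video orientation mapping below is an assumption to adapt to your setup):

override func viewWillTransition(to size: CGSize, with coordinator: UIViewControllerTransitionCoordinator) {
    super.viewWillTransition(to: size, with: coordinator)
    coordinator.animate(alongsideTransition: { _ in
        // Keep the layer matching its container...
        self.previewLayer.frame = self.cameraPreview.bounds
        // ...and rotate the video feed along with the interface.
        if let connection = self.previewLayer.connection, connection.isVideoOrientationSupported {
            switch UIDevice.current.orientation {
            case .landscapeLeft: connection.videoOrientation = .landscapeRight
            case .landscapeRight: connection.videoOrientation = .landscapeLeft
            case .portraitUpsideDown: connection.videoOrientation = .portraitUpsideDown
            default: connection.videoOrientation = .portrait
            }
        }
    })
}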
I am currently creating a QR scanner/reader application in Xcode 10. I have a tab controller that has two tab bar items:
1.) Home
2.) QR Scanner
My code works and I can scan a QR code in the "second view controller", but whenever I switch to the "first view controller", it still scans QR codes when I don't want it to. If you have any tips on how I can fix this issue, it would be much appreciated.
First View Controller:
import UIKit
class FirstViewController: UIViewController {

    override func viewDidLoad() {
        super.viewDidLoad()
    }
}
Second View Controller:
import UIKit
import AVFoundation
class SecondViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {

    var video = AVCaptureVideoPreviewLayer()

    @IBOutlet weak var square: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Creating session
        let session = AVCaptureSession()
        // Define capture device
        let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
        do {
            let input = try AVCaptureDeviceInput(device: captureDevice!)
            session.addInput(input)
        } catch {
            print(">>>>Error")
        }
        let output = AVCaptureMetadataOutput()
        session.addOutput(output)
        output.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        output.metadataObjectTypes = [AVMetadataObject.ObjectType.qr]
        video = AVCaptureVideoPreviewLayer(session: session)
        video.frame = view.layer.bounds
        view.layer.addSublayer(video)
        self.view.bringSubviewToFront(square)
        session.startRunning()
    }

    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        if !metadataObjects.isEmpty {
            if let object = metadataObjects[0] as? AVMetadataMachineReadableCodeObject {
                if object.type == AVMetadataObject.ObjectType.qr {
                    let alert = UIAlertController(title: "QR code", message: object.stringValue, preferredStyle: .alert)
                    alert.addAction(UIAlertAction(title: "Retake", style: .default, handler: nil))
                    present(alert, animated: true, completion: nil)
                }
            }
        }
    }
}
I would recommend making the session a member variable of your class instead of a local variable in the viewDidLoad() method.
Then you can call session.startRunning() in your viewWillAppear(_:) method and session.stopRunning() in viewWillDisappear(_:).
Here is a basic layout:
class SecondViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {

    let session = AVCaptureSession()

    override func viewDidLoad() {
        super.viewDidLoad()
        // any setup you may need
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        // ensure everything is set up correctly
        session.startRunning()
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        // anything before navigating away
        session.stopRunning()
    }
}
You have started the AVCaptureSession but never explicitly stop it. When switching between controllers, the system may keep the controller (and its running session) in memory.
You will want to call session.stopRunning() before navigating away from the controller and clean up, e.g. in viewWillDisappear or similar.
Ref: https://developer.apple.com/documentation/avfoundation/avcapturesession/1385661-stoprunning
Thank you everyone for your assistance. After reviewing my code I realized that what I had to do was override viewWillAppear(_:) and viewDidDisappear(_:).
Working Code:
class SecondViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {

    let session = AVCaptureSession()

    override func viewDidLoad() {
        super.viewDidLoad()
        // any setup you may need
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        session.startRunning()
        print("Start running")
    }

    override func viewDidDisappear(_ animated: Bool) {
        super.viewDidDisappear(animated)
        session.stopRunning()
        print("Stop running")
    }
}
Why can't I see my button when I run my program, even though I can see it in Xcode 8? It sits over a view and looks like the button you press to take a photo in Snapchat.
I'm new to Xcode and Swift, so if there is anything I need to know about Xcode or the storyboard that could help with these problems, please tell me.
The code for the view:
import UIKit
import AVFoundation
import QuartzCore
class View1: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    let captureSession = AVCaptureSession()
    var previewLayer: CALayer!
    var captureDevice: AVCaptureDevice!

    @IBOutlet weak var cameraView: UIView!

    override func viewDidLoad() {
        super.viewDidLoad()
    }

    /*
     This is a function to prepare the camera
     and check that there is a camera.
     If there isn't a camera on the device,
     you will get an error.
     */
    func prepareCamera() {
        captureSession.sessionPreset = AVCaptureSessionPreset1920x1080
        if let availableDevices = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                                  mediaType: AVMediaTypeVideo,
                                                                  position: .back).devices {
            captureDevice = availableDevices.first
            beginSession()
        }
    }

    func beginSession() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
            captureSession.addInput(captureDeviceInput)
        } catch {
            print(error.localizedDescription)
            /*
             Figure out what to do here
             */
        }
        if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
            self.previewLayer = previewLayer
            self.view.layer.addSublayer(self.previewLayer)
            self.previewLayer.frame = self.view.layer.frame
            captureSession.startRunning()
            let dataOutput = AVCaptureVideoDataOutput()
            dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)]
            dataOutput.alwaysDiscardsLateVideoFrames = true
            if captureSession.canAddOutput(dataOutput) {
                captureSession.addOutput(dataOutput)
            }
            captureSession.commitConfiguration()
            let queue = DispatchQueue(label: "com.PhotoAllergy.captureQueue")
            dataOutput.setSampleBufferDelegate(self, queue: queue)
        }
    }

    //func captureOutput(_ captureOutput: AVCaptureOutput!, didDrop sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
    //    yeeye
    //}

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        prepareCamera()
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }
}
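One thing to check beyond the constraint answers below: a layer added with addSublayer is drawn on top of sibling subviews, so the full-screen preview layer may simply be covering your button. Other snippets in this thread handle that by bringing the control back to the front after adding the layer; a sketch (yourButton is a hypothetical outlet for your photo button):

// After adding the preview layer in beginSession():
self.view.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.view.layer.frame
// Hypothetical outlet; re-orders the button's layer above the preview layer.
self.view.bringSubviewToFront(yourButton)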
Add constraints.
Follow these images:
Good luck! You can comment here if you have any questions about this.
Do the following:
Select your button in the storyboard view controller and follow the steps in the screenshot.
Press the "Add Constraints" button.
Run the app.
Please set the view controller as the initial view controller, as shown in the image below.
We have implemented reading QR codes on receipts using AVCaptureSession. One issue we have noticed is that the hit rate is rather finicky: you really have to work at it to get it to recognize that there is a QR code on the receipt. I have googled around trying to find published material on how to increase the hit rate. Implementing autofocus has helped marginally.
I did take a look at Reading on QRCodes on iOS with AVCaptureSession -- alignment issues?, but there were no actionable suggestions there.
Any thoughts on other approaches?
Here is the reader code:
import UIKit
import AVFoundation
final class BarcodeReader: NSObject {

    fileprivate var captureSession: AVCaptureSession?
    fileprivate var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    fileprivate unowned let barcodeReaderDelegate: BarcodeReaderDelegate

    init(barcodeReaderDelegate: BarcodeReaderDelegate) {
        self.barcodeReaderDelegate = barcodeReaderDelegate
    }

    func start(in view: UIView) throws {
        if captureSession == nil {
            captureSession = try configuredCaptureSession()
        }
        if videoPreviewLayer == nil {
            videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
            videoPreviewLayer!.videoGravity = AVLayerVideoGravityResizeAspectFill
            videoPreviewLayer!.frame = view.layer.bounds
            view.layer.addSublayer(videoPreviewLayer!)
        }
        captureSession!.startRunning()
    }

    fileprivate func configuredCaptureSession() throws -> AVCaptureSession {
        let captureSession = AVCaptureSession()
        // NOTE: Remember to add a message in your Info.plist file under the
        // key NSCameraUsageDescription or this will crash the app.
        let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
        // make sure that auto focus is turned on
        if (device?.isFocusModeSupported(.continuousAutoFocus))! {
            try device?.lockForConfiguration()
            device?.focusMode = .continuousAutoFocus
            device?.unlockForConfiguration()
        }
        let input = try AVCaptureDeviceInput(device: device)
        captureSession.addInput(input)
        let output = AVCaptureMetadataOutput()
        captureSession.addOutput(output)
        output.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        output.metadataObjectTypes = [AVMetadataObjectTypeQRCode]
        return captureSession
    }

    func stop() {
        captureSession?.stopRunning()
    }
}

extension BarcodeReader: AVCaptureMetadataOutputObjectsDelegate {
    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [Any]!, from connection: AVCaptureConnection!) {
        guard let metadataObject = metadataObjects.first as? AVMetadataMachineReadableCodeObject else { return }
        guard metadataObject.type == AVMetadataObjectTypeQRCode else { return }
        stop()
        barcodeReaderDelegate.barcodeReader(self, found: metadataObject.stringValue)
    }
}

protocol BarcodeReaderDelegate: class {
    func barcodeReader(_ barcodeReader: BarcodeReader, found code: String)
}