Video Buffer Output with Swift - iOS

My goal is to take the video buffer and ultimately convert it to NSData, but I do not understand how to access the buffer properly. I have the captureOutput function, but I have not been successful in converting the buffer, and I'm not sure I am actually collecting anything in it. This is all Swift code; I have found some examples in Objective-C, but I am not able to understand the Obj-C code well enough to figure it out.
var captureDevice : AVCaptureDevice?
var videoCaptureOutput = AVCaptureVideoDataOutput()
var bounds: CGRect = UIScreen.mainScreen().bounds
let captureSession = AVCaptureSession()
var captureConnection: AVCaptureMovieFileOutput?

override func viewDidLoad() {
    super.viewDidLoad()
    captureSession.sessionPreset = AVCaptureSessionPreset640x480
    let devices = AVCaptureDevice.devices()
    for device in devices {
        if (device.hasMediaType(AVMediaTypeVideo)) {
            if device.position == AVCaptureDevicePosition.Back {
                captureDevice = device as? AVCaptureDevice
                if captureDevice != nil {
                    beginSession()
                }
            }
        }
    }
}

func beginSession() {
    var screenWidth: CGFloat = bounds.size.width
    var screenHeight: CGFloat = bounds.size.height
    var err: NSError? = nil
    captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err)!)
    if err != nil {
        println("Error: \(err?.localizedDescription)")
    }
    videoCaptureOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA]
    videoCaptureOutput.alwaysDiscardsLateVideoFrames = true
    videoCaptureOutput.setSampleBufferDelegate(self, queue: dispatch_queue_create("sample buffer delegate", DISPATCH_QUEUE_SERIAL))
    if captureSession.canAddOutput(self.videoCaptureOutput) {
        captureSession.addOutput(self.videoCaptureOutput)
    }
}

func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    // I think this is where I can get the buffer info.
}

In the AVCaptureVideoDataOutputSampleBufferDelegate method captureOutput(captureOutput:didOutputSampleBuffer:fromConnection:), you can get the buffer info:
let formatDescription: CMFormatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)
let imageBuffer: CVImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
CVPixelBufferLockBaseAddress(imageBuffer, 0)
let imagePointer: UnsafeMutablePointer<Void> = CVPixelBufferGetBaseAddress(imageBuffer)
let bufferSize: (width: Int, height: Int) = (CVPixelBufferGetWidth(imageBuffer), CVPixelBufferGetHeight(imageBuffer))
println("Buffer Size: \(bufferSize.width):\(bufferSize.height)")
CVPixelBufferUnlockBaseAddress(imageBuffer, 0)

Related

Swift, Firebase - Use CMSampleBufferRef with live feed of camera

I'm currently trying to implement ML Kit from Firebase to use text recognition.
So far, I've got the code for the camera, which shows its live feed inside a UIView. My intention is now to recognize text in this live feed, which I reckon is possible with the help of CMSampleBufferRef (let image = VisionImage(buffer: bufferRef) - see linked Firebase tutorial, Step 2).
How can I create such a CMSampleBufferRef and make it hold the live feed of the camera (the UIView)?
My code for the camera:
@IBOutlet weak var cameraView: UIView!

var session: AVCaptureSession?
var device: AVCaptureDevice?
var input: AVCaptureDeviceInput?
var output: AVCaptureMetadataOutput?
var prevLayer: AVCaptureVideoPreviewLayer?

override func viewDidLoad() {
    super.viewDidLoad()
    prevLayer?.frame.size = cameraView.frame.size
}

func createSession() {
    session = AVCaptureSession()
    device = AVCaptureDevice.default(for: AVMediaType.video)
    do {
        input = try AVCaptureDeviceInput(device: device!)
    } catch {
        print(error)
    }
    if let input = input {
        session?.addInput(input)
    }
    prevLayer = AVCaptureVideoPreviewLayer(session: session!)
    prevLayer?.frame.size = cameraView.frame.size
    prevLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    prevLayer?.connection?.videoOrientation = transformOrientation(orientation: UIInterfaceOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue)!)
    cameraView.layer.addSublayer(prevLayer!)
    session?.startRunning()
}

func cameraWithPosition(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera, .builtInTelephotoCamera, .builtInTrueDepthCamera, .builtInWideAngleCamera], mediaType: .video, position: position)
    if let device = deviceDiscoverySession.devices.first {
        return device
    }
    return nil
}

override func viewWillTransition(to size: CGSize, with coordinator: UIViewControllerTransitionCoordinator) {
    coordinator.animate(alongsideTransition: { (context) -> Void in
        self.prevLayer?.connection?.videoOrientation = self.transformOrientation(orientation: UIInterfaceOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue)!)
        self.prevLayer?.frame.size = self.cameraView.frame.size
    }, completion: { (context) -> Void in
    })
    super.viewWillTransition(to: size, with: coordinator)
}

func transformOrientation(orientation: UIInterfaceOrientation) -> AVCaptureVideoOrientation {
    switch orientation {
    case .landscapeLeft:
        return .landscapeLeft
    case .landscapeRight:
        return .landscapeRight
    case .portraitUpsideDown:
        return .portraitUpsideDown
    default:
        return .portrait
    }
}
Edit: I have added a functional Swift sample matching your language requirement:
import UIKit
import AVFoundation

class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    @IBOutlet weak var cameraView: UIView!

    var session: AVCaptureSession!
    var device: AVCaptureDevice?
    var input: AVCaptureDeviceInput?
    var videoOutput: AVCaptureVideoDataOutput!
    var output: AVCaptureMetadataOutput?
    var prevLayer: AVCaptureVideoPreviewLayer!

    override func viewDidLoad() {
        super.viewDidLoad()

        session = AVCaptureSession()
        device = AVCaptureDevice.default(for: AVMediaType.video)
        do {
            input = try AVCaptureDeviceInput(device: device!)
        } catch {
            print(error)
            return
        }
        if let input = input {
            if session.canAddInput(input) {
                session.addInput(input)
            }
        }

        videoOutput = AVCaptureVideoDataOutput()
        videoOutput.videoSettings = [
            kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32BGRA)
        ]
        videoOutput.alwaysDiscardsLateVideoFrames = true
        let queue = DispatchQueue(label: "video-frame-sampler")
        videoOutput.setSampleBufferDelegate(self, queue: queue)
        if session.canAddOutput(videoOutput) {
            session.addOutput(videoOutput)
            if let connection = videoOutput.connection(with: .video) {
                connection.videoOrientation = videoOrientationFromInterfaceOrientation()
                if connection.isVideoStabilizationSupported {
                    connection.preferredVideoStabilizationMode = .auto
                }
            }
        }

        prevLayer = AVCaptureVideoPreviewLayer(session: session)
        prevLayer.frame.size = cameraView.frame.size
        prevLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
        cameraView.layer.addSublayer(prevLayer)

        session.startRunning()
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // pass your sampleBuffer to the Vision API
        // I recommend not passing every frame; skip some frames until the camera is steady and focused
        print("frame received")
    }

    func videoOrientationFromInterfaceOrientation() -> AVCaptureVideoOrientation {
        return AVCaptureVideoOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue)!
    }
}
I see that you have already set up your input and preview layer, but you also need to set up a video data output to capture your CMSampleBufferRef frames.
To do this, set up an object of type AVCaptureVideoDataOutput with the following steps:
Create an instance of AVCaptureVideoDataOutput and configure it:
AVCaptureVideoDataOutput *videoOutput = [AVCaptureVideoDataOutput new];
videoOutput.videoSettings = @{ (id)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_32BGRA) };
videoOutput.alwaysDiscardsLateVideoFrames = YES;
Set the sample buffer delegate of the configured output and add the output to the session:
dispatch_queue_t queue = dispatch_queue_create("video-frame-sampler", 0);
[videoOutput setSampleBufferDelegate:self queue:queue];
if ([self.session canAddOutput:videoOutput]) {
    [self.session addOutput:videoOutput];
    AVCaptureConnection *connection = [videoOutput connectionWithMediaType:AVMediaTypeVideo];
    connection.videoOrientation = [self videoOrientationFromDeviceOrientation];
    if (connection.supportsVideoStabilization) {
        connection.preferredVideoStabilizationMode = AVCaptureVideoStabilizationModeAuto;
    }
}
Implement the captureOutput:didOutputSampleBuffer:fromConnection: method, where you will receive the CMSampleBufferRef you need:
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    // pass your sampleBuffer to the Vision API
    // I recommend not passing every frame; skip some frames until the camera is steady and focused
}
I'm a plain old Objective-C developer, but you can easily convert the code to Swift as per your need.
Additionally, here is the code for the videoOrientationFromDeviceOrientation method:
- (AVCaptureVideoOrientation)videoOrientationFromDeviceOrientation {
    UIDeviceOrientation orientation = [UIDevice currentDevice].orientation;
    AVCaptureVideoOrientation result = (AVCaptureVideoOrientation)orientation;
    if (orientation == UIDeviceOrientationLandscapeLeft) {
        result = AVCaptureVideoOrientationLandscapeRight;
    } else if (orientation == UIDeviceOrientationLandscapeRight) {
        result = AVCaptureVideoOrientationLandscapeLeft;
    }
    return result;
}
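To connect this to the ML Kit text recognition the question asks about, here is a minimal sketch of what the delegate body might pass to the recognizer, assuming the FirebaseMLVision SDK referenced in the linked tutorial (that API has since been superseded, and names like textRecognizer are illustrative stored properties of the view controller):

import AVFoundation
import FirebaseMLVision

// Illustrative: create the recognizer once, e.g. as a stored property of the view controller.
lazy var textRecognizer = Vision.vision().onDeviceTextRecognizer()

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    let visionImage = VisionImage(buffer: sampleBuffer)

    // Tell ML Kit how the pixel data is oriented; adjust to match the connection's videoOrientation.
    let metadata = VisionImageMetadata()
    metadata.orientation = .rightTop
    visionImage.metadata = metadata

    textRecognizer.process(visionImage) { result, error in
        guard error == nil, let result = result else { return }
        print(result.text)  // recognized text for this frame
    }
}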

Record depth map from iPhone as sequence

I want to create an application on iOS that can record and save RGB + depth data. I have been able to capture both streams from the dual camera and preview them on the screen in real time. Now I want to save them as two sequences in the library (one RGB sequence and one depth-map sequence).
So my question is: how can I save this depth information to the iPhone gallery as a video or sequence, saving the RGB info at the same time, for later processing?
I am working with Xcode 10.2, Swift 5 and an iPhone XS.
import UIKit
import AVFoundation

class ViewController: UIViewController {

    @IBOutlet weak var previewView: UIImageView!
    @IBOutlet weak var previewModeControl: UISegmentedControl!

    var previewMode = PreviewMode.original // original (RGB) or depth
    let session = AVCaptureSession()
    let dataOutputQueue = DispatchQueue(label: "video data queue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
    var background: CIImage?
    var depthMap: CIImage?
    var scale: CGFloat = 0.0

    override func viewDidLoad() {
        super.viewDidLoad()
        previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
        configureCaptureSession()
        session.startRunning()
    }

    override var shouldAutorotate: Bool {
        return false
    }

    func configureCaptureSession() {
        session.beginConfiguration()

        // Add input to the session
        guard let camera = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .unspecified) else {
            fatalError("No depth video camera available")
        }
        session.sessionPreset = .photo
        do {
            let cameraInput = try AVCaptureDeviceInput(device: camera)
            if session.canAddInput(cameraInput) {
                session.addInput(cameraInput)
            } else {
                fatalError("Error adding input device to session")
            }
        } catch {
            fatalError(error.localizedDescription)
        }

        // Add video output to the session
        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
        videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
        if session.canAddOutput(videoOutput) {
            session.addOutput(videoOutput)
        } else {
            fatalError("Error adding output to session")
        }
        let videoConnection = videoOutput.connection(with: .video)
        videoConnection?.videoOrientation = .portrait

        // Add depth output to the session
        let depthOutput = AVCaptureDepthDataOutput()
        // Set the current view controller as the delegate for the new object
        depthOutput.setDelegate(self, callbackQueue: dataOutputQueue)
        depthOutput.isFilteringEnabled = true // filtering fills holes in the depth data
        if session.canAddOutput(depthOutput) {
            session.addOutput(depthOutput)
        } else {
            fatalError("Error adding output to session")
        }
        let depthConnection = depthOutput.connection(with: .depthData)
        depthConnection?.videoOrientation = .portrait

        let outputRect = CGRect(x: 0, y: 0, width: 1, height: 1)
        let videoRect = videoOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
        let depthRect = depthOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
        scale = max(videoRect.width, videoRect.height) / max(depthRect.width, depthRect.height)

        do {
            try camera.lockForConfiguration()
            if let frameDuration = camera.activeDepthDataFormat?.videoSupportedFrameRateRanges.first?.minFrameDuration {
                camera.activeVideoMinFrameDuration = frameDuration
            }
            camera.unlockForConfiguration()
        } catch {
            fatalError(error.localizedDescription)
        }

        session.commitConfiguration()
    }

    @IBAction func previewModeChanged(_ sender: UISegmentedControl) {
        previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
    }
}

extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        let image = CIImage(cvPixelBuffer: pixelBuffer!)

        let previewImage: CIImage
        switch previewMode {
        case .original:
            previewImage = image
        case .depth:
            previewImage = depthMap ?? image
        }

        let displayImage = UIImage(ciImage: previewImage)
        DispatchQueue.main.async { [weak self] in
            self?.previewView.image = displayImage
        }
    }
}

extension ViewController: AVCaptureDepthDataOutputDelegate {
    func depthDataOutput(_ output: AVCaptureDepthDataOutput, didOutput depthData: AVDepthData, timestamp: CMTime, connection: AVCaptureConnection) {
        if previewMode == .original {
            return
        }
        var convertedDepth: AVDepthData
        if depthData.depthDataType != kCVPixelFormatType_DisparityFloat32 {
            convertedDepth = depthData.converting(toDepthDataType: kCVPixelFormatType_DisparityFloat32)
        } else {
            convertedDepth = depthData
        }
        let pixelBuffer = convertedDepth.depthDataMap
        pixelBuffer.clamp()
        let depthMap = CIImage(cvPixelBuffer: pixelBuffer)
        DispatchQueue.main.async { [weak self] in
            self?.depthMap = depthMap
        }
    }
}
Actual result: the app previews on screen, in real time, whichever CIImage is selected in the UI (the RGB image or the depth map).
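The saving part is not answered in this thread, but for illustration, one common direction is to feed each stream into an AVAssetWriter through a pixel buffer adaptor. The sketch below is a hedged, hypothetical helper (class and method names are mine, not from the question); depth frames would first have to be converted or rendered into a pixel format the writer accepts, such as 32BGRA:

import AVFoundation

// Illustrative recorder for one pixel-buffer stream (RGB, or a rendered depth map).
final class PixelBufferRecorder {
    private let writer: AVAssetWriter
    private let input: AVAssetWriterInput
    private let adaptor: AVAssetWriterInputPixelBufferAdaptor
    private var sessionStarted = false

    init(outputURL: URL, width: Int, height: Int) throws {
        writer = try AVAssetWriter(outputURL: outputURL, fileType: .mov)
        input = AVAssetWriterInput(mediaType: .video, outputSettings: [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: width,
            AVVideoHeightKey: height
        ])
        input.expectsMediaDataInRealTime = true
        adaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: input,
                                                       sourcePixelBufferAttributes: nil)
        writer.add(input)
        writer.startWriting()
    }

    // Call from the capture callbacks with the frame's presentation timestamp.
    func append(_ pixelBuffer: CVPixelBuffer, at time: CMTime) {
        if !sessionStarted {
            writer.startSession(atSourceTime: time)
            sessionStarted = true
        }
        guard input.isReadyForMoreMediaData else { return } // drop the frame if the writer is busy
        if !adaptor.append(pixelBuffer, withPresentationTime: time) {
            print("Failed to append frame at \(time)")
        }
    }

    func finish(completion: @escaping () -> Void) {
        input.markAsFinished()
        writer.finishWriting(completionHandler: completion)
    }
}

One recorder per stream, driven from captureOutput(_:didOutput:from:) and depthDataOutput(_:didOutput:timestamp:connection:), would produce two movie files that could then be saved to the photo library.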

CMSampleBufferGetImageBuffer(sampleBuffer) returns nil

I use this code to capture video from the camera, but CMSampleBufferGetImageBuffer(sampleBuffer) always returns nil. What is the problem? Here is the code; I modified the code from this source to adapt it for Swift 4: https://github.com/FlexMonkey/CoreImageHelpers/blob/master/CoreImageHelpers/coreImageHelpers/CameraCaptureHelper.swift
import AVFoundation
import CoreMedia
import CoreImage
import UIKit

class CameraCaptureHelper: NSObject {
    let captureSession = AVCaptureSession()
    let cameraPosition: AVCaptureDevice.Position

    weak var delegate: CameraCaptureHelperDelegate?

    required init(cameraPosition: AVCaptureDevice.Position) {
        self.cameraPosition = cameraPosition
        super.init()
        initialiseCaptureSession()
    }

    fileprivate func initialiseCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo

        guard let camera = AVCaptureDevice.default(.builtInWideAngleCamera,
                                                   for: .video, position: cameraPosition)
        else {
            fatalError("Unable to access camera")
        }

        do {
            let input = try AVCaptureDeviceInput(device: camera)
            captureSession.addInput(input)
        } catch {
            fatalError("Unable to access back camera")
        }

        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self,
                                            queue: DispatchQueue(label: "sample buffer delegate", attributes: []))
        if captureSession.canAddOutput(videoOutput) {
            captureSession.addOutput(videoOutput)
        }

        captureSession.startRunning()
    }
}

extension CameraCaptureHelper: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        connection.videoOrientation = .landscapeRight //AVCaptureVideoOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue)!

        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            return
        }

        DispatchQueue.main.async {
            self.delegate?.newCameraImage(self,
                                          image: CIImage(cvPixelBuffer: pixelBuffer))
        }
    }
}

protocol CameraCaptureHelperDelegate: class {
    func newCameraImage(_ cameraCaptureHelper: CameraCaptureHelper, image: CIImage)
}
You're trying to access the pixel buffer from the "just dropped a sample buffer" callback. The header file says:
CMSampleBuffer object passed to this delegate method will contain metadata about the dropped video frame, such as its duration and presentation time stamp, but will contain no actual video data.
You should be doing that from the didOutputSampleBuffer: delegate callback.
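Concretely, keeping the rest of the posted class unchanged, the same body moved into the non-dropped callback looks like this (a sketch of the fix the answer describes):

extension CameraCaptureHelper: AVCaptureVideoDataOutputSampleBufferDelegate {
    // didOutput (not didDrop) delivers sample buffers that actually contain pixel data.
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        connection.videoOrientation = .landscapeRight

        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

        DispatchQueue.main.async {
            self.delegate?.newCameraImage(self, image: CIImage(cvPixelBuffer: pixelBuffer))
        }
    }
}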

Swift iOS, wondering if someone could explain why the output is rotated 90 degrees?

I am following this link: How to apply filter to Video real-time using Swift, and for whatever reason the UIImageOrientation is rotated 90 degrees to the left. I have tried to rectify this by setting the orientation to Up, but it still appears the same. Does anyone have any idea why this is? I'm not sure if it is the image, the preview layer, or the image view that is causing it.
Here is the code:
import UIKit
import AVFoundation
import CoreMedia

let noirFilter = CIFilter(name: "CIPhotoEffectNoir")!
let sepiaFilter = CIFilter(name: "CISepiaTone")!
let vignetteEffect = CIFilter(name: "CIVignetteEffect")!
let Filters = [noirFilter, sepiaFilter, vignetteEffect]

class TestViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    @IBOutlet weak var imageView: UIImageView!
    @IBOutlet weak var Photo: UIButton!

    var sessionQueue: dispatch_queue_t!
    var stillImageOutput: AVCaptureStillImageOutput?
    var videoDeviceInput: AVCaptureDeviceInput?
    var buttonTapped = false

    override func viewDidLoad() {
        super.viewDidLoad()

        sessionQueue = dispatch_queue_create("com.bradleymackey.Backchat.sessionQueue", DISPATCH_QUEUE_SERIAL)
        let captureSession = AVCaptureSession()
        captureSession.sessionPreset = AVCaptureSessionPresetPhoto
        let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
        var videoDeviceInput: AVCaptureDeviceInput?
        do {
            let input = try AVCaptureDeviceInput(device: backCamera)
            captureSession.addInput(input)
            self.videoDeviceInput = videoDeviceInput
        } catch {
            print("can't access camera")
            return
        }

        // although we don't use this, it's required to get captureOutput invoked
        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)

        imageView.contentMode = UIViewContentMode.ScaleAspectFill

        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: dispatch_queue_create("sample buffer delegate", DISPATCH_QUEUE_SERIAL))
        if captureSession.canAddOutput(videoOutput) {
            captureSession.addOutput(videoOutput)
        }
        captureSession.startRunning()

        let stillImageOutput: AVCaptureStillImageOutput = AVCaptureStillImageOutput()
        if captureSession.canAddOutput(stillImageOutput) {
            stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
            captureSession.addOutput(stillImageOutput)
            self.stillImageOutput = stillImageOutput
        }
    }

    override func viewWillLayoutSubviews() {
    }

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
        let filter = Filters[1]
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        let cameraImage = CIImage(CVPixelBuffer: pixelBuffer!)
        filter.setValue(cameraImage, forKey: kCIInputImageKey)
        let filteredImage = UIImage(CIImage: filter.valueForKey(kCIOutputImageKey) as! CIImage)
        var newImage = UIImage(CIImage: filter.valueForKey(kCIOutputImageKey) as! CIImage, scale: 1.0, orientation: .Up)
        dispatch_async(dispatch_get_main_queue()) {
            self.imageView.image = newImage
        }
    }

    @IBAction func snapStill(sender: UIButton) {
        print("snapStillImage")
        let previewController = PreviewViewController(nibName: "PreviewViewController", bundle: nil)
        previewController.media = Media.Photo(image: imageView.image!)
        previewController.isFrontCamera = false
        self.presentViewController(previewController, animated: true, completion: nil)
    }

    override func viewDidLayoutSubviews() {
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
}
The reason for this is that the orientation is not set.
You need to set the orientation in captureOutput()
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
{
    connection.videoOrientation = .Portrait
    ...
This should solve your problem.
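Alternatively (as in the other answers above), the orientation can be configured once on the output's connection when it is added to the session, so the per-frame callback does not need to touch it. A small sketch in the question's Swift 2 style:

if captureSession.canAddOutput(videoOutput) {
    captureSession.addOutput(videoOutput)
    // Configure the connection once instead of on every frame.
    if let connection = videoOutput.connectionWithMediaType(AVMediaTypeVideo) {
        connection.videoOrientation = .Portrait
    }
}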

I can't fix the size of the AVCapture preview with this code

I need to set the capture preview to a fixed size in landscape, but my code cannot set the preview size even though I use previewLayer.frame = CGRectMake(20, 40, 500, 100).
As you can see in the screenshot below, the preview does not match CGRectMake(20, 40, 500, 100).
It comes out much smaller than the frame specified by CGRectMake(20, 40, 500, 100).
Please give me an idea or some example code for setting a custom preview size. Thank you.
Here is some of the code:
import UIKit
import AVFoundation

protocol BarcodeDelegate {
    func barcodeReaded(barcode: String)
}

class barcodeCapViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {

    var delegate: BarcodeDelegate?
    var captureSession: AVCaptureSession!
    var code: String?

    override func viewDidLoad() {
        super.viewDidLoad()

        self.captureSession = AVCaptureSession()
        let videoCaptureDevice: AVCaptureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)

        do {
            let videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
            if self.captureSession.canAddInput(videoInput) {
                self.captureSession.addInput(videoInput)
            } else {
                print("Could not add video input")
            }

            let metadataOutput = AVCaptureMetadataOutput()
            if self.captureSession.canAddOutput(metadataOutput) {
                self.captureSession.addOutput(metadataOutput)
                metadataOutput.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
                metadataOutput.metadataObjectTypes = [AVMetadataObjectTypeQRCode, AVMetadataObjectTypeEAN13Code]
            } else {
                print("Could not add metadata output")
            }

            let previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
            previewLayer.frame = CGRectMake(20, 40, 500, 100)
            self.view.layer.addSublayer(previewLayer)
            self.captureSession.startRunning()
        } catch let error as NSError {
            print("Error while creating video input device: \(error.localizedDescription)")
        }
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]!, fromConnection connection: AVCaptureConnection!) {
        for metadata in metadataObjects {
            let readableObject = metadata as! AVMetadataMachineReadableCodeObject
            let code = readableObject.stringValue
            if !code.isEmpty {
                self.captureSession.stopRunning()
                self.dismissViewControllerAnimated(true, completion: nil)
                self.delegate?.barcodeReaded(code)
            }
        }
    }
}
Set the preview layer frame in viewDidAppear or in viewWillLayoutSubviews, once the view's final bounds are known.
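A minimal sketch of that suggestion in the question's Swift 2 style, assuming the preview layer is kept in a property instead of a local constant:

var previewLayer: AVCaptureVideoPreviewLayer?

override func viewWillLayoutSubviews() {
    super.viewWillLayoutSubviews()
    // The view has its final bounds here, so the custom frame is applied reliably.
    previewLayer?.frame = CGRectMake(20, 40, 500, 100)
}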
