I need to fix the screen capture size in landscape orientation, but my code does not change the size of the capture preview. I set previewLayer.frame = CGRectMake(20, 40, 500, 100), yet the preview shown on screen (see the picture below) does not match that rect; it comes out much smaller than CGRectMake(20, 40, 500, 100).
Please suggest an idea or example code for giving the preview a custom size. Thank you.
Here is my code:
import UIKit
import AVFoundation

protocol BarcodeDelegate {
    func barcodeReaded(barcode: String)
}

class barcodeCapViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {

    var delegate: BarcodeDelegate?
    var captureSession: AVCaptureSession!
    var code: String?

    override func viewDidLoad() {
        super.viewDidLoad()

        self.captureSession = AVCaptureSession()
        let videoCaptureDevice: AVCaptureDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)

        do {
            let videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
            if self.captureSession.canAddInput(videoInput) {
                self.captureSession.addInput(videoInput)
            } else {
                print("Could not add video input")
            }

            let metadataOutput = AVCaptureMetadataOutput()
            if self.captureSession.canAddOutput(metadataOutput) {
                self.captureSession.addOutput(metadataOutput)
                metadataOutput.setMetadataObjectsDelegate(self, queue: dispatch_get_main_queue())
                metadataOutput.metadataObjectTypes = [AVMetadataObjectTypeQRCode, AVMetadataObjectTypeEAN13Code]
            } else {
                print("Could not add metadata output")
            }

            let previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
            previewLayer.frame = CGRectMake(20, 40, 500, 100)
            self.view.layer.addSublayer(previewLayer)

            self.captureSession.startRunning()
        } catch let error as NSError {
            print("Error while creating video input device: \(error.localizedDescription)")
        }
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [AnyObject]!, fromConnection connection: AVCaptureConnection!) {
        for metadata in metadataObjects {
            let readableObject = metadata as! AVMetadataMachineReadableCodeObject
            let code = readableObject.stringValue
            if !code.isEmpty {
                self.captureSession.stopRunning()
                self.dismissViewControllerAnimated(true, completion: nil)
                self.delegate?.barcodeReaded(code)
            }
        }
    }
}
Set the preview layer's frame in viewDidAppear or in viewWillLayoutSubviews instead of viewDidLoad; by then the view has its final size for the current orientation.
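A minimal sketch of that, assuming the preview layer is stored in a property instead of a local in viewDidLoad:

var previewLayer: AVCaptureVideoPreviewLayer!

override func viewWillLayoutSubviews() {
    super.viewWillLayoutSubviews()
    // By this point the view has been laid out for the current orientation,
    // so the frame assigned here is the one that actually ends up on screen.
    previewLayer?.frame = CGRect(x: 20, y: 40, width: 500, height: 100)
}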
I have an app that uses a CGRect(x: 0, y: 0, width: 335, height: 150) to show the camera preview for barcode scanning. However, when a barcode is presented outside that CGRect (off the visible preview), it still gets scanned. How can I limit the scanning area to the CGRect of my preview layer? This is using Vision.
let captureSession = AVCaptureSession()
var previewLayer: AVCaptureVideoPreviewLayer!
var activeInput: AVCaptureDeviceInput!

lazy var detectBarcodeRequest = VNDetectBarcodesRequest { request, error in
    guard error == nil else {
        print("Barcode Error: \(String(describing: error?.localizedDescription))")
        return
    }
    self.processBarCode(request)
}

override func viewDidLoad() {
    super.viewDidLoad()
    setupSession()
    setupPreview()
    startSession()
}

func setupSession() {
    guard let camera = AVCaptureDevice.default(for: .video) else {
        return
    }

    do {
        let videoInput = try AVCaptureDeviceInput(device: camera)
        for input in [videoInput] {
            if captureSession.canAddInput(input) {
                captureSession.addInput(input)
            }
        }
        activeInput = videoInput
    } catch {
        print("Error setting device input: \(error)")
        return
    }

    let captureOutput = AVCaptureVideoDataOutput()
    captureOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: DispatchQoS.QoSClass.default))
    captureSession.addOutput(captureOutput)
}

func setupPreview() {
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    previewLayer.videoGravity = .resizeAspectFill
    previewLayer.connection?.videoOrientation = .portrait
    previewLayer.frame = CGRect(x: 0, y: 0, width: 335, height: 150)
    view.layer.insertSublayer(previewLayer, at: 0)
} //setupPreview

func startSession() {
    if !captureSession.isRunning {
        DispatchQueue.global(qos: .default).async { [weak self] in
            self?.captureSession.startRunning()
        }
    }
}

// MARK: - processBarCode
func processBarCode(_ request: VNRequest) {
    DispatchQueue.main.async {
        guard let results = request.results as? [VNBarcodeObservation] else {
            return
        }
        if let payload = results.first?.payloadStringValue, let symbology = results.first?.symbology {
            print("payload is \(payload) \(symbology) ")
        }
    }
} //processBarCode
Edit:
// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate
extension CameraViewController: AVCaptureVideoDataOutputSampleBufferDelegate {

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

        let imageRequestHandler = VNImageRequestHandler(
            cvPixelBuffer: pixelBuffer,
            orientation: .up)

        do {
            try imageRequestHandler.perform([detectBarcodeRequest])
        } catch {
            print(error)
        }
    } //captureOutput
} //extension

extension AVCaptureVideoPreviewLayer {
    func rectOfInterestConverted(parentRect: CGRect, fromLayerRect: CGRect) -> CGRect {
        let parentWidth = parentRect.width
        let parentHeight = parentRect.height
        let newX = (parentWidth - fromLayerRect.maxX) / parentWidth
        let newY = 1 - (parentHeight - fromLayerRect.minY) / parentHeight
        let width = 1 - (fromLayerRect.minX / parentWidth + newX)
        let height = (fromLayerRect.maxY / parentHeight) - newY
        return CGRect(x: newX, y: newY, width: width, height: height)
    }
}
Usage:
if let rect = videoPreviewLayer?.rectOfInterestConverted(parentRect: self.view.frame, fromLayerRect: scanAreaView.frame) {
    captureMetadataOutput.rectOfInterest = rect
}
In func captureOutput(_:didOutput:from:) you are most likely passing the whole image into VNImageRequestHandler.
Instead, you need to crop the image to your visible rect.
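A rough sketch of the cropping idea, assuming you keep the question's previewLayer around and want to limit detection to the part of the frame it displays (the exact mapping also depends on the connection's orientation, so treat this as a starting point):

func visibleRect(in pixelBuffer: CVPixelBuffer) -> CGRect {
    // Normalized rect (origin top-left) of the part of the frame the preview displays.
    let normalized = previewLayer.metadataOutputRectConverted(fromLayerRect: previewLayer.bounds)
    let width = CGFloat(CVPixelBufferGetWidth(pixelBuffer))
    let height = CGFloat(CVPixelBufferGetHeight(pixelBuffer))
    // Core Image uses a lower-left origin, so flip the y axis for the crop rect.
    return CGRect(x: normalized.minX * width,
                  y: (1 - normalized.maxY) * height,
                  width: normalized.width * width,
                  height: normalized.height * height)
}

// In captureOutput(_:didOutput:from:), crop before handing the frame to Vision:
let cropped = CIImage(cvPixelBuffer: pixelBuffer).cropped(to: visibleRect(in: pixelBuffer))
let handler = VNImageRequestHandler(ciImage: cropped, orientation: .up)
try? handler.perform([detectBarcodeRequest])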
Another approach is to restrict Vision to only the visible part of the image via regionOfInterest, as @HurricaneOnTheMoon proposed below.
You can use the regionOfInterest property of VNDetectBarcodesRequest to set up a detection region.
The default value is { { 0, 0 }, { 1, 1 } } according to the Apple documentation.
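For example, to confine detection to what the question's preview layer actually shows (a sketch only; the y flip is needed because Vision's normalized space has a lower-left origin while the metadata-output space has a top-left origin):

// The whole visible preview, converted from layer coordinates to normalized capture coordinates.
let metadataRect = previewLayer.metadataOutputRectConverted(fromLayerRect: previewLayer.bounds)

// Flip the y coordinate for Vision's lower-left origin before assigning it.
detectBarcodeRequest.regionOfInterest = CGRect(
    x: metadataRect.minX,
    y: 1 - metadataRect.maxY,
    width: metadataRect.width,
    height: metadataRect.height)

Note that any barcode observations Vision returns are then normalized relative to that region of interest.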
I'd like to scan a QR code through the camera. Scanning the QR code itself is no problem,
but I want to scan only within a certain area. How can I do this?
Currently a QR code is recognized anywhere in the entire camera view.
import Foundation
import UIKit
import AVFoundation

class ScannerViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {

    @IBOutlet weak var qrcodeView: UIView!
    @IBOutlet weak var mainText: UITextView!
    @IBOutlet weak var headerBar: UINavigationBar!

    var captureSession: AVCaptureSession!
    var previewLayer: AVCaptureVideoPreviewLayer!

    override func viewDidLoad() {
        super.viewDidLoad()

        view.backgroundColor = UIColor.black
        self.qrcodeView.backgroundColor = UIColor.black.withAlphaComponent(0.5)

        captureSession = AVCaptureSession()
        guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }

        let videoInput: AVCaptureDeviceInput
        do {
            videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
        } catch {
            return
        }

        if (captureSession.canAddInput(videoInput)) {
            captureSession.addInput(videoInput)
        } else {
            failed()
            return
        }

        let metadataOutput = AVCaptureMetadataOutput()
        if (captureSession.canAddOutput(metadataOutput)) {
            captureSession.addOutput(metadataOutput)
            metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
            metadataOutput.metadataObjectTypes = [.qr]
        } else {
            failed()
            return
        }

        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.frame = view.layer.bounds
        previewLayer.videoGravity = .resizeAspectFill
        view.layer.insertSublayer(previewLayer, at: 0)

        captureSession.startRunning()
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        if (captureSession?.isRunning == false) {
            captureSession.startRunning()
        }
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        if (captureSession?.isRunning == true) {
            captureSession.stopRunning()
        }
    }

    func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
        // let scanRect = CGRect(x: 0, y: 0, width: 200, height: 200)
        // let rectOfInterest = layer.metadataOutputRectConverted(fromLayerRect: scanRect)
        // metadataObjects.rectOfInterest = rectOfInterest
        captureSession.stopRunning()

        if let metadataObject = metadataObjects.first {
            guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
            guard let stringValue = readableObject.stringValue else { return }
            AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
            found(code: stringValue)
        } else {
            print("not support")
        }
    }

    func found(code: String) {
        print(code)
        self.dismiss(animated: true, completion: nil)
    }

    func failed() {
        captureSession = nil
    }
}
Like the picture above, I would like to scan only within the square area. I really need this.
Thanks in advance.
You can use the rectOfInterest property to achieve this.
Add the following code after captureSession.startRunning().
First you need to convert the rect using:
let rectOfInterest = videoPreviewLayer?.metadataOutputRectConverted(fromLayerRect: self.viewAreaOfScan.frame) // videoPreviewLayer is the AVCaptureVideoPreviewLayer
After that you can assign it to the metadata output's rectOfInterest:
metadataOutput.rectOfInterest = rectOfInterest ?? CGRect(x: 0, y: 0, width: 1, height: 1)
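In the question's viewDidLoad that would look roughly like this, assuming qrcodeView outlines the square scanning area (the conversion needs the preview layer's final frame, so the session should already be set up):

captureSession.startRunning()

// Convert the square's frame from view coordinates into the normalized
// coordinate space the metadata output expects, then restrict scanning to it.
let rectOfInterest = previewLayer.metadataOutputRectConverted(fromLayerRect: qrcodeView.frame)
metadataOutput.rectOfInterest = rectOfInterest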
Using Xcode 9 Beta for iOS 11:
I've followed a walkthrough on how to extract frames from an AVCaptureSession, but have not been able to get the capture to appear. While I have included the camera permission in the Info.plist file, the app seems to stall after opening, and I get the following errors:
[App Name] does not have sandbox access for frZQaeyWLUvLjeuEK43hmg and IS NOT appropriately entitled
[MC] System group container for systemgroup.com.apple.configurationprofiles path is /private/var/containers/Shared/SystemGroup/systemgroup.com.apple.configurationprofiles
[MC] Reading from public effective user settings.
Here is the code for FrameExtractor.swift for reference:
import UIKit
import AVFoundation

protocol FrameExtractorDelegate: class {
    func captured(image: UIImage)
}

class FrameExtractor: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {

    private let position = AVCaptureDevice.Position.front
    private let quality = AVCaptureSession.Preset.medium

    private var permissionGranted = false
    private let sessionQueue = DispatchQueue(label: "session queue")
    private let captureSession = AVCaptureSession()
    private let context = CIContext()

    weak var delegate: FrameExtractorDelegate?

    override init() {
        super.init()
        checkPermission()
        sessionQueue.async { [unowned self] in
            self.configureSession()
            self.captureSession.startRunning()
        }
    }

    // MARK: AVSession configuration
    private func checkPermission() {
        switch AVCaptureDevice.authorizationStatus(for: AVMediaType.video) {
        case .authorized:
            permissionGranted = true
        case .notDetermined:
            requestPermission()
        default:
            permissionGranted = false
        }
    }

    private func requestPermission() {
        sessionQueue.suspend()
        AVCaptureDevice.requestAccess(for: AVMediaType.video) { [unowned self] granted in
            self.permissionGranted = granted
            self.sessionQueue.resume()
        }
    }

    private func configureSession() {
        guard permissionGranted else { return }
        captureSession.sessionPreset = quality
        guard let captureDevice = selectCaptureDevice() else { return }
        guard let captureDeviceInput = try? AVCaptureDeviceInput(device: captureDevice) else { return }
        guard captureSession.canAddInput(captureDeviceInput) else { return }
        captureSession.addInput(captureDeviceInput)

        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "sample buffer"))
        guard captureSession.canAddOutput(videoOutput) else { return }
        captureSession.addOutput(videoOutput)

        guard let connection = videoOutput.connection(with: AVFoundation.AVMediaType.video) else { return }
        guard connection.isVideoOrientationSupported else { return }
        guard connection.isVideoMirroringSupported else { return }
        connection.videoOrientation = .portrait
        connection.isVideoMirrored = position == .front
    }

    private func selectCaptureDevice() -> AVCaptureDevice? {
        return AVCaptureDevice.default(for: AVMediaType.video)
        // return AVCaptureDevice.devices().filter {
        //     ($0 as AnyObject).hasMediaType(AVMediaType.video) &&
        //     ($0 as AnyObject).position == position
        // }.first
    }

    // MARK: Sample buffer to UIImage conversion
    private func imageFromSampleBuffer(sampleBuffer: CMSampleBuffer) -> UIImage? {
        guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
        let ciImage = CIImage(cvPixelBuffer: imageBuffer)
        guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return nil }
        return UIImage(cgImage: cgImage)
    }

    // MARK: AVCaptureVideoDataOutputSampleBufferDelegate
    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        print("Got a Frame!")
        guard let uiImage = imageFromSampleBuffer(sampleBuffer: sampleBuffer) else { return }
        DispatchQueue.main.async { [unowned self] in
            self.delegate?.captured(image: uiImage)
        }
    }
}
And for ViewController.swift:
import UIKit

class ViewController: UIViewController, FrameExtractorDelegate {

    @IBOutlet var imageView: UIImageView!
    var frameExtractor: FrameExtractor!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
        frameExtractor = FrameExtractor()
        frameExtractor.delegate = self
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    func captured(image: UIImage) {
        imageView.image = image
    }
}
The issue is the signature of captureOutput. This is the new signature in iOS 11 for captureOutput in AVCaptureVideoDataOutputSampleBufferDelegate:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let uiImage = imageFromSampleBuffer(sampleBuffer: sampleBuffer) else { return }
    DispatchQueue.main.async { [unowned self] in
        self.delegate?.captured(image: uiImage)
    }
}
Notice the change from "didOutputSampleBuffer sampleBuffer:" to "didOutput sampleBuffer:"; with the old signature the delegate method is never called, so no frames are delivered.
I am following this link: How to apply filter to Video real-time using Swift. For whatever reason the image orientation is rotated 90 degrees to the left. I have tried to rectify this by setting the orientation to Up, but it still appears the same. Does anyone have any idea why this is? I'm not sure whether it is the image, the preview layer, or the image view that is causing this.
Here is the code:
import UIKit
import AVFoundation
import CoreMedia

let noirFilter = CIFilter(name: "CIPhotoEffectNoir")!
let sepiaFilter = CIFilter(name: "CISepiaTone")!
let vignetteEffect = CIFilter(name: "CIVignetteEffect")!
let Filters = [noirFilter, sepiaFilter, vignetteEffect]

class TestViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {

    @IBOutlet weak var imageView: UIImageView!
    @IBOutlet weak var Photo: UIButton!

    var sessionQueue: dispatch_queue_t!
    var stillImageOutput: AVCaptureStillImageOutput?
    var videoDeviceInput: AVCaptureDeviceInput?
    var buttonTapped = false

    override func viewDidLoad() {
        super.viewDidLoad()

        sessionQueue = dispatch_queue_create("com.bradleymackey.Backchat.sessionQueue", DISPATCH_QUEUE_SERIAL)

        let captureSession = AVCaptureSession()
        captureSession.sessionPreset = AVCaptureSessionPresetPhoto
        let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)

        do {
            let input = try AVCaptureDeviceInput(device: backCamera)
            captureSession.addInput(input)
            self.videoDeviceInput = input
        } catch {
            print("can't access camera")
            return
        }

        // although we don't use this, it's required to get captureOutput invoked
        let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        view.layer.addSublayer(previewLayer)

        imageView.contentMode = UIViewContentMode.ScaleAspectFill

        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: dispatch_queue_create("sample buffer delegate", DISPATCH_QUEUE_SERIAL))
        if captureSession.canAddOutput(videoOutput) {
            captureSession.addOutput(videoOutput)
        }

        captureSession.startRunning()

        let stillImageOutput: AVCaptureStillImageOutput = AVCaptureStillImageOutput()
        if captureSession.canAddOutput(stillImageOutput) {
            stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
            captureSession.addOutput(stillImageOutput)
            self.stillImageOutput = stillImageOutput
        }
    }

    override func viewWillLayoutSubviews() {
    }

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
        let filter = Filters[1]
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        let cameraImage = CIImage(CVPixelBuffer: pixelBuffer!)
        filter.setValue(cameraImage, forKey: kCIInputImageKey)
        let filteredImage = UIImage(CIImage: filter.valueForKey(kCIOutputImageKey) as! CIImage)
        var newImage = UIImage(CIImage: filter.valueForKey(kCIOutputImageKey) as! CIImage, scale: 1.0, orientation: .Up)
        dispatch_async(dispatch_get_main_queue()) {
            self.imageView.image = newImage
        }
    }

    @IBAction func snapStill(sender: UIButton) {
        print("snapStillImage")
        let previewController = PreviewViewController(nibName: "PreviewViewController", bundle: nil)
        previewController.media = Media.Photo(image: imageView.image!)
        previewController.isFrontCamera = false
        self.presentViewController(previewController, animated: true, completion: nil)
    }

    override func viewDidLayoutSubviews() {
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
}
The reason for this is that the orientation of the capture connection is not set.
You need to set the orientation in captureOutput():
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
{
    connection.videoOrientation = .portrait
    ...
This should solve your problem.
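Alternatively, you can set the orientation once when the video data output is configured instead of on every frame; a sketch in current Swift syntax, using the question's videoOutput and captureSession names:

if captureSession.canAddOutput(videoOutput) {
    captureSession.addOutput(videoOutput)
    // Configure the connection once, after the output has been added to the session.
    if let connection = videoOutput.connection(with: .video),
       connection.isVideoOrientationSupported {
        connection.videoOrientation = .portrait
    }
}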
I am working in Swift. My requirement is to create a rectangular area on the camera, where only the portion inside the rectangle is captured and the remaining portion is displayed as blurred.
I tried many links, but most of them are in Obj-C. I know I have to add a UI layer over the AVCapture layer.
This link [Click Here] helped me, but I could not achieve my objective.
I also tried reducing the size of the image view in the storyboard, but in that case the camera squeezes the whole image into that small image view.
Here is a sample image.
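A rough sketch of that overlay idea in current Swift, using a dimming view with a clear rectangular hole (a true blur would use UIVisualEffectView instead of the tinted view; previewView and scanRect are assumed names):

// Semi-transparent overlay covering the whole camera preview.
let overlay = UIView(frame: previewView.bounds)
overlay.backgroundColor = UIColor.black.withAlphaComponent(0.5)

// One path covering the overlay, plus the rectangle that should stay clear.
let path = UIBezierPath(rect: overlay.bounds)
path.append(UIBezierPath(rect: scanRect))

// With the even-odd fill rule the inner rectangle becomes a hole in the mask.
let mask = CAShapeLayer()
mask.path = path.cgPath
mask.fillRule = .evenOdd
overlay.layer.mask = mask

previewView.addSubview(overlay)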
Here is my existing code for the camera:
class VideoFeedMicr: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {

    let outputQueue = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL)

    let device: AVCaptureDevice? = {
        let devices = AVCaptureDevice.devicesWithMediaType(AVMediaTypeVideo) as! [AVCaptureDevice]
        var camera: AVCaptureDevice? = nil
        for device in devices {
            if device.position == .Back {
                camera = device
            }
        }
        return camera
    }()

    var input: AVCaptureDeviceInput? = nil
    var delegate: VideoFeedDelegateMicr? = nil

    let session: AVCaptureSession = {
        let session = AVCaptureSession()
        session.sessionPreset = AVCaptureSessionPresetHigh
        return session
    }()

    let videoDataOutput: AVCaptureVideoDataOutput = {
        let output = AVCaptureVideoDataOutput()
        output.videoSettings = [ kCVPixelBufferPixelFormatTypeKey: NSNumber(unsignedInt: kCMPixelFormat_32BGRA) ]
        output.alwaysDiscardsLateVideoFrames = true
        return output
    }()

    func start() throws {
        var error: NSError! = NSError(domain: "Migrator", code: 0, userInfo: nil)
        do {
            try configure()
            session.startRunning()
            return
        } catch let error1 as NSError {
            error = error1
        }
        throw error
    }

    func stop() {
        session.stopRunning()
    }

    private func configure() throws {
        var error: NSError! = NSError(domain: "Migrator", code: 0, userInfo: nil)
        do {
            let maybeInput: AnyObject = try AVCaptureDeviceInput(device: device!)
            input = maybeInput as? AVCaptureDeviceInput
            if session.canAddInput(input) {
                session.addInput(input)
                videoDataOutput.setSampleBufferDelegate(self, queue: outputQueue)
                if session.canAddOutput(videoDataOutput) {
                    session.addOutput(videoDataOutput)
                    let connection = videoDataOutput.connectionWithMediaType(AVMediaTypeVideo)
                    connection.videoOrientation = AVCaptureVideoOrientation.LandscapeRight
                    return
                } else {
                    print("Video output error.")
                }
            } else {
                print("Video input error. Maybe unauthorised or no camera.")
            }
        } catch let error1 as NSError {
            error = error1
            print("Failed to start capturing video with error: \(error)")
        }
        throw error
    }

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
        // Update the delegate
        if delegate != nil {
            delegate!.videoFeedMicr(self, didUpdateWithSampleBuffer: sampleBuffer)
        }
    }
}
Here is where I call it:
class ViewMicrScanactivity: UIViewController, VideoFeedDelegateMicr {

    // @IBOutlet weak var button: UIButton!
    @IBOutlet weak var button: UIButton!
    // @IBOutlet weak var imageView: UIImageView!

    let feed: VideoFeedMicr = VideoFeedMicr()
    var chequefound: Bool = false
    var accountnumber = ""
    var amountlimit = ""

    @IBOutlet weak var chequeimage: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        let value = UIInterfaceOrientation.LandscapeRight.rawValue
        UIDevice.currentDevice().setValue(value, forKey: "orientation")
    }

    override func shouldAutorotate() -> Bool {
        return true
    }

    override func awakeFromNib() {
        super.awakeFromNib()
        feed.delegate = self
    }

    override func viewWillAppear(animated: Bool) {
        super.viewWillAppear(animated)
        startVideoFeed()
    }

    override func viewDidDisappear(animated: Bool) {
        super.viewDidDisappear(animated)
        feed.stop()
    }

    func startVideoFeed() {
        do {
            try feed.start()
            print("Video started.")
        } catch {
            // alert?
            // need to look into device permissions
        }
    }

    func videoFeedMicr(videoFeed: VideoFeedMicr, didUpdateWithSampleBuffer sampleBuffer: CMSampleBuffer!) {
        let filter = FaceObscurationFilterMicr(sampleBuffer: sampleBuffer)
        if !chequefound {
            chequefound = filter.process()
            dispatch_async(dispatch_get_main_queue(), { () -> Void in
                self.chequeimage.image = filter.inputuiimage!
                if self.chequefound {
                    filter.cropmicr = filter.cropToBounds(filter.inputuiimage!,
                                                          X: 0.0,
                                                          Y: Double(filter.inputuiimage!.size.height) - Double(90.0),
                                                          width: Double(filter.inputuiimage!.size.width),
                                                          height: Double(60.0))
                    self.chequeimage.image = filter.cropmicr
                    // let image = UIImage(named: filter.cropmicr)
                    // let scaledImage = scaleImage(image!, maxDimension: 640)
                    self.performImageRecognitionnew(filter.cropmicr!)
                }
                // self.chequeimage.image = filter.cropmicr!
            })
        } else {
            print("chequefound = true")
        }
    }
}