I'm using AVCaptureDevice API for scanning barcode and it works very well on iPhone, but very similar code I have doesn't work on iPad and I'm not quite sure why (not detecting any barcode at all). The main differences are the size of scan area, position and orientation. I tested using iPhone 12 mini (iOS 15 beta) and the original iPad Pro 9.7" (iOS 14.6). Not sure if that matters.
Below is the code for the scanner. Please let me know if you noticed something that should be changed.
import Foundation
import AVFoundation
import UIKit
class ScannerViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!
var barcodeCallback: (String) -> Void
var cameraScanDismissedCallback: (Bool) -> Void
var scanned = false
var currentDevice: AVCaptureDevice!
var scanRectView: UIView!
init(barcodeCallback: #escaping (String) -> Void, cameraScanDismissedCallback: #escaping (Bool) -> Void) {
self.barcodeCallback = barcodeCallback;
self.cameraScanDismissedCallback = cameraScanDismissedCallback;
super.init(nibName: nil, bundle: nil)
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
override func viewDidLoad() {
super.viewDidLoad()
view.backgroundColor = UIColor.black
captureSession = AVCaptureSession()
guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
let videoInput: AVCaptureDeviceInput
do {
videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
} catch {
return
}
if (captureSession.canAddInput(videoInput)) {
captureSession.addInput(videoInput)
} else {
failed()
return
}
let metadataOutput = AVCaptureMetadataOutput()
if (captureSession.canAddOutput(metadataOutput)) {
captureSession.addOutput(metadataOutput)
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.metadataObjectTypes = [.qr, .ean8, .ean13, .pdf417, .code128, .aztec, .code39, .code39Mod43, .code93, .dataMatrix, .face, .interleaved2of5, .itf14, .upce]
} else {
failed()
return
}
let windowSize = UIScreen.main.bounds.size
var scanSize: CGSize!;
var scanRect: CGRect!;
if(UIDevice.current.userInterfaceIdiom == .pad){
scanSize = CGSize(width:windowSize.width*1/3, height:windowSize.width*1/7);
scanRect = CGRect(x: UIScreen.main.bounds.midX - scanSize.width/2,
y: UIScreen.main.bounds.midY - scanSize.height/2,
width:scanSize.width, height:scanSize.height);
}else{
scanSize = CGSize(width:windowSize.width*2/3, height:windowSize.width*1/3);
scanRect = CGRect(x: UIScreen.main.bounds.midX - scanSize.width/2,
y: UIScreen.main.bounds.midY - scanSize.height/2,
width:scanSize.width, height:scanSize.height);
}
scanRect = CGRect(x:scanRect.origin.y/windowSize.height,
y:scanRect.origin.x/windowSize.width,
width:scanRect.size.height/windowSize.height,
height:scanRect.size.width/windowSize.width);
metadataOutput.rectOfInterest = scanRect
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
if(UIDevice.current.userInterfaceIdiom == .pad){
let orientation: UIDeviceOrientation = UIDevice.current.orientation
previewLayer.connection?.videoOrientation = {
switch (orientation) {
case .faceUp:
return .landscapeLeft
case .portrait:
return .portrait
case .landscapeRight:
return .landscapeLeft
case .landscapeLeft:
return .landscapeRight
default:
return .portrait
}
}()
}
previewLayer.frame = view.layer.bounds
previewLayer.videoGravity = .resizeAspectFill
view.layer.addSublayer(previewLayer)
scanRectView = UIView();
view.addSubview(self.scanRectView)
scanRectView.frame = CGRect(x:0, y:0, width: scanSize.width,
height: scanSize.height);
if(UIDevice.current.userInterfaceIdiom == .pad){
scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX - scanSize.width/2,
y:UIScreen.main.bounds.midY - scanSize.height/2)
}else{
scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX,
y:UIScreen.main.bounds.midY)
}
scanRectView.layer.borderColor = UIColor.yellow.cgColor
scanRectView.layer.borderWidth = 5;
currentDevice = videoCaptureDevice
captureSession.startRunning()
toggleTorch(on: true)
}
func toggleTorch(on: Bool) {
guard let device = currentDevice else { return }
if device.hasTorch {
do {
try device.lockForConfiguration()
if(UIDevice.current.userInterfaceIdiom == .pad){
device.videoZoomFactor = 1.3
}else{
device.videoZoomFactor = 1.5
}
if on == true {
device.torchMode = .on
} else {
device.torchMode = .off
}
device.unlockForConfiguration()
} catch {
print("Torch could not be used")
}
} else {
print("Torch is not available")
}
}
func failed() {
let ac = UIAlertController(title: "Scanning not supported", message: "Your device does not support scanning a code from an item. Please use a device with a camera.", preferredStyle: .alert)
ac.addAction(UIAlertAction(title: "OK", style: .default))
present(ac, animated: true)
captureSession = nil
toggleTorch(on: false)
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
if (captureSession?.isRunning == false) {
captureSession.startRunning()
toggleTorch(on: true)
}
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
if (captureSession?.isRunning == true) {
captureSession.stopRunning()
toggleTorch(on: false)
}
cameraScanDismissedCallback(scanned)
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
captureSession.stopRunning()
if let metadataObject = metadataObjects.first {
guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
guard let stringValue = readableObject.stringValue else { return }
AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
found(code: stringValue)
scanned = true
}
dismiss(animated: true)
}
func found(code: String) {
print(code)
barcodeCallback(code)
}
override var prefersStatusBarHidden: Bool {
return true
}
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
if(UIDevice.current.userInterfaceIdiom == .pad){
return .landscape
}else{
return .portrait
}
}
}
Just in case anyone ran into a similar issue, check for the following:
Orientation of the video
The position of AVCaptureMetadataOutput.rectOfInterest
I'm still having an issue with the rectOfInterest not positioned at the center, but it works. Once I can figure out how to center it, I will post the solution here.
it isn't center probably because of your navigationBar.
Try set rectOfInterest as below, remember that camer use different coordinate than UIView, posX and posY are in UIView coordinates
let aimRect = CGRect(x: (posY - navBar.height) / UIScreen.main.bounds.height,
y: posX / UIScreen.main.bounds.width,
width: rectHeight / UIScreen.main.bounds.height,
height: rectWidth / UIScreen.main.bounds.width)
Related
I have a question about scan 1D barcode. Why I set the yellow border about scanning region view, only I put it in the middle of the region and get the result successfully?
It works fine when I put my 1D barcode in the green region by the following image. When I put the red region, and nothing happens. My app orientation only right and left.
What's wrong with my code?
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate, UIAlertViewDelegate {
var scanRectView:UIView!
var device:AVCaptureDevice!
var input:AVCaptureDeviceInput!
var output:AVCaptureMetadataOutput!
var session:AVCaptureSession!
var preview:AVCaptureVideoPreviewLayer!
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewDidLayoutSubviews() {
self.configureVideoOrientation()
}
fileprivate func configureVideoOrientation() {
let previewLayer = self.preview
if let connection = previewLayer?.connection {
let orientation = UIDevice.current.orientation
if connection.isVideoOrientationSupported, let videoOrientation = AVCaptureVideoOrientation(rawValue: orientation.rawValue) {
previewLayer?.frame = self.view.bounds
connection.videoOrientation = videoOrientation
}
}
}
#IBAction func btnClicked(_ sender: Any) {
do{
self.device = AVCaptureDevice.default(for: AVMediaType.video)
self.input = try AVCaptureDeviceInput(device: device)
self.output = AVCaptureMetadataOutput()
output.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
self.session = AVCaptureSession()
if UIScreen.main.bounds.size.height < 500 {
self.session.sessionPreset = AVCaptureSession.Preset.vga640x480
}else{
self.session.sessionPreset = AVCaptureSession.Preset.high
}
self.session.addInput(self.input)
self.session.addOutput(self.output)
self.output.metadataObjectTypes = [.qr, .ean8, .ean13, .pdf417, .code128, .aztec, .code39, .code39Mod43, .code93, .dataMatrix, .face, .interleaved2of5, .itf14, .upce]
let windowSize = UIScreen.main.bounds.size
let scanSize = CGSize(width:windowSize.width*1/3, height:windowSize.width*1/3)
var scanRect = CGRect(x:(windowSize.width-scanSize.width)/2,
y:(windowSize.height-scanSize.height)/2,
width:scanSize.width, height:scanSize.height)
scanRect = CGRect(x:scanRect.origin.y/windowSize.height,
y:scanRect.origin.x/windowSize.width,
width:scanRect.size.height/windowSize.height,
height:scanRect.size.width/windowSize.width);
self.output.rectOfInterest = scanRect
self.preview = AVCaptureVideoPreviewLayer(session:self.session)
self.preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.preview.frame = UIScreen.main.bounds
self.view.layer.insertSublayer(self.preview, at:0)
self.scanRectView = UIView();
self.view.addSubview(self.scanRectView)
self.scanRectView.frame = CGRect(x:0, y:0, width:scanSize.width,
height:scanSize.height);
self.scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX,
y:UIScreen.main.bounds.midY)
self.scanRectView.layer.borderColor = UIColor.yellow.cgColor
self.scanRectView.layer.borderWidth = 5;
self.session.startRunning()
do {
try self.device!.lockForConfiguration()
} catch _ {
NSLog("Error: lockForConfiguration.");
}
self.device!.videoZoomFactor = 1.5
self.device!.unlockForConfiguration()
}catch _ {
}
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
var stringValue:String?
if metadataObjects.count > 0 {
let metadataObject = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
stringValue = metadataObject.stringValue
if stringValue != nil{
self.session.stopRunning()
}
}
self.session.stopRunning()
}
}
I'd like to scan QRcode through the camera. There is no problem scanning QRcode,
but I want to scan only certain areas. How can I do this?
I am currently aware of the QR code anywhere in the entire camera area.
import Foundation
import UIKit
import AVFoundation
class ScannerViewController : UIViewController, AVCaptureMetadataOutputObjectsDelegate {
#IBOutlet weak var qrcodeView: UIView!
#IBOutlet weak var mainText: UITextView!
#IBOutlet weak var headerBar: UINavigationBar!
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!
override func viewDidLoad() {
super.viewDidLoad()
view.backgroundColor = UIColor.black
self.qrcodeView.backgroundColor = UIColor.black.withAlphaComponent(0.5)
captureSession = AVCaptureSession()
guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
let videoInput: AVCaptureDeviceInput
do {
videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
} catch {
return
}
if (captureSession.canAddInput(videoInput)) {
captureSession.addInput(videoInput)
} else {
failed()
return
}
let metadataOutput = AVCaptureMetadataOutput()
if (captureSession.canAddOutput(metadataOutput)) {
captureSession.addOutput(metadataOutput)
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.metadataObjectTypes = [.qr]
} else {
failed()
return
}
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.frame = view.layer.bounds
previewLayer.videoGravity = .resizeAspectFill
view.layer.insertSublayer(previewLayer, at: 0)
captureSession.startRunning()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
if (captureSession?.isRunning == false) {
captureSession.startRunning()
}
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
if (captureSession?.isRunning == true) {
captureSession.stopRunning()
}
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
// let scanRect = CGRect(x: 0, y: 0, width: 200, height: 200)
// let rectOfInterest = layer.metadataOutputRectConverted(fromLayerRect: scanRect)
// metadataObjects.rectOfInterest = rectOfInterest
captureSession.stopRunning()
if let metadataObject = metadataObjects.first {
guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
guard let stringValue = readableObject.stringValue else { return }
AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
found(code: stringValue)
} else {
print("not support")
}
}
func found(code: String) {
print(code)
self.dismiss(animated: true, completion: nil)
}
func failed() {
captureSession = nil
}
}
Like the picture above, I would like to scan only within the square area.
I desperately need this.
Thanks in advance.
You can use rectOfInterest property to achieve this
add following code after captureSession.startRunning()
First you need to convert using rect using
let rectOfInterest = videoPreviewLayer?.metadataOutputRectConverted(fromLayerRect: self.viewAreaOfScan.frame) // videoPreviewLayer is AVCaptureVideoPreviewLayer
after that you can assign it to rectOfInterest of metadataOutput
metadataOutput.rectOfInterest = rectOfInterest ?? CGRect(x: 0, y: 0, width: 1, height: 1)
I have created a custom camera and have implemented below code to crop the taken image, I have shown guides in the preview layer so I want to crop the image which appears in that area.
func imageByCropToRect(rect:CGRect, scale:Bool) -> UIImage {
var rect = rect
var scaleFactor: CGFloat = 1.0
if scale {
scaleFactor = self.scale
rect.origin.x *= scaleFactor
rect.origin.y *= scaleFactor
rect.size.width *= scaleFactor
rect.size.height *= scaleFactor
}
var image: UIImage? = nil;
if rect.size.width > 0 && rect.size.height > 0 {
let imageRef = self.cgImage!.cropping(to: rect)
image = UIImage(cgImage: imageRef!, scale: scaleFactor, orientation: self.imageOrientation)
}
return image!
}
This code just works fine when & give the exact cropped image when the below line of code is commented, though I want the image streaming to be full screen so I have to use the below line of code. The image comes zoomed out sort of.
(self.previewLayer as! AVCaptureVideoPreviewLayer).videoGravity = AVLayerVideoGravity.resizeAspectFill
How do I solve this issue? Is the cropping code wrong?
Here is the full Class code
import UIKit
import AVFoundation
class CameraViewController: UIViewController {
#IBOutlet weak var guideImageView: UIImageView!
#IBOutlet weak var guidesView: UIView!
#IBOutlet weak var cameraPreviewView: UIView!
#IBOutlet weak var cameraButtonView: UIView!
#IBOutlet weak var captureButton: UIButton!
var captureSession = AVCaptureSession()
var previewLayer: CALayer!
var captureDevice: AVCaptureDevice!
/// This will be true when the user clicks on the click photo button.
var takePhoto = false
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
captureSession = AVCaptureSession()
previewLayer = CALayer()
takePhoto = false
requestAuthorization()
}
private func userinteractionToButton(_ interaction: Bool) {
captureButton.isEnabled = interaction
}
/// This function will request authorization, If authorized then start the camera.
private func requestAuthorization() {
switch AVCaptureDevice.authorizationStatus(for: AVMediaType.video) {
case .authorized:
prepareCamera()
case .denied, .restricted, .notDetermined:
AVCaptureDevice.requestAccess(for: AVMediaType.video, completionHandler: { (granted) in
if !Thread.isMainThread {
DispatchQueue.main.async {
if granted {
self.prepareCamera()
} else {
let alert = UIAlertController(title: "unable_to_access_the_Camera", message: "to_enable_access_go_to_setting_privacy_camera_and_turn_on_camera_access_for_this_app", preferredStyle: UIAlertControllerStyle.alert)
alert.addAction(UIAlertAction(title: "ok", style: .default, handler: {_ in
self.navigationController?.popToRootViewController(animated: true)
}))
self.present(alert, animated: true, completion: nil)
}
}
} else {
if granted {
self.prepareCamera()
} else {
let alert = UIAlertController(title: "unable_to_access_the_Camera", message: "to_enable_access_go_to_setting_privacy_camera_and_turn_on_camera_access_for_this_app", preferredStyle: UIAlertControllerStyle.alert)
alert.addAction(UIAlertAction(title: "ok", style: .default, handler: {_ in
self.navigationController?.popToRootViewController(animated: true)
}))
self.present(alert, animated: true, completion: nil)
}
}
})
}
}
/// Will see if the primary camera is avilable, If found will call method which will asign the available device to the AVCaptureDevice.
private func prepareCamera() {
// Resets the session.
self.captureSession.sessionPreset = AVCaptureSession.Preset.photo
if #available(iOS 10.0, *) {
let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices
self.assignCamera(availableDevices)
} else {
// Fallback on earlier versions
// development, need to test this on iOS 8
if let availableDevices = AVCaptureDevice.default(for: AVMediaType.video) {
self.assignCamera([availableDevices])
} else {
self.showAlert()
}
}
}
/// Assigns AVCaptureDevice to the respected the variable, will begin the session.
///
/// - Parameter availableDevices: [AVCaptureDevice]
private func assignCamera(_ availableDevices: [AVCaptureDevice]) {
if availableDevices.first != nil {
captureDevice = availableDevices.first
beginSession()
} else {
self.showAlert()
}
}
/// Configures the camera settings and begins the session, this function will be responsible for showing the image on the UI.
private func beginSession() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput)
} catch {
print(error.localizedDescription)
}
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer = previewLayer
self.cameraPreviewView.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.view.layer.frame
self.previewLayer.frame.origin.y = +self.cameraPreviewView.frame.origin.y
(self.previewLayer as! AVCaptureVideoPreviewLayer).videoGravity = AVLayerVideoGravity.resizeAspectFill
self.previewLayer.masksToBounds = true
self.cameraPreviewView.clipsToBounds = true
captureSession.startRunning()
self.view.bringSubview(toFront: self.cameraPreviewView)
self.view.bringSubview(toFront: self.cameraButtonView)
self.view.bringSubview(toFront: self.guidesView)
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [((kCVPixelBufferPixelFormatTypeKey as NSString) as String):NSNumber(value:kCVPixelFormatType_32BGRA)]
dataOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(dataOutput) {
captureSession.addOutput(dataOutput)
}
captureSession.commitConfiguration()
let queue = DispatchQueue(label: "com.letsappit.camera")
dataOutput.setSampleBufferDelegate(self, queue: queue)
self.userinteractionToButton(true)
}
/// Get the UIImage from the given CMSampleBuffer.
///
/// - Parameter buffer: CMSampleBuffer
/// - Returns: UIImage?
func getImageFromSampleBuffer(buffer:CMSampleBuffer, orientation: UIImageOrientation) -> UIImage? {
if let pixelBuffer = CMSampleBufferGetImageBuffer(buffer) {
let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
let context = CIContext()
let imageRect = CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer))
if let image = context.createCGImage(ciImage, from: imageRect) {
return UIImage(cgImage: image, scale: UIScreen.main.scale, orientation: orientation)
}
}
return nil
}
/// This function will destroy the capture session.
func stopCaptureSession() {
self.captureSession.stopRunning()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
self.captureSession.removeInput(input)
}
}
}
func showAlert() {
let alert = UIAlertController(title: "Unable to access the camera", message: "It appears that either your device doesn't have camera or its broken", preferredStyle: .alert)
alert.addAction(UIAlertAction(title: "cancel", style: .cancel, handler: {_ in
self.navigationController?.dismiss(animated: true, completion: nil)
}))
self.present(alert, animated: true, completion: nil)
}
#IBAction func didTapClick(_ sender: Any) {
userinteractionToButton(false)
takePhoto = true
}
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
if segue.identifier == "showImage" {
let vc = segue.destination as! ShowImageViewController
vc.image = sender as! UIImage
}
}
}
extension CameraViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ captureOutput: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
if connection.isVideoOrientationSupported {
connection.videoOrientation = .portrait
}
if takePhoto {
takePhoto = false
// Rotation should be unlocked to work.
var orientation = UIImageOrientation.up
switch UIDevice.current.orientation {
case .landscapeLeft:
orientation = .left
case .landscapeRight:
orientation = .right
case .portraitUpsideDown:
orientation = .down
default:
orientation = .up
}
if let image = self.getImageFromSampleBuffer(buffer: sampleBuffer, orientation: orientation) {
DispatchQueue.main.async {
let newImage = image.imageByCropToRect(rect: self.guideImageView.frame, scale: true)
self.stopCaptureSession()
self.previewLayer.removeFromSuperlayer()
self.performSegue(withIdentifier: "showImage", sender: newImage)
}
}
}
}
}
Here is the view hierarchy image
It's not clear where the problem is. I would either use the debugger or some print statements to figure out whether the issue is with the image or the view displaying the image. Print out size of the cropped image to make sure it is correct.
Then, print out the image view size in the ShowImageViewController in viewDidAppear to make sure it is correct.
For the correction of zooming out of cropped image, you have to change your crop function to this by using image orientation.
func croppedInRect(rect: CGRect) -> UIImage? {
func rad(_ degree: Double) -> CGFloat {
return CGFloat(degree / 180.0 * .pi)
}
var rectTransform: CGAffineTransform
switch imageOrientation {
case .left:
rectTransform = CGAffineTransform(rotationAngle: rad(90)).translatedBy(x: 0, y: -self.size.height)
case .right:
rectTransform = CGAffineTransform(rotationAngle: rad(-90)).translatedBy(x: -self.size.width, y: 0)
case .down:
rectTransform = CGAffineTransform(rotationAngle: rad(-180)).translatedBy(x: -self.size.width, y: -self.size.height)
default:
rectTransform = .identity
}
rectTransform = rectTransform.scaledBy(x: self.scale, y: self.scale)
var cgImage = self.cgImage
if cgImage == nil{
let ciContext = CIContext()
if let ciImage = self.ciImage{
cgImage = ciContext.createCGImage(ciImage, from: ciImage.extent)
}
}
if let imageRef = cgImage?.cropping(to: rect.applying(rectTransform)){
let result = UIImage(cgImage: imageRef, scale: self.scale, orientation: self.imageOrientation)
return result
}
return nil
}
I want to "stream" the preview layer to my server, however, I only want specific frames to be sent. Basically, I want to take a snapshot of the AVCaptureVideoPreviewLayer, scale it down to 28*28, turn it into an intensity array, and send it to my socket layer where my python backend handles the rest.
Problem here is that AVCapturePhotoOutput's capture function is insanely slow. I can't repeatedly call the function. Not to mention it always makes a camera shutter sound haha.
The other problem is that taking a snapshot of AVCaptureVideoPreviewLayer is really difficult. Using UIGraphicsBeginImageContext almost always returns a blank/clear image.
Help a brother out, thanks!
Basically instead of using AVCaptureVideoPreviewLayer for grabbing frames you should use AVCaptureVideoDataOutputSampleBufferDelegate.
Here is example:
import Foundation
import UIKit
import AVFoundation
protocol CaptureManagerDelegate: class {
func processCapturedImage(image: UIImage)
}
class CaptureManager: NSObject {
internal static let shared = CaptureManager()
weak var delegate: CaptureManagerDelegate?
var session: AVCaptureSession?
override init() {
super.init()
session = AVCaptureSession()
//setup input
let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
let input = try! AVCaptureDeviceInput(device: device)
session?.addInput(input)
//setup output
let output = AVCaptureVideoDataOutput()
output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as AnyHashable: kCVPixelFormatType_32BGRA]
output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
session?.addOutput(output)
}
func statSession() {
session?.startRunning()
}
func stopSession() {
session?.stopRunning()
}
func getImageFromSampleBuffer(sampleBuffer: CMSampleBuffer) ->UIImage? {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return nil
}
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else {
return nil
}
guard let cgImage = context.makeImage() else {
return nil
}
let image = UIImage(cgImage: cgImage, scale: 1, orientation:.right)
CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
return image
}
}
extension CaptureManager: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else {
return
}
delegate?.processCapturedImage(image: outputImage)
}
}
Update: To process images you should implement a processCapturedImage method of the CaptureManagerDelegate protocol in any other class where you want, like:
import UIKit
class ViewController: UIViewController {
#IBOutlet weak var imageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
CaptureManager.shared.statSession()
CaptureManager.shared.delegate = self
}
}
extension ViewController: CaptureManagerDelegate {
func processCapturedImage(image: UIImage) {
self.imageView.image = image
}
}
#ninjaproger's answer was great! Simply writing this as a Swift 4 version of the answer for future reference.
import UIKit
import AVFoundation
var customPreviewLayer: AVCaptureVideoPreviewLayer?
class ViewController: UIViewController {
#IBOutlet weak var imageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
CaptureManager.shared.statSession()
CaptureManager.shared.delegate = self
}
}
extension ViewController: CaptureManagerDelegate {
func processCapturedImage(image: UIImage) {
self.imageView.image = image
}
}
protocol CaptureManagerDelegate: class {
func processCapturedImage(image: UIImage)
}
class CaptureManager: NSObject {
internal static let shared = CaptureManager()
weak var delegate: CaptureManagerDelegate?
var session: AVCaptureSession?
override init() {
super.init()
session = AVCaptureSession()
//setup input
let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
let input = try! AVCaptureDeviceInput(device: device!)
session?.addInput(input)
//setup output
let output = AVCaptureVideoDataOutput()
output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
session?.addOutput(output)
}
func statSession() {
session?.startRunning()
}
func stopSession() {
session?.stopRunning()
}
func getImageFromSampleBuffer(sampleBuffer: CMSampleBuffer) ->UIImage? {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return nil
}
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else {
return nil
}
guard let cgImage = context.makeImage() else {
return nil
}
let image = UIImage(cgImage: cgImage, scale: 1, orientation:.right)
CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
return image
}
}
extension CaptureManager: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else {
return
}
delegate?.processCapturedImage(image: outputImage)
}
}
Details
Xcode 10.2.1 (10E1001), Swift 5
Features
This solution allow:
to check camera access
to select front or back camera
if no access to the camera show alert with link to the app settings page
to make o photo
to play standard capture photo sound
Solution
CameraService
import UIKit
import AVFoundation
import Vision
class CameraService: NSObject {
private weak var previewView: UIView?
private(set) var cameraIsReadyToUse = false
private let session = AVCaptureSession()
private weak var previewLayer: AVCaptureVideoPreviewLayer?
private lazy var sequenceHandler = VNSequenceRequestHandler()
private lazy var capturePhotoOutput = AVCapturePhotoOutput()
private lazy var dataOutputQueue = DispatchQueue(label: "FaceDetectionService",
qos: .userInitiated, attributes: [],
autoreleaseFrequency: .workItem)
private var captureCompletionBlock: ((UIImage) -> Void)?
private var preparingCompletionHandler: ((Bool) -> Void)?
private var snapshotImageOrientation = UIImage.Orientation.upMirrored
private var cameraPosition = AVCaptureDevice.Position.front {
didSet {
switch cameraPosition {
case .front: snapshotImageOrientation = .upMirrored
case .unspecified, .back: fallthrough
#unknown default: snapshotImageOrientation = .up
}
}
}
func prepare(previewView: UIView,
cameraPosition: AVCaptureDevice.Position,
completion: ((Bool) -> Void)?) {
self.previewView = previewView
self.preparingCompletionHandler = completion
self.cameraPosition = cameraPosition
checkCameraAccess { allowed in
if allowed { self.setup() }
completion?(allowed)
self.preparingCompletionHandler = nil
}
}
private func setup() { configureCaptureSession() }
func start() { if cameraIsReadyToUse { session.startRunning() } }
func stop() { session.stopRunning() }
}
extension CameraService {
private func askUserForCameraPermission(_ completion: ((Bool) -> Void)?) {
AVCaptureDevice.requestAccess(for: AVMediaType.video) { (allowedAccess) -> Void in
DispatchQueue.main.async { completion?(allowedAccess) }
}
}
private func checkCameraAccess(completion: ((Bool) -> Void)?) {
askUserForCameraPermission { [weak self] allowed in
guard let self = self, let completion = completion else { return }
self.cameraIsReadyToUse = allowed
if allowed {
completion(true)
} else {
self.showDisabledCameraAlert(completion: completion)
}
}
}
private func configureCaptureSession() {
guard let previewView = previewView else { return }
// Define the capture device we want to use
guard let camera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: cameraPosition) else {
let error = NSError(domain: "", code: 0, userInfo: [NSLocalizedDescriptionKey : "No front camera available"])
show(error: error)
return
}
// Connect the camera to the capture session input
do {
try camera.lockForConfiguration()
defer { camera.unlockForConfiguration() }
if camera.isFocusModeSupported(.continuousAutoFocus) {
camera.focusMode = .continuousAutoFocus
}
if camera.isExposureModeSupported(.continuousAutoExposure) {
camera.exposureMode = .continuousAutoExposure
}
let cameraInput = try AVCaptureDeviceInput(device: camera)
session.addInput(cameraInput)
} catch {
show(error: error as NSError)
return
}
// Create the video data output
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
// Add the video output to the capture session
session.addOutput(videoOutput)
let videoConnection = videoOutput.connection(with: .video)
videoConnection?.videoOrientation = .portrait
// Configure the preview layer
let previewLayer = AVCaptureVideoPreviewLayer(session: session)
previewLayer.videoGravity = .resizeAspectFill
previewLayer.frame = previewView.bounds
previewView.layer.insertSublayer(previewLayer, at: 0)
self.previewLayer = previewLayer
}
}
extension CameraService: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard captureCompletionBlock != nil,
let outputImage = UIImage(sampleBuffer: sampleBuffer, orientation: snapshotImageOrientation) else { return }
DispatchQueue.main.async { [weak self] in
guard let self = self else { return }
if let captureCompletionBlock = self.captureCompletionBlock{
captureCompletionBlock(outputImage)
AudioServicesPlayAlertSound(SystemSoundID(1108))
}
self.captureCompletionBlock = nil
}
}
}
// Navigation
extension CameraService {
private func show(alert: UIAlertController) {
DispatchQueue.main.async {
UIApplication.topViewController?.present(alert, animated: true, completion: nil)
}
}
private func showDisabledCameraAlert(completion: ((Bool) -> Void)?) {
let alertVC = UIAlertController(title: "Enable Camera Access",
message: "Please provide access to your camera",
preferredStyle: .alert)
alertVC.addAction(UIAlertAction(title: "Go to Settings", style: .default, handler: { action in
guard let previewView = self.previewView,
let settingsUrl = URL(string: UIApplication.openSettingsURLString),
UIApplication.shared.canOpenURL(settingsUrl) else { return }
UIApplication.shared.open(settingsUrl) { [weak self] _ in
guard let self = self else { return }
self.prepare(previewView: previewView,
cameraPosition: self.cameraPosition,
completion: self.preparingCompletionHandler)
}
}))
alertVC.addAction(UIAlertAction(title: "Cancel", style: .cancel, handler: { _ in completion?(false) }))
show(alert: alertVC)
}
private func show(error: NSError) {
let alertVC = UIAlertController(title: "Error", message: error.localizedDescription, preferredStyle: .alert)
alertVC.addAction(UIAlertAction(title: "Ok", style: .cancel, handler: nil ))
show(alert: alertVC)
}
}
extension CameraService: AVCapturePhotoCaptureDelegate {
func capturePhoto(completion: ((UIImage) -> Void)?) { captureCompletionBlock = completion }
}
Helpers
///////////////////////////////////////////////////////////////////////////
import UIKit
import AVFoundation
extension UIImage {
convenience init?(sampleBuffer: CMSampleBuffer, orientation: UIImage.Orientation = .upMirrored) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height,
bitsPerComponent: 8, bytesPerRow: bytesPerRow,
space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else { return nil }
guard let cgImage = context.makeImage() else { return nil }
self.init(cgImage: cgImage, scale: 1, orientation: orientation)
}
}
///////////////////////////////////////////////////////////////////////////
import UIKit
extension UIApplication {
private class func topViewController(controller: UIViewController? = UIApplication.shared.keyWindow?.rootViewController) -> UIViewController? {
if let navigationController = controller as? UINavigationController {
return topViewController(controller: navigationController.visibleViewController)
}
if let tabController = controller as? UITabBarController {
if let selected = tabController.selectedViewController {
return topViewController(controller: selected)
}
}
if let presented = controller?.presentedViewController {
return topViewController(controller: presented)
}
return controller
}
class var topViewController: UIViewController? { return topViewController() }
}
Usage
private lazy var cameraService = CameraService()
//...
cameraService.prepare(previewView: previewView, cameraPosition: .front) { [weak self] success in
if success { self?.cameraService.start() }
}
//...
cameraService.capturePhoto { [weak self] image in
//...
}
Full sample
import UIKit
class ViewController: UIViewController {
private lazy var cameraService = CameraService()
private weak var button: UIButton?
private weak var imagePreviewView: UIImageView?
private var cameraInited = false
private enum ButtonState { case cancel, makeSnapshot }
private var buttonState = ButtonState.makeSnapshot {
didSet {
switch buttonState {
case .makeSnapshot: button?.setTitle("Make a photo", for: .normal)
case .cancel: button?.setTitle("Cancel", for: .normal)
}
}
}
override func viewDidLoad() {
super.viewDidLoad()
setupCameraPreviewView()
setupButton()
// Do any additional setup after loading the view.
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
cameraService.start()
}
override func viewDidDisappear(_ animated: Bool) {
super.viewDidDisappear(animated)
cameraService.stop()
}
// Ensure that the interface stays locked in Portrait.
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
return .portrait
}
// Ensure that the interface stays locked in Portrait.
override var preferredInterfaceOrientationForPresentation: UIInterfaceOrientation {
return .portrait
}
}
extension ViewController {
private func setupCameraPreviewView() {
let previewView = UIView(frame: .zero)
view.addSubview(previewView)
previewView.translatesAutoresizingMaskIntoConstraints = false
previewView.topAnchor.constraint(equalTo: view.topAnchor).isActive = true
previewView.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
previewView.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
previewView.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
previewView.layoutIfNeeded()
cameraService.prepare(previewView: previewView, cameraPosition: .front) { [weak self] success in
if success { self?.cameraService.start() }
}
}
private func setupButton() {
let button = UIButton(frame: .zero)
button.addTarget(self, action: #selector(buttonTouchedUpInside), for: .touchUpInside)
view.addSubview(button)
self.button = button
buttonState = .makeSnapshot
button.translatesAutoresizingMaskIntoConstraints = false
button.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
button.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
button.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
button.heightAnchor.constraint(equalToConstant: 44).isActive = true
button.backgroundColor = UIColor.black.withAlphaComponent(0.4)
}
private func show(image: UIImage) {
let imageView = UIImageView(frame: .zero)
view.insertSubview(imageView, at: 1)
imagePreviewView = imageView
imageView.translatesAutoresizingMaskIntoConstraints = false
imageView.topAnchor.constraint(equalTo: view.topAnchor).isActive = true
imageView.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
imageView.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
imageView.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
imageView.image = image
}
#objc func buttonTouchedUpInside() {
switch buttonState {
case .makeSnapshot:
cameraService.capturePhoto { [weak self] image in
guard let self = self else {return }
self.cameraService.stop()
self.buttonState = .cancel
self.show(image: image)
}
case .cancel:
buttonState = .makeSnapshot
cameraService.start()
imagePreviewView?.removeFromSuperview()
}
}
}
before i post my entire code of my viewControllor that controls the main view of my cameraApp, i'll tell you what happens, what works and what don't ATM. The buttons for snapping the photo and saving it, for changing the cameras (front/back), setting the flash from auto/off. There are other things in this code that are for now set up but unused, but i don't think that they are causing this problem, i think that my ingnorance is the main problem. What i want to achieve through the help of you guys is that when i press the cameraButton, if the flash is set to auto, the app use the flash if needed and then saves the picture. I hope that someone can help me, the apple documents tell you all of the commands, but not where to put them in a easy way. Thanks in advance, here's my code:
import UIKit
import AVFoundation
import PageMenu
class randomController: UIViewController, UIImagePickerControllerDelegate,UINavigationControllerDelegate, CAPSPageMenuDelegate, AVCapturePhotoCaptureDelegate {
#IBOutlet weak var cameraPreview: UIImageView!
#IBAction func cameraButton(_ sender: Any) {
let settings = AVCapturePhotoSettings()
let previewPixelType = settings.availablePreviewPhotoPixelFormatTypes.first!
let previewFormat = [
kCVPixelBufferPixelFormatTypeKey as String: previewPixelType,
kCVPixelBufferWidthKey as String: 160,
kCVPixelBufferHeightKey as String: 160
]
let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
if (device?.hasTorch)! {
do {
print("diosotterrato")
try device?.lockForConfiguration()
//device?.torchMode = AVCaptureTorchMode.on
// device?.torchMode = AVCaptureTorchMode.Off
device?.torchMode = AVCaptureTorchMode.auto
device?.unlockForConfiguration()
} catch {
print(error)
}
}
settings.previewPhotoFormat = previewFormat
sessionOutput.capturePhoto(with: settings, delegate: self)
}
#IBOutlet weak var backgroundMarrone: UIImageView!
func askPermission() {
print("here")
let cameraPermissionStatus = AVCaptureDevice.authorizationStatus(forMediaType: AVMediaTypeVideo)
switch cameraPermissionStatus {
case .authorized:
print("Already Authorized")
case .denied:
print("denied")
let alert = UIAlertController(title: "Sorry :(" , message: "But could you please grant permission for camera within device settings", preferredStyle: .alert)
let action = UIAlertAction(title: "Ok", style: .cancel, handler: nil)
alert.addAction(action)
present(alert, animated: true, completion: nil)
case .restricted:
print("restricted")
default:
AVCaptureDevice.requestAccess(forMediaType: AVMediaTypeVideo, completionHandler: {
[weak self]
(granted :Bool) -> Void in
if granted == true {
// User granted
print("User granted")
DispatchQueue.main.async(){
//Do smth that you need in main thread
}
}
else {
// User Rejected
print("User Rejected")
DispatchQueue.main.async(){
let alert = UIAlertController(title: "WHY?" , message: "Camera it is the main feature of our application", preferredStyle: .alert)
let action = UIAlertAction(title: "Ok", style: .cancel, handler: nil)
alert.addAction(action)
self?.present(alert, animated: true, completion: nil)
}
}
});
}
}
#IBOutlet weak var cameracapture: UIImageView!
var pageMenu : CAPSPageMenu?
var captureSession = AVCaptureSession();
var sessionOutput = AVCapturePhotoOutput();
var sessionOutputSetting = AVCapturePhotoSettings(format: [AVVideoCodecKey:AVVideoCodecJPEG]);
var previewLayer = AVCaptureVideoPreviewLayer();
var flashButton = UIButton()
override func viewDidLoad() {
super.viewDidLoad()
pageMenu?.delegate = self
var controllerArray : [UIViewController] = []
backgroundMarrone.frame = CGRect(x: 0, y:(view.frame.maxY/10*5), width: self.view.frame.width, height: self.view.frame.height )
cameraPreview.frame = CGRect(x: 0, y: 0, width: self.view.frame.width, height: backgroundMarrone.frame.minY )
let switchCameraButton = UIButton(frame: CGRect(x: 5, y: cameraPreview.frame.maxY/12*9.8, width: 50, height: 50))
switchCameraButton.backgroundColor = UIColor(red: 100.0/255.0, green: 39.0/255.0, blue: 87.0/255.0, alpha: 0.0)
switchCameraButton.addTarget(self, action: #selector(pressButton(button:)), for: .touchUpInside)
switchCameraButton.setImage(#imageLiteral(resourceName: "btn_reverse_camera copy.png"), for: UIControlState.normal)
self.view.addSubview(switchCameraButton)
flashButton = UIButton(frame: CGRect(x: cameraPreview.frame.maxX - 55, y: cameraPreview.frame.maxY/12*9.8, width: 50, height: 50))
flashButton.backgroundColor = UIColor(red: 100.0/255.0, green: 39.0/255.0, blue: 87.0/255.0, alpha: 0.0)
flashButton.addTarget(self, action: #selector(pressButton1(button:)), for: .touchUpInside)
flashButton.setImage(#imageLiteral(resourceName: "btn_flash copy.png"), for: UIControlState.normal)
self.view.addSubview(flashButton)
let libraryController : UIViewController = UIStoryboard(name: "Main", bundle: nil).instantiateViewController(withIdentifier: "libraryController") as! libraryController
libraryController.title = "Libreria"
controllerArray.append(libraryController)
let photoController : UIViewController = UIStoryboard(name: "Main", bundle: nil).instantiateViewController(withIdentifier: "photoController") as! photoController
photoController.title = "Foto"
controllerArray.append(photoController)
let videoController : UIViewController = UIStoryboard(name: "Main", bundle: nil).instantiateViewController(withIdentifier: "videoController") as! videoController
videoController.title = "Video"
controllerArray.append(videoController)
let parameters: [CAPSPageMenuOption] = [
.menuItemSeparatorWidth(0),
.useMenuLikeSegmentedControl(true),
.menuItemSeparatorPercentageHeight(0.1),
.centerMenuItems (true)
]
pageMenu = CAPSPageMenu(viewControllers: controllerArray, frame: CGRect(x: 0, y: cameraPreview.frame.maxY , width: self.view.frame.width, height: view.frame.height ), pageMenuOptions: parameters)
self.view.addSubview(pageMenu!.view)
}
var ciaone = false
override func viewWillAppear(_ animated: Bool) {
let deviceDiscoverySession = AVCaptureDeviceDiscoverySession(deviceTypes: [AVCaptureDeviceType.builtInDualCamera, AVCaptureDeviceType.builtInTelephotoCamera,AVCaptureDeviceType.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: AVCaptureDevicePosition.unspecified)
for device in (deviceDiscoverySession?.devices)! {
print("tipo di device: \(device.position)")
if(device.position == AVCaptureDevicePosition.front){
do{
let input = try AVCaptureDeviceInput(device: device)
if(captureSession.canAddInput(input)){
captureSession.addInput(input);
captureSession.startRunning()
print("entrato1")
if(captureSession.canAddOutput(sessionOutput)){
captureSession.addOutput(sessionOutput);
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession);
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.portrait;
cameraPreview.layer.addSublayer(previewLayer);
print("entrato2")
}
}
}
catch{
print("exception!");
}
}
}
}
func beginSession(captureDevice : AVCaptureDevice?) {
ciaone = false
if captureSession.isRunning {
captureSession.beginConfiguration()
let currentInput : AVCaptureInput = captureSession.inputs[0] as! AVCaptureInput
captureSession.removeInput(currentInput)
do {
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
} catch {
print("Error adding video input device")
}
captureSession.commitConfiguration()
} else {
// Setup the camera and layer for the first time.
do {
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
} catch {
print("Error adding video input device")
}
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.view.layer.insertSublayer(previewLayer, at: 0)
previewLayer.frame = cameraPreview.bounds
captureSession.startRunning()
}
}
func capture(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhotoSampleBuffer photoSampleBuffer: CMSampleBuffer?, previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
if let error = error {
print("error occure : \(error.localizedDescription)")
}
if let sampleBuffer = photoSampleBuffer,
let previewBuffer = previewPhotoSampleBuffer,
let dataImage = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewBuffer) {
print(UIImage(data: dataImage)?.size as Any)
let dataProvider = CGDataProvider(data: dataImage as CFData)
let cgImageRef: CGImage! = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
let image = UIImage(cgImage: cgImageRef, scale: 1.0, orientation: UIImageOrientation.right)
UIImageWriteToSavedPhotosAlbum(image, self, nil, nil)
self.cameracapture.image = image
} else {
print("some error here")
}
}
var a = 0
func pressButton(button: UIButton) {
if (a == 0) {
print("dioschifoso!")
beginSession(captureDevice: AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo))
a = 1
} else {
beginSession(captureDevice: AVCaptureDevice.defaultDevice(withDeviceType: AVCaptureDeviceType.builtInWideAngleCamera, mediaType: AVMediaTypeVideo, position: AVCaptureDevicePosition.front))
a = 0
}
}
var flash = 0
func toggleTorch(on: Bool) {
//guard let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo) else { return }
/*
if device.hasTorch {
do {
try device.lockForConfiguration()
if on == true {
device.torchMode = .on
} else {
device.torchMode = .off
}
device.unlockForConfiguration()
} catch {
print("Torch could not be used")
}
} else {
print("Torch is not available")
}
*/
}
func pressButton1(button: UIButton) {
let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
if (device?.hasTorch)! {
do {
try device?.lockForConfiguration()
//device?.torchMode = AVCaptureTorchMode.on
// device?.torchMode = AVCaptureTorchMode.Off
device?.torchMode = AVCaptureTorchMode.off
device?.unlockForConfiguration()
} catch {
print(error)
}
}
if (a == 0) {
let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
if (device?.hasTorch)! {
do {
try device?.lockForConfiguration()
device?.torchMode = AVCaptureTorchMode.auto
// device?.torchMode = AVCaptureTorchMode.Off
//device?.torchMode = AVCaptureTorchMode.auto
device?.unlockForConfiguration()
} catch {
print(error)
}
}
//toggleTorch(on: true)
flashButton.setImage(#imageLiteral(resourceName: "btn_flash_off.png"), for: UIControlState.normal)
a = 1
} else {
//toggleTorch(on: false)
flashButton.setImage(#imageLiteral(resourceName: "btn_flash copy.png"), for: UIControlState.normal)
a = 0
}
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
previewLayer.frame = cameraPreview.bounds
}
func capturePicture(){
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
Swift 3.0 and Xcode >= 8
#IBAction func flashTapped() {
// check if flashlight available
let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
if (device?.hasTorch)! && (device?.hasFlash)! {
do {
try device?.lockForConfiguration()
} catch {
// handle error
return
}
if device?.torchMode == .auto {
device?.torchMode = .on
device?.flashMode = .on
}
else if device?.torchMode == .on {
device?.torchMode = .off
device?.flashMode = .off
}
else {
device?.torchMode = .auto
device?.flashMode = .auto
}
device?.unlockForConfiguration()
}
}