How to scan only certain areas through the camera with Swift 5? - ios

I'd like to scan a QR code through the camera. There is no problem scanning the QR code itself,
but I want to scan only a certain area. How can I do this?
Currently, the QR code is recognized anywhere in the entire camera area.
import Foundation
import UIKit
import AVFoundation
class ScannerViewController : UIViewController, AVCaptureMetadataOutputObjectsDelegate {
@IBOutlet weak var qrcodeView: UIView!
@IBOutlet weak var mainText: UITextView!
@IBOutlet weak var headerBar: UINavigationBar!
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!
override func viewDidLoad() {
super.viewDidLoad()
view.backgroundColor = UIColor.black
self.qrcodeView.backgroundColor = UIColor.black.withAlphaComponent(0.5)
captureSession = AVCaptureSession()
guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
let videoInput: AVCaptureDeviceInput
do {
videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
} catch {
return
}
if (captureSession.canAddInput(videoInput)) {
captureSession.addInput(videoInput)
} else {
failed()
return
}
let metadataOutput = AVCaptureMetadataOutput()
if (captureSession.canAddOutput(metadataOutput)) {
captureSession.addOutput(metadataOutput)
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.metadataObjectTypes = [.qr]
} else {
failed()
return
}
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.frame = view.layer.bounds
previewLayer.videoGravity = .resizeAspectFill
view.layer.insertSublayer(previewLayer, at: 0)
captureSession.startRunning()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
if (captureSession?.isRunning == false) {
captureSession.startRunning()
}
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
if (captureSession?.isRunning == true) {
captureSession.stopRunning()
}
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
// let scanRect = CGRect(x: 0, y: 0, width: 200, height: 200)
// let rectOfInterest = layer.metadataOutputRectConverted(fromLayerRect: scanRect)
// metadataObjects.rectOfInterest = rectOfInterest
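// (Note: the commented-out attempt above fails because rectOfInterest is a
// property of AVCaptureMetadataOutput, not of the metadataObjects array, and
// it should be set once after the session starts rather than in this callback.)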
captureSession.stopRunning()
if let metadataObject = metadataObjects.first {
guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
guard let stringValue = readableObject.stringValue else { return }
AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
found(code: stringValue)
} else {
print("not support")
}
}
func found(code: String) {
print(code)
self.dismiss(animated: true, completion: nil)
}
func failed() {
captureSession = nil
}
}
As in the picture above, I would like to scan only within the square area.
I desperately need this.
Thanks in advance.

You can use the rectOfInterest property of AVCaptureMetadataOutput to achieve this.
Add the following code after captureSession.startRunning().
First you need to convert the view rect into the output's coordinate space:
let rectOfInterest = videoPreviewLayer?.metadataOutputRectConverted(fromLayerRect: self.viewAreaOfScan.frame) // videoPreviewLayer is AVCaptureVideoPreviewLayer
After that, you can assign it to metadataOutput's rectOfInterest:
metadataOutput.rectOfInterest = rectOfInterest ?? CGRect(x: 0, y: 0, width: 1, height: 1)
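Putting it together - a minimal sketch, assuming the qrcodeView outlet from the question is the square overlay you want to restrict scanning to, and that metadataOutput and previewLayer are still in scope at the end of viewDidLoad:
captureSession.startRunning()
// rectOfInterest is expressed in the metadata output's normalized
// coordinate space; letting the preview layer convert the overlay's
// frame also accounts for the .resizeAspectFill video gravity.
metadataOutput.rectOfInterest =
    previewLayer.metadataOutputRectConverted(fromLayerRect: qrcodeView.frame)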

Related

Camera Barcode Scanning Works on iPhone, But Not iPad

I'm using the AVCaptureDevice API for scanning barcodes. It works very well on iPhone, but very similar code doesn't work on iPad and I'm not quite sure why (it doesn't detect any barcode at all). The main differences are the size of the scan area, its position, and the orientation. I tested using an iPhone 12 mini (iOS 15 beta) and the original iPad Pro 9.7" (iOS 14.6). Not sure if that matters.
Below is the code for the scanner. Please let me know if you notice something that should be changed.
import Foundation
import AVFoundation
import UIKit
class ScannerViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!
var barcodeCallback: (String) -> Void
var cameraScanDismissedCallback: (Bool) -> Void
var scanned = false
var currentDevice: AVCaptureDevice!
var scanRectView: UIView!
init(barcodeCallback: @escaping (String) -> Void, cameraScanDismissedCallback: @escaping (Bool) -> Void) {
self.barcodeCallback = barcodeCallback;
self.cameraScanDismissedCallback = cameraScanDismissedCallback;
super.init(nibName: nil, bundle: nil)
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
override func viewDidLoad() {
super.viewDidLoad()
view.backgroundColor = UIColor.black
captureSession = AVCaptureSession()
guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
let videoInput: AVCaptureDeviceInput
do {
videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
} catch {
return
}
if (captureSession.canAddInput(videoInput)) {
captureSession.addInput(videoInput)
} else {
failed()
return
}
let metadataOutput = AVCaptureMetadataOutput()
if (captureSession.canAddOutput(metadataOutput)) {
captureSession.addOutput(metadataOutput)
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.metadataObjectTypes = [.qr, .ean8, .ean13, .pdf417, .code128, .aztec, .code39, .code39Mod43, .code93, .dataMatrix, .face, .interleaved2of5, .itf14, .upce]
} else {
failed()
return
}
let windowSize = UIScreen.main.bounds.size
var scanSize: CGSize!;
var scanRect: CGRect!;
if(UIDevice.current.userInterfaceIdiom == .pad){
scanSize = CGSize(width:windowSize.width*1/3, height:windowSize.width*1/7);
scanRect = CGRect(x: UIScreen.main.bounds.midX - scanSize.width/2,
y: UIScreen.main.bounds.midY - scanSize.height/2,
width:scanSize.width, height:scanSize.height);
}else{
scanSize = CGSize(width:windowSize.width*2/3, height:windowSize.width*1/3);
scanRect = CGRect(x: UIScreen.main.bounds.midX - scanSize.width/2,
y: UIScreen.main.bounds.midY - scanSize.height/2,
width:scanSize.width, height:scanSize.height);
}
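// rectOfInterest is normalized to [0, 1] and expressed in the camera's
// native (landscape) coordinate space, hence the x/y and width/height
// swap below (the standard portrait conversion).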
scanRect = CGRect(x:scanRect.origin.y/windowSize.height,
y:scanRect.origin.x/windowSize.width,
width:scanRect.size.height/windowSize.height,
height:scanRect.size.width/windowSize.width);
metadataOutput.rectOfInterest = scanRect
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
if(UIDevice.current.userInterfaceIdiom == .pad){
let orientation: UIDeviceOrientation = UIDevice.current.orientation
previewLayer.connection?.videoOrientation = {
switch (orientation) {
case .faceUp:
return .landscapeLeft
case .portrait:
return .portrait
case .landscapeRight:
return .landscapeLeft
case .landscapeLeft:
return .landscapeRight
default:
return .portrait
}
}()
}
previewLayer.frame = view.layer.bounds
previewLayer.videoGravity = .resizeAspectFill
view.layer.addSublayer(previewLayer)
scanRectView = UIView();
view.addSubview(self.scanRectView)
scanRectView.frame = CGRect(x:0, y:0, width: scanSize.width,
height: scanSize.height);
if(UIDevice.current.userInterfaceIdiom == .pad){
scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX - scanSize.width/2,
y:UIScreen.main.bounds.midY - scanSize.height/2)
}else{
scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX,
y:UIScreen.main.bounds.midY)
}
scanRectView.layer.borderColor = UIColor.yellow.cgColor
scanRectView.layer.borderWidth = 5;
currentDevice = videoCaptureDevice
captureSession.startRunning()
toggleTorch(on: true)
}
func toggleTorch(on: Bool) {
guard let device = currentDevice else { return }
if device.hasTorch {
do {
try device.lockForConfiguration()
if(UIDevice.current.userInterfaceIdiom == .pad){
device.videoZoomFactor = 1.3
}else{
device.videoZoomFactor = 1.5
}
if on == true {
device.torchMode = .on
} else {
device.torchMode = .off
}
device.unlockForConfiguration()
} catch {
print("Torch could not be used")
}
} else {
print("Torch is not available")
}
}
func failed() {
let ac = UIAlertController(title: "Scanning not supported", message: "Your device does not support scanning a code from an item. Please use a device with a camera.", preferredStyle: .alert)
ac.addAction(UIAlertAction(title: "OK", style: .default))
present(ac, animated: true)
captureSession = nil
toggleTorch(on: false)
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
if (captureSession?.isRunning == false) {
captureSession.startRunning()
toggleTorch(on: true)
}
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
if (captureSession?.isRunning == true) {
captureSession.stopRunning()
toggleTorch(on: false)
}
cameraScanDismissedCallback(scanned)
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
captureSession.stopRunning()
if let metadataObject = metadataObjects.first {
guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
guard let stringValue = readableObject.stringValue else { return }
AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
found(code: stringValue)
scanned = true
}
dismiss(animated: true)
}
func found(code: String) {
print(code)
barcodeCallback(code)
}
override var prefersStatusBarHidden: Bool {
return true
}
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
if(UIDevice.current.userInterfaceIdiom == .pad){
return .landscape
}else{
return .portrait
}
}
}
Just in case anyone runs into a similar issue, check the following:
Orientation of the video
The position of AVCaptureMetadataOutput.rectOfInterest
I'm still having an issue with the rectOfInterest not being positioned at the center, but it works. Once I figure out how to center it, I will post the solution here.
It probably isn't centered because of your navigationBar.
Try setting rectOfInterest as below. Remember that the camera uses a different coordinate system than UIView; posX and posY are in UIView coordinates:
let aimRect = CGRect(x: (posY - navBar.height) / UIScreen.main.bounds.height,
y: posX / UIScreen.main.bounds.width,
width: rectHeight / UIScreen.main.bounds.height,
height: rectWidth / UIScreen.main.bounds.width)
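Alternatively, you can skip the manual normalization and let the preview layer do the conversion; it accounts for videoGravity and the current orientation. A sketch, assuming the previewLayer and scanRectView from the question, placed after captureSession.startRunning():
// Convert the on-screen scan frame into the metadata output's
// normalized coordinate space.
metadataOutput.rectOfInterest =
    previewLayer.metadataOutputRectConverted(fromLayerRect: scanRectView.frame)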

Why does scanning a 1D barcode only get a result in the middle of the view in Swift?

I have a question about scanning 1D barcodes. I drew a yellow border around the scanning region view, but I only get a successful result when I put the barcode in the middle of the region. Why is that?
It works fine when I put my 1D barcode in the green region shown in the following image. When I put it in the red region, nothing happens. My app supports only landscape right and left orientations.
What's wrong with my code?
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate, UIAlertViewDelegate {
var scanRectView:UIView!
var device:AVCaptureDevice!
var input:AVCaptureDeviceInput!
var output:AVCaptureMetadataOutput!
var session:AVCaptureSession!
var preview:AVCaptureVideoPreviewLayer!
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewDidLayoutSubviews() {
self.configureVideoOrientation()
}
fileprivate func configureVideoOrientation() {
let previewLayer = self.preview
if let connection = previewLayer?.connection {
let orientation = UIDevice.current.orientation
if connection.isVideoOrientationSupported, let videoOrientation = AVCaptureVideoOrientation(rawValue: orientation.rawValue) {
previewLayer?.frame = self.view.bounds
connection.videoOrientation = videoOrientation
}
}
}
@IBAction func btnClicked(_ sender: Any) {
do{
self.device = AVCaptureDevice.default(for: AVMediaType.video)
self.input = try AVCaptureDeviceInput(device: device)
self.output = AVCaptureMetadataOutput()
output.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
self.session = AVCaptureSession()
if UIScreen.main.bounds.size.height < 500 {
self.session.sessionPreset = AVCaptureSession.Preset.vga640x480
}else{
self.session.sessionPreset = AVCaptureSession.Preset.high
}
self.session.addInput(self.input)
self.session.addOutput(self.output)
self.output.metadataObjectTypes = [.qr, .ean8, .ean13, .pdf417, .code128, .aztec, .code39, .code39Mod43, .code93, .dataMatrix, .face, .interleaved2of5, .itf14, .upce]
let windowSize = UIScreen.main.bounds.size
let scanSize = CGSize(width:windowSize.width*1/3, height:windowSize.width*1/3)
var scanRect = CGRect(x:(windowSize.width-scanSize.width)/2,
y:(windowSize.height-scanSize.height)/2,
width:scanSize.width, height:scanSize.height)
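// NOTE: the swap below normalizes the rect assuming a portrait preview;
// rectOfInterest is defined in the camera's native (landscape) coordinate
// space, so with a landscape-only UI this mapping is likely what limits
// detection to the middle of the view. Calling
// self.preview.metadataOutputRectConverted(fromLayerRect:) after
// startRunning() avoids the manual math.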
scanRect = CGRect(x:scanRect.origin.y/windowSize.height,
y:scanRect.origin.x/windowSize.width,
width:scanRect.size.height/windowSize.height,
height:scanRect.size.width/windowSize.width);
self.output.rectOfInterest = scanRect
self.preview = AVCaptureVideoPreviewLayer(session:self.session)
self.preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.preview.frame = UIScreen.main.bounds
self.view.layer.insertSublayer(self.preview, at:0)
self.scanRectView = UIView();
self.view.addSubview(self.scanRectView)
self.scanRectView.frame = CGRect(x:0, y:0, width:scanSize.width,
height:scanSize.height);
self.scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX,
y:UIScreen.main.bounds.midY)
self.scanRectView.layer.borderColor = UIColor.yellow.cgColor
self.scanRectView.layer.borderWidth = 5;
self.session.startRunning()
do {
try self.device!.lockForConfiguration()
} catch _ {
NSLog("Error: lockForConfiguration.");
}
self.device!.videoZoomFactor = 1.5
self.device!.unlockForConfiguration()
}catch _ {
}
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
var stringValue:String?
if metadataObjects.count > 0 {
let metadataObject = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
stringValue = metadataObject.stringValue
if stringValue != nil{
self.session.stopRunning()
}
}
self.session.stopRunning()
}
}

Switching from the front camera to the back camera causes the app to crash

The app crashes with the error:
'No active and enabled video connection'
After taking a picture with the back camera, segueing to another screen, dismissing that screen, returning to the camera screen, flipping from the back camera to the front camera, and taking another picture, the app crashes. Below is the code for the camera screen.
import UIKit
import AVFoundation
protocol gestureDelegate{
func gestureDelegate()
}
protocol previewSegueDelegate {
func previewSegueDelegate(image:UIImage,device:AVCaptureDevice)
}
class MainCameraCollectionViewCell: UICollectionViewCell {
var gdelegate: gestureDelegate?
var pdelegate: previewSegueDelegate?
@IBOutlet weak var myView: UIView!
var captureSession = AVCaptureSession()
private var sessionQueue: DispatchQueue!
var captureConnection = AVCaptureConnection()
var backCamera: AVCaptureDevice?
var frontCamera: AVCaptureDevice?
var currentCamera: AVCaptureDevice?
var photoOutPut: AVCapturePhotoOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
var image: UIImage?
var usingFrontCamera = false
override func awakeFromNib() {
super.awakeFromNib()
setupCaptureSession()
setupDevice()
setupInput()
setupPreviewLayer()
startRunningCaptureSession()
print("Inside of camera cell")
let pinchGesture = UIPinchGestureRecognizer(target: self, action: #selector(MainCameraCollectionViewCell.tapEdit(sender:)))
addGestureRecognizer(pinchGesture)
}
@objc func tapEdit(sender: UIPinchGestureRecognizer){
gdelegate?.gestureDelegate()
guard let device = currentCamera else { return }
if sender.state == .changed {
let maxZoomFactor = device.activeFormat.videoMaxZoomFactor
let pinchVelocityDividerFactor: CGFloat = 5.0
do {
try device.lockForConfiguration()
defer { device.unlockForConfiguration() }
let desiredZoomFactor = device.videoZoomFactor + atan2(sender.velocity, pinchVelocityDividerFactor)
device.videoZoomFactor = max(1.0, min(desiredZoomFactor, maxZoomFactor))
} catch {
print(error)
}
}
}
func setupCaptureSession(){
captureSession.sessionPreset = AVCaptureSession.Preset.photo
sessionQueue = DispatchQueue(label: "session queue")
}
func setupDevice(usingFrontCamera:Bool = false){
sessionQueue.async {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
let devices = deviceDiscoverySession.devices
for device in devices{
if usingFrontCamera && device.position == AVCaptureDevice.Position.front {
self.currentCamera = device
} else if device.position == AVCaptureDevice.Position.back {
self.currentCamera = device
}
}
}
}
func setupInput() {
sessionQueue.async {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: self.currentCamera!)
if self.captureSession.canAddInput(captureDeviceInput) {
self.captureSession.addInput(captureDeviceInput)
}
self.photoOutPut = AVCapturePhotoOutput()
self.photoOutPut?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format:[AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
if self.captureSession.canAddOutput(self.photoOutPut!) {
self.captureSession.addOutput(self.photoOutPut!)
}
} catch {
print(error)
}
}
}
func setupPreviewLayer(){
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewLayer?.frame = CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)
self.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}
func startRunningCaptureSession(){
captureSession.startRunning()
}
@IBAction func cameraButton_TouchUpInside(_ sender: Any) {
let settings = AVCapturePhotoSettings()
photoOutPut?.capturePhoto(with: settings, delegate: self as! AVCapturePhotoCaptureDelegate)
}
@IBAction func FlipThe_camera(_ sender: UIButton) {
print("Flip Touched")
captureSession.beginConfiguration()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
captureSession.removeInput(input)
}
}
usingFrontCamera = !usingFrontCamera
setupCaptureSession()
setupDevice(usingFrontCamera: usingFrontCamera)
setupInput()
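// NOTE: setupDevice(usingFrontCamera:) and setupInput() dispatch their work
// asynchronously to sessionQueue, so commitConfiguration() below may execute
// before the new input has actually been added - a plausible source of the
// "No active and enabled video connection" crash.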
captureSession.commitConfiguration()
startRunningCaptureSession()
}
}
extension MainCameraCollectionViewCell: AVCapturePhotoCaptureDelegate{
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
if let imageData = photo.fileDataRepresentation(){
print(imageData)
image = UIImage(data: imageData)
// performSegue(withIdentifier: "showPhoto_segue", sender: nil)
if(self.image == nil){
print("The image is empty")
}
pdelegate?.previewSegueDelegate(image: self.image!, device: currentCamera!)
}
}
}

UIImageOrientation with camera

This is the code of my camera, which can switch between the front camera and the rear camera. If I take a picture with the back camera, the orientation of the photo is good (original), but if I take a photo with the front camera, I get the image with the wrong orientation.
class TakeSelfieViewController: UIViewController, AVCapturePhotoCaptureDelegate {
var captureSession = AVCaptureSession()
var photoOutput = AVCapturePhotoOutput()
var previewLayer : AVCaptureVideoPreviewLayer?
var captureDevice : AVCaptureDevice?
var sessionOutputSetting = AVCapturePhotoSettings(format: [AVVideoCodecKey:AVVideoCodecJPEG])
var toggle = false
@IBOutlet weak var cameraView: UIView!
@IBOutlet weak var tempImageView: UIImageView!
@IBOutlet weak var adorButton: UIButton!
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
previewLayer?.frame = cameraView.bounds
let blurEffect = UIBlurEffect(style: UIBlurEffectStyle.light)
let blurEffectView = UIVisualEffectView(effect: blurEffect)
blurEffectView.frame = adorButton.bounds
blurEffectView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
adorButton.addSubview(blurEffectView)
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
pickCamera(which: toggle)
}
func pickCamera(which: Bool) {
if (which == true) {
let deviceDescovery = AVCaptureDeviceDiscoverySession(deviceTypes: [AVCaptureDeviceType.builtInDualCamera, AVCaptureDeviceType.builtInTelephotoCamera,AVCaptureDeviceType.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: AVCaptureDevicePosition.back)
print("back camera")
startCamera(deviceDesc: deviceDescovery!)
toggle = true
} else if (which == false) {
let deviceDescovery = AVCaptureDeviceDiscoverySession(deviceTypes: [AVCaptureDeviceType.builtInDualCamera, AVCaptureDeviceType.builtInTelephotoCamera,AVCaptureDeviceType.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: AVCaptureDevicePosition.front)
print("front camera")
startCamera(deviceDesc: deviceDescovery!)
toggle = false
}
}
func startCamera(deviceDesc: AVCaptureDeviceDiscoverySession!) {
for device in (deviceDesc.devices)! {
if device.position == AVCaptureDevicePosition.back {
do {
let input = try AVCaptureDeviceInput(device: device)
if captureSession.canAddInput(input) {
captureSession.addInput(input)
if captureSession.canAddOutput(photoOutput) {
captureSession.addOutput(photoOutput)
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
previewLayer?.connection.videoOrientation = AVCaptureVideoOrientation.portrait
cameraView.layer.addSublayer(previewLayer!)
captureSession.startRunning()
print("ADD Back")
} else { print("Cannot add input - back") }
}
} catch {
print("Error")
}
} else if (device.position == AVCaptureDevicePosition.front) {
do {
let input = try AVCaptureDeviceInput(device: device)
print(input)
if captureSession.canAddInput(input) {
captureSession.addInput(input)
if captureSession.canAddOutput(photoOutput) {
captureSession.addOutput(photoOutput)
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
previewLayer?.connection.videoOrientation = AVCaptureVideoOrientation.portrait
cameraView.layer.addSublayer(previewLayer!)
captureSession.startRunning()
print("ADD Front")
}
} else { print("Cannot add input - front") }
} catch {
print(error)
}
}
}
}
func didPressTakePhoto() {
if let videoConnection = photoOutput.connection(withMediaType: AVMediaTypeVideo) {
videoConnection.videoOrientation = AVCaptureVideoOrientation.portrait
let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey : AVVideoCodecJPEG])
photoOutput.capturePhoto(with: settings, delegate: self)
}
}
func capture(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhotoSampleBuffer photoSampleBuffer: CMSampleBuffer?, previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
let imageData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: photoSampleBuffer!, previewPhotoSampleBuffer: previewPhotoSampleBuffer)
let dataProvider = CGDataProvider(data: imageData as! CFData)
let cgImageRef = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.defaultIntent)
let image = UIImage(cgImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.right)
self.tempImageView.image = image
self.tempImageView.isHidden = false
self.yellowButton.isHidden = true
self.toggleAction.isHidden = true
self.adorButton.isHidden = true
print("Hola")
}
var didTakePhoto = Bool()
@IBOutlet weak var yellowButton: UIButton!
@IBOutlet weak var toggleAction: UIButton!
override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
if didTakePhoto {
tempImageView.isHidden = true
yellowButton.isHidden = false
toggleAction.isHidden = false
adorButton.isHidden = false
didTakePhoto = false
print("🍊")
}
}
@IBAction func yellowPressed(_ sender: UIButton) {
captureSession.startRunning()
didTakePhoto = true
didPressTakePhoto()
print("🐶")
}
@IBAction func toggleCamera(_ sender: Any) {
if (toggle == false) {
print("Changing to back camera")
let currentCameraInput: AVCaptureInput = captureSession.inputs[0] as! AVCaptureInput
captureSession.removeInput(currentCameraInput)
toggle = true
pickCamera(which: toggle)
} else if (toggle == true) {
print("Changing to front camera")
let currentCameraInput: AVCaptureInput = captureSession.inputs[0] as! AVCaptureInput
captureSession.removeInput(currentCameraInput)
toggle = false
pickCamera(which: toggle)
}
}
override var prefersStatusBarHidden: Bool {
return true
}
}
How do I solve this? 🤔
Images taken with the front camera are mirrored. When you take a picture, the image orientation is stored inside its EXIF dictionary, or passed inside a metadata dictionary.
Most of the time, when you pass the image around as JPG or PNG, this value is not taken into account unless you deal with it directly.
You should experience a similar problem if you take a picture in landscape.
In your capture method, you are forcing the orientation to a fixed value when you should instead derive it from the capture conditions.
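For example, a minimal sketch (untested; it assumes the toggle flag from the question, where true means the back camera is active) that derives the orientation instead of hard-coding .right:
// Back-camera portrait shots want .right; the front camera delivers a
// mirrored buffer, so use the mirrored counterpart instead.
let orientation: UIImageOrientation = toggle ? .right : .leftMirrored
let image = UIImage(cgImage: cgImageRef!, scale: 1.0, orientation: orientation)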

Double Tap or Use Button To Switch Camera From Back to Front (Xcode 8, Swift 3)

So lately I have been trying to implement switching the camera view from the back to the front camera in Swift 3 - however, with no luck.
Currently, my default view is from the back camera - I can take pictures with it and then retake them. But can anyone show me how to either double-tap the screen to switch cameras, or simply use the assigned button to switch them? Thank you!
import UIKit
import AVFoundation
import FirebaseDatabase
class CameraView: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {
override var prefersStatusBarHidden: Bool {
return true
}
var captureSession : AVCaptureSession!
var stillImageOutput : AVCaptureStillImageOutput!
var previewLayer : AVCaptureVideoPreviewLayer!
@IBOutlet var cameraView: UIView!
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view.
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
previewLayer?.frame = cameraView.bounds
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
captureSession = AVCaptureSession()
captureSession?.sessionPreset = AVCaptureSessionPreset1920x1080
var backCamera = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
var error : NSError?
do {
var input = try! AVCaptureDeviceInput(device: backCamera)
if (error == nil && captureSession?.canAddInput(input) != nil) {
captureSession?.addInput(input)
stillImageOutput = AVCaptureStillImageOutput()
stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
if (captureSession?.canAddOutput(stillImageOutput) != nil) {
captureSession?.addOutput(stillImageOutput)
previewLayer = AVCaptureVideoPreviewLayer (session: captureSession)
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.portrait
cameraView.layer.addSublayer(previewLayer)
captureSession?.startRunning() }
}
} catch {
}
}
@IBOutlet var tempImageView: UIImageView!
@IBAction func didPressTakePhoto(_ sender: UIButton) {
if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
videoConnection.videoOrientation = AVCaptureVideoOrientation.portrait
stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: {
(sampleBuffer, error) in
if sampleBuffer != nil {
var imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
var dataProvider = CGDataProvider.init(data: imageData as! CFData)
var cgImageRef = CGImage.init(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
var image = UIImage (cgImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.right)
self.tempImageView.image = image
self.tempImageView.isHidden = false
}
})
}
}
var didTakePhoto = Bool()
@IBAction func didPressTakeAnother(_ sender: UIButton) {
if didTakePhoto == true {
tempImageView.isHidden = true
didTakePhoto = false
} else {
captureSession?.startRunning()
didTakePhoto = true
}
}
}
I don't see any problems here - here is a working solution:
import Foundation
import UIKit
import AVFoundation
class MainViewController: UIViewController {
var tempImage: UIImageView?
var captureSession: AVCaptureSession?
var stillImageOutput: AVCaptureStillImageOutput?
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var currentCaptureDevice: AVCaptureDevice?
var usingFrontCamera = false
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
loadCamera()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
videoPreviewLayer!.frame = self.cameraPreviewSurface.bounds
}
@IBAction func switchButtonAction(_ sender: Any) {
usingFrontCamera = !usingFrontCamera
loadCamera()
}
func getFrontCamera() -> AVCaptureDevice?{
let videoDevices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo)
for device in videoDevices!{
let device = device as! AVCaptureDevice
if device.position == AVCaptureDevicePosition.front {
return device
}
}
return nil
}
func getBackCamera() -> AVCaptureDevice{
return AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
}
func loadCamera() {
if(captureSession == nil){
captureSession = AVCaptureSession()
captureSession!.sessionPreset = AVCaptureSessionPresetPhoto
}
var error: NSError?
var input: AVCaptureDeviceInput!
currentCaptureDevice = (usingFrontCamera ? getFrontCamera() : getBackCamera())
do {
input = try AVCaptureDeviceInput(device: currentCaptureDevice)
} catch let error1 as NSError {
error = error1
input = nil
print(error!.localizedDescription)
}
for i : AVCaptureDeviceInput in (self.captureSession?.inputs as! [AVCaptureDeviceInput]){
self.captureSession?.removeInput(i)
}
if error == nil && captureSession!.canAddInput(input) {
captureSession!.addInput(input)
stillImageOutput = AVCaptureStillImageOutput()
stillImageOutput?.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
if captureSession!.canAddOutput(stillImageOutput) {
captureSession!.addOutput(stillImageOutput)
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer!.videoGravity = AVLayerVideoGravityResizeAspectFill
videoPreviewLayer!.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
//self.cameraPreviewSurface.layer.sublayers?.forEach { $0.removeFromSuperlayer() }
self.cameraPreviewSurface.layer.addSublayer(videoPreviewLayer!)
DispatchQueue.main.async {
self.captureSession!.startRunning()
}
}
}
}
}
Some notes:
cameraPreviewSurface - this is the UIView where the camera preview will show.
Don't reassign the session, and don't just add the new input - remove the existing ones first.
P.S. Code done with Swift 3.0.1 / Xcode 8.1.
Cheers )
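Since the question also asks about double-tapping to switch, a small sketch on top of the solution above (assuming the switchButtonAction action from it; add this in viewDidLoad):
// Switch cameras on a double tap as well as on the button.
let doubleTap = UITapGestureRecognizer(target: self, action: #selector(switchButtonAction(_:)))
doubleTap.numberOfTapsRequired = 2
view.addGestureRecognizer(doubleTap)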
Xcode version: 10.1 (10B61)
Swift version: 4.2
Changing the AVCaptureSession capture source
This builds on Stepan Maksymov's solution;
we can simplify it by working with captureSession.inputs directly.
First, create an IBAction outlet and connect it to the view controller to change the camera view:
@IBAction private func changeCamera(_ cameraButton: UIButton) {
usingFrontCamera = !usingFrontCamera
do{
captureSession.removeInput(captureSession.inputs.first!)
if(usingFrontCamera){
captureDevice = getFrontCamera()
}else{
captureDevice = getBackCamera()
}
let captureDeviceInput1 = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput1)
}catch{
print(error.localizedDescription)
}
}
Second, copy the simplified AVCaptureDevice setup (again referring to Stepan Maksymov's answer):
func getFrontCamera() -> AVCaptureDevice? {
return AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .front).devices.first
}
func getBackCamera() -> AVCaptureDevice? {
return AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices.first
}
You can replace the @IBAction with a plain function like so:
func changeCamera(){
usingFrontCamera = !usingFrontCamera
do{
captureSession.removeInput(captureSession.inputs.first!)
if(usingFrontCamera){
captureDevice = getFrontCamera()
}else{
captureDevice = getBackCamera()
}
let captureDeviceInput1 = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput1)
}catch{
print(error.localizedDescription)
}
}
In addition to Stepan Maksymov's post, I suggest adding this function:
func stopCaptureSession () {
self.captureSession.stopRunning()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
self.captureSession.removeInput(input)
}
}
}
And call it instead of these lines from his post:
for i : AVCaptureDeviceInput in (self.captureSession?.inputs as! [AVCaptureDeviceInput]){
self.captureSession?.removeInput(i)
}
This way the cameras will switch more quickly.
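For example - a sketch combining the pieces above - the switch action then becomes:
@IBAction func switchButtonAction(_ sender: Any) {
    usingFrontCamera = !usingFrontCamera
    stopCaptureSession() // stops the session and removes all current inputs
    loadCamera()         // re-adds the chosen camera and restarts the session
}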
