AVCapturePhotoOutput iOS Camera Super Dark

I have an app set up to use the camera (on a timer) to detect the presence of a face. The detection process works fairly well when I feed the app a photo that I have added to assets. However, when I attempt to use the output of the camera directly, or even after saving the image to a file, the resulting image is so dark that face recognition is completely unreliable.
If I display the image as seen by the camera, it looks correct. I captured the following two images - one from the camera as seen live, the other of the same view after the image was created from AVCapturePhotoOutput. The same darkness happens if I simply display the captured image in an image view.
Note the comment: "I put the breakpoint here and took a screen shot". I took the second screenshot when the code completed. Both were taken in HIGH light.
Here's the basic code:
class CRSFaceRecognitionViewController: UIViewController, UIImagePickerControllerDelegate {
var sentBy : String?
//timers
var faceTimer : Timer?
var frvcTimer : Timer?
//capture
var captureSession = AVCaptureSession()
var settings = AVCapturePhotoSettings()
var backCamera : AVCaptureDevice?
var frontCamera : AVCaptureDevice?
var currentCamera : AVCaptureDevice?
var photoOutput : AVCapturePhotoOutput?
var cameraPreviewLayer : AVCaptureVideoPreviewLayer?
var image : UIImage?
var outputImage : UIImage?
@IBOutlet weak var imageView: UIImageView!
//MARK: - Setup
override func viewDidLoad() {
super.viewDidLoad()
}//viewDidLoad
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(true)
}//viewWillAppear
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(true)
//check for camera
if (UIImagePickerController.isSourceTypeAvailable(UIImagePickerControllerSourceType.camera)) {
setupCaptureSession()
setupDevices()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
photoOutput?.capturePhoto(with:settings, delegate: self)
} else {
print("Camera not present")
}
}//viewDidAppear
//MARK: - Video
@objc func showFaceRecognitionViewController() {
//all this does is present the image in a new ViewController imageView
performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
}//showThePhotoView
func setupCaptureSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}//setupCaptureSession
func setupDevices() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
let devices = deviceDiscoverySession.devices
for device in devices {
if device.position == AVCaptureDevice.Position.back {
backCamera = device
} else if device.position == AVCaptureDevice.Position.front {
frontCamera = device
}//if else
}//for in
currentCamera = frontCamera
}//setupDevices
func setupInputOutput() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: {(success, error) in
print("in photoOutput completion handler")
})
captureSession.addOutput(photoOutput!)
} catch {
print("Error creating AVCaptureDeviceInput:", error)
}//do catch
}//setupInputOutput
func setupPreviewLayer() {
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session : captureSession)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewLayer?.frame = view.frame
view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}//setupPreviewLayer
func startRunningCaptureSession() {
captureSession.startRunning()
}//startRunningCaptureSession
//MARK: - Segue
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
if segue.identifier == "showSavedCameraPhoto" {
let controller = segue.destination as! JustToSeeThePhotoViewController
controller.inImage = outputImage
}//if segue
}//prepare
//MARK: - Look for Faces
func findTheFaces() {
let myView : UIView = self.view
guard let outImage = outputImage else {return}
let imageView = UIImageView(image: outImage)
imageView.contentMode = .scaleAspectFit
let scaledHeight = myView.frame.width / outImage.size.width * outImage.size.height
imageView.frame = CGRect(x: 0, y: 0, width: myView.frame.width, height: myView.frame.height)
imageView.backgroundColor = UIColor.blue
myView.addSubview(imageView)
let request = VNDetectFaceRectanglesRequest { (req, err) in
if let err = err {
print("VNDetectFaceRectanglesRequest failed to run:", err)
return
}//if let err
print(req.results ?? "req.results is empty")
req.results?.forEach({ (res) in
DispatchQueue.main.async {
guard let faceObservation = res as? VNFaceObservation else {return}
let x = myView.frame.width * faceObservation.boundingBox.origin.x
let width = myView.frame.width * faceObservation.boundingBox.width
let height = scaledHeight * faceObservation.boundingBox.height
let y = scaledHeight * (1 - faceObservation.boundingBox.origin.y) - height
let redView = UIView()
redView.backgroundColor = .red
redView.alpha = 0.4
redView.frame = CGRect(x: x, y: y, width: width, height: height)
myView.addSubview(redView)
print("faceObservation bounding box:")
print(faceObservation.boundingBox)
//if you get here, then you have a face bounding box
}//main
})//forEach block
}//let request
guard let cgImage = outImage.cgImage else {return}
DispatchQueue.global(qos: .utility).async {
let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
do {
try handler.perform([request])
print("handler request was successful")
self.performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
} catch let reqErr {
print("Failed to perform request:", reqErr)
}
}//DispatchQueue
}//findTheFaces
//MARK: - Memory
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}//didReceiveMemoryWarning
}//class
extension CRSFaceRecognitionViewController : AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
if let imageData = photo.fileDataRepresentation() {
print(imageData)
outputImage = UIImage(data : imageData)
//
//I put breakpoint here and took a screen shot
//
if let outImage = outputImage?.updateImageOrientionUpSide() {
self.outputImage = outImage
}
DispatchQueue.main.async {
self.findTheFaces()
}
}//if let imageData
}//photoOutput
}//extension
extension UIImage {
//you need to do this to ensure that the image is in portrait mode
//the face recognition method will not work if the face is horizontal
func updateImageOrientionUpSide() -> UIImage? {
if self.imageOrientation == .up {
return self
}
UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
self.draw(in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
if let normalizedImage:UIImage = UIGraphicsGetImageFromCurrentImageContext() {
UIGraphicsEndImageContext()
return normalizedImage
}
UIGraphicsEndImageContext()
return nil
}//updateImageOrientionUpSide
}//image
I must be doing something wrong with the camera capture. Any help would be appreciated. Swift 4, iOS 11.2.5, Xcode 9.2

I would try adding a delay between startRunningCaptureSession() and photoOutput?.capturePhoto(with:settings, delegate: self)
For example,
startRunningCaptureSession()
DispatchQueue.main.asyncAfter(deadline: .now() + .seconds(4)) {
    // take a photo after the session has had time to adjust exposure
    self.photoOutput?.capturePhoto(with: self.settings, delegate: self)
}
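An alternative that avoids a hard-coded delay is to wait until the session actually reports that it is running, and then still give auto-exposure a moment to converge. A sketch, assuming a stored property sessionObserver (hypothetical) to hold the observer token; the one-second grace period is a guess:
// sessionObserver: NSObjectProtocol? is a hypothetical stored property
// so the observer can be removed after the first capture.
sessionObserver = NotificationCenter.default.addObserver(
    forName: .AVCaptureSessionDidStartRunning,
    object: captureSession,
    queue: .main
) { [weak self] _ in
    if let token = self?.sessionObserver {
        NotificationCenter.default.removeObserver(token)
        self?.sessionObserver = nil
    }
    // Let auto-exposure/white balance settle before the first capture.
    DispatchQueue.main.asyncAfter(deadline: .now() + 1.0) {
        guard let strongSelf = self else { return }
        strongSelf.photoOutput?.capturePhoto(with: strongSelf.settings,
                                             delegate: strongSelf)
    }
}
startRunningCaptureSession()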

It appears I had too many async pieces. I broke the code into separate functions, one for each major step (async or not), and coordinated them with a DispatchGroup. That seems to have solved the issue.
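A minimal sketch of that sequencing, reusing the setup functions from the question; the queue label and the one-second settle delay are illustrative:
let setupGroup = DispatchGroup()
let sessionQueue = DispatchQueue(label: "camera.setup") // hypothetical serial queue

setupGroup.enter()
sessionQueue.async {
    self.setupCaptureSession()
    self.setupDevices()
    self.setupInputOutput()
    setupGroup.leave()
}

// Runs only after every entered block has left the group.
setupGroup.notify(queue: .main) {
    self.setupPreviewLayer()
    self.startRunningCaptureSession()
    // Still give auto-exposure a moment before the first capture.
    DispatchQueue.main.asyncAfter(deadline: .now() + 1.0) {
        self.photoOutput?.capturePhoto(with: self.settings, delegate: self)
    }
}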

Related

How to detect barcode using Apple Vision in CGRect only?

I have an app that uses a CGRect(x: 0, y: 0, width: 335, height: 150) to show the camera for barcode scanning. However, when a barcode is presented to the camera outside that CGRect, it still gets scanned. How can I limit the scanning area to the CGRect in my preview layer? This uses Vision.
let captureSession = AVCaptureSession()
var previewLayer: AVCaptureVideoPreviewLayer!
var activeInput: AVCaptureDeviceInput!
lazy var detectBarcodeRequest = VNDetectBarcodesRequest { request, error in
guard error == nil else {
print("Barcode Error: \(String(describing: error?.localizedDescription))")
return
}
self.processBarCode(request)
}
override func viewDidLoad() {
super.viewDidLoad()
setupSession()
setupPreview()
startSession()
}
func setupSession() {
guard let camera = AVCaptureDevice.default(for: .video) else {
return
}
do {
let videoInput = try AVCaptureDeviceInput(device: camera)
for input in [videoInput] {
if captureSession.canAddInput(input) {
captureSession.addInput(input)
}
}
activeInput = videoInput
} catch {
print("Error setting device input: \(error)")
return
}
let captureOutput = AVCaptureVideoDataOutput()
captureOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: DispatchQoS.QoSClass.default))
captureSession.addOutput(captureOutput)
}
func setupPreview() {
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.videoGravity = .resizeAspectFill
previewLayer.connection?.videoOrientation = .portrait
previewLayer.frame = CGRect(x: 0, y: 0, width: 335, height: 150)
view.layer.insertSublayer(previewLayer, at: 0)
}//setupPreview
func startSession() {
if !captureSession.isRunning {
DispatchQueue.global(qos: .default).async { [weak self] in
self?.captureSession.startRunning()
}
}
}
// MARK: - processBarCode
func processBarCode(_ request: VNRequest) {
DispatchQueue.main.async {
guard let results = request.results as? [VNBarcodeObservation] else {
return
}
if let payload = results.first?.payloadStringValue, let symbology = results.first?.symbology {
print("payload is \(payload) \(symbology) ")
}
}
}//processBarCode
Edit:
// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate
extension CameraViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
let imageRequestHandler = VNImageRequestHandler(
cvPixelBuffer: pixelBuffer,
orientation: .up)
do {
try imageRequestHandler.perform([detectBarcodeRequest])
} catch {
print(error)
}
}//captureOutput
}//extension
extension AVCaptureVideoPreviewLayer {
func rectOfInterestConverted(parentRect: CGRect, fromLayerRect: CGRect) -> CGRect {
let parentWidth = parentRect.width
let parentHeight = parentRect.height
let newX = (parentWidth - fromLayerRect.maxX)/parentWidth
let newY = 1 - (parentHeight - fromLayerRect.minY)/parentHeight
let width = 1 - (fromLayerRect.minX/parentWidth + newX)
let height = (fromLayerRect.maxY/parentHeight) - newY
return CGRect(x: newX, y: newY, width: width, height: height)
}
}
Usage:
if let rect = videoPreviewLayer?.rectOfInterestConverted(parentRect: self.view.frame, fromLayerRect: scanAreaView.frame) {
captureMetadataOutput.rectOfInterest = rect
}
In func captureOutput(_:didOutput:from:) you are most likely passing the whole image into VNImageRequestHandler.
Instead, you need to crop the image to your visible rect.
Another approach would be to restrict Vision to only the visible part of the image via regionOfInterest, as @HurricaneOnTheMoon proposed below.
You can use the regionOfInterest property of VNDetectBarcodesRequest to set up a detection region.
The default value is { { 0, 0 }, { 1, 1 } } according to the Apple documentation.
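A sketch of wiring that up with the names used in this thread (previewLayer, scanAreaView, detectBarcodeRequest): Vision's regionOfInterest is a normalized rect with a lower-left origin, so the converted metadata rect is flipped vertically. The exact mapping depends on your buffer orientation, so verify it against a barcode at a known position:
// Convert the visible scan rect (layer coordinates) into normalized
// metadata-output coordinates, then flip for Vision's lower-left origin.
let metadataRect = previewLayer.metadataOutputRectConverted(
    fromLayerRect: scanAreaView.frame)
detectBarcodeRequest.regionOfInterest = CGRect(
    x: metadataRect.minX,
    y: 1 - metadataRect.maxY,
    width: metadataRect.width,
    height: metadataRect.height)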

Custom Camera and Crop image in Swift

I have created a custom camera and implemented the code below to crop the taken image. I show guides in the preview layer, and I want to crop the image that appears within that area.
func imageByCropToRect(rect:CGRect, scale:Bool) -> UIImage {
var rect = rect
var scaleFactor: CGFloat = 1.0
if scale {
scaleFactor = self.scale
rect.origin.x *= scaleFactor
rect.origin.y *= scaleFactor
rect.size.width *= scaleFactor
rect.size.height *= scaleFactor
}
var image: UIImage? = nil;
if rect.size.width > 0 && rect.size.height > 0 {
let imageRef = self.cgImage!.cropping(to: rect)
image = UIImage(cgImage: imageRef!, scale: scaleFactor, orientation: self.imageOrientation)
}
return image!
}
This code works fine and gives an exact crop when the line of code below is commented out. However, I want the camera stream to be full screen, so I have to use that line, and with it enabled the resulting image comes out sort of zoomed out.
(self.previewLayer as! AVCaptureVideoPreviewLayer).videoGravity = AVLayerVideoGravity.resizeAspectFill
How do I solve this issue? Is the cropping code wrong?
Here is the full class code:
import UIKit
import AVFoundation
class CameraViewController: UIViewController {
@IBOutlet weak var guideImageView: UIImageView!
@IBOutlet weak var guidesView: UIView!
@IBOutlet weak var cameraPreviewView: UIView!
@IBOutlet weak var cameraButtonView: UIView!
@IBOutlet weak var captureButton: UIButton!
var captureSession = AVCaptureSession()
var previewLayer: CALayer!
var captureDevice: AVCaptureDevice!
/// This will be true when the user clicks on the click photo button.
var takePhoto = false
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
captureSession = AVCaptureSession()
previewLayer = CALayer()
takePhoto = false
requestAuthorization()
}
private func userinteractionToButton(_ interaction: Bool) {
captureButton.isEnabled = interaction
}
/// This function will request authorization, If authorized then start the camera.
private func requestAuthorization() {
switch AVCaptureDevice.authorizationStatus(for: AVMediaType.video) {
case .authorized:
prepareCamera()
case .denied, .restricted, .notDetermined:
AVCaptureDevice.requestAccess(for: AVMediaType.video, completionHandler: { (granted) in
if !Thread.isMainThread {
DispatchQueue.main.async {
if granted {
self.prepareCamera()
} else {
let alert = UIAlertController(title: "unable_to_access_the_Camera", message: "to_enable_access_go_to_setting_privacy_camera_and_turn_on_camera_access_for_this_app", preferredStyle: UIAlertControllerStyle.alert)
alert.addAction(UIAlertAction(title: "ok", style: .default, handler: {_ in
self.navigationController?.popToRootViewController(animated: true)
}))
self.present(alert, animated: true, completion: nil)
}
}
} else {
if granted {
self.prepareCamera()
} else {
let alert = UIAlertController(title: "unable_to_access_the_Camera", message: "to_enable_access_go_to_setting_privacy_camera_and_turn_on_camera_access_for_this_app", preferredStyle: UIAlertControllerStyle.alert)
alert.addAction(UIAlertAction(title: "ok", style: .default, handler: {_ in
self.navigationController?.popToRootViewController(animated: true)
}))
self.present(alert, animated: true, completion: nil)
}
}
})
}
}
/// Checks whether the primary camera is available; if found, calls the method which assigns the available device to the AVCaptureDevice.
private func prepareCamera() {
// Resets the session.
self.captureSession.sessionPreset = AVCaptureSession.Preset.photo
if #available(iOS 10.0, *) {
let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices
self.assignCamera(availableDevices)
} else {
// Fallback on earlier versions
// development, need to test this on iOS 8
if let availableDevices = AVCaptureDevice.default(for: AVMediaType.video) {
self.assignCamera([availableDevices])
} else {
self.showAlert()
}
}
}
/// Assigns the AVCaptureDevice to the respective variable and begins the session.
///
/// - Parameter availableDevices: [AVCaptureDevice]
private func assignCamera(_ availableDevices: [AVCaptureDevice]) {
if availableDevices.first != nil {
captureDevice = availableDevices.first
beginSession()
} else {
self.showAlert()
}
}
/// Configures the camera settings and begins the session; this function is responsible for showing the image on the UI.
private func beginSession() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput)
} catch {
print(error.localizedDescription)
}
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer = previewLayer
self.cameraPreviewView.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.view.layer.frame
self.previewLayer.frame.origin.y = +self.cameraPreviewView.frame.origin.y
(self.previewLayer as! AVCaptureVideoPreviewLayer).videoGravity = AVLayerVideoGravity.resizeAspectFill
self.previewLayer.masksToBounds = true
self.cameraPreviewView.clipsToBounds = true
captureSession.startRunning()
self.view.bringSubview(toFront: self.cameraPreviewView)
self.view.bringSubview(toFront: self.cameraButtonView)
self.view.bringSubview(toFront: self.guidesView)
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [((kCVPixelBufferPixelFormatTypeKey as NSString) as String):NSNumber(value:kCVPixelFormatType_32BGRA)]
dataOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(dataOutput) {
captureSession.addOutput(dataOutput)
}
captureSession.commitConfiguration()
let queue = DispatchQueue(label: "com.letsappit.camera")
dataOutput.setSampleBufferDelegate(self, queue: queue)
self.userinteractionToButton(true)
}
/// Get the UIImage from the given CMSampleBuffer.
///
/// - Parameter buffer: CMSampleBuffer
/// - Returns: UIImage?
func getImageFromSampleBuffer(buffer:CMSampleBuffer, orientation: UIImageOrientation) -> UIImage? {
if let pixelBuffer = CMSampleBufferGetImageBuffer(buffer) {
let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
let context = CIContext()
let imageRect = CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer))
if let image = context.createCGImage(ciImage, from: imageRect) {
return UIImage(cgImage: image, scale: UIScreen.main.scale, orientation: orientation)
}
}
return nil
}
/// This function will destroy the capture session.
func stopCaptureSession() {
self.captureSession.stopRunning()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
self.captureSession.removeInput(input)
}
}
}
func showAlert() {
let alert = UIAlertController(title: "Unable to access the camera", message: "It appears that either your device doesn't have camera or its broken", preferredStyle: .alert)
alert.addAction(UIAlertAction(title: "cancel", style: .cancel, handler: {_ in
self.navigationController?.dismiss(animated: true, completion: nil)
}))
self.present(alert, animated: true, completion: nil)
}
@IBAction func didTapClick(_ sender: Any) {
userinteractionToButton(false)
takePhoto = true
}
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
if segue.identifier == "showImage" {
let vc = segue.destination as! ShowImageViewController
vc.image = sender as! UIImage
}
}
}
extension CameraViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ captureOutput: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
if connection.isVideoOrientationSupported {
connection.videoOrientation = .portrait
}
if takePhoto {
takePhoto = false
// Rotation should be unlocked to work.
var orientation = UIImageOrientation.up
switch UIDevice.current.orientation {
case .landscapeLeft:
orientation = .left
case .landscapeRight:
orientation = .right
case .portraitUpsideDown:
orientation = .down
default:
orientation = .up
}
if let image = self.getImageFromSampleBuffer(buffer: sampleBuffer, orientation: orientation) {
DispatchQueue.main.async {
let newImage = image.imageByCropToRect(rect: self.guideImageView.frame, scale: true)
self.stopCaptureSession()
self.previewLayer.removeFromSuperlayer()
self.performSegue(withIdentifier: "showImage", sender: newImage)
}
}
}
}
}
Here is the view hierarchy image
It's not clear where the problem is. I would use the debugger or some print statements to figure out whether the issue is with the image itself or with the view displaying it. Print out the size of the cropped image to make sure it is correct.
Then print out the image view's size in ShowImageViewController's viewDidAppear to make sure it is correct.
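For instance, two quick checks (newImage comes from the capture callback in the question; the imageView outlet in ShowImageViewController is a hypothetical name):
// In captureOutput(_:didOutput:from:), right after cropping:
print("cropped image size:", newImage.size)

// In ShowImageViewController.viewDidAppear(_:):
print("image view size:", imageView.bounds.size)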
To correct the zoomed-out crop, change your crop function to the following, which accounts for the image orientation.
func croppedInRect(rect: CGRect) -> UIImage? {
func rad(_ degree: Double) -> CGFloat {
return CGFloat(degree / 180.0 * .pi)
}
var rectTransform: CGAffineTransform
switch imageOrientation {
case .left:
rectTransform = CGAffineTransform(rotationAngle: rad(90)).translatedBy(x: 0, y: -self.size.height)
case .right:
rectTransform = CGAffineTransform(rotationAngle: rad(-90)).translatedBy(x: -self.size.width, y: 0)
case .down:
rectTransform = CGAffineTransform(rotationAngle: rad(-180)).translatedBy(x: -self.size.width, y: -self.size.height)
default:
rectTransform = .identity
}
rectTransform = rectTransform.scaledBy(x: self.scale, y: self.scale)
var cgImage = self.cgImage
if cgImage == nil {
let ciContext = CIContext()
if let ciImage = self.ciImage {
cgImage = ciContext.createCGImage(ciImage, from: ciImage.extent)
}
}
if let imageRef = cgImage?.cropping(to: rect.applying(rectTransform)) {
let result = UIImage(cgImage: imageRef, scale: self.scale, orientation: self.imageOrientation)
return result
}
return nil
}
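A hypothetical call site for the function above, where image is the captured photo and the guide rect is mapped into image coordinates before cropping. The per-axis scaling is only exact when the photo and the preview share an aspect ratio; with .resizeAspectFill you also need to offset by the part of the image cropped off-screen:
let scaleX = image.size.width / cameraPreviewView.bounds.width
let scaleY = image.size.height / cameraPreviewView.bounds.height
let imageRect = CGRect(x: guideImageView.frame.origin.x * scaleX,
                       y: guideImageView.frame.origin.y * scaleY,
                       width: guideImageView.frame.width * scaleX,
                       height: guideImageView.frame.height * scaleY)
let cropped = image.croppedInRect(rect: imageRect)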

Switching from the front camera to the back camera causes the app to crash

The app crashes with the error:
'No active and enabled video connection'
After taking a picture with the back camera, segueing to another screen, dismissing that screen, returning to the camera screen, flipping from the back camera to the front camera, and taking another picture, the app crashes. Below is the code for the camera screen.
import UIKit
import AVFoundation
protocol gestureDelegate{
func gestureDelegate()
}
protocol previewSegueDelegate {
func previewSegueDelegate(image:UIImage,device:AVCaptureDevice)
}
class MainCameraCollectionViewCell: UICollectionViewCell {
var gdelegate: gestureDelegate?
var pdelegate: previewSegueDelegate?
@IBOutlet weak var myView: UIView!
var captureSession = AVCaptureSession()
private var sessionQueue: DispatchQueue!
var captureConnection = AVCaptureConnection()
var backCamera: AVCaptureDevice?
var frontCamera: AVCaptureDevice?
var currentCamera: AVCaptureDevice?
var photoOutPut: AVCapturePhotoOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
var image: UIImage?
var usingFrontCamera = false
override func awakeFromNib() {
super.awakeFromNib()
setupCaptureSession()
setupDevice()
setupInput()
setupPreviewLayer()
startRunningCaptureSession()
print("Inside of camera cell")
let pinchGesture = UIPinchGestureRecognizer(target: self, action: #selector(MainCameraCollectionViewCell.tapEdit(sender:)))
addGestureRecognizer(pinchGesture)
}
@objc func tapEdit(sender: UIPinchGestureRecognizer){
gdelegate?.gestureDelegate()
guard let device = currentCamera else { return }
if sender.state == .changed {
let maxZoomFactor = device.activeFormat.videoMaxZoomFactor
let pinchVelocityDividerFactor: CGFloat = 5.0
do {
try device.lockForConfiguration()
defer { device.unlockForConfiguration() }
let desiredZoomFactor = device.videoZoomFactor + atan2(sender.velocity, pinchVelocityDividerFactor)
device.videoZoomFactor = max(1.0, min(desiredZoomFactor, maxZoomFactor))
} catch {
print(error)
}
}
}
func setupCaptureSession(){
captureSession.sessionPreset = AVCaptureSession.Preset.photo
sessionQueue = DispatchQueue(label: "session queue")
}
func setupDevice(usingFrontCamera:Bool = false){
sessionQueue.async {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
let devices = deviceDiscoverySession.devices
for device in devices{
if usingFrontCamera && device.position == AVCaptureDevice.Position.front {
self.currentCamera = device
} else if device.position == AVCaptureDevice.Position.back {
self.currentCamera = device
}
}
}
}
func setupInput() {
sessionQueue.async {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: self.currentCamera!)
if self.captureSession.canAddInput(captureDeviceInput) {
self.captureSession.addInput(captureDeviceInput)
}
self.photoOutPut = AVCapturePhotoOutput()
self.photoOutPut?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format:[AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
if self.captureSession.canAddOutput(self.photoOutPut!) {
self.captureSession.addOutput(self.photoOutPut!)
}
} catch {
print(error)
}
}
}
func setupPreviewLayer(){
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewLayer?.frame = CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)
self.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}
func startRunningCaptureSession(){
captureSession.startRunning()
}
@IBAction func cameraButton_TouchUpInside(_ sender: Any) {
let settings = AVCapturePhotoSettings()
photoOutPut?.capturePhoto(with: settings, delegate: self as! AVCapturePhotoCaptureDelegate)
}
@IBAction func FlipThe_camera(_ sender: UIButton) {
print("Flip Touched")
captureSession.beginConfiguration()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
captureSession.removeInput(input)
}
}
usingFrontCamera = !usingFrontCamera
setupCaptureSession()
setupDevice(usingFrontCamera: usingFrontCamera)
setupInput()
captureSession.commitConfiguration()
startRunningCaptureSession()
}
}
extension MainCameraCollectionViewCell: AVCapturePhotoCaptureDelegate{
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
if let imageData = photo.fileDataRepresentation(){
print(imageData)
image = UIImage(data: imageData)
// performSegue(withIdentifier: "showPhoto_segue", sender: nil)
if(self.image == nil){
print("The image is empty")
}
pdelegate?.previewSegueDelegate(image: self.image!, device: currentCamera!)
}
}
}
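One likely culprit is visible in FlipThe_camera(_:): setupDevice and setupInput hop onto sessionQueue asynchronously, so commitConfiguration() can run on the main thread before the new input has actually been added, leaving the photo output with no active connection. A sketch of a synchronous flip on the session queue, reusing the cell's properties (the reconfiguration logic here is an assumption, not a confirmed fix):
@IBAction func FlipThe_camera(_ sender: UIButton) {
    sessionQueue.async {
        self.captureSession.beginConfiguration()
        // Remove and add inputs inside one configuration block,
        // on one queue, before committing.
        for input in self.captureSession.inputs {
            self.captureSession.removeInput(input)
        }
        self.usingFrontCamera = !self.usingFrontCamera
        let position: AVCaptureDevice.Position =
            self.usingFrontCamera ? .front : .back
        if let device = AVCaptureDevice.default(.builtInWideAngleCamera,
                                                for: .video,
                                                position: position),
            let newInput = try? AVCaptureDeviceInput(device: device),
            self.captureSession.canAddInput(newInput) {
            self.currentCamera = device
            self.captureSession.addInput(newInput)
        }
        self.captureSession.commitConfiguration()
    }
}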

Back camera doesn't really work in Swift

I'm trying to create a custom camera in my app. It's really simple: I want to take a picture and send the captured image to the next view controller. I don't know why, but the camera works fine when I take the picture with the front camera, but not with the back camera.
When I press the button to capture the photo, there is a delay of maybe 7 seconds, and the app crashes one time out of two.
This is my code:
var captureSession = AVCaptureSession()
var backCamera: AVCaptureDevice?
var frontCamera: AVCaptureDevice?
var currentCamera: AVCaptureDevice?
var photoOutput: AVCapturePhotoOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
var image: UIImage?
override func viewDidLoad() {
super.viewDidLoad()
setupCaptureSession()
setupDevice()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
}
func setupCaptureSession(){
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}
func setupDevice(){
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: AVCaptureDevice.Position.unspecified)
let devices = deviceDiscoverySession.devices
for device in devices {
if device.position == AVCaptureDevice.Position.back {
backCamera = device
} else if device.position == AVCaptureDevice.Position.front {
frontCamera = device
}
}
currentCamera = backCamera
}
func setupInputOutput(){
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
if #available(iOS 11.0, *) {
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
} else {
// Fallback on earlier versions
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecJPEG])], completionHandler: nil)
}
captureSession.addOutput(photoOutput!)
} catch {
print(error)
}
}
func setupPreviewLayer(){
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewLayer?.frame = CGRect(x: 0, y: 0, width: self.cameraView.frame.size.width, height: self.cameraView.frame.size.height)
cameraView.layer.addSublayer(cameraPreviewLayer!)
//self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}
func startRunningCaptureSession(){
captureSession.startRunning()
}
@IBAction func switchCameraAction(_ sender: Any) {
swapCamera()
}
/// Swaps the camera and reconfigures the capture session with the new input
fileprivate func swapCamera() {
// Get current input
guard let input = captureSession.inputs[0] as? AVCaptureDeviceInput else { return }
// Begin new session configuration and defer commit
captureSession.beginConfiguration()
defer { captureSession.commitConfiguration() }
// Create new capture device
var newDevice: AVCaptureDevice?
if input.device.position == .back {
newDevice = captureDevice(with: .front)
isFront = true
} else {
newDevice = captureDevice(with: .back)
isFront = false
}
// Create new capture input
var deviceInput: AVCaptureDeviceInput!
do {
deviceInput = try AVCaptureDeviceInput(device: newDevice!)
} catch let error {
print(error.localizedDescription)
return
}
// Swap capture device inputs
captureSession.removeInput(input)
captureSession.addInput(deviceInput)
}
fileprivate func captureDevice(with position: AVCaptureDevice.Position) -> AVCaptureDevice? {
let devices = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: AVCaptureDevice.Position.unspecified).devices
for device in devices {
if device.position == position {
return device
}
}
return nil
}
func crop(_ image: UIImage, withWidth width: Double, andHeight height: Double) -> UIImage? {
if let cgImage = image.cgImage {
let contextImage: UIImage = UIImage(cgImage: cgImage)
let contextSize: CGSize = contextImage.size
var posX: CGFloat = 0.0
var posY: CGFloat = 0.0
var cgwidth: CGFloat = CGFloat(width)
var cgheight: CGFloat = CGFloat(height)
// See what size is longer and create the center off of that
if contextSize.width > contextSize.height {
posX = ((contextSize.width - contextSize.height) / 2)
posY = 0
cgwidth = contextSize.height
cgheight = contextSize.height
} else {
posX = 0
posY = ((contextSize.height - contextSize.width) / 2)
cgwidth = contextSize.width
cgheight = contextSize.width
}
let rect: CGRect = CGRect(x: posX, y: posY, width: cgwidth, height: cgheight)
// Create bitmap image from context using the rect
var croppedContextImage: CGImage? = nil
if let contextImage = contextImage.cgImage {
if let croppedImage = contextImage.cropping(to: rect) {
croppedContextImage = croppedImage
}
}
// Create a new image based on the imageRef and rotate back to the original orientation
if let croppedImage:CGImage = croppedContextImage {
let image: UIImage = UIImage(cgImage: croppedImage, scale: image.scale, orientation: image.imageOrientation)
return image
}
}
return nil
}
@IBAction func takePhotoAction(_ sender: Any) {
var settingsCamera = AVCapturePhotoSettings()
let previewPixelType = settingsCamera.availablePreviewPhotoPixelFormatTypes.first
settingsCamera.flashMode = .off
if isFront == false && hasFlash{
settingsCamera.flashMode = .on
}
let previewFormat = [kCVPixelBufferPixelFormatTypeKey as String: previewPixelType,
kCVPixelBufferWidthKey as String: 160,
kCVPixelBufferHeightKey as String: 160]
settingsCamera.previewPhotoFormat = previewFormat
photoOutput?.capturePhoto(with: settingsCamera, delegate: self)
}
}
extension UploadViewController: AVCapturePhotoCaptureDelegate {
func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
if let error = error {
print("error occure : \(error.localizedDescription)")
}
if let sampleBuffer = photoSampleBuffer,
let previewBuffer = previewPhotoSampleBuffer,
let dataImage = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewBuffer) {
let dataProvider = CGDataProvider(data: dataImage as CFData)
let cgImageRef: CGImage! = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
var orientation = UIImageOrientation(rawValue: 0)
if isFront {
orientation = UIImageOrientation.leftMirrored
} else {
orientation = UIImageOrientation.right
}
let image = UIImage(cgImage: cgImageRef, scale: 1.0, orientation: orientation!)
// var flippedImage = UIImage(CGImage: picture.CGImage!, scale: picture.scale, orientation: .leftMirrored)
let timage = crop(image, withWidth: 100, andHeight: 100)
let photoSecondVC = self.storyboard?.instantiateViewController(withIdentifier: "uploadSecondVC") as! UploadSecondViewController
photoSecondVC.imageData = timage!
photoSecondVC.isFront = isFront
self.navigationController?.pushViewController(photoSecondVC, animated: false)
} else {
print("some error here")
}
}
}
This is what I get when the app crashes (I don't know if it is related):
2017-08-25 16:50:36.125052+0200 Fitshare[93231:3349636] Failed to set (keyPath) user defined inspected property on (UITabBarItem): [ setValue:forUndefinedKey:]: this class is not key value coding-compliant for the key keyPath.
2017-08-25 16:50:36.125584+0200 Fitshare[93231:3349636] Failed to set (keyPath) user defined inspected property on (UITabBarItem): [ setValue:forUndefinedKey:]: this class is not key value coding-compliant for the key keyPath.
2017-08-25 16:50:36.300650+0200 Fitshare[93231:3349636] [MC] Lazy loading NSBundle MobileCoreServices.framework
2017-08-25 16:50:36.302462+0200 Fitshare[93231:3349636] [MC] Loaded MobileCoreServices.framework
2017-08-25 16:50:36.311211+0200 Fitshare[93231:3349636] [MC] System group container for systemgroup.com.apple.configurationprofiles path is /Users/kevinboirel/Library/Developer/CoreSimulator/Devices/B48E7503-47C0-4A75-AC5C-C3DEF6CC8507/data/Containers/Shared/SystemGroup/systemgroup.com.apple.configurationprofiles
2017-08-25 16:50:36.363022+0200 Fitshare[93231:3349636] [Snapshotting] Snapshotting a view (0x7fbb38f2b0c0, Fitshare.ALThreeCircleSpinner) that has not been rendered at least once requires afterScreenUpdates:YES.
2017-08-25 16:50:36.469416+0200 Fitshare[93231:3349636] +[CATransaction synchronize] called within transaction
2017-08-25 16:50:36.469800+0200 Fitshare[93231:3349636] +[CATransaction synchronize] called within transaction
And the weird thing is that I get this error message in Xcode:
You need two protocol delegates: UIImagePickerControllerDelegate and UINavigationControllerDelegate.
// MARK: - Global Declaration
#IBOutlet var imgProfile: UIImageView!
var imagePicker = UIImagePickerController()
// MARK: - Camera Methods
func PickingImageFromCamera()
{
let picker = UIImagePickerController()
picker.delegate = self
picker.allowsEditing = false
picker.sourceType = .camera
picker.cameraCaptureMode = .photo
present(picker, animated: true, completion: nil)
}
//----------------------------------
func imagePickerController(_ picker: UIImagePickerController,
didFinishPickingMediaWithInfo info: [String : Any])
{
if let pickedImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
imgProfile.contentMode = .scaleToFill
imgProfile.image = pickedImage
}
dismiss(animated: true, completion: nil)
}
//----------------------------------
func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
dismiss(animated: true, completion: nil)
}
//----------------------------------
I think this code will help you...
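For those delegate methods to be called, the view controller itself must declare both conformances; a minimal sketch (the class name is illustrative):
class ProfileViewController: UIViewController,
                             UIImagePickerControllerDelegate,
                             UINavigationControllerDelegate {
    // PickingImageFromCamera(), imagePickerController(_:didFinishPickingMediaWithInfo:)
    // and imagePickerControllerDidCancel(_:) from above go here.
}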
It was due to the UploadSecondViewController ...
I just resized the picture sent to the second view controller to a smaller size, and the app is now OK!
Thanks for your answers!
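A minimal downscale sketch of the kind of resize described, using UIGraphicsImageRenderer (iOS 10+); the target width is an arbitrary example:
func resized(_ image: UIImage, toWidth width: CGFloat) -> UIImage {
    // Preserve the aspect ratio while scaling down to the target width.
    let scale = width / image.size.width
    let size = CGSize(width: width, height: image.size.height * scale)
    return UIGraphicsImageRenderer(size: size).image { _ in
        image.draw(in: CGRect(origin: .zero, size: size))
    }
}
// e.g. photoSecondVC.imageData = resized(timage!, toWidth: 1024)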

AVCaptureVideoPreviewLayer looks different than captured Image

The image presented in my PreviewLayer has decent color/light levels, but when I capture the image and put it in a UIImageView, it looks super dark.
Why are the light levels so different?
And here's the controller code:
//Parent Class
class CameraViewController: UIViewController {
#IBOutlet var photoPreviewView: UIView!
let captureSession = AVCaptureSession()
var previewLayer: AVCaptureVideoPreviewLayer?
var captureDevice: AVCaptureDevice?
var imageData: NSData?
override func viewDidLoad() {
super.viewDidLoad()
captureSession.sessionPreset = AVCaptureSessionPresetMedium
let devices = AVCaptureDevice.devices()
for device in devices {
// Make sure this particular device supports video
if (device.hasMediaType(AVMediaTypeVideo)) {
// Finally check the position and confirm we've got the back camera
if(device.position == AVCaptureDevicePosition.Back) {
captureDevice = device as? AVCaptureDevice
}
}
}
if captureDevice != nil {
beginSession()
}
}
func captureImage(callback:(NSData)->()) {
let stillImageOutput = AVCaptureStillImageOutput()
captureSession.addOutput(stillImageOutput)
let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo)
if videoConnection != nil {
stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection) { (imageDataSampleBuffer, error) -> Void in
callback(AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(imageDataSampleBuffer))
}
} else {
println("couldn't find video connection")
}
}
func beginSession() {
var err : NSError? = nil
let captureDeviceInput = AVCaptureDeviceInput(device: captureDevice, error: &err)
captureSession.addInput(captureDeviceInput)
if err != nil {
println("error: \(err?.localizedDescription)")
}
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
photoPreviewView.layer.addSublayer(previewLayer)
//FIXME Preview layer is not being positioned as expected. This is an arbitrary hack to make it "look right" on my iphone6
previewLayer.frame = CGRect(x: -64, y: 0, width: 504, height: 504)
captureSession.startRunning()
}
}
class SubmitGuessViewController: CameraViewController {
override func prepareForSegue(segue: UIStoryboardSegue, sender: AnyObject!) -> Void {
super.prepareForSegue(segue, sender: sender)
let createImageFromData = {(imageData: NSData) -> () in
if ( imageData.length > 0 ) {
let checkGuessViewController = segue.destinationViewController as CheckGuessViewController
checkGuessViewController.submittedImage = UIImage(data: imageData)
} else {
println("Image Data not captured")
}
}
self.captureImage(createImageFromData)
}
}
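The same auto-exposure warm-up discussed at the top of this page likely applies here too: captureImage(callback:) adds a fresh AVCaptureStillImageOutput and grabs a frame immediately, before exposure has converged. A sketch of the suspected fix in current Swift syntax (the one-second settle delay is a guess): make the output a stored property, add it once in beginSession(), and delay the capture:
let stillImageOutput = AVCaptureStillImageOutput()

func beginSession() {
    // ... existing input/preview setup ...
    if captureSession.canAddOutput(stillImageOutput) {
        captureSession.addOutput(stillImageOutput)
    }
    captureSession.startRunning()
}

func captureImage(callback: @escaping (Data) -> ()) {
    // Let auto-exposure/white balance settle before grabbing a frame.
    DispatchQueue.main.asyncAfter(deadline: .now() + 1.0) {
        guard let connection = self.stillImageOutput
            .connection(with: .video) else { return }
        self.stillImageOutput.captureStillImageAsynchronously(from: connection) {
            buffer, _ in
            if let buffer = buffer,
                let data = AVCaptureStillImageOutput
                    .jpegStillImageNSDataRepresentation(buffer) {
                callback(data)
            }
        }
    }
}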