Hi, I am getting an error which I believe is caused by the code below (it should switch between the front and back cameras in my custom camera). I am able to take a picture and everything works fine except for this code...
@IBAction func switchCamera(sender: UIButton) {
    var session: AVCaptureSession!
    let currentCameraInput: AVCaptureInput = session.inputs[0] as! AVCaptureInput
    session.removeInput(currentCameraInput)
    do {
        let newCamera: AVCaptureDevice?
        if captureDevice!.position == AVCaptureDevicePosition.Back {
            print("Setting new camera with Front")
            newCamera = self.cameraWithPosition(AVCaptureDevicePosition.Front)
        } else {
            print("Setting new camera with Back")
            newCamera = self.cameraWithPosition(AVCaptureDevicePosition.Back)
        }
        let error = NSError?()
        let newVideoInput = try AVCaptureDeviceInput(device: newCamera)
        if error == nil && captureSession?.canAddInput(newVideoInput) != nil {
            session.addInput(newVideoInput)
        } else {
            print("Error creating capture device input")
        }
        session.commitConfiguration()
        captureDevice! = newCamera!
    } catch let error as NSError {
        // Handle any errors
        print(error)
    }
}
Thanks.
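For reference, here is a rough sketch of how this action could be restructured so it compiles and keeps the session intact: the local var session: AVCaptureSession! is still nil when session.inputs[0] is read, so the existing session property has to be used instead, and the input swap belongs inside a beginConfiguration()/commitConfiguration() pair (the snippet above commits but never begins). captureSession, captureDevice and cameraWithPosition(_:) are assumed to be the class's own members, as in the snippet above:

@IBAction func switchCamera(sender: UIButton) {
    // Use the session that is already configured and running,
    // not a fresh (nil) local variable.
    guard let session = captureSession else { return }
    session.beginConfiguration()
    if let currentInput = session.inputs.first as? AVCaptureInput {
        session.removeInput(currentInput)
    }
    let newCamera: AVCaptureDevice?
    if captureDevice!.position == AVCaptureDevicePosition.Back {
        newCamera = self.cameraWithPosition(AVCaptureDevicePosition.Front)
    } else {
        newCamera = self.cameraWithPosition(AVCaptureDevicePosition.Back)
    }
    do {
        let newVideoInput = try AVCaptureDeviceInput(device: newCamera)
        if session.canAddInput(newVideoInput) { // canAddInput returns Bool, not an optional
            session.addInput(newVideoInput)
            captureDevice = newCamera
        } else {
            print("Error adding capture device input")
        }
    } catch let error as NSError {
        print(error)
    }
    session.commitConfiguration()
}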
I am trying to record depth data from the TrueDepth camera along with a photo. But when calling
AVCapturePhotoOutput.capturePhoto(with:delegate:)
I get an exception stating:
No active and enabled video connection
I configure the camera and outputs like so (basically following the guide from Apple about photo capturing and capturing depth):
func configurePhotoOutput() throws {
    self.captureSession = AVCaptureSession()
    guard self.captureSession != nil else {
        return
    }
    // Select a depth-capable capture device.
    guard let videoDevice = AVCaptureDevice.default(.builtInTrueDepthCamera,
                                                    for: .video, position: .unspecified)
        else { fatalError("No dual camera.") }
    // Select a depth (not disparity) format that works with the active color format.
    let availableFormats = videoDevice.activeFormat.supportedDepthDataFormats
    let depthFormat = availableFormats.first(where: { format in
        let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
        return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
                pixelFormatType == kCVPixelFormatType_DepthFloat32)
    })
    do {
        try videoDevice.lockForConfiguration()
        videoDevice.activeDepthDataFormat = depthFormat
        videoDevice.unlockForConfiguration()
    } catch {
        print("Could not lock device for configuration: \(error)")
        return
    }
    self.captureSession!.beginConfiguration()
    // add video input
    guard let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
        self.captureSession!.canAddInput(videoDeviceInput)
        else { fatalError("Can't add video input.") }
    self.captureSession!.addInput(videoDeviceInput)
    // add video output
    if self.captureSession!.canAddOutput(videoOutput) {
        self.captureSession!.addOutput(videoOutput)
        videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    } else { fatalError("Can't add video output.") }
    // Set up photo output for depth data capture.
    let photoOutput = AVCapturePhotoOutput()
    photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
    guard self.captureSession!.canAddOutput(photoOutput)
        else { fatalError("Can't add photo output.") }
    self.captureSession!.addOutput(photoOutput)
    self.captureSession!.sessionPreset = .photo
    self.captureSession!.commitConfiguration()
    self.captureSession!.startRunning()
}
And the code responsible for capturing the photo:
func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
    let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
    photoSettings.isDepthDataDeliveryEnabled =
        self.photoOutput.isDepthDataDeliverySupported
    photoSettings.isDepthDataFiltered = false
    self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate) // <---- error is being thrown on this call
    self.photoCaptureCompletionBlock = completion
}
What am I doing wrong in this configuration?
I solved it with the following implementation. Any comments or remarks are highly appreciated!
import AVFoundation
import UIKit

class CameraController: NSObject {
    var captureSession: AVCaptureSession?
    var videoDevice: AVCaptureDevice?
    var previewLayer: AVCaptureVideoPreviewLayer?
    var videoOutput = AVCaptureVideoDataOutput()
    var photoOutput = AVCapturePhotoOutput()

    func prepare(completionHandler: @escaping (Error?) -> Void) {
        func createCaptureSession() {
            captureSession = AVCaptureSession()
        }

        func configureCaptureDevices() throws {
            // Select a depth-capable capture device.
            guard let vd = AVCaptureDevice.default(.builtInTrueDepthCamera,
                                                   for: .video, position: .unspecified)
                else { fatalError("No dual camera.") }
            videoDevice = vd
            // Select a depth (not disparity) format that works with the active color format.
            let availableFormats = videoDevice!.activeFormat.supportedDepthDataFormats
            let depthFormat = availableFormats.first(where: { format in
                let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
                return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
                        pixelFormatType == kCVPixelFormatType_DepthFloat32)
            })
            do {
                try videoDevice!.lockForConfiguration()
                videoDevice!.activeDepthDataFormat = depthFormat
                videoDevice!.unlockForConfiguration()
            } catch {
                print("Could not lock device for configuration: \(error)")
                return
            }
        }

        func configureDeviceInputs() throws {
            if captureSession == nil {
                throw CameraControllerError.captureSessionIsMissing
            }
            captureSession?.beginConfiguration()
            // add video input
            guard let videoDeviceInput = try? AVCaptureDeviceInput(device: self.videoDevice!),
                captureSession!.canAddInput(videoDeviceInput)
                else { fatalError("Can't add video input.") }
            captureSession!.addInput(videoDeviceInput)
            captureSession?.commitConfiguration()
        }

        func configurePhotoOutput() throws {
            guard let captureSession = self.captureSession else { throw CameraControllerError.captureSessionIsMissing }
            captureSession.beginConfiguration()
            // Set up photo output for depth data capture.
            photoOutput = AVCapturePhotoOutput()
            photoOutput.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])], completionHandler: nil)
            guard captureSession.canAddOutput(photoOutput)
                else { fatalError("Can't add photo output.") }
            captureSession.addOutput(photoOutput)
            // Must be set after photoOutput is added to captureSession. Why???
            // (Presumably because isDepthDataDeliverySupported only reports true
            // once the output is connected to a depth-capable device.)
            photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
            captureSession.sessionPreset = .photo
            captureSession.commitConfiguration()
            captureSession.startRunning()
        }

        DispatchQueue(label: "prepare").async {
            do {
                createCaptureSession()
                try configureCaptureDevices()
                try configureDeviceInputs()
                try configurePhotoOutput()
            } catch {
                DispatchQueue.main.async {
                    completionHandler(error)
                }
                return
            }
            DispatchQueue.main.async {
                completionHandler(nil)
            }
        }
    }

    func displayPreview(on view: UIView) throws {
        guard let captureSession = self.captureSession, captureSession.isRunning else { throw CameraControllerError.captureSessionIsMissing }
        self.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        self.previewLayer?.connection?.videoOrientation = .portrait
        view.layer.insertSublayer(self.previewLayer!, at: 0)
        self.previewLayer?.frame = view.frame
    }

    func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
        let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
        photoSettings.isDepthDataDeliveryEnabled = true
        photoSettings.isDepthDataFiltered = false
        self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate)
        self.photoCaptureCompletionBlock = completion
    }

    var photoCaptureCompletionBlock: ((UIImage?, Error?) -> Void)?
}

extension CameraController {
    public enum CameraPosition {
        case front
        case rear
    }

    enum CameraControllerError: Swift.Error {
        case captureSessionAlreadyRunning
        case captureSessionIsMissing
        case inputsAreInvalid
        case invalidOperation
        case noCamerasAvailable
        case unknown
    }
}
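For completeness, a typical call site for this controller might look like the sketch below; the view controller, the cameraView outlet, and the delegate method are assumptions rather than part of the original answer:

class CameraViewController: UIViewController, AVCapturePhotoCaptureDelegate {
    let cameraController = CameraController()
    @IBOutlet weak var cameraView: UIView!

    override func viewDidLoad() {
        super.viewDidLoad()
        cameraController.prepare { error in
            if let error = error {
                print(error)
                return
            }
            // The session is running by now, so the preview can be attached.
            try? self.cameraController.displayPreview(on: self.cameraView)
        }
    }

    @IBAction func takePhoto(_ sender: UIButton) {
        cameraController.captureImage(delegate: self) { image, error in
            // handle the UIImage / error here
        }
    }

    func photoOutput(_ output: AVCapturePhotoOutput,
                     didFinishProcessingPhoto photo: AVCapturePhoto,
                     error: Error?) {
        // When depth delivery was enabled, the depth map arrives as photo.depthData.
        print("depth data:", photo.depthData as Any)
    }
}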
// Before toggling, I am sure that isRunning == true
func toggleCamera() {
    let first: TimeInterval = Date().timeIntervalSince1970
    let currentVideoDevice = self.videoInput.device
    // begin to switch
    self.captureSession.beginConfiguration()
    self.captureSession.removeInput(self.videoInput)
    if self.cameraDeviceType == .back {
        self.cameraDeviceType = .front
        self.inputCamera = self.frontDevice
    } else {
        self.cameraDeviceType = .back
        self.inputCamera = self.backDevice
    }
    do {
        self.videoInput = try AVCaptureDeviceInput(device: self.inputCamera)
    } catch {
        print(error)
    }
    if self.captureSession.canAddInput(self.videoInput) {
        NotificationCenter.default.removeObserver(self, name: .AVCaptureDeviceSubjectAreaDidChange, object: currentVideoDevice)
        NotificationCenter.default.addObserver(self, selector: #selector(self.subjectAreaDidChange), name: .AVCaptureDeviceSubjectAreaDidChange, object: self.videoInput.device)
        self.captureSession.addInput(self.videoInput)
    } else {
        self.captureSession.addInput(self.videoInput)
    }
    self.captureSession.commitConfiguration()
    if let connection = self.videoOutput?.connection(withMediaType: "video") {
        if connection.isVideoStabilizationSupported {
            connection.preferredVideoStabilizationMode = .auto
        }
        connection.isEnabled = false
        connection.isEnabled = true
    }
    let second: TimeInterval = Date().timeIntervalSince1970
    print("turnAroundInnerCost:", second - first)
}
// log: turnAroundInnerCost: 0.431715965270996
The function itself returns quickly, but the interface takes about 5 seconds to actually switch.
So, on every toggle you recreate your camera, reconfigure the devices, enable/disable the connection, and so on. Try moving your camera configuration logic into a separate function and calling it once, for example in viewDidLoad(), as sketched below.
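For example (a sketch; configureSession() here stands in for whatever setup currently runs on every toggle):

override func viewDidLoad() {
    super.viewDidLoad()
    configureSession()            // discover devices, add inputs/outputs, wire the preview once
    captureSession.startRunning()
}

private func configureSession() {
    // One-time setup: session preset, front/back device lookup,
    // initial AVCaptureDeviceInput, video/photo outputs, preview layer.
}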
Switching between cameras can be:
func switchToFrontCamera() throws {
    guard let inputs = captureSession.inputs as? [AVCaptureInput], let rearCameraInput = self.rearCameraInput, inputs.contains(rearCameraInput),
        let frontCamera = self.frontCamera else { throw CameraError.invalidOperation }
    self.frontCameraInput = try AVCaptureDeviceInput(device: frontCamera)
    captureSession.removeInput(rearCameraInput)
    if captureSession.canAddInput(self.frontCameraInput) {
        captureSession.addInput(self.frontCameraInput)
        self.currentCameraPosition = .front
    } else {
        throw CameraError.invalidOperation
    }
}

func switchToRearCamera() throws {
    guard let inputs = captureSession.inputs as? [AVCaptureInput], let frontCameraInput = self.frontCameraInput, inputs.contains(frontCameraInput),
        let rearCamera = self.rearCamera else { throw CameraError.invalidOperation }
    self.rearCameraInput = try AVCaptureDeviceInput(device: rearCamera)
    captureSession.removeInput(frontCameraInput)
    if captureSession.canAddInput(self.rearCameraInput) {
        captureSession.addInput(self.rearCameraInput)
        self.currentCameraPosition = .rear
    } else { throw CameraError.invalidOperation }
}
and then you can call (wrapped in do/catch, since both functions throw):

do {
    switch currentCameraPosition {
    case .front:
        try switchToRearCamera()
    case .rear:
        try switchToFrontCamera()
    }
} catch {
    print(error)
}
// I create the captureSession once in viewDidLoad(), but this function was still slow when switching the camera from rear to front
let frontDevice = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo).map { $0 as! AVCaptureDevice }.filter { $0.position == .front }.first!
let backDevice = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo).map { $0 as! AVCaptureDevice }.filter { $0.position == .back }.first!

public func turnAroundCamera() {
    sessionQueue.async {
        let first: TimeInterval = Date().timeIntervalSince1970
        let oldVideoInput = self.videoInput
        //self.captureSession.beginConfiguration()
        self.captureSession.removeInput(self.videoInput)
        if self.cameraDeviceType == .back {
            self.cameraDeviceType = .front
            self.inputCamera = self.frontDevice
        } else {
            self.cameraDeviceType = .back
            self.inputCamera = self.backDevice
        }
        do {
            self.videoInput = try AVCaptureDeviceInput(device: self.inputCamera)
        } catch {
            print(error)
        }
        if self.captureSession.canAddInput(self.videoInput) {
            self.captureSession.addInput(self.videoInput)
        } else {
            self.captureSession.addInput(oldVideoInput)
        }
        //self.captureSession.commitConfiguration()
        let second: TimeInterval = Date().timeIntervalSince1970
        print("turnAroundInnerCost:", second - first)
    }
}
More info: log turnAroundInnerCost: 0.245857000350952
Actually, turnAroundCamera() itself runs fast when I call it, but captureOutput() only resumes about 5 seconds after turnAroundCamera() ends. It is especially expensive when switching from the rear to the front camera. So what I tried with enable/disable was to flush the session, hoping that would also flush captureOutput...
When I set the flashMode for my front camera and then call
let videoConnection = stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo)
stillImageOutput?.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: process)
I get the following error message:
error while capturing still image: Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could not be completed" UserInfo={NSUnderlyingError=0x12eeb7200 {Error Domain=NSOSStatusErrorDomain Code=-16800 "(null)"}, NSLocalizedFailureReason=An unknown error occurred (-16800), NSLocalizedDescription=The operation could not be completed}
If I don't set the camera's flashMode and then call:
let videoConnection = stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo)
stillImageOutput?.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: process)
The front camera takes a picture and doesn't throw the error.
Currently, this is how I set up my camera:
func getCameraStreamLayer() -> CALayer? {
    captureSession = AVCaptureSession()
    captureSession!.sessionPreset = AVCaptureSessionPresetPhoto
    currentCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    stillImageOutput = AVCaptureStillImageOutput()
    stillImageOutput!.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
    if let input = try? AVCaptureDeviceInput(device: currentCamera) as AVCaptureDeviceInput {
        if captureSession!.canAddInput(input) && captureSession!.canAddOutput(stillImageOutput) {
            captureSession!.addInput(input)
            captureSession!.addOutput(stillImageOutput)
        }
    }
    return AVCaptureVideoPreviewLayer(session: captureSession)
}
func toggleFlash() {
    flash = !flash
    if flash {
        for case let (device as AVCaptureDevice) in AVCaptureDevice.devices() {
            if device.hasFlash && device.flashAvailable {
                if device.isFlashModeSupported(.On) {
                    do {
                        try device.lockForConfiguration()
                        device.flashMode = .On
                        device.unlockForConfiguration()
                    } catch {
                        print("Something went wrong")
                    }
                }
            }
        }
    } else {
        // turn off flash
    }
}
func photograph(process: (CMSampleBuffer!, NSError!) -> ()) {
    let videoConnection = stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo)
    stillImageOutput?.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: process)
}
func flipCamera() {
    guard let session = captureSession where session.running == true else {
        return
    }
    session.beginConfiguration()
    let currentCameraInput = session.inputs[0] as! AVCaptureDeviceInput
    session.removeInput(currentCameraInput)
    let newCamera = {
        let devices = AVCaptureDevice.devicesWithMediaType(AVMediaTypeVideo)
        for case let device as AVCaptureDevice in devices {
            if device.position == .Front && currentCameraInput.device.position == .Back {
                return device
            }
            if device.position == .Back && currentCameraInput.device.position == .Front {
                return device
            }
        }
        return nil
    }() as AVCaptureDevice?
    currentCamera = newCamera!
    if let newVideoInput = try? AVCaptureDeviceInput(device: newCamera) {
        captureSession?.addInput(newVideoInput)
    }
    captureSession?.commitConfiguration()
}
I'm not sure what I should do. I've tried creating a new capture session, locking the device, and then setting its flashMode; I still get the same error.
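One detail worth checking in the snippets above: toggleFlash() walks every capture device and sets flashMode on anything that reports a flash, even while the session is capturing from the front camera, which on most iPhones has no hardware flash. A hedged sketch of a safer variant against the same Swift 2-era API, configuring only the device in use (currentCamera is the property set in getCameraStreamLayer(), assumed optional here):

func setFlash(on on: Bool) {
    // Only configure the camera that is actually capturing,
    // and only if it really supports the requested flash mode.
    guard let device = currentCamera else { return }
    let mode: AVCaptureFlashMode = on ? .On : .Off
    guard device.hasFlash && device.isFlashModeSupported(mode) else {
        print("Flash mode not supported on this camera")
        return
    }
    do {
        try device.lockForConfiguration()
        device.flashMode = mode
        device.unlockForConfiguration()
    } catch {
        print("Could not lock device for configuration: \(error)")
    }
}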
I'm creating a custom view for the camera, which works fine; however, I'm now working on switching from the back camera to the front camera. At the moment I do it as shown below, but this makes for a bad user experience: the preview layer is removed (the screen turns white for about a second) before the front camera is shown correctly. Is there a way to create a better user experience by not turning everything white before showing the new session?
switchCamera
func switchCamera() {
    if usingbackCamera == true {
        endSession()
        beginSession(frontCamera!)
        usingbackCamera = false
        self.cameraView.bringSubviewToFront(actionView)
    } else {
        endSession()
        beginSession(backCamera!)
        usingbackCamera = true
        self.cameraView.bringSubviewToFront(actionView)
    }
}
beginSession
func beginSession(device: AVCaptureDevice) {
    do {
        captureSession.addInput(try AVCaptureDeviceInput(device: device))
        self.previewLayer?.removeFromSuperlayer()
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.cameraView.layer.addSublayer(previewLayer!)
        previewLayer?.frame = self.cameraView.bounds
        captureSession.startRunning()
        stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
        if captureSession.canAddOutput(stillImageOutput) {
            captureSession.addOutput(stillImageOutput)
        }
        if captureSession.canAddOutput(videoOutput) {
            captureSession.addOutput(videoOutput)
        }
    } catch let err as NSError {
        print(err)
    }
}
endSession
func endSession() {
    self.previewLayer?.removeFromSuperlayer()
    captureSession.stopRunning()
    captureSession = AVCaptureSession()
}
Take Picture
func takePicture() {
    if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo) {
        videoConnection.videoOrientation = AVCaptureVideoOrientation.Portrait
        stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: {
            (sampleBuffer, error) in
            let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
            let dataProvider = CGDataProviderCreateWithCFData(imageData)
            let cgImageRef = CGImageCreateWithJPEGDataProvider(dataProvider, nil, true, CGColorRenderingIntent.RenderingIntentDefault)
            let image = UIImage(CGImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.Right)
            self.previewImageView.image = image
            self.previewImageView.hidden = false
            self.cameraView.bringSubviewToFront(self.previewImageView)
        })
    }
}
You don't need to stop the captureSession and start it again when switching from back to front camera and vice versa.
All you need to do is remove the old capture session inputs and add the new one, all between a begin/commit configuration block.
Here is a rough example:
func switchCamera() {
    //begin configuration changes
    captureSession.beginConfiguration()
    //remove the previous inputs
    let inputs = captureSession.inputs as! [AVCaptureInput]
    for oldInput: AVCaptureInput in inputs {
        captureSession.removeInput(oldInput)
    }
    //add the new input
    if usingbackCamera == true {
        addInput(frontCamera!)
        usingbackCamera = false
        self.cameraView.bringSubviewToFront(actionView)
    } else {
        addInput(backCamera!)
        usingbackCamera = true
        self.cameraView.bringSubviewToFront(actionView)
    }
    //end the configuration
    captureSession.commitConfiguration()
}

func addInput(device: AVCaptureDevice) {
    do {
        captureSession.addInput(try AVCaptureDeviceInput(device: device))
    } catch let err as NSError {
        print(err)
    }
}
I don't think you have to remove the preview layer when changing the input device. The layer is bound to the session; all you have to do is stop the session, remove the original input, add the new input, and then start the session again (see the sketch below). I built my capture view with custom rendering, but I think the process would be the same.
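For the question's own switchCamera(), that would look roughly like this sketch (same Swift 2-era API; usingbackCamera, frontCamera and backCamera are the question's own properties):

func switchCamera() {
    captureSession.stopRunning()
    // Remove the old input but keep the session (and its preview layer) alive.
    for case let input as AVCaptureInput in captureSession.inputs {
        captureSession.removeInput(input)
    }
    let newDevice = usingbackCamera ? frontCamera! : backCamera!
    usingbackCamera = !usingbackCamera
    do {
        captureSession.addInput(try AVCaptureDeviceInput(device: newDevice))
    } catch let err as NSError {
        print(err)
    }
    captureSession.startRunning()
}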
The capture snippet:
for output in session.outputs {
    if let capture = output as? AVCaptureStillImageOutput {
        for connection in (capture.connections as! [AVCaptureConnection]) {
            for port in (connection.inputPorts as! [AVCaptureInputPort]) {
                if port.mediaType == AVMediaTypeVideo {
                    capture.captureStillImageAsynchronouslyFromConnection(connection, completionHandler: { (buffer, err) -> Void in
                        if err != nil {
                            print(err)
                        }
                        let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(buffer)
                        guard let image = CIImage(data: imageData) else {
                            completion(nil) // completion is the surrounding method's callback (not shown)
                            return
                        }
                        let rotatedImage = image.imageByApplyingTransform(CGAffineTransformMakeRotation(-CGFloat(M_PI_2)))
                        // ... hand rotatedImage back via the completion here
                    })
                }
            }
        }
    }
}
I was looking for how to turn the iPhone's camera flash on/off, and I found this:
@IBAction func didTouchFlashButton(sender: AnyObject) {
    let avDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    // check if the device has a torch
    if avDevice.hasTorch {
        // lock your device for configuration
        avDevice.lockForConfiguration(nil)
        // check if your torchMode is on or off. If on, turn it off; otherwise turn it on
        if avDevice.torchActive {
            avDevice.torchMode = AVCaptureTorchMode.Off
        } else {
            // sets the torch intensity to 100%
            avDevice.setTorchModeOnWithLevel(1.0, error: nil)
        }
        // unlock your device
        avDevice.unlockForConfiguration()
    }
}
I get two issues, one on the line:
avDevice.lockForConfiguration(nil)
and the other on the line:
avDevice.setTorchModeOnWithLevel(1.0, error:nil)
both of them are related to exception handling but I don't know how to resolve them.
@IBAction func didTouchFlashButton(sender: UIButton) {
    let avDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    // check if the device has a torch
    if avDevice.hasTorch {
        // lock your device for configuration
        do {
            try avDevice.lockForConfiguration()
        } catch {
            print("aaaa")
        }
        // check if your torchMode is on or off. If on, turn it off; otherwise turn it on
        if avDevice.torchActive {
            avDevice.torchMode = AVCaptureTorchMode.Off
        } else {
            // sets the torch intensity to 100%
            do {
                try avDevice.setTorchModeOnWithLevel(1.0)
            } catch {
                print("bbb")
            }
            // avDevice.setTorchModeOnWithLevel(1.0, error: nil)
        }
        // unlock your device
        avDevice.unlockForConfiguration()
    }
}
Swift 4 version, adapted from Ivan Slavov's answer. "TorchMode.auto" is also an option if you want to get fancy.
@IBAction func didTouchFlashButton(_ sender: Any) {
    if let avDevice = AVCaptureDevice.default(for: AVMediaType.video) {
        if avDevice.hasTorch {
            do {
                try avDevice.lockForConfiguration()
            } catch {
                print("aaaa")
            }
            if avDevice.isTorchActive {
                avDevice.torchMode = AVCaptureDevice.TorchMode.off
            } else {
                avDevice.torchMode = AVCaptureDevice.TorchMode.on
            }
            // unlock your device
            avDevice.unlockForConfiguration()
        }
    }
}
Swift 5.4, Xcode 12.4, iOS 14.4.2:
@objc private func flashEnableButtonAction() {
    guard let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) else {
        return
    }
    if captureDevice.hasTorch {
        do {
            try captureDevice.lockForConfiguration()
        } catch {
            print("aaaa")
        }
        if captureDevice.isTorchActive {
            captureDevice.torchMode = AVCaptureDevice.TorchMode.off
        } else {
            do {
                try captureDevice.setTorchModeOn(level: 1.0)
            } catch {
                print("bbb")
            }
        }
        captureDevice.unlockForConfiguration()
    }
}
For some reason "avDevice.torchActive" is always false, even when the torch is on, which made it impossible to turn off. I fixed it by declaring a boolean, initially set to false, that is set to true every time the flash turns on.
var on: Bool = false

@IBAction func didTouchFlashButton(sender: UIButton) {
    let avDevice = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    // check if the device has a torch
    if avDevice.hasTorch {
        // lock your device for configuration
        do {
            try avDevice.lockForConfiguration()
        } catch {
            print("aaaa")
        }
        // check the flag instead of torchActive. If on, turn the torch off; otherwise turn it on
        if on == true {
            avDevice.torchMode = AVCaptureTorchMode.Off
            on = false
        } else {
            // sets the torch intensity to 100%
            do {
                try avDevice.setTorchModeOnWithLevel(1.0)
                on = true
            } catch {
                print("bbb")
            }
            // avDevice.setTorchModeOnWithLevel(1.0, error: nil)
        }
        // unlock your device
        avDevice.unlockForConfiguration()
    }
}
import AVFoundation
var videoDeviceInput: AVCaptureDeviceInput?
var movieFileOutput: AVCaptureMovieFileOutput?
var stillImageOutput: AVCaptureStillImageOutput?
Add a class method to ViewController.
class func setFlashMode(flashMode: AVCaptureFlashMode, device: AVCaptureDevice) {
    if device.hasFlash && device.isFlashModeSupported(flashMode) {
        do {
            try device.lockForConfiguration()
            device.flashMode = flashMode
            device.unlockForConfiguration()
        } catch let error as NSError {
            print(error)
        }
    }
}
Check the flash mode status:
// Flash set to Auto/Off for Still Capture
// (AVCaptureFlashMode raw values: 0 = Off, 1 = On, 2 = Auto)
print("flashMode.rawValue : \(self.videoDeviceInput!.device.flashMode.rawValue)")
if self.videoDeviceInput!.device.flashMode.rawValue == 1 {
    CameraViewController.setFlashMode(AVCaptureFlashMode.On, device: self.videoDeviceInput!.device)
} else if self.videoDeviceInput!.device.flashMode.rawValue == 2 {
    CameraViewController.setFlashMode(AVCaptureFlashMode.Auto, device: self.videoDeviceInput!.device)
} else {
    CameraViewController.setFlashMode(AVCaptureFlashMode.Off, device: self.videoDeviceInput!.device)
}
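Comparing raw values works, but the same dispatch can be written directly on the enum cases (a sketch with the same Swift 2-era names; like the original, it just re-asserts the device's current mode before capture):

switch self.videoDeviceInput!.device.flashMode {
case .On:
    CameraViewController.setFlashMode(AVCaptureFlashMode.On, device: self.videoDeviceInput!.device)
case .Auto:
    CameraViewController.setFlashMode(AVCaptureFlashMode.Auto, device: self.videoDeviceInput!.device)
case .Off:
    CameraViewController.setFlashMode(AVCaptureFlashMode.Off, device: self.videoDeviceInput!.device)
}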
Another short way to do this:
let devices = AVCaptureDevice.devices()
let device = devices[0]
guard device.isTorchAvailable else { return }
do {
    try device.lockForConfiguration()
    if device.torchMode == .on {
        device.torchMode = .off
    } else {
        device.torchMode = .on
    }
    device.unlockForConfiguration() // balance the lock once configuration is done
} catch {
    debugPrint(error)
}
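If the configuration block grows, a defer makes the unlock harder to forget (a sketch on current API; AVCaptureDevice.default(for:) replaces the deprecated devices() call):

import AVFoundation

func toggleTorch() {
    guard let device = AVCaptureDevice.default(for: .video),
          device.isTorchAvailable else { return }
    do {
        try device.lockForConfiguration()
        defer { device.unlockForConfiguration() } // always released, even on early exit
        device.torchMode = (device.torchMode == .on) ? .off : .on
    } catch {
        debugPrint(error)
    }
}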