AVCaptureDeviceInput initialization exception - iOS

When I try to create an instance of AVCaptureDeviceInput, I get the following error.
What I have done so far:
1) I have granted permissions for camera and microphone use.
2) I have tested the code on an iPhone 7 and an iPhone 5s.
3) When the error is displayed, I printed the value of session.isRunning and it returns true.
4) All the properties are retained strongly.
Error Domain=AVFoundationErrorDomain Code=-11814 "Cannot Record" UserInfo={NSLocalizedDescription=Cannot Record, NSLocalizedRecoverySuggestion=Try recording again.}
This is the code:
let session = AVCaptureSession()
self.session = session
session.sessionPreset = AVCaptureSessionPresetPhoto
do {
    let input = try AVCaptureDeviceInput(device: device)
    session.addInput(input)
    if session.canAddInput(input) {
        let stillImageOutput = AVCapturePhotoOutput()
        self.stillImageOutput = stillImageOutput
        let settings = AVCapturePhotoSettings()
        stillImageOutput.capturePhoto(with: settings, delegate: self)
        if session.canAddOutput(stillImageOutput) {
            session.addOutput(stillImageOutput)
            if let previewLayer = AVCaptureVideoPreviewLayer(session: session) {
                self.previewLayer = previewLayer
                previewLayer.videoGravity = AVLayerVideoGravityResizeAspect
                previewLayer.connection!.videoOrientation = AVCaptureVideoOrientation.portrait
                viewController.centerView.layer.insertSublayer(previewLayer, at: 0)
                session.startRunning()
            }
        }
    }
} catch {
    print(error.localizedDescription)
}

There are several issues I found in your code, so the updated version with comments on it would look like this:
let session = AVCaptureSession()
self.session = session
// "This method is used to start the flow of data from the inputs to the outputs connected to the AVCaptureSession instance that is the receiver."
//session.startRunning() // Don't call startRunning until everything is configured
session.sessionPreset = AVCaptureSessionPresetPhoto
do {
    let input = try AVCaptureDeviceInput(device: device)
    // session.addInput(input) // This has to come after you check `canAddInput`
    if session.canAddInput(input) {
        session.addInput(input)
        let stillImageOutput = AVCapturePhotoOutput()
        self.stillImageOutput = stillImageOutput
        let settings = AVCapturePhotoSettings()
        // stillImageOutput.capturePhoto(with: settings, delegate: self) // This should be called after you add it as an output to the `session`
        if session.canAddOutput(stillImageOutput) {
            session.addOutput(stillImageOutput)
            if let previewLayer = AVCaptureVideoPreviewLayer(session: session) {
                self.previewLayer = previewLayer
                previewLayer.videoGravity = AVLayerVideoGravityResizeAspect
                previewLayer.connection!.videoOrientation = AVCaptureVideoOrientation.portrait
                viewController.centerView.layer.insertSublayer(previewLayer, at: 0)
                session.startRunning()
            }
            stillImageOutput.capturePhoto(with: settings, delegate: self)
        }
    }
} catch {
    print(error.localizedDescription)
}
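As a side note, session changes can also be batched between beginConfiguration() and commitConfiguration(), and the capture itself is usually triggered from a user action once the session is running. Here is a minimal sketch of the same setup in that style, assuming the same `input`, `session`, and `stillImageOutput` as above:
session.beginConfiguration()
if session.canAddInput(input) {
    session.addInput(input)
}
if session.canAddOutput(stillImageOutput) {
    session.addOutput(stillImageOutput)
}
session.commitConfiguration() // all queued changes are applied together here
session.startRunning()
// Trigger stillImageOutput.capturePhoto(with:delegate:) later, e.g. from a button tap.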

Related

the specified colorspace format is not supported. falling back on libyuv

I'm currently working on a camera app. Everything worked fine, but when I tried to change the constraints of the vision view, the log suddenly printed this error:
[warning] the specified colorspace format is not supported. falling back on libyuv.
I have no idea where it comes from or what I should change. Below I'll paste the relevant code where I set up the camera.
func initializeCameraSession() {
    //1: Create a new AV session
    // Get camera devices
    let devices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .front).devices
    //2: Select a capture device
    avSession.sessionPreset = .low
    do {
        if let captureDevice = devices.first {
            let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
            avSession.beginConfiguration()
            if avSession.canAddInput(captureDeviceInput) {
                avSession.addInput(captureDeviceInput)
                self.videoDeviceInput = captureDeviceInput
            } else {
                print("Couldn't add video device input to the session.")
                avSession.commitConfiguration()
                return
            }
            avSession.commitConfiguration()
        }
    } catch {
        print(error.localizedDescription)
    }
    //3: Show output on a preview layer
    let captureOutput = AVCaptureVideoDataOutput()
    captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
    captureOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as String): NSNumber(value: kCVPixelFormatType_32BGRA)]
    avSession.addOutput(captureOutput)
    let previewLayer = AVCaptureVideoPreviewLayer(session: avSession)
    previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
    previewLayer.connection?.videoOrientation = .portrait
    previewLayer.frame = visionView.bounds
    visionView.layer.addSublayer(previewLayer)
    view.bringSubviewToFront(visionView)
    visionView.isHidden = true
    visionView.alpha = 0.0
    avSession.startRunning()
}
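For what it's worth, this warning is commonly associated with handing 32BGRA buffers to a consumer (such as Vision) that prefers bi-planar YCbCr. A frequently suggested workaround, offered here as an assumption rather than a confirmed fix for this exact code, is to request a 420 YpCbCr format from the data output:
// Assumption: the warning comes from converting the 32BGRA frames.
// Requesting a bi-planar 420 YpCbCr format can avoid the libyuv fallback.
captureOutput.videoSettings = [
    kCVPixelBufferPixelFormatTypeKey as String:
        NSNumber(value: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)
]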

Custom camera view with UIImage as new overlay

I want to make a custom camera view overlay. I want to use the overlay, which is an image, as a template, but the rect of the clear space will change depending on the phone.
Template: [template image omitted]
I tried to create a view as a container behind the image, but the captured image includes the part that I don't want.
self.session = AVCaptureSession()
self.session!.sessionPreset = AVCaptureSession.Preset.photo
let backCamera = AVCaptureDevice.default(for: AVMediaType.video)
var error: NSError?
var input: AVCaptureDeviceInput!
do {
    input = try AVCaptureDeviceInput(device: backCamera!)
} catch let error1 as NSError {
    error = error1
    input = nil
    print(error!.localizedDescription)
}
if error == nil && session!.canAddInput(input) {
    session!.addInput(input)
    stillImageOutput = AVCaptureStillImageOutput()
    stillImageOutput?.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
    if session!.canAddOutput(stillImageOutput!) {
        session!.addOutput(stillImageOutput!)
        videoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.session!)
        videoPreviewLayer!.videoGravity = AVLayerVideoGravity.resizeAspect
        videoPreviewLayer!.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        previewView.layer.addSublayer(videoPreviewLayer!)
        session!.startRunning()
    }
}
// in viewDidLoad
videoPreviewLayer!.frame = previewView.bounds // previewView is a UIView behind the image
The expected result is that the white space would be the custom camera view. Also, it seems that AVCaptureStillImageOutput was deprecated in iOS 10.0.
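Since AVCaptureStillImageOutput is deprecated, one approach worth sketching (under assumptions, not a definitive fix) is to switch to AVCapturePhotoOutput and crop the captured photo to the overlay's clear window. Here `clearRect` is a hypothetical rect for the template's transparent region, expressed in the preview layer's coordinates, and the method lives in a class conforming to AVCapturePhotoCaptureDelegate:
// Sketch: crop the captured photo to the overlay's clear window.
// `clearRect` is hypothetical: the transparent region of the template,
// in the preview layer's coordinate space, computed per device.
func photoOutput(_ output: AVCapturePhotoOutput,
                 didFinishProcessingPhoto photo: AVCapturePhoto,
                 error: Error?) {
    guard error == nil,
          let data = photo.fileDataRepresentation(),
          let image = UIImage(data: data),
          let cgImage = image.cgImage else { return }
    // Convert the visible layer rect to normalized capture coordinates (0...1).
    let normalized = videoPreviewLayer!.metadataOutputRectConverted(fromLayerRect: clearRect)
    // Scale the normalized rect up to pixel coordinates and crop.
    let cropRect = CGRect(x: normalized.origin.x * CGFloat(cgImage.width),
                          y: normalized.origin.y * CGFloat(cgImage.height),
                          width: normalized.size.width * CGFloat(cgImage.width),
                          height: normalized.size.height * CGFloat(cgImage.height))
    if let cropped = cgImage.cropping(to: cropRect) {
        let result = UIImage(cgImage: cropped, scale: image.scale, orientation: image.imageOrientation)
        // use `result` as the cropped capture
    }
}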

How to implement 2x zoom from camera app with AVCaptureVideoPreviewLayer

I have an AVCaptureVideoPreviewLayer in my app that works well and shows the same preview video as the camera app. I would like to implement the 2x zoom functionality of the camera app. How do I do this?
Basically, I want my preview layer to change the video feed to the same scale as what you see in the camera app when you tap the 1x icon to change it to 2x.
Setting up the preview layer:
func startSession() {
    captureSession = AVCaptureSession()
    captureSession?.sessionPreset = AVCaptureSessionPresetPhoto
    let backCamera = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
    // Catch errors using a do-catch block
    do {
        let input = try AVCaptureDeviceInput(device: backCamera)
        if captureSession?.canAddInput(input) == true { // `!= nil` would always pass; compare against true
            captureSession?.addInput(input)
            // Set up the preview layer
            previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
            previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
            previewLayer?.connection.videoOrientation = AVCaptureVideoOrientation.portrait
            tempImageView.layer.addSublayer(previewLayer!)
            captureSession?.startRunning()
            // Set up AVCaptureVideoDataOutput
            let dataOutput = AVCaptureVideoDataOutput()
            dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA as UInt32)]
            dataOutput.alwaysDiscardsLateVideoFrames = true
            if captureSession?.canAddOutput(dataOutput) == true {
                captureSession?.addOutput(dataOutput)
            }
            let queue = DispatchQueue(label: "com.bigbob.videoQueue")
            dataOutput.setSampleBufferDelegate(self, queue: queue)
        }
    } catch _ {
        print("Error setting up camera!")
    }
}
Set the videoZoomFactor of your AVCaptureDevice and the preview layer's zoom will follow suit. Note that in Swift 4 the default device is obtained with AVCaptureDevice.default(for:).
do {
    try backCamera?.lockForConfiguration()
    let zoomFactor: CGFloat = 2
    backCamera?.videoZoomFactor = zoomFactor
    backCamera?.unlockForConfiguration()
} catch {
    // Catch the error from lockForConfiguration
}
Here's a bit of an updated answer that first checks that the zoom factor is available before you even attempt to set it. This prevents possibly unneeded exception catches, and you can adjust both the check and the zoom with a single variable.
if let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) {
    let zoomFactor: CGFloat = 2
    if captureDevice.maxAvailableVideoZoomFactor >= zoomFactor {
        try? captureDevice.lockForConfiguration()
        captureDevice.videoZoomFactor = zoomFactor
        captureDevice.unlockForConfiguration()
    }
}
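If you also want the smooth transition the camera app shows when switching from 1x to 2x, AVCaptureDevice can animate the change with ramp(toVideoZoomFactor:withRate:). A minimal sketch, assuming the same captureDevice as above:
do {
    try captureDevice.lockForConfiguration()
    // rate is specified in powers of 2 per second
    captureDevice.ramp(toVideoZoomFactor: 2.0, withRate: 4.0)
    captureDevice.unlockForConfiguration()
} catch {
    print("Could not lock the device for configuration: \(error)")
}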

No error printed however function doesn't run

I am attempting to make a camera view appear. As you can see in my code below, I have it set up to display any errors and not break the program if any occur; however, when I run this code, no error occurs and no camera view is displayed. I am running it on an actual phone, and the phone did request permission to use the camera. Below is the code.
override func viewWillAppear(animated: Bool) {
    super.viewWillAppear(animated)
    captureSession = AVCaptureSession()
    captureSession?.sessionPreset = AVCaptureSessionPreset1920x1080
    let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    var input: AVCaptureDeviceInput?
    let error: NSError?
    do {
        input = try AVCaptureDeviceInput(device: backCamera)
    } catch let error as NSError? {
        print(error)
        if error == nil && (captureSession?.canAddInput(input))! {
            captureSession?.addInput(input)
            videoOutput = AVCaptureVideoDataOutput()
            //videoOutput?.outputSettings = [AVVideoCodecKey : AVVideoCodecKey]
            if ((captureSession?.canAddOutput(videoOutput)) != nil) {
                captureSession?.addOutput(videoOutput)
                previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
                previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
                previewLayer?.connection.videoOrientation = AVCaptureVideoOrientation.Portrait
                cameraView.layer.addSublayer(previewLayer!)
                captureSession?.startRunning()
            }
        }
    }
}
The body of a catch is executed only if an error has occurred, so since your code is inside the catch block, it won't execute when no error is thrown.
So remove the code from the catch block and put it outside, as suggested by @penatheboss.
Don't put code in the catch; that is for when something goes wrong.
do {
    input = try AVCaptureDeviceInput(device: backCamera)
} catch let error as NSError? {
    print(error)
    return // Stop the rest of the code
}
if (captureSession?.canAddInput(input))! {
    captureSession?.addInput(input)
    videoOutput = AVCaptureVideoDataOutput()
    //videoOutput?.outputSettings = [AVVideoCodecKey : AVVideoCodecKey]
    if captureSession?.canAddOutput(videoOutput) == true { // `!= nil` would always pass; compare against true
        captureSession?.addOutput(videoOutput)
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
        previewLayer?.connection.videoOrientation = AVCaptureVideoOrientation.Portrait
        cameraView.layer.addSublayer(previewLayer!)
        captureSession?.startRunning()
    }
}
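As a side note, the same flow reads a little more safely with guard and try?, so a failure simply exits early. A minimal sketch, assuming the same properties as the snippet above:
// Sketch: early-exit error handling for the same capture setup.
guard let input = try? AVCaptureDeviceInput(device: backCamera) else {
    print("Could not create the camera input")
    return
}
if captureSession?.canAddInput(input) == true {
    captureSession?.addInput(input)
}
// ...continue configuring the output and preview layer as above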

iOS Swift - AVCaptureSession - Capture frames respecting frame rate

I'm trying to build an app which will capture frames from the camera and process them with OpenCV before saving those files to the device, but at a specific frame rate.
What I'm stuck on at the moment is the fact that AVCaptureVideoDataOutputSampleBufferDelegate doesn't appear to respect the AVCaptureDevice.activeVideoMinFrameDuration, or AVCaptureDevice.activeVideoMaxFrameDuration settings.
captureOutput is called far more often than the 2 frames per second the above settings would suggest.
Do you happen to know how one could achieve this, with or without the delegate?
ViewController:
override func viewDidLoad() {
    super.viewDidLoad()
}

override func viewDidAppear(animated: Bool) {
    setupCaptureSession()
}

func setupCaptureSession() {
    let session: AVCaptureSession = AVCaptureSession()
    session.sessionPreset = AVCaptureSessionPreset1280x720
    let videoDevices: [AVCaptureDevice] = AVCaptureDevice.devices() as! [AVCaptureDevice]
    for device in videoDevices {
        if device.position == AVCaptureDevicePosition.Back {
            let captureDevice: AVCaptureDevice = device
            do {
                try captureDevice.lockForConfiguration()
                captureDevice.activeVideoMinFrameDuration = CMTimeMake(1, 2)
                captureDevice.activeVideoMaxFrameDuration = CMTimeMake(1, 2)
                captureDevice.unlockForConfiguration()
                let input: AVCaptureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
                if session.canAddInput(input) {
                    session.addInput(input)
                }
                let output: AVCaptureVideoDataOutput = AVCaptureVideoDataOutput()
                let dispatch_queue: dispatch_queue_t = dispatch_queue_create("streamoutput", nil)
                output.setSampleBufferDelegate(self, queue: dispatch_queue)
                session.addOutput(output)
                session.startRunning()
                let previewLayer = AVCaptureVideoPreviewLayer(session: session)
                previewLayer.connection.videoOrientation = .LandscapeRight
                let previewBounds: CGRect = CGRectMake(0, 0, self.view.frame.width/2, self.view.frame.height+20)
                previewLayer.backgroundColor = UIColor.blackColor().CGColor
                previewLayer.frame = previewBounds
                previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
                self.imageView.layer.addSublayer(previewLayer)
                self.previewMat.frame = CGRectMake(previewBounds.width, 0, previewBounds.width, previewBounds.height)
            } catch _ {
            }
            break
        }
    }
}

func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    self.wrapper.processBuffer(self.getUiImageFromBuffer(sampleBuffer), self.previewMat)
}
So I've figured out the problem.
In the header comments of AVCaptureDevice.h, above the activeVideoMinFrameDuration property, it states:
On iOS, the receiver's activeVideoMinFrameDuration resets to its default value under the following conditions:
- The receiver's activeFormat changes
- The receiver's AVCaptureDeviceInput's session's sessionPreset changes
- The receiver's AVCaptureDeviceInput is added to a session
The last bullet point was causing my problem, so setting the frame duration after adding the input solved it for me:
do {
    let input: AVCaptureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
    if session.canAddInput(input) {
        session.addInput(input)
    }
    // Configure the frame duration only after the input has been added to the session
    try captureDevice.lockForConfiguration()
    captureDevice.activeVideoMinFrameDuration = CMTimeMake(1, 2)
    captureDevice.activeVideoMaxFrameDuration = CMTimeMake(1, 2)
    captureDevice.unlockForConfiguration()
    let output: AVCaptureVideoDataOutput = AVCaptureVideoDataOutput()
    let dispatch_queue: dispatch_queue_t = dispatch_queue_create("streamoutput", nil)
    output.setSampleBufferDelegate(self, queue: dispatch_queue)
    session.addOutput(output)
} catch _ {
}
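As an extra safeguard (my addition, not part of the original answer), you can verify that the device's active format actually supports the desired frame duration before applying it, via videoSupportedFrameRateRanges. Inside the same do block, that might look like:
// Only apply the duration if the active format supports it.
let desired = CMTimeMake(1, 2) // 2 fps
let isSupported = captureDevice.activeFormat.videoSupportedFrameRateRanges.contains { range in
    CMTimeCompare(desired, range.minFrameDuration) >= 0 &&
        CMTimeCompare(desired, range.maxFrameDuration) <= 0
}
if isSupported {
    try captureDevice.lockForConfiguration()
    captureDevice.activeVideoMinFrameDuration = desired
    captureDevice.activeVideoMaxFrameDuration = desired
    captureDevice.unlockForConfiguration()
}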
