Depth capture for iPhone X back camera not working - ios

I'm running into an issue where isDepthDataDeliveryEnabled is false for me on an iPhone X when using .builtInDualCamera.
The weird thing is that this only happens on one specific iPhone X. I've tested on other iPhone X and iPhone 11 devices, and everything works as expected. All devices I've tested have been running iOS 13.3.
Just wanted to see if there might be any device-specific reason why depth data capture would not be enabled on a particular device.
lazy var frontCamera: AVCaptureDevice? = {
    // Note: despite the name, this discovers *back* cameras.
    let discoverySession = AVCaptureDevice.DiscoverySession(
        deviceTypes: [.builtInDualCamera, .builtInDualWideCamera, .builtInUltraWideCamera,
                      .builtInTelephotoCamera, .builtInWideAngleCamera, .builtInTrueDepthCamera,
                      .builtInTripleCamera],
        mediaType: .video,
        position: .back)
    let devices = discoverySession.devices
    for device in devices {
        print("\(device) supports \(device.activeFormat.supportedDepthDataFormats)")
    }
    let depthSupportedDevices = devices.filter({ $0.activeFormat.supportedDepthDataFormats.count != 0 })
    print("depth supported devices are: \(depthSupportedDevices)")
    return depthSupportedDevices.first
}()

func sessionPrepare() {
    guard let captureDevice = frontCamera else { return }
    do {
        let deviceInput = try AVCaptureDeviceInput(device: captureDevice)
        session.beginConfiguration()
        session.sessionPreset = .photo
        if session.canAddInput(deviceInput) {
            session.addInput(deviceInput)
        }
        if session.canAddOutput(output) {
            session.addOutput(output)
            output.isDepthDataDeliveryEnabled = output.isDepthDataDeliverySupported
            output.isPortraitEffectsMatteDeliveryEnabled = output.isPortraitEffectsMatteDeliverySupported
        }
        session.commitConfiguration()
    } catch {
        self.dismiss(animated: true, completion: nil)
    }
}
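One diagnostic worth trying (a hedged sketch, not a confirmed fix): depth support is reported per format, so an empty activeFormat.supportedDepthDataFormats doesn't by itself prove the hardware reports no depth capability. Enumerating every format can show whether the problem device reports any depth-capable format at all:

// Hedged diagnostic: list every format on the dual camera that reports
// at least one depth data format. If this prints 0 on the problem device
// but not on others, the issue is device-specific rather than configuration.
if let device = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back) {
    let depthCapableFormats = device.formats.filter { !$0.supportedDepthDataFormats.isEmpty }
    print("formats with depth support: \(depthCapableFormats.count)")
}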

Related

AVFoundation's ultra-wide-angle camera behaves differently than the default Apple camera

Thanks for reading.
I am creating a custom camera using AVFoundation.
When I zoom out as far as possible with the ultra-wide camera (i.e. when videoZoomFactor is at its minimum), the field of view is narrower than in the default Apple camera app.
Looking at the metadata in the photo album, the focal length is 13 mm for the default Apple camera, while it is 16 mm for mine. Below is an excerpt of the code.
Camera Settings
if let captureDevice = AVCaptureDevice.default(.builtInTripleCamera, for: .video, position: .back) {
    self.captureDevice = captureDevice
} else if let captureDevice = AVCaptureDevice.default(.builtInDualWideCamera, for: .video, position: .back) {
    self.captureDevice = captureDevice
} else if let captureDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back) {
    self.captureDevice = captureDevice
}

do {
    // captureDevice is an optional property, so unwrap it before building the input.
    guard let captureDevice = self.captureDevice else { return }
    let input = try AVCaptureDeviceInput(device: captureDevice)
    let videoDataOutput = AVCaptureVideoDataOutput()
    // Omitted
    photoOutput = AVCapturePhotoOutput()
    guard let photoOutput = photoOutput else { return }
    photoOutput.isHighResolutionCaptureEnabled = true
    session.sessionPreset = .photo
    // Omitted
} catch {
}

for connection in session.connections {
    connection.preferredVideoStabilizationMode = .cinematicExtended
}
Zoom function

func zoom(zoomFactor: CGFloat, ramping: Bool = false) {
    do {
        try captureDevice?.lockForConfiguration()
        self.zoomFactor = zoomFactor
        if ramping {
            captureDevice?.ramp(toVideoZoomFactor: zoomFactor, withRate: 10.0)
        } else {
            captureDevice?.videoZoomFactor = zoomFactor
        }
        captureDevice?.unlockForConfiguration()
    } catch {
        errorReportingService.reportError(error: error)
    }
}
Test devices: iPhone 11, iPhone 12 mini.
Thanks for reading this far. I want to make it as wide-angle as the default Apple camera!
The default app achieves a wider angle than the one I created, so I believe there is a way.
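A hedged diagnostic sketch (my own suggestion, not a confirmed answer): the zoom range, the virtual device's switch-over factors, and geometric distortion correction (iOS 13+, which crops the frame slightly when enabled) are all worth inspecting, since any of them can make the effective field of view narrower than the stock Camera app's:

if let device = AVCaptureDevice.default(.builtInTripleCamera, for: .video, position: .back) {
    // On a virtual device the widest view is at minAvailableVideoZoomFactor.
    print("zoom range: \(device.minAvailableVideoZoomFactor)...\(device.maxAvailableVideoZoomFactor)")
    print("switch-over factors: \(device.virtualDeviceSwitchOverVideoZoomFactors)")
    // Distortion correction crops the edges; disabling it may widen the FoV.
    if device.isGeometricDistortionCorrectionSupported {
        try? device.lockForConfiguration()
        device.isGeometricDistortionCorrectionEnabled = false
        device.unlockForConfiguration()
    }
}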

Crash when using front camera ONLY on pre-iPhone 7 devices

I've recently started beta testing my camera-based app. Everything is working as expected except on iPhone 6 devices.
The session starts on the back camera, and each time an iPhone 6 user switches to the front camera the app crashes. (And just to be really clear: no one on any other iPhone model is experiencing the issue.) I've gotten my hands on a 6 to test and can consistently reproduce the error, resulting in libc++abi.dylib: terminating with uncaught exception of type NSException.
I've tried starting the session on the front camera and it crashes immediately.
func initializeCamera() {
    self.captureSession.sessionPreset = .hd1920x1080
    let discovery = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                     mediaType: .video,
                                                     position: .unspecified)
    for device in discovery.devices {
        if device.hasMediaType(.video) {
            if device.position == .front {
                videoCaptureDevice = device
                do {
                    try currentDeviceInput = AVCaptureDeviceInput(device: device)
                } catch {
                    print("error: \(error.localizedDescription)")
                }
            }
        }
    }
    if videoCaptureDevice != nil {
        do {
            let videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice!)
            captureSession.addInput(videoInput)
            if let audioInput = AVCaptureDevice.default(for: .audio) {
                try captureSession.addInput(AVCaptureDeviceInput(device: audioInput))
            }
            previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
            guard let previewLayer = previewLayer else { return }
            cameraPreviewView.frame = cameraContainer.frame
            cameraPreviewView.layer.addSublayer(previewLayer)
            previewLayer.frame = cameraPreviewView.frame
            previewLayer.videoGravity = .resizeAspectFill
            setVideoOrientation()
            captureSession.addOutput(movieFileOutput)
            if let movieFileOutputConnection = movieFileOutput.connection(with: .video) {
                if movieFileOutputConnection.isVideoStabilizationSupported {
                    movieFileOutputConnection.preferredVideoStabilizationMode = .cinematic
                }
            }
            captureSession.startRunning()
            sessionIsReady(true)
        } catch {
            print("error: \(error.localizedDescription)")
        }
    }
}

func setVideoOrientation() {
    if let connection = self.previewLayer?.connection {
        if connection.isVideoOrientationSupported {
            connection.videoOrientation = .portrait
            previewLayer?.frame = cameraContainer.bounds
        }
    }
}
The crash is triggered at captureSession.addInput(videoInput). videoInput is not nil. The camera's orientation is locked to portrait.
Can anyone offer any insight? Please let me know if any additional code would be helpful. Thanks in advance.
captureSession.addInput(videoInput) is causing the crash, so you should call canAddInput(_:) first to avoid it:
if captureSession.canAddInput(videoInput) {
    captureSession.addInput(videoInput)
}
And in your case, captureSession.canAddInput(videoInput) == false on that iPhone 6.
Now, you are also doing self.captureSession.sessionPreset = .hd1920x1080.
But according to Wikipedia, the iPhone 6 front camera supports 1.2 MP stills (1280×960 px max.) and 720p video recording at 30 fps, which doesn't fit 1920×1080 ("Full HD").
You could use something like this to pick the best AVCaptureSession.Preset the device supports:
func setSessionPreset(forDevice device: AVCaptureDevice) {
    // Ordered from most preferred to least preferred.
    let videoPresets: [AVCaptureSession.Preset] = [.hd4K3840x2160, .hd1920x1080, .hd1280x720]
    let preset = videoPresets.first(where: { device.supportsSessionPreset($0) }) ?? .hd1280x720
    captureSession.sessionPreset = preset
}
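Usage might look like this (a sketch assuming the question's videoCaptureDevice, called before adding inputs):

if let device = videoCaptureDevice {
    // Pick the best preset this front camera actually supports
    // (e.g. .hd1280x720 on an iPhone 6) before configuring the session.
    setSessionPreset(forDevice: device)
}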

AVCaptureSession freezes when torch is turned on

We have barcode scanning functionality in our iOS app, and we give the customer the ability to toggle the torch on and off as needed. On the iPhone X (and only on the iPhone X), when the AVCaptureSession is running and the torch is enabled, the video capture on the screen freezes. As soon as the torch is turned off, the video capture starts again. Has anyone run into this? I can't seem to find anything that points to a workaround. Wondering if this is an iPhone X bug?
I ran into this issue. After some experimentation, it turned out that obtaining the device to configure the torch must be done in the exact same way that you obtain the device when you configure your AVCaptureSession, e.g.:
let captureSession = AVCaptureSession()
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera],
                                                              mediaType: AVMediaType.video,
                                                              position: .back)
guard let captureDevice = deviceDiscoverySession.devices.first else {
    print("Couldn't get a camera")
    return
}
do {
    let input = try AVCaptureDeviceInput(device: captureDevice)
    captureSession.addInput(input)
} catch {
    print(error)
    return
}
Use that exact method for obtaining the device when toggling the torch (flashlight) on and off. In this case, the lines:
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .back)
guard let device = deviceDiscoverySession.devices.first
Example:
func toggleTorch() {
    let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera],
                                                                  mediaType: AVMediaType.video,
                                                                  position: .back)
    guard let device = deviceDiscoverySession.devices.first else { return }
    if device.hasTorch {
        do {
            try device.lockForConfiguration()
            let on = device.isTorchActive
            if on != true && device.isTorchModeSupported(.on) {
                try device.setTorchModeOn(level: 1.0)
            } else if device.isTorchModeSupported(.off) {
                device.torchMode = .off
            } else {
                print("Torch mode is not supported")
            }
            device.unlockForConfiguration()
        } catch {
            print("Torch could not be used")
        }
    } else {
        print("Torch is not available")
    }
}
I realize some code may be superfluous in the toggleTorch function, but I'm leaving it. Hope this helps.
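An alternative sketch along the same lines (my own variation, not part of the answer above): rather than re-running discovery every time, hold on to the exact AVCaptureDevice instance the session was configured with and toggle the torch on that:

final class TorchController {
    // The same AVCaptureDevice instance that was added to the AVCaptureSession.
    private let device: AVCaptureDevice

    init(device: AVCaptureDevice) {
        self.device = device
    }

    func toggleTorch() {
        guard device.hasTorch else { return }
        do {
            try device.lockForConfiguration()
            device.torchMode = device.isTorchActive ? .off : .on
            device.unlockForConfiguration()
        } catch {
            print("Torch could not be used: \(error)")
        }
    }
}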

How to get front camera, back camera and audio with AVCaptureDeviceDiscoverySession

Before iOS 10 came out I was using the following code to get the video and audio capture for my video recorder:
for device in AVCaptureDevice.devices() {
    if (device as AnyObject).hasMediaType(AVMediaTypeAudio) {
        self.audioCapture = device as? AVCaptureDevice
    } else if (device as AnyObject).hasMediaType(AVMediaTypeVideo) {
        if (device as AnyObject).position == AVCaptureDevicePosition.back {
            self.backCameraVideoCapture = device as? AVCaptureDevice
        } else {
            self.frontCameraVideoCapture = device as? AVCaptureDevice
        }
    }
}
When iOS 10 finally came out, I received the following warning when I was running my code. Note that my video recorder was still working smoothly for about 2 weeks.
'devices()' was deprecated in iOS 10.0: Use AVCaptureDeviceDiscoverySession instead.
As I was running my code this morning, my video recorder stopped working. Xcode 8 does not give me any errors, but the previewLayer for the camera capture is completely white. When I then start recording I receive the following error:
Error Domain=AVFoundationErrorDomain Code=-11800 "The operation could not be completed" UserInfo={NSLocalizedDescription=The operation could not be completed, NSUnderlyingError=0x17554440 {Error Domain=NSOSStatusErrorDomain Code=-12780 "(null)"}, NSLocalizedFailureReason=An unknown error occurred (-12780)}
I believe that has something to do with the fact that I am using the deprecated approach AVCaptureDevice.devices(). Hence, I was wondering how to use AVCaptureDeviceDiscoverySession instead?
Thank you for your help in advance!
You can get the front camera with the following:
AVCaptureDevice.default(.builtInWideAngleCamera, for: AVMediaType.video, position: .front)
The back camera:
AVCaptureDevice.default(.builtInWideAngleCamera, for: AVMediaType.video, position: .back)
And the microphone:
AVCaptureDevice.default(.builtInMicrophone, for: AVMediaType.audio, position: .unspecified)
Here's my code (Swift 3) to get the camera position:
// Find a camera with the specified AVCaptureDevicePosition, returning nil if one is not found
func cameraWithPosition(_ position: AVCaptureDevicePosition) -> AVCaptureDevice? {
    if let deviceDiscoverySession = AVCaptureDeviceDiscoverySession.init(deviceTypes: [AVCaptureDeviceType.builtInWideAngleCamera],
                                                                         mediaType: AVMediaTypeVideo,
                                                                         position: AVCaptureDevicePosition.unspecified) {
        for device in deviceDiscoverySession.devices {
            if device.position == position {
                return device
            }
        }
    }
    return nil
}
If you want, you can also get the new device types from the iPhone 7 Plus (dual camera) by changing the deviceTypes array.
Here's a good read: https://forums.developer.apple.com/thread/63347
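For example, in current Swift syntax (the fallback ordering here is my own choice):

let discoverySession = AVCaptureDevice.DiscoverySession(
    deviceTypes: [.builtInDualCamera, .builtInWideAngleCamera],
    mediaType: .video,
    position: .back)
// Devices come back in the order of the deviceTypes array,
// so .first prefers the dual camera when the hardware has one.
let camera = discoverySession.devices.first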
Swift 4, iOS 10+ and Xcode 10.1 replaces
if let cameraID = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)?.localizedName {
    //cameraID = "Front Camera"
}

with the AVCaptureDevice.DiscoverySession implementation

if let cameraID = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .front).devices.first?.localizedName {
    //cameraID = "Front Camera"
}

You need to wrap it in an #available(iOS 10, *) check.
It works on Xcode 9.2 and Swift 4
AVCaptureDevice.default(.builtInWideAngleCamera, for: AVMediaType.video, position: .back)
https://developer.apple.com/documentation/avfoundation/avcapturedevice/2361508-default
Swift 3
For selecting the back camera (you can change .back as needed). To select another deviceType, simply add it inside the [ ], e.g. [deviceTypeBackCamera, AVCaptureDeviceType.builtInMicrophone], or create a private let for it like I did with the back camera:

private let position = AVCaptureDevicePosition.back
private let deviceTypeBackCamera = AVCaptureDeviceType.builtInWideAngleCamera

private func selectCaptureDevice() -> AVCaptureDevice? {
    return AVCaptureDeviceDiscoverySession(deviceTypes: [deviceTypeBackCamera], mediaType: AVMediaTypeVideo, position: position).devices.first
}
Example: iOS 11, Swift 4

override func viewDidLoad() {
    super.viewDidLoad()
    // Get the back-facing camera for capturing videos.
    let defaultDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: AVMediaType.video, position: .back)
    guard let captureDevice = defaultDevice else {
        print("Failed to get the camera device")
        return
    }
    do {
        // Get an instance of the AVCaptureDeviceInput class using the previous device object.
        let input = try AVCaptureDeviceInput(device: captureDevice)
        // Set the input device on the capture session.
        captureSession.addInput(input)
    } catch {
        // If any error occurs, simply print it out and don't continue any more.
        print(error)
        return
    }
    // Initialize the video preview layer and add it as a sublayer to the view's layer.
    videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    videoPreviewLayer?.frame = view.layer.bounds
    view.layer.addSublayer(videoPreviewLayer!)
    // Start video capture.
    captureSession.startRunning()
}
Try the code below to get the camera ID (Objective-C):

NSString *cameraID = nil;
NSArray *captureDeviceType = @[AVCaptureDeviceTypeBuiltInWideAngleCamera];
AVCaptureDeviceDiscoverySession *captureDevice =
    [AVCaptureDeviceDiscoverySession
        discoverySessionWithDeviceTypes:captureDeviceType
                              mediaType:AVMediaTypeVideo
                               position:AVCaptureDevicePositionUnspecified];
cameraID = [captureDevice.devices.lastObject localizedName];
Swift 4 (Xcode 10.1)
This is what worked for me in the latest version of Swift. I did not see this answer, and it took me a while to suss out, so here is how to get the front-facing camera.

if let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front) {
    // Do the camera thing here...
}
Simplified:

func getCamera(with position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: .video, position: .unspecified)
    return deviceDiscoverySession.devices.first(where: { $0.position == position })
}

// And you can use it like this:
guard let device = getCamera(with: .back) else { return }
For my video capture app I'm using the following code to get the mic and the front and rear cameras, and I've tested this code from iOS 7 to 10.0.2.

var frontCamera: AVCaptureDevice?
var rearCamera: AVCaptureDevice?

captureSession = AVCaptureSession()

let devices = AVCaptureDevice.devicesWithMediaType(AVMediaTypeVideo)
let audioDevices = AVCaptureDevice.devicesWithMediaType(AVMediaTypeAudio)

for mic in audioDevices {
    audioDevice = mic as? AVCaptureDevice
    audioCapturePossible = true
}

for device in devices {
    if device.position == AVCaptureDevicePosition.Front {
        frontCamera = device as? AVCaptureDevice
        hasFrontCamera = true
    } else if device.position == AVCaptureDevicePosition.Back {
        rearCamera = device as? AVCaptureDevice
        hasRearCamera = true
    }
}
In Swift 5 it's even easier:

AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .front)

05/2019:

// Video ("device" was obtained via the discovery session above)
self.session = AVCaptureSession()
guard
    let videoDeviceInput = try? AVCaptureDeviceInput(device: device!),
    self.session!.canAddInput(videoDeviceInput)
else { return }
self.session!.addInput(videoDeviceInput)

// Audio ("mic" is assumed to be the microphone device, obtained similarly)
guard
    let audioDeviceInput = try? AVCaptureDeviceInput(device: mic!),
    self.session!.canAddInput(audioDeviceInput)
else { return }
self.session!.addInput(audioDeviceInput)

How to get the front camera in Swift?

I am trying to get the front camera with live view. I am able to get the back camera using:
var backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
But I can't seem to find how to get the front camera. How can I change the code above to use the front camera?
Here is a working example from one of my projects to get the front camera. This is in Objective-C, but it's proven to work and easy enough to convert to Swift.
NSArray *videoDevices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
AVCaptureDevice *captureDevice = nil;
for (AVCaptureDevice *device in videoDevices) {
    if (device.position == AVCaptureDevicePositionFront) {
        captureDevice = device;
        break;
    }
}
And in Swift 3.2+:
if let videoDevices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) {
    var captureDevice: AVCaptureDevice
    for device in videoDevices {
        if let device = device as? AVCaptureDevice {
            if device.position == AVCaptureDevicePosition.front {
                captureDevice = device
                break
            }
        }
    }
}
In iOS 10.0 and later, you don't need to iterate through AVCaptureDevice.devices (or devicesWithMediaType) to find a camera by position. (In fact, both of those APIs are deprecated in iOS 10, and don't return the full set of available devices on iPhone 7 Plus, iPhone 8 Plus, or iPhone X.)
If you just need to find a single device based on simple characteristics (like a front-facing camera that can shoot video), just use AVCaptureDevice.defaultDevice(withDeviceType:mediaType:position:). For example:
guard let device = AVCaptureDevice.defaultDevice(
    withDeviceType: .builtInWideAngleCamera,
    mediaType: AVMediaTypeVideo,
    position: .front)
else { fatalError("no front camera. but don't all iOS 10 devices have them?") }

// then use the device: captureSession.addInput(device) or whatever
Really that's all there is to it for most use cases.
There's also AVCaptureDeviceDiscoverySession as a replacement for the old method of iterating through the devices array. However, most of the things you'd usually iterate through the devices array for can be found using the new defaultDevice(withDeviceType:mediaType:position:) method, so you might as well use that and write less code.
The cases where AVCaptureDeviceDiscoverySession is worth using are the less common, more complicated cases: say you want to find all the devices that support a certain frame rate, or use key-value observing to see when the set of available devices changes.
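For instance, a hedged sketch of the frame-rate case, in current Swift syntax (the 240 fps threshold is just an example):

let discovery = AVCaptureDevice.DiscoverySession(
    deviceTypes: [.builtInWideAngleCamera],
    mediaType: .video,
    position: .unspecified)
// Keep only devices with at least one format that reaches 240 fps.
let slowMotionCapable = discovery.devices.filter { device in
    device.formats.contains { format in
        format.videoSupportedFrameRateRanges.contains { $0.maxFrameRate >= 240 }
    }
}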
By the way, Apple also has a guide to the iOS 10 / Swift 3 photo capture system and some sample code that both show current best practices for these APIs.
If you're looking for a shorter solution (although .filter followed by .first isn't the most efficient):

guard let device = AVCaptureDevice.devices()
    .filter({ $0.position == .Front })
    .first as? AVCaptureDevice else {
    fatalError("No front facing camera found")
}
The latest is:

guard let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front) else {
    return
}
Swift 4.1, iOS 10+ and Xcode 9.3 replaces

if let cameraID = AVCaptureDevice.defaultDevice(withDeviceType: AVCaptureDeviceType.builtInWideAngleCamera, mediaType: AVMediaTypeVideo, position: AVCaptureDevicePosition.front)?.localizedName {
    //cameraID = "Front Camera"
}

with the AVCaptureDevice.DiscoverySession implementation

if let cameraID = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.front).devices.first?.localizedName {
    //cameraID = "Front Camera"
}
var captureSession: AVCaptureSession?
var frontDevice: AVCaptureDevice?
var frontInput: AVCaptureInput?

if let frontDevice = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .front).devices.first {
    // try? avoids needing a do/catch in this excerpt
    frontInput = try? AVCaptureDeviceInput(device: frontDevice)
}

captureSession?.beginConfiguration()
if let front = frontInput, captureSession?.canAddInput(front) == true {
    captureSession?.addInput(front)
}
captureSession?.commitConfiguration()
Based on the solution presented by mbuff24, but using the array method .first(where:) instead of .filter():

guard let frontCamera = AVCaptureDevice.devices().first(where: { ($0 as AnyObject).position == .front }) as? AVCaptureDevice else {
    fatalError("No front facing camera found")
}
import AVFoundation
import Foundation

@IBOutlet weak var imageView: UIImageView!

let captureSession = AVCaptureSession()
let stillImageOutput = AVCaptureStillImageOutput()
var previewLayer: AVCaptureVideoPreviewLayer?
var captureDevice: AVCaptureDevice?
var Arrdata: [Studentdata] = []

override func viewDidLoad() {
    super.viewDidLoad()
    captureSession.sessionPreset = AVCaptureSessionPresetHigh
    if let devices = AVCaptureDevice.devices() as? [AVCaptureDevice] {
        for device in devices {
            if device.hasMediaType(AVMediaTypeVideo) {
                if device.position == AVCaptureDevicePosition.front {
                    captureDevice = device
                    if captureDevice != nil {
                        print("Capture device found")
                        beginSession()
                    }
                }
            }
        }
    }
}

func beginSession() {
    do {
        try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
        stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
        if captureSession.canAddOutput(stillImageOutput) {
            captureSession.addOutput(stillImageOutput)
        }
    } catch {
        print("error: \(error.localizedDescription)")
    }
    guard let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) else {
        print("no preview layer")
        return
    }
    self.view.layer.addSublayer(previewLayer)
    previewLayer.frame = self.view.layer.frame
    captureSession.startRunning()
    self.view.addSubview(imageView)
}
