Depth data always nil - iOS

I am trying to capture photos with depth data using the back camera. I followed the example on Apple's website here, but the depth data is always nil. I tried this on multiple iOS devices, but I keep getting nil.
My code:
func setupCamera() {
    // Select a depth-capable capture device.
    guard let videoDevice = AVCaptureDevice.default(.builtInDualCamera,
                                                    for: .video, position: .back)
        else { fatalError("No dual camera.") }
    guard let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
        self.captureSession.canAddInput(videoDeviceInput)
        else { fatalError("Can't add video input.") }
    guard self.captureSession.canAddOutput(photoOutput)
        else { fatalError("Can't add photo output.") }
    self.captureSession.beginConfiguration()
    self.captureSession.addInput(videoDeviceInput)
    // Set up photo output for depth data capture.
    photoOutput.isHighResolutionCaptureEnabled = true
    print(photoOutput.isDepthDataDeliverySupported)
    photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
    self.captureSession.addOutput(photoOutput)
    self.captureSession.sessionPreset = .photo
    self.captureSession.commitConfiguration()
    videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    videoPreviewLayer?.frame = view.layer.bounds
    view.layer.addSublayer(videoPreviewLayer!)
    captureSession.startRunning()
    self.view.bringSubviewToFront(self.button)
}
@IBAction func takePhoto(_ sender: UIButton) {
    let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
    photoSettings.isDepthDataFiltered = photoOutput.isDepthDataDeliveryEnabled
    photoOutput.capturePhoto(with: photoSettings, delegate: self)
}
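For reference, two things worth checking here (a hedged sketch, not from the original post): Apple's depth-capture flow also opts in on each capture's settings via isDepthDataDeliveryEnabled (isDepthDataFiltered only controls smoothing of depth that was already requested), and the output's isDepthDataDeliverySupported flag is typically only meaningful once the output has been added to a session with a depth-capable input, so enable delivery after addOutput.
@IBAction func takePhotoWithDepth(_ sender: UIButton) { // hypothetical variant of takePhoto
    let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
    if photoOutput.isDepthDataDeliveryEnabled {
        photoSettings.isDepthDataDeliveryEnabled = true // without this, photo.depthData stays nil
        photoSettings.isDepthDataFiltered = true        // smoothing applies only when depth is requested
    }
    photoOutput.capturePhoto(with: photoSettings, delegate: self)
}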

Related

the specified colorspace format is not supported. falling back on libyuv

I'm currently working on a camera app. Everything worked fine, but when I tried to change the constraints of the Vision View, the log suddenly printed this error:
[warning] the specified colorspace format is not supported. falling back on libyuv.
I have no idea where it comes from or what I should change. Below I'll paste the relevant code where I set up the camera.
func initializeCameraSession() {
    // 1: Create a new AV session
    // Get the camera devices
    let devices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                   mediaType: AVMediaType.video,
                                                   position: .front).devices
    // 2: Select a capture device
    avSession.sessionPreset = .low
    do {
        guard let captureDevice = devices.first else { return }
        let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
        avSession.beginConfiguration()
        if avSession.canAddInput(captureDeviceInput) {
            avSession.addInput(captureDeviceInput)
            self.videoDeviceInput = captureDeviceInput
        } else {
            print("Couldn't add video device input to the session.")
            avSession.commitConfiguration()
            return
        }
        avSession.commitConfiguration()
    } catch {
        print(error.localizedDescription)
    }
    // 3: Show output on a preview layer
    let captureOutput = AVCaptureVideoDataOutput()
    captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
    captureOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as String): NSNumber(value: kCVPixelFormatType_32BGRA)]
    avSession.addOutput(captureOutput)
    let previewLayer = AVCaptureVideoPreviewLayer(session: avSession)
    previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
    previewLayer.connection?.videoOrientation = .portrait
    previewLayer.frame = visionView.bounds
    visionView.layer.addSublayer(previewLayer)
    view.bringSubviewToFront(visionView)
    visionView.isHidden = true
    visionView.alpha = 0.0
    avSession.startRunning()
}
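A hedged guess, not from the original thread: the fallback message can appear when 32BGRA frames need colorspace conversion downstream (e.g., for Vision processing). One thing to try is requesting the camera's native biplanar YUV pixel format instead; whether this silences the warning in this particular app is an assumption:
captureOutput.videoSettings = [
    (kCVPixelBufferPixelFormatTypeKey as String):
        NSNumber(value: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange) // sensor-native 4:2:0 YUV
]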

Adding a view on top of an AVCaptureSession

I am trying to add a button on top of my AVCaptureSession preview layer. The button is meant to capture images (fire the capture function); however, I cannot seem to set one up.
func configureCaptureSession() {
    captureSession.sessionPreset = AVCaptureSession.Preset.photo
    let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera],
                                                                  mediaType: AVMediaType.video,
                                                                  position: .unspecified)
    for device in deviceDiscoverySession.devices {
        if device.position == .back {
            rearCam = device
        } else if device.position == .front {
            frontCam = device
        }
    }
    currentDevice = rearCam
    guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice) else {
        return
    }
    // wheelOverlay.image = UIImage(named: "wheel")
    // wheelOverlayLayer.contents = wheelOverlay
    stillPhotoOutput = AVCapturePhotoOutput()
    captureSession.addInput(captureDeviceInput)
    captureSession.addOutput(stillPhotoOutput)
    cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    cameraPreviewLayer?.frame = view.layer.frame
    print("Capture session setup")
    // view.layer.addSublayer(wheelOverlayLayer)
    // view.layer.insertSublayer(wheelOverlayLayer, above: cameraPreviewLayer)
    captureSession.startRunning()
}
Whereabouts do I need to add the subview?
You need to add it to the view after you add the camera layer, like this:
view.layer.addSublayer(cameraPreviewLayer!)
let bt = UIButton(type: .system)
bt.setTitle("Record", for: .normal)
bt.frame = /////
view.addSubview(bt)
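For a concrete version of that answer, here is a minimal sketch; the frame values and the takePhoto selector are assumptions, not from the original post:
view.layer.addSublayer(cameraPreviewLayer!)
let bt = UIButton(type: .system)
bt.setTitle("Record", for: .normal)
// Assumed placement: centered near the bottom of the screen.
bt.frame = CGRect(x: (view.bounds.width - 120) / 2, y: view.bounds.height - 80, width: 120, height: 44)
// Assumes an @objc func takePhoto(_:) exists to fire the capture.
bt.addTarget(self, action: #selector(takePhoto(_:)), for: .touchUpInside)
view.addSubview(bt) // added after the preview layer, so the button renders on top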

Preview doesn't show up (AVCaptureVideoPreviewLayer)

I'm trying to first just get my app to show the camera preview of the wide-angle camera.
But whatever I do, it just doesn't show up. (The app has video and audio permission and is being tested on an iPhone 7 Plus.)
Here's my session configuration code:
func configureSession() throws {
    session.beginConfiguration()
    // configure inputs
    let videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
    let audioDevice = AVCaptureDevice.default(.builtInMicrophone, for: .audio, position: .unspecified)
    guard let videoInput = try? AVCaptureDeviceInput(device: videoDevice!),
        let audioInput = try? AVCaptureDeviceInput(device: audioDevice!),
        session.canAddInput(videoInput), session.canAddInput(audioInput) else {
        throw CameraError.configurationFailed
    }
    // configure outputs
    let output = AVCaptureMovieFileOutput()
    let previewOutput = AVCaptureVideoDataOutput()
    previewOutput.alwaysDiscardsLateVideoFrames = true
    previewOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32BGRA)]
    guard session.canAddOutput(output) && session.canAddOutput(previewOutput) else {
        throw CameraError.configurationFailed
    }
    session.sessionPreset = .high
    session.addOutput(output)
    session.addOutput(previewOutput)
    session.commitConfiguration()
}
and this is the code for the previewLayer:
// adds given view as previewView to the session
func configureAsPreview(_ view: UIView) {
    let previewLayer = AVCaptureVideoPreviewLayer(session: session)
    view.layer.addSublayer(previewLayer)
    previewLayer.frame = view.layer.frame
}
and it is being called in viewDidLoad:
override func viewDidLoad() {
    super.viewDidLoad()
    // Do any additional setup after loading the view, typically from a nib.
    if cameraManager.hasPermissionForVideo {
        try! cameraManager.configureSession()
        cameraManager.configureAsPreview(self.previewView)
        cameraManager.startSession()
    } else {
        performSegue(withIdentifier: "presentPermissions", sender: nil)
    }
}
What am I doing wrong?
I really can't figure it out, and it's probably just something stupidly small.
Thanks.
Try updating the layer frame when needed, and use bounds instead of frame. Layers don't support autoresizing; you need to manage the frame manually.
private var __previewLayer: AVCaptureVideoPreviewLayer? = nil

func configureAsPreview(_ view: UIView) {
    let previewLayer = AVCaptureVideoPreviewLayer(session: session)
    view.layer.addSublayer(previewLayer)
    previewLayer.frame = view.bounds
    self.__previewLayer = previewLayer
}

override func viewDidLayoutSubviews() {
    super.viewDidLayoutSubviews()
    self.__previewLayer?.frame = self.view.bounds
}
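Separately from the frame issue: configureSession above checks canAddInput for both inputs in the guard but never actually adds them, so the session has no video to preview. A sketch of the missing step, using the same names from the question:
session.sessionPreset = .high
session.addInput(videoInput) // missing in the original configureSession
session.addInput(audioInput)
session.addOutput(output)
session.addOutput(previewOutput)
session.commitConfiguration()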

AVCaptureDeviceInput initialization exception

When I try to create an instance of type AVCaptureDeviceInput, I get the following error.
What I have done so far:
1) I have gained permission to use the camera and microphone.
2) I have tested the code on an iPhone 7 and an iPhone 5s.
3) When the error is displayed, I printed the value of session.isRunning and it returned true.
4) All the properties are retained strongly.
Error Domain=AVFoundationErrorDomain Code=-11814 "Cannot Record" UserInfo={NSLocalizedDescription=Cannot Record, NSLocalizedRecoverySuggestion=Try recording again.}
This is the code:
let session = AVCaptureSession()
self.session = session
session.sessionPreset = AVCaptureSessionPresetPhoto
do {
    let input = try AVCaptureDeviceInput(device: device)
    session.addInput(input)
    if session.canAddInput(input) {
        let stillImageOutput = AVCapturePhotoOutput()
        self.stillImageOutput = stillImageOutput
        let settings = AVCapturePhotoSettings()
        stillImageOutput.capturePhoto(with: settings, delegate: self)
        if session.canAddOutput(stillImageOutput) {
            session.addOutput(stillImageOutput)
            if let previewLayer = AVCaptureVideoPreviewLayer(session: session) {
                self.previewLayer = previewLayer
                previewLayer.videoGravity = AVLayerVideoGravityResizeAspect
                previewLayer.connection!.videoOrientation = AVCaptureVideoOrientation.portrait
                viewController.centerView.layer.insertSublayer(previewLayer, at: 0)
                session.startRunning()
            }
        }
    }
} catch {
    print(error.localizedDescription)
}
There are several issues in your code; the updated version with comments would look like this:
let session = AVCaptureSession()
self.session = session
// `This method is used to start the flow of data from the inputs to the outputs connected to the AVCaptureSession instance that is the receiver.`
// session.startRunning() // Don't start running until everything is configured
session.sessionPreset = AVCaptureSessionPresetPhoto
do {
    let input = try AVCaptureDeviceInput(device: device)
    // session.addInput(input) // This has to come after you check `canAddInput`
    if session.canAddInput(input) {
        session.addInput(input)
        let stillImageOutput = AVCapturePhotoOutput()
        self.stillImageOutput = stillImageOutput
        let settings = AVCapturePhotoSettings()
        // stillImageOutput.capturePhoto(with: settings, delegate: self) // This should be called after you add it as an output to the `session`
        if session.canAddOutput(stillImageOutput) {
            session.addOutput(stillImageOutput)
            if let previewLayer = AVCaptureVideoPreviewLayer(session: session) {
                self.previewLayer = previewLayer
                previewLayer.videoGravity = AVLayerVideoGravityResizeAspect
                previewLayer.connection!.videoOrientation = AVCaptureVideoOrientation.portrait
                viewController.centerView.layer.insertSublayer(previewLayer, at: 0)
                session.startRunning()
            }
            stillImageOutput.capturePhoto(with: settings, delegate: self)
        }
    }
} catch {
    print(error.localizedDescription)
}
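A hedged side note, not from the original answer: error -11814 ("Cannot Record") is commonly reported when a capture call such as capturePhoto(with:delegate:) fires before the output is attached to a running session, which is exactly the ordering problem the comments above address.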

Swift iOS Record Video and Audio with AVFoundation

I was able to successfully grab the recorded video by following this question here.
Basically:
Inherit from the AVCaptureFileOutputRecordingDelegate protocol
Loop through the available devices
Create a session with the camera
Start recording
Stop recording
Get the recorded video by implementing the above protocol's method
But the file doesn't come with audio.
According to this question, I have to record the audio separately and merge the video and audio using the mentioned classes.
But I have no idea how to implement video and audio recording at the same time.
for device in devices {
    // Make sure this particular device supports video
    if device.hasMediaType(AVMediaTypeVideo) {
        // Finally check the position and confirm we've got the back camera
        if device.position == AVCaptureDevicePosition.Back {
            captureDevice = device as? AVCaptureDevice
            if captureDevice != nil {
                print("Capture device found")
                beginSession()
            }
        }
    }
}
In this loop, the only available device positions are .Front and .Back.
The following is a way to record video with audio using the AVFoundation framework. The steps are:
1. Prepare the session:
self.captureSession = AVCaptureSession()
2. Prepare available video and audio devices:
let session = AVCaptureDevice.DiscoverySession.init(deviceTypes: [.builtInWideAngleCamera, .builtInMicrophone],
                                                    mediaType: AVMediaType.video,
                                                    position: AVCaptureDevice.Position.unspecified)
let cameras = (session.devices.compactMap { $0 })
for camera in cameras {
    if camera.position == .front {
        self.frontCamera = camera
    }
    if camera.position == .back {
        self.rearCamera = camera
        try camera.lockForConfiguration()
        camera.focusMode = .continuousAutoFocus
        camera.unlockForConfiguration()
    }
}
3. Prepare session inputs:
guard let captureSession = self.captureSession else {
    throw CameraControllerError.captureSessionIsMissing
}
if let rearCamera = self.rearCamera {
    self.rearCameraInput = try AVCaptureDeviceInput(device: rearCamera)
    if captureSession.canAddInput(self.rearCameraInput!) {
        captureSession.addInput(self.rearCameraInput!)
        self.currentCameraPosition = .rear
    } else {
        throw CameraControllerError.inputsAreInvalid
    }
} else if let frontCamera = self.frontCamera {
    self.frontCameraInput = try AVCaptureDeviceInput(device: frontCamera)
    if captureSession.canAddInput(self.frontCameraInput!) {
        captureSession.addInput(self.frontCameraInput!)
        self.currentCameraPosition = .front
    } else {
        throw CameraControllerError.inputsAreInvalid
    }
} else {
    throw CameraControllerError.noCamerasAvailable
}
// Add audio input
if let audioDevice = self.audioDevice {
    self.audioInput = try AVCaptureDeviceInput(device: audioDevice)
    if captureSession.canAddInput(self.audioInput!) {
        captureSession.addInput(self.audioInput!)
    } else {
        throw CameraControllerError.inputsAreInvalid
    }
}
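Note that self.audioDevice is never assigned in step 2 (a later answer points out that the discovery session there returns video devices only), so the audio block above can be silently skipped. A one-line sketch, assuming the system default microphone is acceptable:
// Assumption: fall back to the default audio device (the microphone).
self.audioDevice = AVCaptureDevice.default(for: .audio)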
4. Prepare output:
self.videoOutput = AVCaptureMovieFileOutput()
if captureSession.canAddOutput(self.videoOutput!) {
    captureSession.addOutput(self.videoOutput!)
}
captureSession.startRunning()
5. Start recording:
func recordVideo(completion: @escaping (URL?, Error?) -> Void) {
    guard let captureSession = self.captureSession, captureSession.isRunning else {
        completion(nil, CameraControllerError.captureSessionIsMissing)
        return
    }
    let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
    let fileUrl = paths[0].appendingPathComponent("output.mp4")
    try? FileManager.default.removeItem(at: fileUrl)
    videoOutput!.startRecording(to: fileUrl, recordingDelegate: self)
    self.videoRecordCompletionBlock = completion
}
6. Stop recording:
func stopRecording(completion: @escaping (Error?) -> Void) {
    guard let captureSession = self.captureSession, captureSession.isRunning else {
        completion(CameraControllerError.captureSessionIsMissing)
        return
    }
    self.videoOutput?.stopRecording()
}
7. Implement the delegate:
func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
    if error == nil {
        //do something
    } else {
        //do something
    }
}
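To fill in the //do something placeholders, the delegate can hand the result back through the completion block stored in step 5 (a sketch reusing the answer's videoRecordCompletionBlock):
func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
    if error == nil {
        self.videoRecordCompletionBlock?(outputFileURL, nil) // recording finished; pass the file URL along
    } else {
        self.videoRecordCompletionBlock?(nil, error) // surface the failure to the caller
    }
}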
I took the idea from here: https://www.appcoda.com/avfoundation-swift-guide/
Here is the complete project: https://github.com/rubaiyat6370/iOS-Tutorial/
Found the answer. This answer goes with this code.
It can simply be done by:
declaring another capture device variable
looping through the devices and initializing the camera and audio capture device variables
adding the audio input to the session
Code:
var captureDevice: AVCaptureDevice?
var captureAudio: AVCaptureDevice?
Loop through the devices and initialize the capture devices:
var captureDeviceVideoFound: Bool = false
var captureDeviceAudioFound: Bool = false
// Loop through all the capture devices on this phone
for device in devices {
    // Make sure this particular device supports video
    if device.hasMediaType(AVMediaTypeVideo) {
        // Finally check the position and confirm we've got the front camera
        if device.position == AVCaptureDevicePosition.Front {
            captureDevice = device as? AVCaptureDevice // initialize video
            if captureDevice != nil {
                print("Capture device found")
                captureDeviceVideoFound = true
            }
        }
    }
    if device.hasMediaType(AVMediaTypeAudio) {
        print("Capture device audio init")
        captureAudio = device as? AVCaptureDevice // initialize audio
        captureDeviceAudioFound = true
    }
}
if captureDeviceAudioFound && captureDeviceVideoFound {
    beginSession()
}
Inside the session:
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
try captureSession.addInput(AVCaptureDeviceInput(device: captureAudio))
This will output the video file with audio; there's no need to merge the audio or do anything else.
This Apple documentation helps.
I followed the answer from @Mumu, but it didn't work for me because the call to AVCaptureDevice.DiscoverySession.init was returning video devices only.
Here is my version, which works on iOS 14 with Swift 5:
var captureSession: AVCaptureSession? = nil
var camera: AVCaptureDevice? = nil
var microphone: AVCaptureDevice? = nil
var videoOutput: AVCaptureFileOutput? = nil
var previewLayer: AVCaptureVideoPreviewLayer? = nil

func findDevices() {
    camera = nil
    microphone = nil
    // Search for video media type; we need the back camera only
    let session = AVCaptureDevice.DiscoverySession.init(deviceTypes: [.builtInWideAngleCamera],
                                                        mediaType: AVMediaType.video,
                                                        position: AVCaptureDevice.Position.back)
    var devices = (session.devices.compactMap { $0 })
    // Search for the microphone
    let asession = AVCaptureDevice.DiscoverySession.init(deviceTypes: [.builtInMicrophone],
                                                         mediaType: AVMediaType.audio,
                                                         position: AVCaptureDevice.Position.unspecified)
    // Combine all devices into one list
    devices.append(contentsOf: asession.devices.compactMap { $0 })
    for device in devices {
        if device.position == .back {
            do {
                try device.lockForConfiguration()
                device.focusMode = .continuousAutoFocus
                device.flashMode = .off
                device.whiteBalanceMode = .continuousAutoWhiteBalance
                device.unlockForConfiguration()
                camera = device
            } catch {
            }
        }
        if device.hasMediaType(.audio) {
            microphone = device
        }
    }
}
func initVideoRecorder() -> Bool {
    captureSession = AVCaptureSession()
    guard let captureSession = captureSession else { return false }
    captureSession.sessionPreset = .hd4K3840x2160
    findDevices()
    guard let camera = camera else { return false }
    do {
        let cameraInput = try AVCaptureDeviceInput(device: camera)
        captureSession.addInput(cameraInput)
    } catch {
        self.camera = nil
        return false
    }
    if let audio = microphone {
        do {
            let audioInput = try AVCaptureDeviceInput(device: audio)
            captureSession.addInput(audioInput)
        } catch {
        }
    }
    videoOutput = AVCaptureMovieFileOutput()
    if captureSession.canAddOutput(videoOutput!) {
        captureSession.addOutput(videoOutput!)
        captureSession.startRunning()
        videoOutput?.connection(with: .video)?.videoOrientation = .landscapeRight
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer?.videoGravity = .resizeAspect
        previewLayer?.connection?.videoOrientation = .landscapeRight
        return true
    }
    return false
}
func startRecording() -> Bool {
    guard let captureSession = captureSession, captureSession.isRunning else { return false }
    let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
    let fileUrl = paths[0].appendingPathComponent(getVideoName())
    try? FileManager.default.removeItem(at: fileUrl)
    videoOutput?.startRecording(to: fileUrl, recordingDelegate: self)
    return true
}
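This answer stops at startRecording; a minimal stop counterpart, mirroring step 6 of the earlier answer:
func stopRecording() {
    guard let captureSession = captureSession, captureSession.isRunning else { return }
    videoOutput?.stopRecording() // the finished file arrives via the AVCaptureFileOutputRecordingDelegate callback
}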
I had this problem too, but when I grouped adding the video input together with the audio input, the audio worked. This is my code for adding the inputs:
if cameraSession.canAddInput(deviceInput) == true && cameraSession.canAddInput(audioDeviceInput) == true { // detects if devices can be added
    cameraSession.addInput(deviceInput)      // adds video
    cameraSession.addInput(audioDeviceInput) // adds audio
}
Also, I found you have to add the video input first or else there won't be audio. I originally had them in two if statements, but putting them in one lets video and audio be recorded together. Hope this helps.
Record Video With Audio
// Get the video device
if let devices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo) as? [AVCaptureDevice] {
    for device in devices {
        if device.hasMediaType(AVMediaTypeVideo) {
            if device.position == AVCaptureDevicePosition.back {
                videoCaptureDevice = device
            }
        }
    }
    if videoCaptureDevice != nil {
        do {
            // Add the video input
            try self.captureSession.addInput(AVCaptureDeviceInput(device: videoCaptureDevice))
            // Get the audio device
            let audioInput = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeAudio)
            // Add the audio input
            try self.captureSession.addInput(AVCaptureDeviceInput(device: audioInput))
            self.previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
            previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
            previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.portrait
            self.videoView.layer.addSublayer(self.previewLayer)
            // Add the file output
            self.captureSession.addOutput(self.movieOutput)
            captureSession.startRunning()
        } catch {
            print(error)
        }
    }
}
For more details, refer to this link:
https://medium.com/@santhosh3386/ios-avcapturesession-record-video-with-audio-23c8f8c9a8f8
