Unable to capture depth on iPhone 7+ with iOS 11

I'm using an iPhone 7+ with iOS 11 installed, and I'm trying to adapt some code that captures regular images to also capture depth.
When I call capturePhotoOutput?.isDepthDataDeliverySupported it returns false. I was under the impression that I would be able to use my iPhone 7+ to capture depth.
Am I missing a permission in Info.plist, or have I made a more fundamental error?
//
// RecorderViewController.swift
import UIKit
import AVFoundation
class RecorderViewController: UIViewController {
@IBOutlet weak var previewView: UIView!
@IBAction func onTapTakePhoto(_ sender: Any) {
// Make sure capturePhotoOutput is valid
guard let capturePhotoOutput = self.capturePhotoOutput else { return }
// Get an instance of AVCapturePhotoSettings class
let photoSettings = AVCapturePhotoSettings()
// Set photo settings for our need
photoSettings.isAutoStillImageStabilizationEnabled = true
photoSettings.isHighResolutionPhotoEnabled = true
photoSettings.flashMode = .auto
// Call capturePhoto method by passing our photo settings and a
// delegate implementing AVCapturePhotoCaptureDelegate
capturePhotoOutput.capturePhoto(with: photoSettings, delegate: self)
}
var captureSession: AVCaptureSession?
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var capturePhotoOutput: AVCapturePhotoOutput?
override func viewDidLoad() {
super.viewDidLoad()
//let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
let captureDevice = AVCaptureDevice.default(AVCaptureDevice.DeviceType.builtInDualCamera, for: .video, position: .back)
do {
let input = try AVCaptureDeviceInput(device: captureDevice!)
captureSession = AVCaptureSession()
captureSession?.addInput(input)
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer?.frame = view.layer.bounds
previewView.layer.addSublayer(videoPreviewLayer!)
capturePhotoOutput = AVCapturePhotoOutput()
capturePhotoOutput?.isHighResolutionCaptureEnabled = true
if (capturePhotoOutput?.isDepthDataDeliverySupported)!
{
capturePhotoOutput?.isDepthDataDeliveryEnabled = true
}
else{
print ("DEPTH NOT SUPPORTED!")
}
// Set the output on the capture session
captureSession?.addOutput(capturePhotoOutput!)
captureSession?.startRunning()
} catch {
print(error)
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
extension RecorderViewController : AVCapturePhotoCaptureDelegate {
func photoOutput(_ captureOutput: AVCapturePhotoOutput,
didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?,
previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?,
resolvedSettings: AVCaptureResolvedPhotoSettings,
bracketSettings: AVCaptureBracketedStillImageSettings?,
error: Error?) {
// get captured image
// Make sure we get some photo sample buffer
guard error == nil,
let photoSampleBuffer = photoSampleBuffer else {
print("Error capturing photo: \(String(describing: error))")
return
}
// Convert the photo sample buffer to JPEG image data using AVCapturePhotoOutput
guard let imageData =
AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: photoSampleBuffer, previewPhotoSampleBuffer: previewPhotoSampleBuffer) else {
return
}
// Initialise a UIImage with our image data
let capturedImage = UIImage.init(data: imageData , scale: 1.0)
if let image = capturedImage {
// Save our captured image to photos album
UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
}
}
}
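For reference, here is a minimal sketch (based on the fix described in the related depth-delivery question further down, not on anything confirmed in this question) of a configuration under which a dual-camera device reports isDepthDataDeliverySupported as true: the session uses the .photo preset, and the photo output is added to the session before depth support is queried or enabled.
import AVFoundation

// Sketch: configure the session first, then query depth support.
// Assumes a dual-camera device (e.g. iPhone 7 Plus) running iOS 11+.
func makeDepthCapableSession() -> (AVCaptureSession, AVCapturePhotoOutput)? {
    guard let device = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back),
          let input = try? AVCaptureDeviceInput(device: device) else { return nil }

    let session = AVCaptureSession()
    let output = AVCapturePhotoOutput()

    session.beginConfiguration()
    session.sessionPreset = .photo        // a preset that supports depth
    session.addInput(input)
    session.addOutput(output)             // attach the output before checking support
    session.commitConfiguration()

    // Only meaningful once the output is attached to a depth-capable session.
    if output.isDepthDataDeliverySupported {
        output.isDepthDataDeliveryEnabled = true
    } else {
        print("DEPTH NOT SUPPORTED!")
    }
    return (session, output)
}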

Related

How to get current picture from WebRTC as image?

I am using WebRTC to capture video from the user's camera. At some point I want to get the current picture as a UIImage to save it in the photo library. I am using localVideoView to show video from the local camera, but when I try to take a screenshot of that view, it is empty (just a blue background).
This is my code to take the screenshot:
func screenShotMethod() {
DispatchQueue.main.async {
//Create the UIImage
UIGraphicsBeginImageContext(self.localVideoView!.frame.size)
self.localVideoView?.layer.render(in: UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
//Save it to the camera roll
UIImageWriteToSavedPhotosAlbum(image!, nil, nil, nil)
}
}
Here is sample code to capture a photo, without a camera preview, during a WebRTC video call. The TakePicture() method has to be invoked through a signal or a button tap.
import AVFoundation
import WebRTC
import UIKit
import Foundation
class CallViewController: UIViewController{
var captureSession : AVCaptureSession?
func TakePicture() {
DispatchQueue.main.async { [self] in
captureSession = AVCaptureSession()
captureSession!.beginConfiguration()
let photoOutput = AVCapturePhotoOutput()
photoOutput.isHighResolutionCaptureEnabled = true
photoOutput.isLivePhotoCaptureEnabled = false
if let captureDevice = AVCaptureDevice.default(for: .video){
do
{
let input = try AVCaptureDeviceInput(device: captureDevice)
if captureSession!.canAddInput(input){
captureSession!.addInput(input)
}
} catch let error {
print(error)
}
if captureSession!.canAddOutput(photoOutput){
captureSession!.addOutput(photoOutput)
}
let cameraLayer = AVCaptureVideoPreviewLayer()
cameraLayer.session = captureSession
captureSession!.commitConfiguration()
captureSession!.startRunning()
let photoSettings = AVCapturePhotoSettings()
//photoSettings.flashMode = .auto //check device properties before turning on flash
photoSettings.photoQualityPrioritization = .balanced
photoOutput.capturePhoto(with: photoSettings, delegate: self)
}
}
}
}
extension CallViewController: AVCapturePhotoCaptureDelegate{
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
captureSession!.stopRunning()
captureSession = nil
let imageData = photo.fileDataRepresentation()
//Do the rest with image bytes
}
}

MLKit Text Recognition: Text Not Being Detected

I am making an iOS app where a user takes a picture, and then I want to use Google's MLKit from Firebase to detect text in the picture. I have set up a custom camera UIViewController that we'll call CameraViewController. There is a simple button that the user presses to take a picture. I have followed Firebase's documentation, here, but MLKit is not working for me. Here is the code for your reference, and then we'll talk about the problem.
1. Here are my imports, class delegates, and outlets:
import UIKit
import AVFoundation
import Firebase
class CameraViewController: UIViewController, AVCapturePhotoCaptureDelegate {
var captureSession: AVCaptureSession?
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var capturePhotoOutput: AVCapturePhotoOutput?
@IBOutlet var previewView: UIView!
@IBOutlet var captureButton: UIButton!
}
2. In viewDidLoad, I set up the previewView so that the user has a viewfinder:
override func viewDidLoad() {
super.viewDidLoad()
let captureDevice = AVCaptureDevice.default(for: .video)!
do {
let input = try AVCaptureDeviceInput(device: captureDevice)
captureSession = AVCaptureSession()
captureSession?.addInput(input)
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer?.frame = view.layer.bounds
previewView.layer.addSublayer(videoPreviewLayer!)
captureSession?.startRunning()
capturePhotoOutput = AVCapturePhotoOutput()
capturePhotoOutput?.isHighResolutionCaptureEnabled = true
captureSession?.addOutput(capturePhotoOutput!)
} catch {
print(error)
}
}
3. Here is my action for the button that takes the image:
@IBAction func captureButtonTapped(_ sender: Any) {
guard let capturePhotoOutput = self.capturePhotoOutput else { return }
let photoSettings = AVCapturePhotoSettings()
photoSettings.isAutoStillImageStabilizationEnabled = true
photoSettings.isHighResolutionPhotoEnabled = true
photoSettings.flashMode = .off
capturePhotoOutput.capturePhoto(with: photoSettings, delegate: self)
}
4. This is where I receive the captured photo via the didFinishProcessingPhoto delegate method and start using MLKit:
func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
guard error == nil,
let photoSampleBuffer = photoSampleBuffer else {
print("Error capturing photo: \(String(describing: error))")
return
}
guard let imageData =
AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: photoSampleBuffer, previewPhotoSampleBuffer: previewPhotoSampleBuffer) else {
return
}
let capturedImage = UIImage.init(data: imageData , scale: 1.0)
captureNormal()
DispatchQueue.main.asyncAfter(deadline: .now()+0.1) {
self.captureSession?.stopRunning()
self.processText(with: capturedImage!)
// Here is where I call the function processText where MLKit is run
}
}
5. Lastly, here is my function processText(with: UIImage) that uses MLKit:
func processText(with image: UIImage) {
let vision = Vision.vision()
let textRecognizer = vision.onDeviceTextRecognizer()
let visionImage = VisionImage(image: image)
textRecognizer.process(visionImage) { result, error in
if error != nil {
print("MLKIT ERROR - \(error)")
} else {
let resultText = result?.text
print("MLKIT RESULT - \(resultText)")
}
}
}
OK, that was a lot; thank you for reading all of it. The problem is that this does not work. I do get a proper UIImage in step 4, so it's not that. Here's a screenshot of an example of what I am trying to scan...
MLKit should easily be able to detect this text, but every time I try, result?.text is always printed as nil. I'm out of ideas. Does anyone have any ideas on how to fix this? If so, thanks a lot!
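One thing worth ruling out (an assumption on my part, not something confirmed above) is image orientation: photos from AVCapturePhotoOutput often carry a non-.up imageOrientation, and on-device text recognition can fail silently when the pixel data is rotated relative to what the recognizer expects. A minimal sketch that redraws the image into an upright bitmap before handing it to processText(with:):
import UIKit

// Hypothetical helper: redraws a UIImage so its pixel data is upright (.up),
// discarding any EXIF-style orientation flag that could confuse recognition.
func normalizedImage(from image: UIImage) -> UIImage {
    guard image.imageOrientation != .up else { return image }
    let renderer = UIGraphicsImageRenderer(size: image.size)
    return renderer.image { _ in
        image.draw(in: CGRect(origin: .zero, size: image.size))
    }
}

// Possible call site inside the capture delegate:
// self.processText(with: normalizedImage(from: capturedImage!))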

iPhone 7+, iOS 11.2: Depth data delivery is not supported in the current configuration

This bug is driving me mad. I'm trying to produce the absolute minimal code to get AVDepthData from an iPhone 7+ using its DualCam.
I have this code:
//
// RecorderViewController.swift
// ios-recorder-app
import UIKit
import AVFoundation
class RecorderViewController: UIViewController {
@IBOutlet weak var previewView: UIView!
@IBAction func onTapTakePhoto(_ sender: Any) {
guard let capturePhotoOutput = self.capturePhotoOutput else { return }
let photoSettings = AVCapturePhotoSettings()
photoSettings.isDepthDataDeliveryEnabled = true //Error
capturePhotoOutput.capturePhoto(with: photoSettings, delegate: self)
}
var session: AVCaptureSession?
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var capturePhotoOutput: AVCapturePhotoOutput?
override func viewDidLoad() {
super.viewDidLoad()
AVCaptureDevice.requestAccess(for: .video, completionHandler: { _ in })
let captureDevice = AVCaptureDevice.default(.builtInDualCamera, for: .depthData, position: .back)
do {
print(captureDevice!)
let input = try AVCaptureDeviceInput(device: captureDevice!)
self.capturePhotoOutput = AVCapturePhotoOutput()
self.capturePhotoOutput?.isDepthDataDeliveryEnabled = true //Error
self.session = AVCaptureSession()
self.session?.addInput(input)
self.videoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.session!)
self.videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.videoPreviewLayer?.frame = view.layer.bounds
previewView.layer.addSublayer(self.videoPreviewLayer!)
self.session?.addOutput(self.capturePhotoOutput!)
self.session?.startRunning()
} catch {
print(error)
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
extension RecorderViewController : AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
print(photo.depthData)
}
}
If I comment out the lines that are marked with "Error" the code works as I would expect, and prints nil for depthData.
However, leaving the lines as they are, I get an exception. The error message states: [AVCapturePhotoOutput setDepthDataDeliveryEnabled:] Depth data delivery is not supported in the current configuration.
How do I change the "current configuration" so that depth delivery is supported?
I've watched this video: https://developer.apple.com/videos/play/wwdc2017/507/ which was helpful, and I believe I've followed the exact steps required to make this work.
Any tips would be gratefully received!
There are two things that I needed to fix:
1. Set a sessionPreset to a format that supports depth, such as .photo.
2. Add the capturePhotoOutput to the session before setting .isDepthDataDeliveryEnabled = true.
Here is my minimal code for getting depth with photos:
//
// RecorderViewController.swift
// ios-recorder-app
//
import UIKit
import AVFoundation
class RecorderViewController: UIViewController {
@IBOutlet weak var previewView: UIView!
@IBAction func onTapTakePhoto(_ sender: Any) {
guard let capturePhotoOutput = self.capturePhotoOutput else { return }
let photoSettings = AVCapturePhotoSettings()
photoSettings.isDepthDataDeliveryEnabled = true
capturePhotoOutput.capturePhoto(with: photoSettings, delegate: self)
}
var session: AVCaptureSession?
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var capturePhotoOutput: AVCapturePhotoOutput?
override func viewDidLoad() {
super.viewDidLoad()
AVCaptureDevice.requestAccess(for: .video, completionHandler: { _ in })
let captureDevice = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back)
print(captureDevice!.activeDepthDataFormat)
do{
let input = try AVCaptureDeviceInput(device: captureDevice!)
self.capturePhotoOutput = AVCapturePhotoOutput()
self.session = AVCaptureSession()
self.session?.beginConfiguration()
self.session?.sessionPreset = .photo
self.session?.addInput(input)
self.videoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.session!)
self.videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.videoPreviewLayer?.frame = self.view.layer.bounds
self.previewView.layer.addSublayer(self.videoPreviewLayer!)
self.session?.addOutput(self.capturePhotoOutput!)
self.session?.commitConfiguration()
self.capturePhotoOutput?.isDepthDataDeliveryEnabled = true
self.session?.startRunning()
}
catch{
print(error)
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
extension RecorderViewController : AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
print(photo.depthData)
}
}
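As a follow-up sketch (an assumption about what you might want to do next, not part of the answer above), the delivered AVDepthData can be converted to a known float format and read out as a CVPixelBuffer instead of just being printed:
import AVFoundation

// Hypothetical helper: converts the delivered AVDepthData to 32-bit disparity
// and returns the backing pixel buffer for inspection or visualisation.
func disparityPixelBuffer(from depthData: AVDepthData) -> CVPixelBuffer {
    let converted = depthData.converting(toDepthDataType: kCVPixelFormatType_DisparityFloat32)
    return converted.depthDataMap
}

// Usage inside photoOutput(_:didFinishProcessingPhoto:error:):
// if let depthData = photo.depthData {
//     let buffer = disparityPixelBuffer(from: depthData)
//     print(CVPixelBufferGetWidth(buffer), CVPixelBufferGetHeight(buffer))
// }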

Swift 3 - How to capture image and its subview?

I am working on a project that contains a custom camera view, and I am unable to capture the subviews over the main view. My main view hosts an AVCaptureSession preview, and I want to take a photo of the superview and its subviews together in a single image.
What I am trying in code:
class ViewController: UIViewController {
@IBOutlet weak var cameraButton: UIButton!
var captureSession = AVCaptureSession()
var backCamera: AVCaptureDevice?
var frontCamera: AVCaptureDevice?
var currentDevice: AVCaptureDevice?
var photoOutput: AVCapturePhotoOutput?
var cameraPreviewLayer:AVCaptureVideoPreviewLayer?
var image: UIImage?
var toggleCameraGestureRecognizer = UISwipeGestureRecognizer()
var zoomInGestureRecognizer = UISwipeGestureRecognizer()
var zoomOutGestureRecognizer = UISwipeGestureRecognizer()
override func viewDidLoad() {
super.viewDidLoad()
setupCaptureSession()
setupDevice()
setupInputOutput()
setupPreviewLayer()
captureSession.startRunning()
toggleCameraGestureRecognizer.direction = .up
toggleCameraGestureRecognizer.addTarget(self, action: #selector(self.switchCamera))
view.addGestureRecognizer(toggleCameraGestureRecognizer)
// Zoom In recognizer
zoomInGestureRecognizer.direction = .right
zoomInGestureRecognizer.addTarget(self, action: #selector(zoomIn))
view.addGestureRecognizer(zoomInGestureRecognizer)
// Zoom Out recognizer
zoomOutGestureRecognizer.direction = .left
zoomOutGestureRecognizer.addTarget(self, action: #selector(zoomOut))
view.addGestureRecognizer(zoomOutGestureRecognizer)
styleCaptureButton()
}
func styleCaptureButton() {
cameraButton.layer.borderColor = UIColor.white.cgColor
cameraButton.layer.borderWidth = 5
cameraButton.clipsToBounds = true
cameraButton.layer.cornerRadius = min(cameraButton.frame.width, cameraButton.frame.height) / 2
}
func setupCaptureSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}
func setupDevice() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
let devices = deviceDiscoverySession.devices
for device in devices {
if device.position == AVCaptureDevice.Position.back {
backCamera = device
} else if device.position == AVCaptureDevice.Position.front {
frontCamera = device
}
}
currentDevice = backCamera
}
func setupInputOutput() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentDevice!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
photoOutput!.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey : AVVideoCodecType.jpeg])], completionHandler: nil)
captureSession.addOutput(photoOutput!)
} catch {
print(error)
}
}
func setupPreviewLayer() {
self.cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
self.cameraPreviewLayer?.frame = view.frame
self.view.layer.insertSublayer(self.cameraPreviewLayer!, at: 0)
}
@IBAction func cameraButton_TouchUpInside(_ sender: Any) {
let settings = AVCapturePhotoSettings()
self.photoOutput?.capturePhoto(with: settings, delegate: self)
}
}
extension ViewController: AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
if let imageData = photo.fileDataRepresentation() {
self.image = UIImage(data: imageData)
performSegue(withIdentifier: "Preview_Segue", sender: nil)
}
}
}
Please help me
If I understood correctly, you are trying to get the content (as an image) of what the camera is grabbing, together with some overlay views.
As far as I remember, it is not possible to grab what is inside an AVCaptureVideoPreviewLayer; maybe they changed something in the latest version, but when I tried (iOS 6) it wasn't possible and the area covered by the preview layer was always empty.
What you can do is take the current camera buffer and draw into it. By setting a class as the video data output's sample buffer delegate, you can receive this callback:
func captureOutput(_ output: AVCaptureOutput,
didOutput sampleBuffer: CMSampleBuffer,
from connection: AVCaptureConnection)
Here you will receive each frame from the camera; this buffer can be converted into an image using the Accelerate framework or Core Image.
It is not easy, but it is also not impossible.
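A minimal sketch of that idea, assuming a video data output has been added to the session and its sample buffer delegate calls this helper (the overlay parameter is hypothetical; it stands for whatever view holds the subviews you want in the photo):
import AVFoundation
import CoreImage
import UIKit

// Hypothetical helper: turns one camera frame into a UIImage and draws an
// overlay view on top of it. drawHierarchy(in:afterScreenUpdates:) must run
// on the main thread, so hop queues before using this from the capture callback.
func composeFrame(_ sampleBuffer: CMSampleBuffer, with overlay: UIView) -> UIImage? {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    let context = CIContext()
    guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent) else { return nil }
    let cameraImage = UIImage(cgImage: cgImage)

    let renderer = UIGraphicsImageRenderer(size: cameraImage.size)
    return renderer.image { _ in
        cameraImage.draw(in: CGRect(origin: .zero, size: cameraImage.size))
        // Stretch the overlay to the frame; a real implementation would match
        // the preview layer's aspect-fill cropping and the frame orientation.
        overlay.drawHierarchy(in: CGRect(origin: .zero, size: cameraImage.size),
                              afterScreenUpdates: false)
    }
}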

How to capture image only custom camera view area in Swift3

I have a custom camera view. What I want is to capture only the image inside the custom camera view when I press the button, but it captures the whole screen, not just the camera view. I have also set the preview layer's frame to cameraView's frame. Here is my code:
class CustomCameraVC: UIViewController, AVCapturePhotoCaptureDelegate {
@IBOutlet weak var cameraView: UIView!
@IBOutlet weak var shotImage: UIImageView!
var captureSession: AVCaptureSession!
var imageOutput: AVCapturePhotoOutput!
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
captureSession = AVCaptureSession()
captureSession.sessionPreset = AVCaptureSessionPresetMedium
imageOutput = AVCapturePhotoOutput()
let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
do {
let input = try AVCaptureDeviceInput(device: device)
if captureSession.canAddInput(input) {
captureSession.addInput(input)
if captureSession.canAddOutput(imageOutput) {
captureSession.addOutput(imageOutput)
captureSession.startRunning()
let captureVideoLayer: AVCaptureVideoPreviewLayer = AVCaptureVideoPreviewLayer.init(session: captureSession)
captureVideoLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
captureVideoLayer.frame = self.cameraView.layer.frame
self.cameraView.layer.addSublayer(captureVideoLayer)
}
}
} catch {
print("error")
}
}
@IBAction func takePhoto(_ sender: UIButton) {
let settingsForMonitoring = AVCapturePhotoSettings()
settingsForMonitoring.flashMode = .auto
settingsForMonitoring.isAutoStillImageStabilizationEnabled = true
settingsForMonitoring.isHighResolutionPhotoEnabled = false
imageOutput?.capturePhoto(with: settingsForMonitoring, delegate: self)
}
func capture(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhotoSampleBuffer photoSampleBuffer: CMSampleBuffer?, previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
if let photoSampleBuffer = photoSampleBuffer {
let photoData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: photoSampleBuffer, previewPhotoSampleBuffer: previewPhotoSampleBuffer)
let image = UIImage(data: photoData!)
shotImage.image = UIImage(data: photoData!)
UIImageWriteToSavedPhotosAlbum(image!, nil, nil, nil)
}
}
}
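The photo output always delivers the full sensor frame, so to keep only what the aspect-fill preview layer showed you have to crop the result yourself. Here is a sketch of one way to do it (an assumption, not a verified fix: captureVideoLayer would need to be kept in a property, and the rotation of portrait photos is glossed over); note that in Swift 3 the conversion method is named metadataOutputRectOfInterest(for:) rather than metadataOutputRectConverted(fromLayerRect:).
import AVFoundation
import UIKit

// Hypothetical helper: crops a captured UIImage to the region that an
// aspect-fill AVCaptureVideoPreviewLayer actually displayed.
func cropToPreview(_ image: UIImage, previewLayer: AVCaptureVideoPreviewLayer) -> UIImage? {
    // Normalized (0...1) rect of the visible area in capture-output coordinates.
    let outputRect = previewLayer.metadataOutputRectConverted(fromLayerRect: previewLayer.bounds)
    guard let cgImage = image.cgImage else { return nil }

    // Scale the normalized rect up to the pixel size of the captured image.
    // This assumes the CGImage is oriented like the capture output; portrait
    // photos may need an orientation fix first.
    let width = CGFloat(cgImage.width)
    let height = CGFloat(cgImage.height)
    let cropRect = CGRect(x: outputRect.origin.x * width,
                          y: outputRect.origin.y * height,
                          width: outputRect.size.width * width,
                          height: outputRect.size.height * height)

    guard let cropped = cgImage.cropping(to: cropRect) else { return nil }
    return UIImage(cgImage: cropped, scale: image.scale, orientation: image.imageOrientation)
}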
