didFinishProcessingPhotoSampleBuffer not being called - iOS

I am trying to learn how to take and save photos using AVFoundation. So far I have been able to make a custom camera view with a button that is linked to an action to take a photo. When I tap the button, the delegate method is not called.
Here is my ViewController.swift:
class ViewController: UIViewController, AVCapturePhotoCaptureDelegate {
    @IBOutlet weak var camerView: UIView!
    @IBOutlet weak var photoButton: UIButton!
    var captureSession : AVCaptureSession?
    var sessionOutput : AVCapturePhotoOutput?
    var previewLayer : AVCaptureVideoPreviewLayer?
    var photoSettings : AVCapturePhotoSettings?

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        previewLayer?.frame = (self.camerView?.bounds)!
        previewLayer?.position = CGPoint(x: (self.camerView?.frame.width)! / 2, y: (self.camerView?.frame.height)! / 2)
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        //Capture Session
        captureSession = AVCaptureSession()
        let devices = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaTypeVideo, position: .back)
        for device in (devices?.devices)! {
            do {
                let input = try AVCaptureDeviceInput(device: device)
                if (captureSession?.canAddInput(input))! {
                    captureSession?.addInput(input)
                }
                if (captureSession?.canAddOutput(sessionOutput))! {
                    captureSession?.addOutput(sessionOutput)
                }
                previewLayer = AVCaptureVideoPreviewLayer()
                previewLayer?.session = captureSession
                self.camerView.layer.addSublayer(previewLayer!)
                self.camerView.addSubview(photoButton)
                captureSession?.startRunning()
            } catch {
                print("error occurred")
            }
        }
    }

    @IBAction func takePhoto(_ sender: UIButton) {
        photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey : AVVideoCodecJPEG])
        photoSettings?.flashMode = .on
        sessionOutput?.capturePhoto(with: photoSettings!, delegate: self)
    }

    //AVCapturePhotoCaptureDelegate Functions
    func capture(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhotoSampleBuffer photoSampleBuffer: CMSampleBuffer?, previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
        print("PhotoSampleBuffer")
    }
}
All I am trying to do in the didFinishProcessingPhotoSampleBuffer method is print() so I know it is being called. I will figure out how to save later (unless someone can point me to a good resource to learn this).
Let me know if you need any other info!

The capturePhoto method (and therefore the delegate callback) is not being called because your sessionOutput variable is nil.
To fix this, instantiate the sessionOutput variable when it is declared:
var sessionOutput = AVCapturePhotoOutput()
Also, you'll need to remove the ? after sessionOutput wherever it is used, because it is no longer an Optional:
sessionOutput.capturePhoto(with: photoSettings!, delegate: self)
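As for saving the photo later: once the delegate fires, the JPEG data can be pulled out of the sample buffer and written to the photo library. A minimal sketch using the same Swift 3-era API as the question; it assumes photo-library permission has been granted and an NSPhotoLibraryUsageDescription entry exists in Info.plist, and error handling is kept to the bare minimum:

func capture(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhotoSampleBuffer photoSampleBuffer: CMSampleBuffer?, previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
    guard error == nil, let sampleBuffer = photoSampleBuffer else {
        print("capture failed: \(String(describing: error))")
        return
    }
    // Convert the sample buffer to JPEG data, then to a UIImage.
    guard let data = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewPhotoSampleBuffer),
          let image = UIImage(data: data) else { return }
    // Write the image to the user's photo library.
    UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
}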

Related

Overlay image over custom camera

I have made a custom camera and want to overlay another image over it. I am using AVKit now to get the custom camera. I was able to overlay the image when I was using the built-in camera. This is the code for what I have for the custom camera; "newImage" is the image that I would like to overlay over the camera.
import UIKit
import AVKit

class liveView: UIViewController, AVCapturePhotoCaptureDelegate {
    @IBOutlet weak var previewView: UIView!
    @IBOutlet weak var captureImageView: UIImageView!
    var captureSession: AVCaptureSession!
    var stillImageOutput: AVCapturePhotoOutput!
    var videoPreviewLayer: AVCaptureVideoPreviewLayer!
    var newImage: UIImage!

    override func viewDidLoad() {
        super.viewDidLoad()
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        captureSession = AVCaptureSession()
        captureSession.sessionPreset = .medium
        guard let backCamera = AVCaptureDevice.default(for: AVMediaType.video)
        else {
            print("Unable to access back camera!")
            return
        }
        do {
            let input = try AVCaptureDeviceInput(device: backCamera)
            stillImageOutput = AVCapturePhotoOutput()
            if captureSession.canAddInput(input) && captureSession.canAddOutput(stillImageOutput) {
                captureSession.addInput(input)
                captureSession.addOutput(stillImageOutput)
                // videoPreviewLayer?.frame = self.newImage.accessibilityFrame
                setupLivePreview()
            }
        }
        catch let error {
            print("Error Unable to initialize back camera: \(error.localizedDescription)")
        }
    }

    func setupLivePreview() {
        videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        videoPreviewLayer.videoGravity = .resizeAspect
        videoPreviewLayer.connection?.videoOrientation = .portrait
        previewView.layer.addSublayer(videoPreviewLayer)
        DispatchQueue.global(qos: .userInitiated).async {
            self.captureSession.startRunning()
            DispatchQueue.main.async {
                self.videoPreviewLayer.frame = self.previewView.bounds
            }
        }
    }

    @IBAction func didTakePhoto(_ sender: UIBarButtonItem) {
        let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
        stillImageOutput.capturePhoto(with: settings, delegate: self)
    }

    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        guard let imageData = photo.fileDataRepresentation()
        else { return }
        let image = UIImage(data: imageData)
        captureImageView.image = image
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        self.captureSession.stopRunning()
    }
}
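No answer is recorded for this one, but since a view's subviews render above its sublayers, one common approach is simply to add a UIImageView containing newImage on top of previewView once the preview is set up. A minimal sketch under that assumption; addOverlay is a made-up helper name, called at the end of setupLivePreview():

// Hypothetical helper: puts newImage above the live preview.
func addOverlay() {
    let overlayView = UIImageView(image: newImage)
    overlayView.frame = previewView.bounds
    overlayView.contentMode = .scaleAspectFit
    // Subviews are drawn above previewView's sublayers, so the image
    // appears on top of the camera feed added in setupLivePreview().
    previewView.addSubview(overlayView)
}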

Close AVCaptureSession and open a view controller?

How can I make the camera close after the photo is taken and then open the view controller? I want to close the camera after the session; tell me which method does it. I have read a lot of articles here and written it down, and it seems to be correct, but I don't see the effect.
I put this method in viewDidDisappear. Tell me how to set it up correctly so that after I take a photo, ViewController opens.
import UIKit
import AVFoundation
import PhotoEditorSDK
import Photos

class ViewController: UIViewController {
    @IBOutlet weak var ImageGray: UIImageView!
    var session = AVCaptureSession()
    var camera : AVCaptureDevice?
    var cameraPreviewLayer : AVCaptureVideoPreviewLayer?
    var cameraCaptureOutput : AVCapturePhotoOutput?

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view.
        // let grayImage = openCVWrapper.toGray(ImageGray.image!)
        // ImageGray.image = grayImage
    }

    func stopCaptureSession() {
        if let inputs = session.inputs as? [AVCaptureDeviceInput] {
            for input in inputs {
                session.removeInput(input)
            }
            let vc = self.storyboard!.instantiateViewController(withIdentifier: "viewController")
            self.present(vc, animated: true, completion: nil)
        }
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        // Setup your camera here...
        initializeCaptureSession()
    }

    override func viewDidDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        stopCaptureSession()
    }

    @IBAction func cameraShot(_ sender: Any) {
        takePicture()
    }

    func initializeCaptureSession() {
        session.sessionPreset = AVCaptureSession.Preset.high
        camera = AVCaptureDevice.default(for: AVMediaType.video)
        do {
            let cameraCaptureInput = try AVCaptureDeviceInput(device: camera!)
            cameraCaptureOutput = AVCapturePhotoOutput()
            session.addInput(cameraCaptureInput)
            session.addOutput(cameraCaptureOutput!)
        } catch {
            print(error.localizedDescription)
        }
        cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: session)
        cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        cameraPreviewLayer?.frame = view.bounds
        cameraPreviewLayer?.connection!.videoOrientation = AVCaptureVideoOrientation.portrait
        view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
        session.startRunning()
    }

    func takePicture() {
        let settings = AVCapturePhotoSettings()
        settings.flashMode = .auto
        cameraCaptureOutput?.capturePhoto(with: settings, delegate: self)
    }

    @IBAction func AddGray(_ sender: Any) {
        let grayImage = openCVWrapper.toGray(ImageGray.image!)
        ImageGray.image = grayImage
        ImageGray.transform = ImageGray.transform.rotated(by: CGFloat(Double.pi / 2)) //90
    }

    @IBAction func addColor(_ sender: Any) {
    }
}

extension ViewController : AVCapturePhotoCaptureDelegate {
    func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
        if let unwrappedError = error {
            print(unwrappedError.localizedDescription)
        } else {
            if let sampleBuffer = photoSampleBuffer, let dataImage = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewPhotoSampleBuffer) {
                if let finalImage = UIImage(data: dataImage) {
                    self.ImageGray.image = finalImage
                }
            }
        }
    }
}
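One way to get the behaviour this question describes (a sketch, not a confirmed fix) is to stop the session and present the next controller from inside the capture delegate, once the photo has actually arrived, rather than from viewDidDisappear. Using the non-deprecated delegate method available on iOS 11+, and the "viewController" storyboard identifier already used in the question's own code:

func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
    guard error == nil,
          let data = photo.fileDataRepresentation(),
          let image = UIImage(data: data) else { return }
    // Show the captured frame, stop the camera, then move on.
    DispatchQueue.main.async {
        self.ImageGray.image = image
        self.session.stopRunning()
        let vc = self.storyboard!.instantiateViewController(withIdentifier: "viewController")
        self.present(vc, animated: true, completion: nil)
    }
}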

MLKit Text Recognition: Text Not Being Detected

I am making an iOS app where a user takes a picture, and then I want to use Google's MLKit from Firebase to detect text in the picture. I have set up a custom camera UIViewController that we'll call CameraViewController. There is a simple button that a user will press to take a picture. I have followed Firebase's documentation, here, but MLKit is not working for me. Here is the code for your reference, and then we'll talk about what the problem is.
1. Here are my imports, class delegates, and outlets:
import UIKit
import AVFoundation
import Firebase

class CameraViewController: UIViewController, AVCapturePhotoCaptureDelegate {
    var captureSession: AVCaptureSession?
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    var capturePhotoOutput: AVCapturePhotoOutput?
    @IBOutlet var previewView: UIView!
    @IBOutlet var captureButton: UIButton!
}
2. In viewDidLoad, I set up the "previewView" so that the user has a "view finder":
override func viewDidLoad() {
    super.viewDidLoad()
    let captureDevice = AVCaptureDevice.default(for: .video)!
    do {
        let input = try AVCaptureDeviceInput(device: captureDevice)
        captureSession = AVCaptureSession()
        captureSession?.addInput(input)
        videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
        videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        videoPreviewLayer?.frame = view.layer.bounds
        previewView.layer.addSublayer(videoPreviewLayer!)
        captureSession?.startRunning()
        capturePhotoOutput = AVCapturePhotoOutput()
        capturePhotoOutput?.isHighResolutionCaptureEnabled = true
        captureSession?.addOutput(capturePhotoOutput!)
    } catch {
        print(error)
    }
}
3. Here is my action for the button that takes the picture:
@IBAction func captureButtonTapped(_ sender: Any) {
    guard let capturePhotoOutput = self.capturePhotoOutput else { return }
    let photoSettings = AVCapturePhotoSettings()
    photoSettings.isAutoStillImageStabilizationEnabled = true
    photoSettings.isHighResolutionPhotoEnabled = true
    photoSettings.flashMode = .off
    capturePhotoOutput.capturePhoto(with: photoSettings, delegate: self)
}
4. This is where I receive the picture taken, using the didFinishProcessingPhoto delegate method, and start using MLKit:
func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
    guard error == nil,
          let photoSampleBuffer = photoSampleBuffer else {
        print("Error capturing photo: \(String(describing: error))")
        return
    }
    guard let imageData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: photoSampleBuffer, previewPhotoSampleBuffer: previewPhotoSampleBuffer) else {
        return
    }
    let capturedImage = UIImage(data: imageData, scale: 1.0)
    captureNormal()
    DispatchQueue.main.asyncAfter(deadline: .now() + 0.1) {
        self.captureSession?.stopRunning()
        self.processText(with: capturedImage!)
        // Here is where I call the function processText where MLKit is run
    }
}
5. Lastly, here is my function processText(with:) that uses MLKit:
func processText(with image: UIImage) {
    let vision = Vision.vision()
    let textRecognizer = vision.onDeviceTextRecognizer()
    let visionImage = VisionImage(image: image)
    textRecognizer.process(visionImage) { result, error in
        if error != nil {
            print("MLKIT ERROR - \(error)")
        } else {
            let resultText = result?.text
            print("MLKIT RESULT - \(resultText)")
        }
    }
}
Ok, that was a lot, thank you for reading all of that. Alright, so the problem is that this does not work. I do get a proper UIImage in step 4 so it's not that. Here's a screenshot of an example of what I am trying to scan...
MLKit should be able to easily detect this text. But every time I try, result?.text is always printed as nil. I'm out of ideas. Does anyone have any ideas on how to fix this? If so, thanks a lot!
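No answer is recorded here, but one frequent culprit when camera captures are fed to text recognition is orientation: a JPEG straight from the camera often stores its pixels rotated, with the upright rotation recorded only in the UIImage's imageOrientation flag, so the recognizer may be seeing sideways text. A hedged thing to try is redrawing the image so the pixels are physically upright before calling processText(with:); normalizedImage is a made-up helper name:

// Hypothetical helper: re-renders the image so its pixel data is upright,
// instead of relying on the imageOrientation flag.
func normalizedImage(_ image: UIImage) -> UIImage {
    if image.imageOrientation == .up { return image }
    UIGraphicsBeginImageContextWithOptions(image.size, false, image.scale)
    image.draw(in: CGRect(origin: .zero, size: image.size))
    let upright = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return upright ?? image
}

// Then, in the delegate callback:
// self.processText(with: self.normalizedImage(capturedImage!))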

iPhone 7+, iOS 11.2: Depth data delivery is not supported in the current configuration

This bug is driving me mad. I'm trying to produce the absolute minimal code to get AVDepthData from an iPhone 7+ using its DualCam.
I have this code:
//
//  RecorderViewController.swift
//  ios-recorder-app

import UIKit
import AVFoundation

class RecorderViewController: UIViewController {
    @IBOutlet weak var previewView: UIView!

    @IBAction func onTapTakePhoto(_ sender: Any) {
        guard let capturePhotoOutput = self.capturePhotoOutput else { return }
        let photoSettings = AVCapturePhotoSettings()
        photoSettings.isDepthDataDeliveryEnabled = true // Error
        capturePhotoOutput.capturePhoto(with: photoSettings, delegate: self)
    }

    var session: AVCaptureSession?
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    var capturePhotoOutput: AVCapturePhotoOutput?

    override func viewDidLoad() {
        super.viewDidLoad()
        AVCaptureDevice.requestAccess(for: .video, completionHandler: { _ in })
        let captureDevice = AVCaptureDevice.default(.builtInDualCamera, for: .depthData, position: .back)
        do {
            print(captureDevice!)
            let input = try AVCaptureDeviceInput(device: captureDevice!)
            self.capturePhotoOutput = AVCapturePhotoOutput()
            self.capturePhotoOutput?.isDepthDataDeliveryEnabled = true // Error
            self.session = AVCaptureSession()
            self.session?.addInput(input)
            self.videoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.session!)
            self.videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
            self.videoPreviewLayer?.frame = view.layer.bounds
            previewView.layer.addSublayer(self.videoPreviewLayer!)
            self.session?.addOutput(self.capturePhotoOutput!)
            self.session?.startRunning()
        } catch {
            print(error)
        }
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
}

extension RecorderViewController : AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        print(photo.depthData)
    }
}
If I comment out the lines that are marked with "Error" the code works as I would expect, and prints nil for depthData.
However, leaving the lines as they are, I get an exception. The error message states: -[AVCapturePhotoOutput setDepthDataDeliveryEnabled:] Depth data delivery is not supported in the current configuration.
How do I change the "current configuration" so that depth delivery is supported?
I've watched this video: https://developer.apple.com/videos/play/wwdc2017/507/ which was helpful, and I believe I've followed the exact steps required to make this work.
Any tips would be gratefully received!
There are two things that I needed to fix:
1. Set a sessionPreset to a format that supports depth, such as .photo.
2. Add the capturePhotoOutput to the session before setting .isDepthDataDeliveryEnabled = true.
Here is my minimal code for getting depth with photos:
//
//  RecorderViewController.swift
//  ios-recorder-app
//

import UIKit
import AVFoundation

class RecorderViewController: UIViewController {
    @IBOutlet weak var previewView: UIView!

    @IBAction func onTapTakePhoto(_ sender: Any) {
        guard let capturePhotoOutput = self.capturePhotoOutput else { return }
        let photoSettings = AVCapturePhotoSettings()
        photoSettings.isDepthDataDeliveryEnabled = true
        capturePhotoOutput.capturePhoto(with: photoSettings, delegate: self)
    }

    var session: AVCaptureSession?
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    var capturePhotoOutput: AVCapturePhotoOutput?

    override func viewDidLoad() {
        super.viewDidLoad()
        AVCaptureDevice.requestAccess(for: .video, completionHandler: { _ in })
        let captureDevice = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back)
        print(captureDevice!.activeDepthDataFormat)
        do {
            let input = try AVCaptureDeviceInput(device: captureDevice!)
            self.capturePhotoOutput = AVCapturePhotoOutput()
            self.session = AVCaptureSession()
            self.session?.beginConfiguration()
            self.session?.sessionPreset = .photo
            self.session?.addInput(input)
            self.videoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.session!)
            self.videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
            self.videoPreviewLayer?.frame = self.view.layer.bounds
            self.previewView.layer.addSublayer(self.videoPreviewLayer!)
            self.session?.addOutput(self.capturePhotoOutput!)
            self.session?.commitConfiguration()
            self.capturePhotoOutput?.isDepthDataDeliveryEnabled = true
            self.session?.startRunning()
        }
        catch {
            print(error)
        }
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
}

extension RecorderViewController : AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        print(photo.depthData)
    }
}

How to capture an image of only the custom camera view area in Swift 3

I have a custom camera view. What I want is to capture only the image inside the custom camera view when I press the button, but it takes the whole screen, not just the camera view. I have also set the preview layer's frame to the cameraView's frame. Here is my code:
class CustomCameraVC: UIViewController, AVCapturePhotoCaptureDelegate {
    @IBOutlet weak var cameraView: UIView!
    @IBOutlet weak var shotImage: UIImageView!
    var captureSession: AVCaptureSession!
    var imageOutput: AVCapturePhotoOutput!

    override func viewDidLoad() {
        super.viewDidLoad()
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        captureSession = AVCaptureSession()
        captureSession.sessionPreset = AVCaptureSessionPresetMedium
        imageOutput = AVCapturePhotoOutput()
        let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
        do {
            let input = try AVCaptureDeviceInput(device: device)
            if captureSession.canAddInput(input) {
                captureSession.addInput(input)
                if captureSession.canAddOutput(imageOutput) {
                    captureSession.addOutput(imageOutput)
                    captureSession.startRunning()
                    let captureVideoLayer: AVCaptureVideoPreviewLayer = AVCaptureVideoPreviewLayer.init(session: captureSession)
                    captureVideoLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
                    captureVideoLayer.frame = self.cameraView.layer.frame
                    self.cameraView.layer.addSublayer(captureVideoLayer)
                }
            }
        } catch {
            print("error")
        }
    }

    @IBAction func takePhoto(_ sender: UIButton) {
        let settingsForMonitoring = AVCapturePhotoSettings()
        settingsForMonitoring.flashMode = .auto
        settingsForMonitoring.isAutoStillImageStabilizationEnabled = true
        settingsForMonitoring.isHighResolutionPhotoEnabled = false
        imageOutput?.capturePhoto(with: settingsForMonitoring, delegate: self)
    }

    func capture(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhotoSampleBuffer photoSampleBuffer: CMSampleBuffer?, previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
        if let photoSampleBuffer = photoSampleBuffer {
            let photoData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: photoSampleBuffer, previewPhotoSampleBuffer: previewPhotoSampleBuffer)
            let image = UIImage(data: photoData!)
            shotImage.image = UIImage(data: photoData!)
            UIImageWriteToSavedPhotosAlbum(image!, nil, nil, nil)
        }
    }
}
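No accepted fix is shown here, but the usual approach to this problem is to crop the captured image to the region the preview layer was actually displaying. AVCaptureVideoPreviewLayer can convert its visible rect into normalized image coordinates via metadataOutputRectConverted(fromLayerRect:) (named metadataOutputRectOfInterest(for:) in the Swift 3 era of the code above). A sketch under that assumption; it presumes the preview layer is kept in a property rather than the local captureVideoLayer above, and cropToPreview is a made-up helper:

// Hypothetical helper: crops a captured UIImage to the part that the
// preview layer was showing on screen.
func cropToPreview(_ image: UIImage, previewLayer: AVCaptureVideoPreviewLayer) -> UIImage? {
    // Normalized (0...1) rect of the image visible in the layer,
    // taking the layer's videoGravity into account.
    let outputRect = previewLayer.metadataOutputRectConverted(fromLayerRect: previewLayer.bounds)
    guard let cgImage = image.cgImage else { return nil }
    let width = CGFloat(cgImage.width)
    let height = CGFloat(cgImage.height)
    // Scale the normalized rect up to pixel coordinates and crop.
    let cropRect = CGRect(x: outputRect.origin.x * width,
                          y: outputRect.origin.y * height,
                          width: outputRect.size.width * width,
                          height: outputRect.size.height * height)
    guard let cropped = cgImage.cropping(to: cropRect) else { return nil }
    return UIImage(cgImage: cropped, scale: image.scale, orientation: image.imageOrientation)
}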
