I am writing an app to get the depth data and disparity data from pictures taken with the camera. I can get the disparity data, but the depth data always returns nil. I need to get the depth information and save it as a JPG.
I have tried the code below, where the user can switch between the front and back cameras and take pictures; the captured picture is then processed.
import UIKit
import AVFoundation

class ViewController: UIViewController {

    @IBOutlet weak var ImageView: UIView!

    var img: UIImage?
    var rgbImage: UIImage?
    var captureSession: AVCaptureSession?
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    var backCamera = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back)
    var frontCamera = AVCaptureDevice.default(.builtInTrueDepthCamera, for: .video, position: .front)
    var capturePhotoOut: AVCapturePhotoOutput?

    override func viewDidLoad() {
        super.viewDidLoad()
        if #available(iOS 10.2, *) {
            let captureDevice = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back)
            do {
                let input = try AVCaptureDeviceInput(device: captureDevice!)
                captureSession = AVCaptureSession()
                captureSession?.addInput(input)
                videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
                videoPreviewLayer?.frame = view.layer.bounds
                ImageView.layer.addSublayer(videoPreviewLayer!)
                captureSession?.startRunning()
            } catch {
                print(error)
            }
        }
        capturePhotoOut = AVCapturePhotoOutput()
        capturePhotoOut?.isHighResolutionCaptureEnabled = true
        captureSession?.sessionPreset = .photo
        captureSession?.addOutput(capturePhotoOut!)
        capturePhotoOut!.isDepthDataDeliveryEnabled = capturePhotoOut!.isDepthDataDeliverySupported
        capturePhotoOut!.isPortraitEffectsMatteDeliveryEnabled = capturePhotoOut!.isPortraitEffectsMatteDeliverySupported
    }

    @IBAction func imageCapture(_ sender: Any) {
        guard let capturePhotoOutput = self.capturePhotoOut else { return }
        let photoSettings = AVCapturePhotoSettings()
        photoSettings.isAutoStillImageStabilizationEnabled = true
        photoSettings.isHighResolutionPhotoEnabled = true
        photoSettings.isDepthDataDeliveryEnabled = true
        photoSettings.isPortraitEffectsMatteDeliveryEnabled = true
        capturePhotoOutput.capturePhoto(with: photoSettings, delegate: self)
    }

    override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
        let vc = segue.destination as! DepthImageView
        vc.img = img
        vc.rgbImg = rgbImage
    }
}
extension ViewController: AVCapturePhotoCaptureDelegate {

    public func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        guard error == nil else { return }
        guard let imageData = photo.fileDataRepresentation() else { return }
        let detailImage = UIImage(data: imageData, scale: 1.0)
        rgbImage = detailImage
        let nsData = imageData as NSData
        let ptr = nsData.bytes.assumingMemoryBound(to: UInt8.self)
        let cfDataset = CFDataCreate(nil, ptr, imageData.count)
        guard let source = CGImageSourceCreateWithData(cfDataset!, nil) else { return }
        guard let auxDataInfo = CGImageSourceCopyAuxiliaryDataInfoAtIndex(source, 0, kCGImageAuxiliaryDataTypeDepth) as? [String: AnyObject] else {
            return
        }
        var depthData: AVDepthData
        do {
            depthData = try AVDepthData(fromDictionaryRepresentation: auxDataInfo)
            if depthData.depthDataType != kCVPixelFormatType_DepthFloat32 {
                depthData = depthData.converting(toDepthDataType: kCVPixelFormatType_DepthFloat32)
            }
            let depthDataMap = depthData.depthDataMap
            let ciImage = CIImage(cvPixelBuffer: depthDataMap)
            let depthDataMapImage = UIImage(ciImage: ciImage, scale: 1.0, orientation: .down)
            img = depthDataMapImage
            self.performSegue(withIdentifier: "ImageViewScreen", sender: self)
        } catch {
            print("Error")
        }
    }
}
I always get nil at the auxDataInfo guard.

The AVCapturePhoto already contains the AVDepthData. Instead of re-parsing the file data for the auxiliary dictionary, read the depth information directly from the photo:

let depthData = photo.depthData
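From there, a minimal sketch of the delegate method using photo.depthData directly and writing the map out as a JPG (the Documents-directory path is just an example, and depth delivery must already be enabled on both the output and the settings as in the question's code):

func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
    guard error == nil, var depthData = photo.depthData else { return }
    // Convert disparity to 32-bit depth if needed.
    if depthData.depthDataType != kCVPixelFormatType_DepthFloat32 {
        depthData = depthData.converting(toDepthDataType: kCVPixelFormatType_DepthFloat32)
    }
    // Render the depth map to a CGImage, then encode it as JPEG.
    let ciImage = CIImage(cvPixelBuffer: depthData.depthDataMap)
    let context = CIContext()
    guard let cgImage = context.createCGImage(ciImage, from: ciImage.extent),
          // jpegData(compressionQuality:) requires iOS 12; use UIImageJPEGRepresentation on earlier systems.
          let jpegData = UIImage(cgImage: cgImage).jpegData(compressionQuality: 0.9) else { return }
    let url = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
        .appendingPathComponent("depth.jpg")
    try? jpegData.write(to: url)
}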
I want to create an iOS application that can record and save RGB and depth data. I have been able to capture both from the dual camera and preview them on screen in real time. Now I want to save them to the library as two sequences (one RGB sequence and one depth-map sequence).
So my question is: how can I save this depth information to the iPhone gallery as a video or sequence, saving the RGB info at the same time, for future deep processing?
I am working with Xcode 10.2, Swift 5 and an iPhone XS.
import UIKit
import AVFoundation

// Inferred from usage below; the original post references this enum without defining it.
enum PreviewMode: Int {
    case original = 0 // RGB
    case depth
}

class ViewController: UIViewController {

    @IBOutlet weak var previewView: UIImageView!
    @IBOutlet weak var previewModeControl: UISegmentedControl!

    var previewMode = PreviewMode.original // original (RGB) or depth
    let session = AVCaptureSession()
    let dataOutputQueue = DispatchQueue(label: "video data queue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
    var background: CIImage?
    var depthMap: CIImage?
    var scale: CGFloat = 0.0

    override func viewDidLoad() {
        super.viewDidLoad()
        previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
        configureCaptureSession()
        session.startRunning()
    }

    override var shouldAutorotate: Bool {
        return false
    }

    func configureCaptureSession() {
        session.beginConfiguration()

        //Add input to the session
        guard let camera = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .unspecified) else {
            fatalError("No depth video camera available")
        }
        session.sessionPreset = .photo
        do {
            let cameraInput = try AVCaptureDeviceInput(device: camera)
            if session.canAddInput(cameraInput) {
                session.addInput(cameraInput)
            } else {
                fatalError("Error adding input device to session")
            }
        } catch {
            fatalError(error.localizedDescription)
        }

        //Add video output to the session
        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
        videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
        if session.canAddOutput(videoOutput) {
            session.addOutput(videoOutput)
        } else {
            fatalError("Error adding output to session")
        }
        let videoConnection = videoOutput.connection(with: .video)
        videoConnection?.videoOrientation = .portrait

        //Add depth output to the session
        let depthOutput = AVCaptureDepthDataOutput()
        //Set the current view controller as the delegate for the new object
        depthOutput.setDelegate(self, callbackQueue: dataOutputQueue)
        depthOutput.isFilteringEnabled = true //interpolate over holes in the data
        if session.canAddOutput(depthOutput) {
            session.addOutput(depthOutput)
        } else {
            fatalError("Error adding output to session")
        }
        let depthConnection = depthOutput.connection(with: .depthData)
        depthConnection?.videoOrientation = .portrait

        let outputRect = CGRect(x: 0, y: 0, width: 1, height: 1)
        let videoRect = videoOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
        let depthRect = depthOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
        scale = max(videoRect.width, videoRect.height) / max(depthRect.width, depthRect.height)

        do {
            try camera.lockForConfiguration()
            if let frameDuration = camera.activeDepthDataFormat?.videoSupportedFrameRateRanges.first?.minFrameDuration {
                camera.activeVideoMinFrameDuration = frameDuration
            }
            camera.unlockForConfiguration()
        } catch {
            fatalError(error.localizedDescription)
        }

        session.commitConfiguration()
    }

    @IBAction func previewModeChanged(_ sender: UISegmentedControl) {
        previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
    }
}
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        let image = CIImage(cvPixelBuffer: pixelBuffer!)
        let previewImage: CIImage
        switch previewMode {
        case .original:
            previewImage = image
        case .depth:
            previewImage = depthMap ?? image
        }
        let displayImage = UIImage(ciImage: previewImage)
        DispatchQueue.main.async { [weak self] in
            self?.previewView.image = displayImage
        }
    }
}

extension ViewController: AVCaptureDepthDataOutputDelegate {
    func depthDataOutput(_ output: AVCaptureDepthDataOutput, didOutput depthData: AVDepthData, timestamp: CMTime, connection: AVCaptureConnection) {
        if previewMode == .original {
            return
        }
        var convertedDepth: AVDepthData
        if depthData.depthDataType != kCVPixelFormatType_DisparityFloat32 {
            convertedDepth = depthData.converting(toDepthDataType: kCVPixelFormatType_DisparityFloat32)
        } else {
            convertedDepth = depthData
        }
        let pixelBuffer = convertedDepth.depthDataMap
        // clamp() is a custom CVPixelBuffer extension (it clamps the buffer's values to 0...1); it is not part of AVFoundation.
        pixelBuffer.clamp()
        let depthMap = CIImage(cvPixelBuffer: pixelBuffer)
        DispatchQueue.main.async { [weak self] in
            self?.depthMap = depthMap
        }
    }
}
Actual result: the selected CIImage (RGB image or depth map) is previewed on screen in real time.
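One possible direction for the saving part (a sketch, not from the original post): render each frame into a pixel buffer and append it to a movie file with AVAssetWriter, using one writer for the RGB sequence and one for the depth maps. This assumes the depth maps are converted to 32BGRA buffers elsewhere (for example by rendering the CIImage into a BGRA CVPixelBuffer); the URL and frame timing are placeholders.

import AVFoundation

// One writer per stream: one instance for RGB frames, one for depth maps.
final class SequenceWriter {
    private let writer: AVAssetWriter
    private let input: AVAssetWriterInput
    private let adaptor: AVAssetWriterInputPixelBufferAdaptor

    init(url: URL, width: Int, height: Int) throws {
        writer = try AVAssetWriter(outputURL: url, fileType: .mov)
        input = AVAssetWriterInput(mediaType: .video, outputSettings: [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: width,
            AVVideoHeightKey: height
        ])
        input.expectsMediaDataInRealTime = true
        adaptor = AVAssetWriterInputPixelBufferAdaptor(
            assetWriterInput: input,
            sourcePixelBufferAttributes: [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA])
        writer.add(input)
        writer.startWriting()
        writer.startSession(atSourceTime: .zero)
    }

    // Call from the capture callbacks with the frame's presentation time.
    func append(_ buffer: CVPixelBuffer, at time: CMTime) {
        guard input.isReadyForMoreMediaData else { return } // drop the frame if the writer is busy
        adaptor.append(buffer, withPresentationTime: time)
    }

    func finish(_ completion: @escaping () -> Void) {
        input.markAsFinished()
        writer.finishWriting(completionHandler: completion)
    }
}

The finished .mov files could then be saved to the photo library with PHPhotoLibrary.shared().performChanges and PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL:).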
I have implemented the camera preview using AVFoundation and it's working fine, but I am having a hard time switching between the back and front cameras. I have added a switch button to the bottom bar. By default it's the back camera, and I want to switch to the front one. How can I do that?
class FifteenSecsViewController: UIViewController, AVCaptureFileOutputRecordingDelegate {

    @IBOutlet weak var camPreview: UIView!
    // Referenced in viewDidLoad but not declared in the original post.
    @IBOutlet weak var switchCameraButton: UIButton!

    let captureSession = AVCaptureSession()
    let movieOutput = AVCaptureMovieFileOutput()
    var previewLayer: AVCaptureVideoPreviewLayer!
    var activeInput: AVCaptureDeviceInput!
    var outputURL: URL!

    override func viewDidLoad() {
        super.viewDidLoad()
        if setupSession() {
            setupPreview()
            startSession()
        }
        self.switchCameraButton.addTarget(self, action: #selector(switchButtonTapped), for: .touchUpInside)
    }

    func setupSession() -> Bool {
        captureSession.sessionPreset = AVCaptureSession.Preset.high

        // Setup Camera
        let camera: AVCaptureDevice?
        camera = AVCaptureDevice.default(for: .video)
        do {
            let input = try AVCaptureDeviceInput(device: camera!)
            if captureSession.canAddInput(input) {
                captureSession.addInput(input)
                activeInput = input
            }
        } catch {
            print("Error setting device video input: \(error)")
            return false
        }

        // Setup Microphone
        let microphone = AVCaptureDevice.default(for: .audio)
        do {
            let micInput = try AVCaptureDeviceInput(device: microphone!)
            if captureSession.canAddInput(micInput) {
                captureSession.addInput(micInput)
            }
        } catch {
            print("Error setting device audio input: \(error)")
            return false
        }

        // Movie output
        let seconds: Int64 = 3
        let maxDuration = CMTime(seconds: Double(seconds), preferredTimescale: 1)
        movieOutput.maxRecordedDuration = maxDuration
        if captureSession.canAddOutput(movieOutput) {
            captureSession.addOutput(movieOutput)
        }
        return true
    }

    func setupPreview() {
        // Configure previewLayer
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.frame = camPreview.bounds
        previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
        camPreview.layer.addSublayer(previewLayer)
    }

    //MARK:- Camera Session
    func videoQueue() -> DispatchQueue {
        // Referenced but not defined in the original post; a background queue is assumed.
        return DispatchQueue.global(qos: .userInitiated)
    }

    func startSession() {
        if !captureSession.isRunning {
            videoQueue().async {
                self.captureSession.startRunning()
            }
        }
    }

    @objc func switchButtonTapped() {
        // what to write here??
    }

    // Required by AVCaptureFileOutputRecordingDelegate; handling omitted in the original post.
    func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
    }
}
The function switchButtonTapped is the action target of the UIButton. If I put this code in the button handler:
@objc func switchButtonTapped() {
    if setupSession() {
        setupPreview()
        startSession()
    }
}
the camera preview shows a white screen and gets stuck.
Try this code:

func switchCamera() {
    guard let session = session,
          let currentInput = session.inputs.first as? AVCaptureDeviceInput else { return }
    session.beginConfiguration()
    session.removeInput(currentInput)
    let newPosition: AVCaptureDevice.Position = currentInput.device.position == .back ? .front : .back
    if let newCameraDevice = getCamera(with: newPosition),
       let newVideoInput = try? AVCaptureDeviceInput(device: newCameraDevice) {
        session.addInput(newVideoInput)
    }
    session.commitConfiguration()
}

func getCamera(with position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    // devices(for:) already returns [AVCaptureDevice]; the cast in the original was redundant.
    return AVCaptureDevice.devices(for: AVMediaType.video).first { $0.position == position }
}
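For instance, wired to the question's button handler (a sketch; the selector name follows the question's code):

@objc func switchButtonTapped() {
    switchCamera()
}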
To begin, create a device input for the front camera:
let frontDevice: AVCaptureDevice? = {
    for device in AVCaptureDevice.devices(for: AVMediaType.video) {
        if device.position == .front {
            return device
        }
    }
    return nil
}()

lazy var frontDeviceInput: AVCaptureDeviceInput? = {
    if let _frontDevice = self.frontDevice {
        return try? AVCaptureDeviceInput(device: _frontDevice)
    }
    return nil
}()
Then, in your switchButtonTapped, if there is a front camera you can switch between it and the back one:
func switchButtonTapped() {
    if let _frontDeviceInput = frontDeviceInput {
        captureSession.beginConfiguration()
        if let _currentInput = captureSession.inputs.first as? AVCaptureDeviceInput {
            captureSession.removeInput(_currentInput)
            let newDeviceInput = (_currentInput.device.position == .front) ? activeInput : _frontDeviceInput
            captureSession.addInput(newDeviceInput!)
        }
        captureSession.commitConfiguration()
    }
}
If you need more details, don't hesitate.
I have an app set up to use the camera for a photo (on a timer basis) to detect the presence of a face. The detection process works fairly well when I feed the app a photo that I have added to assets. However, when I attempt to use the output of the camera directly, or even after saving the image to a file, the resulting image is so dark that face recognition is completely unreliable.
If I display the image as seen by the camera, it looks correct. I captured the following two images - one from the camera as seen live, the other of the same view after the image was created from AVCapturePhotoOutput. The same darkness happens if I simply display the captured image in an image view.
Note the comment: "I put the breakpoint here and took a screen shot". Then I took the second screen shot when the code completed. These were taken in HIGH light.
Here's the basic code:
import UIKit
import AVFoundation
import Vision // needed for VNDetectFaceRectanglesRequest below

class CRSFaceRecognitionViewController: UIViewController, UIImagePickerControllerDelegate {

    var sentBy: String?

    //timers
    var faceTimer: Timer?
    var frvcTimer: Timer?

    //capture
    var captureSession = AVCaptureSession()
    var settings = AVCapturePhotoSettings()
    var backCamera: AVCaptureDevice?
    var frontCamera: AVCaptureDevice?
    var currentCamera: AVCaptureDevice?
    var photoOutput: AVCapturePhotoOutput?
    var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
    var image: UIImage?
    var outputImage: UIImage?

    @IBOutlet weak var imageView: UIImageView!

    //MARK: - Setup
    override func viewDidLoad() {
        super.viewDidLoad()
    }//viewDidLoad

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(true)
    }//viewWillAppear

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(true)
        //check for camera
        if UIImagePickerController.isSourceTypeAvailable(UIImagePickerControllerSourceType.camera) {
            setupCaptureSession()
            setupDevices()
            setupInputOutput()
            setupPreviewLayer()
            startRunningCaptureSession()
            photoOutput?.capturePhoto(with: settings, delegate: self)
        } else {
            print("Camera not present")
        }
    }//viewDidAppear

    //MARK: - Video
    @objc func showFaceRecognitionViewController() {
        //all this does is present the image in a new ViewController imageView
        performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
    }//showFaceRecognitionViewController

    func setupCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
    }//setupCaptureSession

    func setupDevices() {
        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
        let devices = deviceDiscoverySession.devices
        for device in devices {
            if device.position == AVCaptureDevice.Position.back {
                backCamera = device
            } else if device.position == AVCaptureDevice.Position.front {
                frontCamera = device
            }//if else
        }//for in
        currentCamera = frontCamera
    }//setupDevices

    func setupInputOutput() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
            captureSession.addInput(captureDeviceInput)
            photoOutput = AVCapturePhotoOutput()
            photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: { (success, error) in
                print("in photoOutput completion handler")
            })
            captureSession.addOutput(photoOutput!)
        } catch {
            print("Error creating AVCaptureDeviceInput:", error)
        }//do catch
    }//setupInputOutput

    func setupPreviewLayer() {
        cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        cameraPreviewLayer?.frame = view.frame
        view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
    }//setupPreviewLayer

    func startRunningCaptureSession() {
        captureSession.startRunning()
    }//startRunningCaptureSession

    //MARK: - Segue
    override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
        if segue.identifier == "showSavedCameraPhoto" {
            let controller = segue.destination as! JustToSeeThePhotoViewController
            controller.inImage = outputImage
        }//if segue
    }//prepare

    //MARK: - Look for Faces
    func findTheFaces() {
        let myView: UIView = self.view
        guard let outImage = outputImage else { return }
        let imageView = UIImageView(image: outImage)
        imageView.contentMode = .scaleAspectFit
        let scaledHeight = myView.frame.width / outImage.size.width * outImage.size.height
        imageView.frame = CGRect(x: 0, y: 0, width: myView.frame.width, height: myView.frame.height)
        imageView.backgroundColor = UIColor.blue
        myView.addSubview(imageView)
        let request = VNDetectFaceRectanglesRequest { (req, err) in
            if let err = err {
                print("VNDetectFaceRectanglesRequest failed to run:", err)
                return
            }//if let err
            print(req.results ?? "req.results is empty")
            req.results?.forEach({ (res) in
                DispatchQueue.main.async {
                    guard let faceObservation = res as? VNFaceObservation else { return }
                    let x = myView.frame.width * faceObservation.boundingBox.origin.x
                    let width = myView.frame.width * faceObservation.boundingBox.width
                    let height = scaledHeight * faceObservation.boundingBox.height
                    let y = scaledHeight * (1 - faceObservation.boundingBox.origin.y) - height
                    let redView = UIView()
                    redView.backgroundColor = .red
                    redView.alpha = 0.4
                    redView.frame = CGRect(x: x, y: y, width: width, height: height)
                    myView.addSubview(redView)
                    print("faceObservation bounding box:")
                    print(faceObservation.boundingBox)
                    //if you get here, then you have a face bounding box
                }//main
            })//forEach block
        }//let request
        guard let cgImage = outImage.cgImage else { return }
        DispatchQueue.global(qos: .utility).async {
            let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
            do {
                try handler.perform([request])
                print("handler request was successful")
                self.performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
            } catch let reqErr {
                print("Failed to perform request:", reqErr)
            }
        }//DispatchQueue
    }//findTheFaces

    //MARK: - Memory
    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }//didReceiveMemoryWarning
}//class
extension CRSFaceRecognitionViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation() {
            print(imageData)
            outputImage = UIImage(data: imageData)
            //
            //I put breakpoint here and took a screen shot
            //
            if let outImage = outputImage?.updateImageOrientationUpSide() {
                self.outputImage = outImage
            }
            DispatchQueue.main.async {
                self.findTheFaces()
            }
        }//if let imageData
    }//photoOutput
}//extension

extension UIImage {
    //you need to do this to ensure that the image is in portrait mode
    //the face recognition method will not work if the face is horizontal
    func updateImageOrientationUpSide() -> UIImage? {
        if self.imageOrientation == .up {
            return self
        }
        UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
        self.draw(in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
        if let normalizedImage: UIImage = UIGraphicsGetImageFromCurrentImageContext() {
            UIGraphicsEndImageContext()
            return normalizedImage
        }
        UIGraphicsEndImageContext()
        return nil
    }//updateImageOrientationUpSide
}//image
I must be doing something wrong with the camera capture. Any help would be appreciated. Swift 4, iOS 11.2.5, Xcode 9.2
I would try adding a delay between startRunningCaptureSession() and photoOutput?.capturePhoto(with: settings, delegate: self) so the auto-exposure has time to settle. For example:

startRunningCaptureSession()
DispatchQueue.main.asyncAfter(deadline: .now() + .seconds(4)) {
    // take a photo once the session has been running for a few seconds
    self.photoOutput?.capturePhoto(with: self.settings, delegate: self)
}
It appears as though I had too many async pieces. I broke the code into separate functions for each major piece, async or not, and put them all into a DispatchGroup. That seems to have solved the issue.
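For instance, a minimal sketch of that sequencing (assumed structure; the function names come from the question, and sessionQueue is a hypothetical serial queue):

let setupGroup = DispatchGroup()
let sessionQueue = DispatchQueue(label: "session.setup")

setupGroup.enter()
sessionQueue.async {
    self.setupCaptureSession()
    self.setupDevices()
    self.setupInputOutput()
    setupGroup.leave()
}

// Only start the preview and capture once setup has finished.
setupGroup.notify(queue: .main) {
    self.setupPreviewLayer()
    self.startRunningCaptureSession()
    self.photoOutput?.capturePhoto(with: self.settings, delegate: self)
}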
In my code, Xcode says AVCaptureStillImageOutput is deprecated, but I don't know how to replace it with AVCapturePhotoOutput.
I know that the var output is the problem and the one I have to replace.
Thanks for your help.
import UIKit
import AVFoundation

class CameraView: UIViewController, UIImagePickerControllerDelegate {

    var session: AVCaptureSession?
    var input: AVCaptureDeviceInput?
    var output: AVCaptureStillImageOutput?
    var previewLayer: AVCaptureVideoPreviewLayer?

    // cameraView is referenced below but not declared in the original post; an outlet is assumed.
    @IBOutlet weak var cameraView: UIView!
    @IBOutlet weak var prendreBouton: UIButton!
    @IBOutlet weak var previewImage: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        //Initialize session and output variables; this is necessary
        session = AVCaptureSession()
        session?.sessionPreset = AVCaptureSession.Preset.photo
        output = AVCaptureStillImageOutput()
        let camera = getDevice(position: .back)
        do {
            input = try AVCaptureDeviceInput(device: camera!)
        } catch let error as NSError {
            print(error)
            input = nil
        }
        if session?.canAddInput(input!) == true {
            session?.addInput(input!)
            output?.outputSettings = [AVVideoCodecKey: AVVideoCodecType.jpeg]
            if session?.canAddOutput(output!) == true {
                session?.addOutput(output!)
                previewLayer = AVCaptureVideoPreviewLayer(session: session!)
                previewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
                previewLayer?.frame = self.view.bounds
                previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
                cameraView.layer.addSublayer(previewLayer!)
                session?.startRunning()
            }
        }
        //Select library photo
    }

    //Get the device (front or back)
    func getDevice(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
        let devices: NSArray = AVCaptureDevice.devices() as NSArray
        for de in devices {
            let deviceConverted = de as! AVCaptureDevice
            if deviceConverted.position == position {
                return deviceConverted
            }
        }
        return nil
    }

    @IBAction func takeAPicture(_ sender: Any) {
        if let videoConnection = output?.connection(with: AVMediaType.video) {
            output?.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (sampleBuffer, error) in
                let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer!)
                let dataProvider = CGDataProvider(data: imageData! as CFData)
                let cgImageRef = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
                let image = UIImage(cgImage: cgImageRef!, scale: 1.0, orientation: .right)
                // do something with image
            })
        }
    }
}
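A minimal migration sketch (not from the original thread; it assumes an iOS 11+ deployment target and reuses the names from the question): swap the output type for AVCapturePhotoOutput, trigger the capture with AVCapturePhotoSettings, and receive the result through AVCapturePhotoCaptureDelegate.

import UIKit
import AVFoundation

class CameraView: UIViewController {

    var session: AVCaptureSession?
    var photoOutput: AVCapturePhotoOutput? // replaces AVCaptureStillImageOutput

    @IBOutlet weak var previewImage: UIImageView!

    func configureOutput() {
        photoOutput = AVCapturePhotoOutput()
        if let output = photoOutput, session?.canAddOutput(output) == true {
            session?.addOutput(output)
        }
    }

    @IBAction func takeAPicture(_ sender: Any) {
        // JPEG settings replace the old outputSettings dictionary.
        let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
        photoOutput?.capturePhoto(with: settings, delegate: self)
    }
}

extension CameraView: AVCapturePhotoCaptureDelegate {
    // Replaces captureStillImageAsynchronously's completion handler.
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        guard error == nil,
              let data = photo.fileDataRepresentation(),
              let image = UIImage(data: data) else { return }
        previewImage.image = image // do something with image
    }
}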
So lately I have been trying to implement switching between the back and front cameras in Swift 3, with no luck.
Currently, my default view is the back camera; I can take pictures with it and then retake. But can anyone show me how to either double-tap the screen to switch cameras or simply use an assigned button to switch them? Thank you!
import UIKit
import AVFoundation
import FirebaseDatabase

class CameraView: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    override var prefersStatusBarHidden: Bool {
        return true
    }

    var captureSession: AVCaptureSession!
    var stillImageOutput: AVCaptureStillImageOutput!
    var previewLayer: AVCaptureVideoPreviewLayer!

    @IBOutlet var cameraView: UIView!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view.
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        previewLayer?.frame = cameraView.bounds
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        captureSession = AVCaptureSession()
        captureSession?.sessionPreset = AVCaptureSessionPreset1920x1080
        let backCamera = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
        let error: NSError? = nil
        do {
            let input = try! AVCaptureDeviceInput(device: backCamera)
            if error == nil && captureSession?.canAddInput(input) != nil {
                captureSession?.addInput(input)
                stillImageOutput = AVCaptureStillImageOutput()
                stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
                if captureSession?.canAddOutput(stillImageOutput) != nil {
                    captureSession?.addOutput(stillImageOutput)
                    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
                    previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
                    previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.portrait
                    cameraView.layer.addSublayer(previewLayer)
                    captureSession?.startRunning()
                }
            }
        } catch {
        }
    }

    @IBOutlet var tempImageView: UIImageView!

    @IBAction func didPressTakePhoto(_ sender: UIButton) {
        if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
            videoConnection.videoOrientation = AVCaptureVideoOrientation.portrait
            stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (sampleBuffer, error) in
                if sampleBuffer != nil {
                    let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
                    let dataProvider = CGDataProvider(data: imageData as! CFData)
                    let cgImageRef = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
                    let image = UIImage(cgImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.right)
                    self.tempImageView.image = image
                    self.tempImageView.isHidden = false
                }
            })
        }
    }

    var didTakePhoto = Bool()

    @IBAction func didPressTakeAnother(_ sender: UIButton) {
        if didTakePhoto == true {
            tempImageView.isHidden = true
            didTakePhoto = false
        } else {
            captureSession?.startRunning()
            didTakePhoto = true
        }
    }
}
I don't see any problems here - this is a working solution:
import Foundation
import UIKit
import AVFoundation

class MainViewController: UIViewController {

    var tempImage: UIImageView?
    var captureSession: AVCaptureSession?
    var stillImageOutput: AVCaptureStillImageOutput?
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    var currentCaptureDevice: AVCaptureDevice?
    var usingFrontCamera = false

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
        loadCamera()
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        videoPreviewLayer!.frame = self.cameraPreviewSurface.bounds
    }

    @IBAction func switchButtonAction(_ sender: Any) {
        usingFrontCamera = !usingFrontCamera
        loadCamera()
    }

    func getFrontCamera() -> AVCaptureDevice? {
        let videoDevices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo)
        for device in videoDevices! {
            let device = device as! AVCaptureDevice
            if device.position == AVCaptureDevicePosition.front {
                return device
            }
        }
        return nil
    }

    func getBackCamera() -> AVCaptureDevice {
        return AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
    }

    func loadCamera() {
        if captureSession == nil {
            captureSession = AVCaptureSession()
            captureSession!.sessionPreset = AVCaptureSessionPresetPhoto
        }
        var error: NSError?
        var input: AVCaptureDeviceInput!
        currentCaptureDevice = (usingFrontCamera ? getFrontCamera() : getBackCamera())
        do {
            input = try AVCaptureDeviceInput(device: currentCaptureDevice)
        } catch let error1 as NSError {
            error = error1
            input = nil
            print(error!.localizedDescription)
        }
        // Remove the existing inputs before adding the new one.
        for i: AVCaptureDeviceInput in (self.captureSession?.inputs as! [AVCaptureDeviceInput]) {
            self.captureSession?.removeInput(i)
        }
        if error == nil && captureSession!.canAddInput(input) {
            captureSession!.addInput(input)
            stillImageOutput = AVCaptureStillImageOutput()
            stillImageOutput?.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
            if captureSession!.canAddOutput(stillImageOutput) {
                captureSession!.addOutput(stillImageOutput)
                videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
                videoPreviewLayer!.videoGravity = AVLayerVideoGravityResizeAspectFill
                videoPreviewLayer!.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
                //self.cameraPreviewSurface.layer.sublayers?.forEach { $0.removeFromSuperlayer() }
                self.cameraPreviewSurface.layer.addSublayer(videoPreviewLayer!)
                DispatchQueue.main.async {
                    self.captureSession!.startRunning()
                }
            }
        }
    }
}
Some notes:
- cameraPreviewSurface is the UIView where the camera output will be shown.
- Don't reassign the session, and don't just add the new input: remove the existing inputs before adding the new one.
P.S. Code done with Swift 3.0.1 / Xcode 8.1.
Cheers )
Xcode version: 10.1 (10B61), Swift version: 4.2.
Change the AVCaptureSession capture source.
Building on Stepan Maksymov's solution, we can simplify it by working with captureSession.inputs directly.
First, create an IBAction outlet and connect it to the view controller to change the camera view:
@IBAction private func changeCamera(_ cameraButton: UIButton) {
    usingFrontCamera = !usingFrontCamera
    do {
        captureSession.removeInput(captureSession.inputs.first!)
        if usingFrontCamera {
            captureDevice = getFrontCamera()
        } else {
            captureDevice = getBackCamera()
        }
        let captureDeviceInput1 = try AVCaptureDeviceInput(device: captureDevice)
        captureSession.addInput(captureDeviceInput1)
    } catch {
        print(error.localizedDescription)
    }
}
Second step: copy the simplified AVCaptureDevice getters (per Stepan Maksymov):

func getFrontCamera() -> AVCaptureDevice? {
    return AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .front).devices.first
}

func getBackCamera() -> AVCaptureDevice? {
    return AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices.first
}
You can replace the @IBAction with a plain function like so:

func changeCamera() {
    usingFrontCamera = !usingFrontCamera
    do {
        captureSession.removeInput(captureSession.inputs.first!)
        if usingFrontCamera {
            captureDevice = getFrontCamera()
        } else {
            captureDevice = getBackCamera()
        }
        let captureDeviceInput1 = try AVCaptureDeviceInput(device: captureDevice)
        captureSession.addInput(captureDeviceInput1)
    } catch {
        print(error.localizedDescription)
    }
}
In addition to Stepan Maksymov's post, I suggest adding this function:
func stopCaptureSession() {
    self.captureSession.stopRunning()
    if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
        for input in inputs {
            self.captureSession.removeInput(input)
        }
    }
}
And call it instead of these lines from his post:

for i: AVCaptureDeviceInput in (self.captureSession?.inputs as! [AVCaptureDeviceInput]) {
    self.captureSession?.removeInput(i)
}
This way the cameras will switch more quickly.
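Combined, the switch handler from Stepan Maksymov's answer then becomes (a sketch reusing the pieces above):

@IBAction func switchButtonAction(_ sender: Any) {
    usingFrontCamera = !usingFrontCamera
    stopCaptureSession() // also removes the existing inputs
    loadCamera()         // rebuilds input, output and preview for the new camera
}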