I am working on a camera application for iOS using the AVFoundation framework. It has two capture options: RAW (DNG) or depth mode. It works fine except in one scenario: when I first take a RAW image and then a depth one. After that the preview freezes (the picture is still saved to the gallery, though), and when I play around with the toggle responsible for switching to depth mode, Xcode's console shows that captureSessionIsMissing is thrown. It only happens when a picture is taken in this sequence; just switching between modes doesn't have this effect.
What I've also figured out is that if cameraController.switchCameraDevice(to: .rearDual) inside the toggleDepthCapture() function is changed to .rearWide, it works fine, but I need to work with the dual camera in this case.
EDIT: the code stops executing after if let depthData = photo.depthData (depthData is nil) in
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishProcessingPhoto photo: AVCapturePhoto,
error: Error?)
Testing on iPhone 8 Plus and iPhone X
Xcode Version 10.0 beta 6
Deployment target 11.4
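An editor's debugging aside, not part of the original post: since captureSessionIsMissing is thrown by a guard when isRunning is false, observing the session's runtime-error and interruption notifications usually reveals why the session stopped (this assumes captureSession is reachable from the view controller):
NotificationCenter.default.addObserver(forName: .AVCaptureSessionRuntimeError,
                                       object: cameraController.captureSession, queue: .main) { note in
    // AVCaptureSessionErrorKey carries the AVError that stopped the session
    print("Session runtime error: \(note.userInfo?[AVCaptureSessionErrorKey] ?? "unknown")")
}
NotificationCenter.default.addObserver(forName: .AVCaptureSessionWasInterrupted,
                                       object: cameraController.captureSession, queue: .main) { note in
    print("Session interrupted: \(note.userInfo?[AVCaptureSessionInterruptionReasonKey] ?? "unknown")")
}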
ViewController.swift
@IBAction func toggleRawCapture(_ sender: UISwitch) {
//raw capture is only allowed in rear camera mode
if let position = cameraController.currentCameraPosition,
position == .rear,
sender.isOn, cameraController.rawCaptureMode == false {
//if depth mode is on, first disable it
if toggleDepthCaptureSwitch.isOn {
toggleDepthCaptureSwitch.setOn(false, animated: true)
}
do {
try cameraController.switchCameraDevice(to: .rearWide)
}catch(let error) {print(error)}
cameraController.rawCaptureMode = true
cameraController.depthMode = false
}else {
toggleRawCaptureSwitch.setOn(false, animated: true)
cameraController.rawCaptureMode = false
}
}
@IBAction func toggleDepthCapture(_ sender: UISwitch) {
if sender.isOn, cameraController.depthMode == false {
//if raw mode is on, first disable it
if toggleRawCaptureSwitch.isOn {
toggleRawCaptureSwitch.setOn(false, animated: true)
}
//check the position of the camera (rear or front)
if let position = cameraController.currentCameraPosition {
if position == .rear {
do {
// Allow rear depth capturing on iPhone 7 Plus, 8 Plus and X models only
switch UIDevice().modelName {
case "iPhone 7 Plus", "iPhone 8 Plus", "iPhone X": try cameraController.switchCameraDevice(to: .rearDual)
default:
//try cameraController.switchCameraDevice(to: .rearWide)
let alert = UIAlertController(title: "Warning!", message: "Operation not available (only on iPhone 7 Plus, 8 Plus and X)", preferredStyle: UIAlertControllerStyle.alert)
alert.addAction(UIAlertAction(title: "Got it", style: .default, handler: nil))
self.present(alert, animated: true, completion: nil)
}
} catch(let error) {print("Rear error: \(error)")}
} else{
do {
// Allow front depth capturing on iPhone X only
if UIDevice().modelName == "iPhone X" {
try cameraController.switchCameraDevice(to: .frontTrueDepth)
} else {
let alert = UIAlertController(title: "Warning!", message: "Operation not available (only on iPhone X)", preferredStyle: UIAlertControllerStyle.alert)
alert.addAction(UIAlertAction(title: "Got it", style: .default, handler: nil))
self.present(alert, animated: true, completion: nil)}
}catch(let error) {print("Front error: \(error)")}
}
}
cameraController.depthMode = true
cameraController.rawCaptureMode = false
}else {
//check the position of camera (rear or front)
if let position = cameraController.currentCameraPosition {
if position == .rear {
do {
try cameraController.switchCameraDevice(to: .rearWide)
}catch(let error) {print(error)}
} else{
do {
try cameraController.switchCameraDevice(to: .frontWide)
}catch(let error) {print(error)}
}
}
cameraController.depthMode = false
}
}
CameraController.swift
func switchCameraDevice(to cameraDevice: CameraDevice) throws {
guard let currentCameraDevice = currentCameraDevice, let captureSession = self.captureSession, captureSession.isRunning else {
throw CameraControllerError.captureSessionIsMissing
}
captureSession.beginConfiguration()
func switchToRearDualCamera() throws {
guard let rearCameraInput = self.rearCameraInput, captureSession.inputs.contains(rearCameraInput),
let rearDualCamera = self.rearDualCamera else { throw CameraControllerError.invalidOperation}
self.rearCameraInput = try AVCaptureDeviceInput(device: rearDualCamera)
captureSession.removeInput(rearCameraInput)
if captureSession.canAddInput(self.rearCameraInput!) {
captureSession.addInput(self.rearCameraInput!)
self.currentCameraDevice = .rearDual
self.photoOutput?.isDepthDataDeliveryEnabled = true
}else { throw CameraControllerError.invalidOperation}
}
func switchToRearWideCamera() throws {
guard let rearCameraInput = self.rearCameraInput, captureSession.inputs.contains(rearCameraInput),
let rearWideCamera = self.rearCamera else { throw CameraControllerError.invalidOperation}
self.rearCameraInput = try AVCaptureDeviceInput(device: rearWideCamera)
captureSession.removeInput(rearCameraInput)
if captureSession.canAddInput(self.rearCameraInput!) {
captureSession.addInput(self.rearCameraInput!)
self.currentCameraDevice = .rearWide
self.photoOutput?.isDepthDataDeliveryEnabled = false
}else { throw CameraControllerError.invalidOperation}
}
func switchToFrontTrueDepthCamera() throws {
guard let frontCameraInput = self.frontCameraInput, captureSession.inputs.contains(frontCameraInput),
let trueDepthCamera = self.frontTrueDepthCamera else { throw CameraControllerError.invalidOperation}
self.frontCameraInput = try AVCaptureDeviceInput(device: trueDepthCamera)
captureSession.removeInput(frontCameraInput)
if captureSession.canAddInput(self.frontCameraInput!) {
captureSession.addInput(self.frontCameraInput!)
self.currentCameraDevice = .frontTrueDepth
self.photoOutput?.isDepthDataDeliveryEnabled = true
}else { throw CameraControllerError.invalidOperation}
}
func switchToFrontWideCamera() throws {
guard let frontCameraInput = self.frontCameraInput, captureSession.inputs.contains(frontCameraInput),
let frontWideCamera = self.frontCamera else { throw CameraControllerError.invalidOperation}
self.frontCameraInput = try AVCaptureDeviceInput(device: frontWideCamera)
captureSession.removeInput(frontCameraInput)
if captureSession.canAddInput(self.frontCameraInput!) {
captureSession.addInput(self.frontCameraInput!)
self.currentCameraDevice = .frontWide
self.photoOutput?.isDepthDataDeliveryEnabled = false
} else { throw CameraControllerError.invalidOperation}
}
//todo: complete implementation
func switchToRearTelephotoCamera() throws {
}
switch cameraDevice {
case .rearWide:
try switchToRearWideCamera()
case .rearDual:
try switchToRearDualCamera()
case .frontWide:
try switchToFrontWideCamera()
case .frontTrueDepth:
try switchToFrontTrueDepthCamera()
case .rearTelephoto:
try switchToRearTelephotoCamera()
}
captureSession.commitConfiguration()
}
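// Editor's aside, an assumption rather than the poster's code: instead of
// hard-coding isDepthDataDeliveryEnabled per device in the helpers above, it
// can be derived from the output once the new input is attached, since
// enabling depth delivery on a configuration that doesn't support it raises
// an exception:
//
//     if let photoOutput = self.photoOutput {
//         photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
//     }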
func captureImage(completion: @escaping (UIImage?, Error?) -> Void) {
guard let captureSession = captureSession, captureSession.isRunning else {
completion(nil, CameraControllerError.captureSessionIsMissing);
return
}
var photoSettings: AVCapturePhotoSettings
if let availableRawFormat = self.photoOutput?.availableRawPhotoPixelFormatTypes.first, self.rawCaptureMode{
photoSettings = AVCapturePhotoSettings(rawPixelFormatType: availableRawFormat,
processedFormat: [AVVideoCodecKey : AVVideoCodecType.jpeg])
// RAW capture is incompatible with digital image stabilization.
photoSettings.isAutoStillImageStabilizationEnabled = false
}
// else if self.photoOutput?.availablePhotoCodecTypes.contains(AVVideoCodecType.hevc) == true {
//     photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
// }
else{
photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
}
photoSettings.flashMode = self.flashMode
if let depthEnabled = self.photoOutput?.isDepthDataDeliverySupported, self.depthMode {
photoSettings.isDepthDataDeliveryEnabled = depthEnabled
photoSettings.embedsDepthDataInPhoto = true
photoSettings.isDepthDataFiltered = true
}
self.photoOutput?.capturePhoto(with: photoSettings, delegate: self)
self.photoCaptureCompletionBlock = completion
}
}
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishProcessingPhoto photo: AVCapturePhoto,
error: Error?) {
if let error = error {
self.photoCaptureCompletionBlock?(nil, error)
}else if photo.isRawPhoto{
// Save the RAW (DNG) file data to a URL.
rawImageFileURL = self.makeUniqueTempFileURL(extension: "dng")
do {
try photo.fileDataRepresentation()!.write(to: rawImageFileURL!)
} catch {
fatalError("couldn't write DNG file to URL")
}
}else if let imageData = photo.fileDataRepresentation(){
self.compressedFileData = imageData
if self.depthMode{
if let depthData = photo.depthData{
saveDepthData(depth: depthData)
// Create a depthmap image
let context = CIContext()
let depthDataMap = depthData.converting(toDepthDataType: kCVPixelFormatType_DepthFloat32).depthDataMap
let ciImage = CIImage(cvPixelBuffer: depthDataMap)
let cgImage = context.createCGImage(ciImage, from: ciImage.extent)!
let imageOrientation: UIImageOrientation
switch currentOrientation {
case .portrait: imageOrientation = .right
case .portraitUpsideDown: imageOrientation = .left
case .landscapeLeft: imageOrientation = .down
default: imageOrientation = .up
}
let uiImage = UIImage(cgImage: cgImage, scale: 1.0, orientation: imageOrientation)
self.photoCaptureCompletionBlock?(uiImage, nil)
}
}
}else{
self.photoCaptureCompletionBlock?(nil, CameraControllerError.unknown)
}
}
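// Editor's note, an assumption rather than a confirmed fix: the frozen preview
// matches photoCaptureCompletionBlock never firing when photo.depthData is nil,
// because the depth branch above simply falls through in that case. Resolving
// the capture on the nil path keeps the UI responsive:
//
//     if self.depthMode {
//         guard let depthData = photo.depthData else {
//             self.photoCaptureCompletionBlock?(nil, CameraControllerError.unknown)
//             return
//         }
//         saveDepthData(depth: depthData)
//         // ... depth-map rendering as above ...
//     }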
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishCaptureFor resolvedSettings: AVCaptureResolvedPhotoSettings,
error: Error?) {
if let error = error {
print("Error capturing photo: \(error)");
}
guard let compressedData = self.compressedFileData else {return}
PHPhotoLibrary.shared().performChanges({
// Add the compressed (JPEG/HEIF) data as the main resource for the Photos asset.
let creationRequest = PHAssetCreationRequest.forAsset()
creationRequest.addResource(with: .photo, data: compressedData, options: nil)
if self.rawCaptureMode{
// Add the RAW (DNG) file as an alternate resource.
let options = PHAssetResourceCreationOptions()
options.shouldMoveFile = true
creationRequest.addResource(with: .alternatePhoto, fileURL: self.rawImageFileURL!, options: options)
}
}, completionHandler:{(_, error) in
if let error = error {
print("Error occurred while saving photo to photo library: \(error)")
}
})
}
enum CameraControllerError: Swift.Error {
case captureSessionAlreadyRunning
case captureSessionIsMissing
case inputsAreInvalid
case invalidOperation
case noCamerasAvailable
case unknown
}
Related
class CaptureController: NSObject, AVCapturePhotoCaptureDelegate, AVCaptureVideoDataOutputSampleBufferDelegate {
static let shared = CaptureController()
weak var delegate: CaptureControllerDelegate?
private let session = AVCaptureSession()
private var cameraInput: AVCaptureDeviceInput?
private let photoOutput = AVCapturePhotoOutput()
private let previewVideoOutput = AVCaptureVideoDataOutput()
private let previewRectangleDetectionController = RectangleDetectionController()
var flashMode = AVCaptureDevice.FlashMode.off
override init() {
super.init()
previewVideoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "video output buffer queue", qos: .userInteractive))
}
func startCaptureSession() {
self.session.sessionPreset = AVCaptureSession.Preset.photo
guard let backCameraDevice = AVCaptureDevice.default(AVCaptureDevice.DeviceType.builtInWideAngleCamera, for: AVMediaType.video, position: .back) else {
assertionFailure("Could not find a back facing camera")
return
}
do {
try cameraInput = AVCaptureDeviceInput(device: backCameraDevice)
guard let cameraInput = cameraInput else { return }
if self.session.canAddInput(cameraInput) {
self.session.addInput(cameraInput)
}
} catch {
print("AVCaptureDeviceInput error: \(error)")
}
if self.session.canAddOutput(self.photoOutput) {
if UIDevice.supportsJPGCaptureFormatOnly() {
self.photoOutput.isHighResolutionCaptureEnabled = false
self.photoOutput.setPreparedPhotoSettingsArray(
[AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil
)
}
self.session.addOutput(self.photoOutput)
}
if self.session.canAddOutput(self.previewVideoOutput) {
self.session.addOutput(self.previewVideoOutput)
if let connection = self.previewVideoOutput.connection(with: .video) {
connection.videoOrientation = .portrait
}
}
self.session.startRunning()
}
func endCaptureSession() {
self.session.outputs.forEach { output in
self.session.removeOutput(output)
}
self.session.inputs.forEach { input in
self.session.removeInput(input)
}
self.session.stopRunning()
}
func takePhoto() {
if UIDevice.supportsJPGCaptureFormatOnly() {
let settings = AVCapturePhotoSettings()
if UIImagePickerController.isFlashAvailable(for: .rear) {
settings.flashMode = flashMode
}
self.photoOutput.capturePhoto(with: settings, delegate: self)
} else {
guard let rawFormatFileType = self.photoOutput.availableRawPhotoFileTypes.first,
let rawFormatPixelType = self.photoOutput.supportedRawPhotoPixelFormatTypes(for: rawFormatFileType).first
else {
print("This device does not support RAW image capture format.")
return
}
guard let processFormat = self.photoOutput.availablePhotoCodecTypes.first else {
print("No supported codecs")
return
}
let settings = AVCapturePhotoSettings(rawPixelFormatType: rawFormatPixelType.uint32Value,
processedFormat: [AVVideoCodecKey: processFormat])
settings.flashMode = flashMode
self.photoOutput.capturePhoto(with: settings, delegate: self)
}
}
// MARK: - AVCapturePhotoCaptureDelegate
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishProcessingRawPhoto rawSampleBuffer: CMSampleBuffer?,
previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?,
resolvedSettings: AVCaptureResolvedPhotoSettings,
bracketSettings: AVCaptureBracketedStillImageSettings?,
error: Error?) {
if let error = error {
print("Capture failed: \(error)")
}
}
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishProcessingPhoto photo: AVCapturePhoto,
error: Error?) {
if let error = error {
print("Capture failed: \(error)")
self.delegate?.captureDidFail()
} else {
DispatchQueue.global(qos: .userInitiated).async {
guard let scannedImage = self.processImage(from: photo, withDetectedRectangle: self.previewRectangleDetectionController.bestRectangle) else {
return
}
self.delegate?.didCaptureImage(scannedImage)
}
}
}
private func processImage(from photo: AVCapturePhoto, withDetectedRectangle detectedRect: DetectedRectangle?) -> ScannedImage? {
guard let imageData = photo.fileDataRepresentation() else { return nil }
guard let image = CIImage(data: imageData) else { return nil }
// the image data always comes landscape oriented from the AVCapturePhoto, but the CIImage doesn't know.
// We have to flip it "right" here to match the expected portrait orientation ("up" would be the typical
// portrait orientation).
let targetOrientation: CGImagePropertyOrientation
if let orientationRawValue = photo.metadata[kCGImagePropertyOrientation as String] as? UInt32,
let photoOrientation = CGImagePropertyOrientation(rawValue: orientationRawValue) {
targetOrientation = photoOrientation
} else {
targetOrientation = .right
}
let rotatedImage = image.oriented(targetOrientation)
guard let flattenedImage = ImageManipulationController.shared.flatten(rotatedImage) else { return nil }
var editingCorners: Quadrilateral?
if let detectedRect = detectedRect {
// match scale
let scaleTransform = ResizingHelpers.transform(from: detectedRect.imageSize, to: flattenedImage.extent.size, destinationContentMode: .scaleToFill)
let scaledRect = detectedRect.rectangle.applying(scaleTransform)
// inset
let insetAmount: CGFloat = 0.97
let insetScale = CGAffineTransform(scaleX: insetAmount, y: insetAmount)
let insetScaledRect = scaledRect.applying(insetScale)
let distanceToCenter = scaledRect.center - insetScaledRect.center
let insetTranslation = CGAffineTransform(translationX: distanceToCenter.x, y: distanceToCenter.y)
let finalInsettingTransform = insetScale.concatenating(insetTranslation)
editingCorners = scaledRect.applying(finalInsettingTransform)
}
return ScannedImage(fullResolutionImage: flattenedImage, editingCorners: editingCorners)
}
// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate
func captureOutput(_ captureOutput: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
let ciImage = CIImage(cvImageBuffer: imageBuffer)
self.previewRectangleDetectionController.processRectangles(in: ciImage)
let bestRect = self.previewRectangleDetectionController.bestRectangle
self.delegate?.didUpdatePreviewImage(ciImage, withDetectedRectangle: bestRect)
}
}
The above code works fine on iOS 13 and below, but on iOS 14 and above it produces a duplicate image. I searched the internet but couldn't find out why duplicates appear.
Note: when pressing the camera button, two images show up.
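One plausible cause, offered as an assumption rather than a confirmed diagnosis: when the capture settings request RAW plus a processed format, photoOutput(_:didFinishProcessingPhoto:error:) fires once per representation, so the delegate above forwards two images. Skipping the RAW pass would de-duplicate:
func photoOutput(_ output: AVCapturePhotoOutput,
                 didFinishProcessingPhoto photo: AVCapturePhoto,
                 error: Error?) {
    if let error = error {
        print("Capture failed: \(error)")
        self.delegate?.captureDidFail()
        return
    }
    // This callback fires once for the DNG and once for the processed photo;
    // only forward the processed representation to the delegate.
    guard !photo.isRawPhoto else { return }
    DispatchQueue.global(qos: .userInitiated).async {
        guard let scannedImage = self.processImage(from: photo, withDetectedRectangle: self.previewRectangleDetectionController.bestRectangle) else {
            return
        }
        self.delegate?.didCaptureImage(scannedImage)
    }
}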
I am trying to save a generated UIImage to the Camera Roll.
I have the necessary privacy string in Info.plist.
I tried a couple of methods.
The error I am facing with PHPhotoLibrary is:
Error Domain=com.apple.accounts Code=7 "(null)"
The code I have tried:
UIImageWriteToSavedPhotosAlbum(image!, nil, nil, nil);
The other one is:
PHPhotoLibrary.shared().savePhoto(image: image!, albumName: "AlbumName", completion: { (asset) in
if asset == nil
{
print("error")
}
})
func savePhoto(image:UIImage, albumName:String, completion:((PHAsset?)->())? = nil) {
func save() {
if let album = PHPhotoLibrary.shared().findAlbum(albumName: albumName) {
PHPhotoLibrary.shared().saveImage(image: image, album: album, completion: completion)
} else {
PHPhotoLibrary.shared().createAlbum(albumName: albumName, completion: { (collection) in
if let collection = collection {
PHPhotoLibrary.shared().saveImage(image: image, album: collection, completion: completion)
} else {
completion?(nil)
}
})
}
}
if PHPhotoLibrary.authorizationStatus() == .authorized {
save()
} else {
PHPhotoLibrary.requestAuthorization({ (status) in
if status == .authorized {
save()
}
})
}
}
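// Editor's hedged variant for iOS 14+, not in the original post: requesting
// add-only access is sufficient for saving and avoids prompting for full
// library access:
//
//     if #available(iOS 14, *) {
//         PHPhotoLibrary.requestAuthorization(for: .addOnly) { status in
//             if status == .authorized { save() }
//         }
//     }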
// MARK: - Private
fileprivate func findAlbum(albumName: String) -> PHAssetCollection? {
let fetchOptions = PHFetchOptions()
fetchOptions.predicate = NSPredicate(format: "title = %@", albumName)
let fetchResult : PHFetchResult = PHAssetCollection.fetchAssetCollections(with: .album, subtype: .any, options: fetchOptions)
guard let photoAlbum = fetchResult.firstObject else {
return nil
}
return photoAlbum
}
fileprivate func createAlbum(albumName: String, completion: @escaping (PHAssetCollection?)->()) {
var albumPlaceholder: PHObjectPlaceholder?
PHPhotoLibrary.shared().performChanges({
let createAlbumRequest = PHAssetCollectionChangeRequest.creationRequestForAssetCollection(withTitle: albumName)
albumPlaceholder = createAlbumRequest.placeholderForCreatedAssetCollection
}, completionHandler: { success, error in
if success {
guard let placeholder = albumPlaceholder else {
completion(nil)
return
}
let fetchResult = PHAssetCollection.fetchAssetCollections(withLocalIdentifiers: [placeholder.localIdentifier], options: nil)
guard let album = fetchResult.firstObject else {
completion(nil)
return
}
completion(album)
} else {
completion(nil)
}
})
}
fileprivate func saveImage(image: UIImage, album: PHAssetCollection, completion:((PHAsset?)->())? = nil) {
var placeholder: PHObjectPlaceholder?
PHPhotoLibrary.shared().performChanges({
let createAssetRequest = PHAssetChangeRequest.creationRequestForAsset(from: image)
guard let albumChangeRequest = PHAssetCollectionChangeRequest(for: album),
let photoPlaceholder = createAssetRequest.placeholderForCreatedAsset else { return }
placeholder = photoPlaceholder
let fastEnumeration = NSArray(array: [photoPlaceholder] as [PHObjectPlaceholder])
albumChangeRequest.addAssets(fastEnumeration)
}, completionHandler: { success, error in
guard let placeholder = placeholder else {
completion?(nil)
return
}
if success {
let assets:PHFetchResult<PHAsset> = PHAsset.fetchAssets(withLocalIdentifiers: [placeholder.localIdentifier], options: nil)
let asset:PHAsset? = assets.firstObject
completion?(asset)
} else {
completion?(nil)
}
})
}
Could you please help me figure this out?
BR,
Erdem
I have figured it out.
The problem is that the QR image I had been creating with CIFilter was a CIImage-backed UIImage.
That is what returns error code -1.
You need to create the UIImage from a CGImage, not a CIImage:
if let output = filter.outputImage?.transformed(by: transform) {
let context = CIContext()
guard let cgImage = context.createCGImage(output, from: output.extent) else { return nil }
return UIImage(cgImage: cgImage)
}
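For completeness, a self-contained sketch of that conversion; the function name and scale parameter are mine, not from the original answer:
import CoreImage
import UIKit

func makeQRImage(from string: String, scale: CGFloat = 10) -> UIImage? {
    guard let filter = CIFilter(name: "CIQRCodeGenerator") else { return nil }
    filter.setValue(Data(string.utf8), forKey: "inputMessage")
    let transform = CGAffineTransform(scaleX: scale, y: scale)
    guard let output = filter.outputImage?.transformed(by: transform) else { return nil }
    // Render through CGImage: a CIImage-backed UIImage carries no bitmap data,
    // which is what made the photo-library save fail.
    let context = CIContext()
    guard let cgImage = context.createCGImage(output, from: output.extent) else { return nil }
    return UIImage(cgImage: cgImage)
}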
I am trying to record depth data from the TrueDepth camera along with a photo. But when calling
AVCapturePhotoOutput.capturePhoto(with:delegate:)
I get an exception stating:
No active and enabled video connection
I configure the camera and outputs like so (basically following the guide from Apple about photo capturing and capturing depth):
func configurePhotoOutput() throws {
self.captureSession = AVCaptureSession()
guard self.captureSession != nil else {
return
}
// Select a depth-capable capture device.
guard let videoDevice = AVCaptureDevice.default(.builtInTrueDepthCamera,
for: .video, position: .unspecified)
else { fatalError("No TrueDepth camera.") }
// Select a depth (not disparity) format that works with the active color format.
let availableFormats = videoDevice.activeFormat.supportedDepthDataFormats
let depthFormat = availableFormats.first(where: { format in
let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
pixelFormatType == kCVPixelFormatType_DepthFloat32)
})
do {
try videoDevice.lockForConfiguration()
videoDevice.activeDepthDataFormat = depthFormat
videoDevice.unlockForConfiguration()
} catch {
print("Could not lock device for configuration: \(error)")
return
}
self.captureSession!.beginConfiguration()
// add video input
guard let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
self.captureSession!.canAddInput(videoDeviceInput)
else { fatalError("Can't add video input.") }
self.captureSession!.addInput(videoDeviceInput)
// add video output
if self.captureSession!.canAddOutput(videoOutput) {
self.captureSession!.addOutput(videoOutput)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
} else { fatalError("Can't add video output.") }
// Set up photo output for depth data capture.
let photoOutput = AVCapturePhotoOutput()
photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
guard self.captureSession!.canAddOutput(photoOutput)
else { fatalError("Can't add photo output.") }
self.captureSession!.addOutput(photoOutput)
self.captureSession!.sessionPreset = .photo
self.captureSession!.commitConfiguration()
self.captureSession!.startRunning()
}
And the code responsible for capturing the photo:
func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
photoSettings.isDepthDataDeliveryEnabled =
self.photoOutput.isDepthDataDeliverySupported
photoSettings.isDepthDataFiltered = false
self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate) // <---- error is being thrown on this call
self.photoCaptureCompletionBlock = completion
}
What am I doing wrong in this configuration?
Solved it with the following implementation. (Editor's note: in the original configurePhotoOutput a local AVCapturePhotoOutput was added to the session, while captureImage called capturePhoto on self.photoOutput, which was never attached to the session; that mismatch would explain the "No active and enabled video connection" exception.)
Any comments / remarks are highly appreciated!
import AVFoundation
import UIKit
class CameraController: NSObject {
var captureSession: AVCaptureSession?
var videoDevice: AVCaptureDevice?
var previewLayer: AVCaptureVideoPreviewLayer?
var videoOutput = AVCaptureVideoDataOutput()
var photoOutput = AVCapturePhotoOutput()
func prepare(completionHandler: @escaping (Error?) -> Void) {
func createCaptureSession() {
captureSession = AVCaptureSession()
}
func configureCaptureDevices() throws {
// Select a depth-capable capture device.
guard let vd = AVCaptureDevice.default(.builtInTrueDepthCamera,
for: .video, position: .unspecified)
else { fatalError("No TrueDepth camera.") }
videoDevice = vd
// Select a depth (not disparity) format that works with the active color format.
let availableFormats = videoDevice!.activeFormat.supportedDepthDataFormats
let depthFormat = availableFormats.first(where: { format in
let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
pixelFormatType == kCVPixelFormatType_DepthFloat32)
})
do {
try videoDevice!.lockForConfiguration()
videoDevice!.activeDepthDataFormat = depthFormat
videoDevice!.unlockForConfiguration()
} catch {
print("Could not lock device for configuration: \(error)")
return
}
}
func configureDeviceInputs() throws {
if( captureSession == nil) {
throw CameraControllerError.captureSessionIsMissing
}
captureSession?.beginConfiguration()
// add video input
guard let videoDeviceInput = try? AVCaptureDeviceInput(device: self.videoDevice!),
captureSession!.canAddInput(videoDeviceInput)
else { fatalError("Can't add video input.") }
captureSession!.addInput(videoDeviceInput)
captureSession?.commitConfiguration()
}
func configurePhotoOutput() throws {
guard let captureSession = self.captureSession else { throw CameraControllerError.captureSessionIsMissing }
captureSession.beginConfiguration()
// Set up photo output for depth data capture.
photoOutput = AVCapturePhotoOutput()
photoOutput.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])], completionHandler: nil)
guard captureSession.canAddOutput(photoOutput)
else { fatalError("Can't add photo output.") }
captureSession.addOutput(photoOutput)
// Must be set after photoOutput is added to the captureSession, presumably because isDepthDataDeliverySupported only reports true once the output is connected to a depth-capable input.
photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
captureSession.sessionPreset = .photo
captureSession.commitConfiguration()
captureSession.startRunning()
}
DispatchQueue(label: "prepare").async {
do {
createCaptureSession()
try configureCaptureDevices()
try configureDeviceInputs()
try configurePhotoOutput()
}
catch {
DispatchQueue.main.async {
completionHandler(error)
}
return
}
DispatchQueue.main.async {
completionHandler(nil)
}
}
}
func displayPreview(on view: UIView) throws {
guard let captureSession = self.captureSession, captureSession.isRunning else { throw CameraControllerError.captureSessionIsMissing }
self.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.previewLayer?.connection?.videoOrientation = .portrait
view.layer.insertSublayer(self.previewLayer!, at: 0)
self.previewLayer?.frame = view.frame
}
func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
photoSettings.isDepthDataDeliveryEnabled = true
photoSettings.isDepthDataFiltered = false
self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate)
self.photoCaptureCompletionBlock = completion
}
var photoCaptureCompletionBlock: ((UIImage?, Error?) -> Void)?
}
extension CameraController {
public enum CameraPosition {
case front
case rear
}
enum CameraControllerError: Swift.Error {
case captureSessionAlreadyRunning
case captureSessionIsMissing
case inputsAreInvalid
case invalidOperation
case noCamerasAvailable
case unknown
}
}
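A hedged usage sketch; the hosting view controller is my assumption, not part of the answer:
import UIKit

class CameraViewController: UIViewController {
    let cameraController = CameraController()

    override func viewDidLoad() {
        super.viewDidLoad()
        cameraController.prepare { [weak self] error in
            guard let self = self else { return }
            if let error = error { print(error); return }
            // prepare(completionHandler:) calls back on the main queue,
            // so the preview layer can be attached directly.
            try? self.cameraController.displayPreview(on: self.view)
        }
    }
}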
I use AVCamSwift to take pictures, but when I take a picture with the front camera, it saves the image mirrored. The owner of the code confirmed that this is a bug. This is the code I use:
@IBAction func snapStillImage(sender: AnyObject) {
print("snapStillImage")
dispatch_async(self.sessionQueue, {
// Update the orientation on the still image output video connection before capturing.
let videoOrientation = (self.previewView.layer as! AVCaptureVideoPreviewLayer).connection.videoOrientation
self.stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo).videoOrientation = videoOrientation
// Flash set to Auto for Still Capture
takePhotoScreen.setFlashMode(AVCaptureFlashMode.Auto, device: self.videoDeviceInput!.device)
self.stillImageOutput!.captureStillImageAsynchronouslyFromConnection(self.stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo), completionHandler: {
(imageDataSampleBuffer: CMSampleBuffer!, error: NSError!) in
if error == nil {
let data:NSData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(imageDataSampleBuffer)
let image:UIImage = UIImage( data: data)!
let library: ALAssetsLibrary = ALAssetsLibrary()
let orientation: ALAssetOrientation = ALAssetOrientation(rawValue: image.imageOrientation.rawValue)!
library.writeImageToSavedPhotosAlbum(image.CGImage, orientation: orientation, completionBlock: nil)
print("save to album")
}else{
print("Did not capture still image")
print(error)
}
})
})
}
@IBAction func changeCamera(sender: AnyObject) {
print("change camera")
self.cameraButton.enabled = false
self.snapButton.enabled = false
dispatch_async(self.sessionQueue, {
let currentVideoDevice:AVCaptureDevice = self.videoDeviceInput!.device
let currentPosition: AVCaptureDevicePosition = currentVideoDevice.position
var preferredPosition: AVCaptureDevicePosition = AVCaptureDevicePosition.Unspecified
switch currentPosition{
case AVCaptureDevicePosition.Front:
preferredPosition = AVCaptureDevicePosition.Back
case AVCaptureDevicePosition.Back:
preferredPosition = AVCaptureDevicePosition.Front
case AVCaptureDevicePosition.Unspecified:
preferredPosition = AVCaptureDevicePosition.Back
}
let device:AVCaptureDevice = takePhotoScreen.deviceWithMediaType(AVMediaTypeVideo, preferringPosition: preferredPosition)
var videoDeviceInput: AVCaptureDeviceInput?
do {
videoDeviceInput = try AVCaptureDeviceInput(device: device)
} catch _ as NSError {
videoDeviceInput = nil
} catch {
fatalError()
}
self.session!.beginConfiguration()
self.session!.removeInput(self.videoDeviceInput)
if self.session!.canAddInput(videoDeviceInput){
NSNotificationCenter.defaultCenter().removeObserver(self, name:AVCaptureDeviceSubjectAreaDidChangeNotification, object:currentVideoDevice)
takePhotoScreen.setFlashMode(AVCaptureFlashMode.Auto, device: device)
NSNotificationCenter.defaultCenter().addObserver(self, selector: "subjectAreaDidChange:", name: AVCaptureDeviceSubjectAreaDidChangeNotification, object: device)
self.session!.addInput(videoDeviceInput)
self.videoDeviceInput = videoDeviceInput
}else{
self.session!.addInput(self.videoDeviceInput)
}
self.session!.commitConfiguration()
dispatch_async(dispatch_get_main_queue(), {
self.snapButton.enabled = true
self.cameraButton.enabled = true
})
})
}
I tried to find a solution with the following code, but it failed for the front camera. Do you have a recommendation for a workaround?
let reversedImage = UIImage(CGImage: pickedPhoto!.CGImage!, scale: 1.0, orientation: .LeftMirrored)
I solved the bug with the following solution:
let currentVideoDevice:AVCaptureDevice = self.videoDeviceInput!.device
let currentPosition: AVCaptureDevicePosition = currentVideoDevice.position
if currentPosition == AVCaptureDevicePosition.Front {
    let reversedImage = UIImage(CGImage: image.CGImage!, scale: 1.0, orientation: .LeftMirrored)
    sp.pickedPhoto = reversedImage
}
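With the modern AVCapturePhotoOutput API the same problem can be handled at the connection level before capturing, so the file is written unmirrored in the first place. A sketch in current Swift (photoOutput and device are assumed names, not the poster's Swift 2 code):
if let connection = photoOutput.connection(with: .video),
   connection.isVideoMirroringSupported {
    connection.automaticallyAdjustsVideoMirroring = false
    // Mirror front-camera captures so the saved image matches the preview.
    connection.isVideoMirrored = (device.position == .front)
}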
I'm creating a custom view for the camera, which works fine; however, I'm now working on switching from the back camera to the front camera. At the moment I've done it as shown below. However, this seems to create a bad user experience: it removes the previewLayer (the screen becomes white) and only then shows the front camera correctly. Is there a way to create a better user experience by not making everything white for a second before showing the new session?
switchCamera
func switchCamera() {
if usingbackCamera == true {
endSession()
beginSession(frontCamera!)
usingbackCamera = false
self.cameraView.bringSubviewToFront(actionView)
} else {
endSession()
beginSession(backCamera!)
usingbackCamera = true
self.cameraView.bringSubviewToFront(actionView)
}
}
beginSession
func beginSession(device: AVCaptureDevice) {
do {
captureSession.addInput(try AVCaptureDeviceInput(device: device))
self.previewLayer?.removeFromSuperlayer()
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.cameraView.layer.addSublayer(previewLayer!)
previewLayer?.frame = self.cameraView.bounds
captureSession.startRunning()
stillImageOutput.outputSettings = [AVVideoCodecKey:AVVideoCodecJPEG]
if captureSession.canAddOutput(stillImageOutput) {
captureSession.addOutput(stillImageOutput)
}
if captureSession.canAddOutput(videoOutput) {
captureSession.addOutput(videoOutput)
}
} catch let err as NSError {
print(err)
}
}
endSession
func endSession() {
self.previewLayer?.removeFromSuperlayer()
captureSession.stopRunning()
captureSession = AVCaptureSession()
}
Take Picture
func takePicture() {
if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo){
videoConnection.videoOrientation = AVCaptureVideoOrientation.Portrait
stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: {
(sampleBuffer, error) in
let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
let dataProvider = CGDataProviderCreateWithCFData(imageData)
let cgImageRef = CGImageCreateWithJPEGDataProvider(dataProvider, nil, true, CGColorRenderingIntent.RenderingIntentDefault)
let image = UIImage(CGImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.Right)
self.previewImageView.image = image
self.previewImageView.hidden = false
self.cameraView.bringSubviewToFront(self.previewImageView)
})
}
}
You don't need to stop the captureSession and start it again when switching from the back to the front camera and vice versa.
All you need to do is remove the old capture-session inputs and add the new one, all of that between a beginConfiguration/commitConfiguration pair.
Here is a rough example:
func switchCamera() {
//begin configuration changes
captureSession.beginConfiguration()
//remove the previous inputs
let inputs = captureSession.inputs as! [AVCaptureInput]
for oldInput:AVCaptureInput in inputs {
captureSession.removeInput(oldInput)
}
//add the new input
if usingbackCamera == true {
addInput(frontCamera!)
usingbackCamera = false
self.cameraView.bringSubviewToFront(actionView)
}
else {
addInput(backCamera!)
usingbackCamera = true
self.cameraView.bringSubviewToFront(actionView)
}
//end the configuration
captureSession.commitConfiguration()
}
func addInput(device: AVCaptureDevice) {
do {
captureSession.addInput(try AVCaptureDeviceInput(device: device))
} catch let err as NSError {
print(err)
}
}
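Because the preview layer stays attached to the same running session throughout, the input swap happens in place and the white flash caused by tearing the whole session down disappears.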
I think you don't have to remove the preview layer when changing the input device.
The layer is bound to the session; all you have to do is stop the session, remove the original input, add the new input, and then start the session again.
I built my capture view with custom rendering, but I think the process will be the same.
The capture snippet:
for output in session.outputs {
if let capture = output as? AVCaptureStillImageOutput{
for connection in (capture.connections as! [AVCaptureConnection]){
for port in (connection.inputPorts as! [AVCaptureInputPort]){
if port.mediaType == AVMediaTypeVideo{
capture.captureStillImageAsynchronouslyFromConnection(connection, completionHandler: {(buffer, err) -> Void in
if err != nil{
print(err)
}
let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(buffer)
guard let image = CIImage(data: imageData) else{
completion(nil)
return
}
let rotatedImage = image.imageByApplyingTransform(CGAffineTransformMakeRotation(-CGFloat(M_PI_2)))
// Deliver the rotated result; without this the completion is never called on success.
completion(UIImage(CIImage: rotatedImage))
})
}
}
}
}
}