AVCapturePhoto SemanticSegmentationMatte nil without audio input?

When I add an audio input to the capture session, the photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) callback returns the semantic segmentation mattes properly. Without the audio input, the returned mattes are nil. Is it possible to get the mattes without adding an audio input and asking the user for microphone permission?
// MARK: - Session
private func setupSession() {
    captureSession = AVCaptureSession()
    captureSession?.sessionPreset = .photo
    setupInputOutput()
    setupPreviewLayer(view)
    captureSession?.startRunning()
}

// MARK: - Settings
private func setupCamera() {
    settings = AVCapturePhotoSettings()
    let supportsHEVC = AVAssetExportSession.allExportPresets().contains(AVAssetExportPresetHEVCHighestQuality)
    settings = supportsHEVC
        ? AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
        : AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
    settings!.flashMode = .auto
    settings!.isHighResolutionPhotoEnabled = true
    settings!.previewPhotoFormat = [kCVPixelBufferPixelFormatTypeKey as String: settings!.__availablePreviewPhotoPixelFormatTypes.first ?? NSNumber()]
    settings!.isDepthDataDeliveryEnabled = true
    settings!.isPortraitEffectsMatteDeliveryEnabled = true
    if self.photoOutput?.enabledSemanticSegmentationMatteTypes.isEmpty == false {
        settings!.enabledSemanticSegmentationMatteTypes = self.photoOutput?.enabledSemanticSegmentationMatteTypes ?? [AVSemanticSegmentationMatte.MatteType]()
    }
    settings!.photoQualityPrioritization = self.photoQualityPrioritizationMode
}

private func setupInputOutput() {
    photoOutput = AVCapturePhotoOutput()
    guard let captureSession = captureSession else { return }
    guard let photoOutput = photoOutput else { return }
    do {
        captureSession.beginConfiguration()
        captureSession.sessionPreset = .photo
        let devices = self.videoDeviceDiscoverySession.devices
        currentDevice = devices.first(where: { $0.position == .front && $0.deviceType == .builtInTrueDepthCamera })
        guard let videoDevice = currentDevice else {
            captureSession.commitConfiguration()
            return
        }
        videoDeviceInput = try AVCaptureDeviceInput(device: videoDevice)
        if captureSession.canAddInput(videoDeviceInput) {
            captureSession.addInput(videoDeviceInput)
        } else {
            captureSession.commitConfiguration()
            return
        }
        currentDevice = AVCaptureDevice.default(for: .audio)
        captureDeviceInput = try AVCaptureDeviceInput(device: currentDevice!)
        if captureSession.canAddInput(captureDeviceInput) {
            captureSession.addInput(captureDeviceInput)
        } else {
            captureSession.commitConfiguration()
            return
        }
    } catch {
        errorMessage = error.localizedDescription
        print(error.localizedDescription)
        captureSession.commitConfiguration()
        return
    }
    if captureSession.canAddOutput(photoOutput) {
        captureSession.addOutput(photoOutput)
        photoOutput.isHighResolutionCaptureEnabled = true
        photoOutput.isLivePhotoCaptureEnabled = photoOutput.isLivePhotoCaptureSupported
        photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
        photoOutput.isPortraitEffectsMatteDeliveryEnabled = photoOutput.isPortraitEffectsMatteDeliverySupported
        photoOutput.enabledSemanticSegmentationMatteTypes = photoOutput.availableSemanticSegmentationMatteTypes
        photoOutput.maxPhotoQualityPrioritization = .balanced
    }
    captureSession.commitConfiguration()
}

private func setupPreviewLayer(_ view: UIView) {
    self.cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession ?? AVCaptureSession())
    self.cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    self.cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
    self.cameraPreviewLayer?.frame = view.frame
    view.layer.insertSublayer(self.cameraPreviewLayer ?? AVCaptureVideoPreviewLayer(), at: 0)
}

I was not able to get semantic segmentation mattes (SSM) back at all, with or without an audio input. I am currently developing on an iPhone X. After struggling for some time, I asked Apple about it in a one-on-one lab session during WWDC 2021. I was told that on my device the API only exposes the portrait effects matte; the iPhone 11 and above can deliver the skin, teeth, and hair mattes, and the new glasses SSM that was added recently without much announcement requires an iPhone 12.
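
To see what a given device actually offers at runtime, a quick check against the configured output helps (a sketch; logMatteSupport is a hypothetical helper, called after the output has been added to the session):

func logMatteSupport(for photoOutput: AVCapturePhotoOutput) {
    print("Portrait effects matte supported: \(photoOutput.isPortraitEffectsMatteDeliverySupported)")
    let types = photoOutput.availableSemanticSegmentationMatteTypes
    if types.isEmpty {
        print("No semantic segmentation matte types available on this device")
    } else {
        print("Available SSM types: \(types.map { $0.rawValue })")
    }
}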

Related

How to switch camera using AVFoundation

I have implemented the preview camera using AVFoundation and it's working fine, but I'm having a hard time switching between the back and front cameras. I have added a switch button to the bottom bar. By default it's the back camera; I want to switch it to the front. How can I do that?
class FifteenSecsViewController: UIViewController, AVCaptureFileOutputRecordingDelegate {
    @IBOutlet weak var camPreview: UIView!
    let captureSession = AVCaptureSession()
    let movieOutput = AVCaptureMovieFileOutput()
    var previewLayer: AVCaptureVideoPreviewLayer!
    var activeInput: AVCaptureDeviceInput!
    var outputURL: URL!

    override func viewDidLoad() {
        super.viewDidLoad()
        if setupSession() {
            setupPreview()
            startSession()
        }
        self.switchCameraButton.addTarget(self, action: #selector(switchButtonTapped), for: .touchUpInside)
    }

    func setupSession() -> Bool {
        captureSession.sessionPreset = AVCaptureSession.Preset.high
        // Setup Camera
        let camera: AVCaptureDevice?
        camera = AVCaptureDevice.default(for: .video)
        do {
            let input = try AVCaptureDeviceInput(device: camera!)
            if captureSession.canAddInput(input) {
                captureSession.addInput(input)
                activeInput = input
            }
        } catch {
            print("Error setting device video input: \(error)")
            return false
        }
        // Setup Microphone
        let microphone = AVCaptureDevice.default(for: .audio)
        do {
            let micInput = try AVCaptureDeviceInput(device: microphone!)
            if captureSession.canAddInput(micInput) {
                captureSession.addInput(micInput)
            }
        } catch {
            print("Error setting device audio input: \(error)")
            return false
        }
        // Movie output
        let seconds: Int64 = 3
        let maxDuration = CMTime(seconds: Double(seconds), preferredTimescale: 1)
        movieOutput.maxRecordedDuration = maxDuration
        if captureSession.canAddOutput(movieOutput) {
            captureSession.addOutput(movieOutput)
        }
        return true
    }

    func setupPreview() {
        // Configure previewLayer
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.frame = camPreview.bounds
        previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
        camPreview.layer.addSublayer(previewLayer)
    }

    // MARK: - Camera Session
    func startSession() {
        if !captureSession.isRunning {
            videoQueue().async {
                self.captureSession.startRunning()
            }
        }
    }

    @objc func switchButtonTapped() {
        // what to write here??
    }
}
The switchButtonTapped function is an action target of a UIButton. If I add this code to the button:
@objc func switchButtonTapped() {
    if setupSession() {
        setupPreview()
        startSession()
    }
}
The camera preview shows a white screen and gets stuck.
Try this code:
func switchCamera() {
    session?.beginConfiguration()
    let currentInput = session?.inputs.first as? AVCaptureDeviceInput
    session?.removeInput(currentInput!)
    let newCameraDevice = currentInput?.device.position == .back ? getCamera(with: .front) : getCamera(with: .back)
    let newVideoInput = try? AVCaptureDeviceInput(device: newCameraDevice!)
    session?.addInput(newVideoInput!)
    session?.commitConfiguration()
}

func getCamera(with position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    guard let devices = AVCaptureDevice.devices(for: AVMediaType.video) as? [AVCaptureDevice] else {
        return nil
    }
    return devices.filter { $0.position == position }.first
}
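
As an aside, AVCaptureDevice.devices(for:) is deprecated on recent SDKs; here is a sketch of the same lookup using AVCaptureDevice.DiscoverySession (assuming the wide-angle camera is the one wanted):

func getCamera(with position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    let discovery = AVCaptureDevice.DiscoverySession(
        deviceTypes: [.builtInWideAngleCamera],
        mediaType: .video,
        position: position)
    return discovery.devices.first
}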
To begin, create a device input for the front camera:
let frontDevice: AVCaptureDevice? = {
    for device in AVCaptureDevice.devices(for: AVMediaType.video) {
        if device.position == .front {
            return device
        }
    }
    return nil
}()

lazy var frontDeviceInput: AVCaptureDeviceInput? = {
    if let _frontDevice = self.frontDevice {
        return try? AVCaptureDeviceInput(device: _frontDevice)
    }
    return nil
}()
Then, in your switchButtonTapped, if there is a front camera you can switch between it and the back one:
func switchButtonTapped() {
    if let _frontDeviceInput = frontDeviceInput {
        captureSession.beginConfiguration()
        if let _currentInput = captureSession.inputs.first as? AVCaptureDeviceInput {
            captureSession.removeInput(_currentInput)
            let newDeviceInput = (_currentInput.device.position == .front) ? activeInput : _frontDeviceInput
            captureSession.addInput(newDeviceInput!)
        }
        captureSession.commitConfiguration()
    }
}
If you need more details, don't hesitate to ask.

AVFoundation Switching Cameras Slow (Connected To Sample Buffer)

override func viewDidLoad() {
    super.viewDidLoad()
    ...
    setupDevice()
    setupInputOutput()
}
viewDidLoad starts the setup chain:
func setupDevice() {
    let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
    let devices = deviceDiscoverySession.devices
    for device in devices {
        if device.position == AVCaptureDevice.Position.back {
            backCamera = device
            setupCorrectFramerate(currentCamera: backCamera!)
        } else if device.position == AVCaptureDevice.Position.front {
            frontCamera = device
            setupCorrectFramerate(currentCamera: frontCamera!)
        }
    }
    currentCamera = cameraModeIsBack ? backCamera : frontCamera
}
var deviceInput: AVCaptureDeviceInput?
let videoOutput = AVCaptureVideoDataOutput()

func setupInputOutput() {
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
        deviceInput = captureDeviceInput
        captureSession.sessionPreset = AVCaptureSession.Preset.hd1280x720
        if captureSession.canAddInput(deviceInput!) {
            captureSession.addInput(deviceInput!)
        }
        videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "sample buffer delegate", attributes: []))
        if captureSession.canAddOutput(videoOutput) {
            captureSession.addOutput(videoOutput)
        }
        captureSession.startRunning()
        if firstTime {
            setupMetal()
        }
        firstTime = false
        toggleCamerButton.isEnabled = true
    } catch {
        print(error)
    }
}
At a high level, I find the devices, set up their frame rates, and set up the capture session (the buffer output gets processed into an MTKView).
The issue is when I change cameras: it's either snappy and instant, or it becomes very slow and freezes for 4-5 seconds before responding.
The logic I use to change the cameras:
func updateCameras() {
    DispatchQueue.global().async {
        self.captureSession.stopRunning()
        self.captureSession.removeInput(self.deviceInput!)
        self.currentCamera = self.cameraModeIsBack ? self.backCamera : self.frontCamera
        self.changeCameras()
    }
}

func changeCameras() {
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: self.currentCamera!)
        self.deviceInput = captureDeviceInput
        if self.captureSession.canAddInput(self.deviceInput!) {
            self.captureSession.addInput(self.deviceInput!)
        }
        self.captureSession.startRunning()
        DispatchQueue.main.async {
            self.toggleCamerButton.isEnabled = true
        }
    } catch {
        print(error)
    }
}
I've revised it as much as I can. Switching cameras is essentially stopping the camera session, removing the input, adding the new input, and starting the session again. I have no idea why it's so variable (it works perfectly most of the time on my iPhone X, while on my iPhone 6 it's always slow).
I suspect the calls that manipulate the capture session should be done on the main thread. Try changing updateCameras() to use DispatchQueue.main.async instead of DispatchQueue.global().async.
Like this:
func updateCameras() {
    DispatchQueue.main.async {
        self.captureSession.stopRunning()
        self.captureSession.removeInput(self.deviceInput!)
        self.currentCamera = self.cameraModeIsBack ? self.backCamera : self.frontCamera
        self.changeCameras()
    }
}
With that change, the call to DispatchQueue.main.async in your changeCameras() function should no longer be needed.
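
Another pattern worth trying (a sketch under the assumption that the session can stay running; Apple's sample code generally does session work on a private serial queue rather than the main thread): swap the inputs inside a beginConfiguration/commitConfiguration pair instead of stopping and restarting the session, which skips the expensive teardown entirely:

let sessionQueue = DispatchQueue(label: "session queue") // serial by default

func updateCameras() {
    sessionQueue.async {
        self.captureSession.beginConfiguration()
        self.captureSession.removeInput(self.deviceInput!)
        self.currentCamera = self.cameraModeIsBack ? self.backCamera : self.frontCamera
        if let newInput = try? AVCaptureDeviceInput(device: self.currentCamera!),
           self.captureSession.canAddInput(newInput) {
            self.captureSession.addInput(newInput)
            self.deviceInput = newInput
        }
        self.captureSession.commitConfiguration() // the session keeps running throughout
        DispatchQueue.main.async {
            self.toggleCamerButton.isEnabled = true // UI work stays on the main thread
        }
    }
}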

'No active and enabled video connection' error when capturing photo with TrueDepth cam

I am trying to record depth data from the TrueDepth camera along with a photo. But when calling
AVCapturePhotoOutput's capturePhoto(with:delegate:)
I get an exception stating:
No active and enabled video connection
I configure the camera and outputs like so (basically following Apple's guides on photo capture and depth capture):
func configurePhotoOutput() throws {
    self.captureSession = AVCaptureSession()
    guard self.captureSession != nil else {
        return
    }
    // Select a depth-capable capture device.
    guard let videoDevice = AVCaptureDevice.default(.builtInTrueDepthCamera, for: .video, position: .unspecified)
    else { fatalError("No dual camera.") }
    // Select a depth (not disparity) format that works with the active color format.
    let availableFormats = videoDevice.activeFormat.supportedDepthDataFormats
    let depthFormat = availableFormats.first(where: { format in
        let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
        return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
                pixelFormatType == kCVPixelFormatType_DepthFloat32)
    })
    do {
        try videoDevice.lockForConfiguration()
        videoDevice.activeDepthDataFormat = depthFormat
        videoDevice.unlockForConfiguration()
    } catch {
        print("Could not lock device for configuration: \(error)")
        return
    }
    self.captureSession!.beginConfiguration()
    // add video input
    guard let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
          self.captureSession!.canAddInput(videoDeviceInput)
    else { fatalError("Can't add video input.") }
    self.captureSession!.addInput(videoDeviceInput)
    // add video output
    if self.captureSession!.canAddOutput(videoOutput) {
        self.captureSession!.addOutput(videoOutput)
        videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    } else { fatalError("Can't add video output.") }
    // Set up photo output for depth data capture.
    let photoOutput = AVCapturePhotoOutput()
    photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
    guard self.captureSession!.canAddOutput(photoOutput)
    else { fatalError("Can't add photo output.") }
    self.captureSession!.addOutput(photoOutput)
    self.captureSession!.sessionPreset = .photo
    self.captureSession!.commitConfiguration()
    self.captureSession!.startRunning()
}
And the code responsible for capturing the photo:
func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
    let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
    photoSettings.isDepthDataDeliveryEnabled = self.photoOutput.isDepthDataDeliverySupported
    photoSettings.isDepthDataFiltered = false
    self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate) // <---- error is thrown on this call
    self.photoCaptureCompletionBlock = completion
}
What am I doing wrong in this configuration?
Solved it with the following implementation. Any comments / remarks are highly appreciated!
import AVFoundation
import UIKit

class CameraController: NSObject {
    var captureSession: AVCaptureSession?
    var videoDevice: AVCaptureDevice?
    var previewLayer: AVCaptureVideoPreviewLayer?
    var videoOutput = AVCaptureVideoDataOutput()
    var photoOutput = AVCapturePhotoOutput()

    func prepare(completionHandler: @escaping (Error?) -> Void) {
        func createCaptureSession() {
            captureSession = AVCaptureSession()
        }

        func configureCaptureDevices() throws {
            // Select a depth-capable capture device.
            guard let vd = AVCaptureDevice.default(.builtInTrueDepthCamera, for: .video, position: .unspecified)
            else { fatalError("No dual camera.") }
            videoDevice = vd
            // Select a depth (not disparity) format that works with the active color format.
            let availableFormats = videoDevice!.activeFormat.supportedDepthDataFormats
            let depthFormat = availableFormats.first(where: { format in
                let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
                return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
                        pixelFormatType == kCVPixelFormatType_DepthFloat32)
            })
            do {
                try videoDevice!.lockForConfiguration()
                videoDevice!.activeDepthDataFormat = depthFormat
                videoDevice!.unlockForConfiguration()
            } catch {
                print("Could not lock device for configuration: \(error)")
                return
            }
        }

        func configureDeviceInputs() throws {
            if captureSession == nil {
                throw CameraControllerError.captureSessionIsMissing
            }
            captureSession?.beginConfiguration()
            // add video input
            guard let videoDeviceInput = try? AVCaptureDeviceInput(device: self.videoDevice!),
                  captureSession!.canAddInput(videoDeviceInput)
            else { fatalError("Can't add video input.") }
            captureSession!.addInput(videoDeviceInput)
            captureSession?.commitConfiguration()
        }

        func configurePhotoOutput() throws {
            guard let captureSession = self.captureSession else { throw CameraControllerError.captureSessionIsMissing }
            captureSession.beginConfiguration()
            // Set up photo output for depth data capture.
            photoOutput = AVCapturePhotoOutput()
            photoOutput.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])], completionHandler: nil)
            guard captureSession.canAddOutput(photoOutput)
            else { fatalError("Can't add photo output.") }
            captureSession.addOutput(photoOutput)
            // must be set after photoOutput is added to captureSession. Why???
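            // (Editor's guess, not confirmed by the original answer: the
            // isDepthDataDeliverySupported flag is derived from the session's
            // current inputs and configuration, so it only reports true once
            // the output is attached to a depth-capable session.)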
            photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
            captureSession.sessionPreset = .photo
            captureSession.commitConfiguration()
            captureSession.startRunning()
        }

        DispatchQueue(label: "prepare").async {
            do {
                createCaptureSession()
                try configureCaptureDevices()
                try configureDeviceInputs()
                try configurePhotoOutput()
            } catch {
                DispatchQueue.main.async {
                    completionHandler(error)
                }
                return
            }
            DispatchQueue.main.async {
                completionHandler(nil)
            }
        }
    }

    func displayPreview(on view: UIView) throws {
        guard let captureSession = self.captureSession, captureSession.isRunning else { throw CameraControllerError.captureSessionIsMissing }
        self.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        self.previewLayer?.connection?.videoOrientation = .portrait
        view.layer.insertSublayer(self.previewLayer!, at: 0)
        self.previewLayer?.frame = view.frame
    }

    func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
        let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
        photoSettings.isDepthDataDeliveryEnabled = true
        photoSettings.isDepthDataFiltered = false
        self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate)
        self.photoCaptureCompletionBlock = completion
    }

    var photoCaptureCompletionBlock: ((UIImage?, Error?) -> Void)?
}
extension CameraController {
    public enum CameraPosition {
        case front
        case rear
    }

    enum CameraControllerError: Swift.Error {
        case captureSessionAlreadyRunning
        case captureSessionIsMissing
        case inputsAreInvalid
        case invalidOperation
        case noCamerasAvailable
        case unknown
    }
}

AVFoundation camera crashing while switching to front camera (refreshing camera)

I am new to iOS, working on a simple application with Swift in which I need a custom camera. I am using AVFoundation, but the app shows a black screen for a long time before it loads the camera. Here is my code:
func prepareCamera() {
    captureSession.sessionPreset = AVCaptureSessionPresetPhoto
    if frontCam {
        if let availableDevices = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .front).devices {
            captureDevice = availableDevices.first
            DispatchQueue(label: "prepare").async {
                self.beginSession()
            }
        }
    } else {
        if let availableDevices = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .back).devices {
            captureDevice = availableDevices.first
            beginSession()
        }
    }
}

@IBAction func switchCameraBtnClicked(_ sender: Any) {
    frontCam = !frontCam
    prepareCamera()
}

func beginSession() {
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
        if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
            for input in inputs {
                captureSession.removeInput(input)
            }
        }
        if captureSession.inputs.isEmpty {
            captureSession.addInput(captureDeviceInput)
        }
    } catch {
        print(error.localizedDescription)
    }
    if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
        self.previewLayer = previewLayer
        // self.view.layer.addSublayer(self.previewLayer)
        self.view.layer.insertSublayer(self.previewLayer, at: 0)
        self.previewLayer.frame = self.view.layer.frame
        captureSession.startRunning()
        let dataOutput = AVCaptureVideoDataOutput()
        dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)]
        dataOutput.alwaysDiscardsLateVideoFrames = true
        if captureSession.canAddOutput(dataOutput) {
            captureSession.addOutput(dataOutput)
        }
        captureSession.commitConfiguration()
        let queue = DispatchQueue(label: "com.graymatics.customcamera")
        dataOutput.setSampleBufferDelegate(self, queue: queue)
    }
}
Please correct me if the code is not proper.
Finally found the solution. Here is my code:
override func viewDidDisappear(_ animated: Bool) {
    self.stopCaptureSession()
}

func stopCaptureSession() {
    self.captureSession.stopRunning()
    if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
        for input in inputs {
            self.captureSession.removeInput(input)
        }
    }
}
The session needs to be stopped when moving away from the current view controller.
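
A complementary step (a sketch based on an assumption about the surrounding view controller, not part of the original answer) is to restart the session when the view comes back, so the preview resumes after navigating away and back:

override func viewWillAppear(_ animated: Bool) {
    super.viewWillAppear(animated)
    if !captureSession.isRunning {
        prepareCamera() // re-adds the input for the current camera and starts the session
    }
}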

Begin new captureSession without removing previewLayer

I'm creating a custom view for the camera, which works fine; however, I'm now working on switching from the back camera to the front camera. At the moment I've done it as shown below, but this seems to create a bad user experience: it removes the previewLayer (the screen becomes white) and only then shows the front camera correctly. Is there a way to create a better user experience by not turning everything white for a second before showing the new session?
switchCamera
func switchCamera() {
    if usingbackCamera == true {
        endSession()
        beginSession(frontCamera!)
        usingbackCamera = false
        self.cameraView.bringSubviewToFront(actionView)
    } else {
        endSession()
        beginSession(backCamera!)
        usingbackCamera = true
        self.cameraView.bringSubviewToFront(actionView)
    }
}
beginSession
func beginSession(device: AVCaptureDevice) {
    do {
        captureSession.addInput(try AVCaptureDeviceInput(device: device))
        self.previewLayer?.removeFromSuperlayer()
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.cameraView.layer.addSublayer(previewLayer!)
        previewLayer?.frame = self.cameraView.bounds
        captureSession.startRunning()
        stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
        if captureSession.canAddOutput(stillImageOutput) {
            captureSession.addOutput(stillImageOutput)
        }
        if captureSession.canAddOutput(videoOutput) {
            captureSession.addOutput(videoOutput)
        }
    } catch let err as NSError {
        print(err)
    }
}
endSession
func endSession() {
    self.previewLayer?.removeFromSuperlayer()
    captureSession.stopRunning()
    captureSession = AVCaptureSession()
}
Take Picture
func takePicture() {
    if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo) {
        videoConnection.videoOrientation = AVCaptureVideoOrientation.Portrait
        stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: { (sampleBuffer, error) in
            let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
            let dataProvider = CGDataProviderCreateWithCFData(imageData)
            let cgImageRef = CGImageCreateWithJPEGDataProvider(dataProvider, nil, true, CGColorRenderingIntent.RenderingIntentDefault)
            let image = UIImage(CGImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.Right)
            self.previewImageView.image = image
            self.previewImageView.hidden = false
            self.cameraView.bringSubviewToFront(self.previewImageView)
        })
    }
}
You don't need to stop the captureSession and start it again when switching between the back and front cameras. All you need to do is remove the old capture session inputs and add the new one, all within a beginConfiguration/commitConfiguration block.
Here is a rough example:
func switchCamera() {
    // begin configuration changes
    captureSession.beginConfiguration()
    // remove the previous inputs
    let inputs = captureSession.inputs as! [AVCaptureInput]
    for oldInput: AVCaptureInput in inputs {
        captureSession.removeInput(oldInput)
    }
    // add the new input
    if usingbackCamera == true {
        addInput(frontCamera!)
        usingbackCamera = false
        self.cameraView.bringSubviewToFront(actionView)
    } else {
        addInput(backCamera!)
        usingbackCamera = true
        self.cameraView.bringSubviewToFront(actionView)
    }
    // end the configuration
    captureSession.commitConfiguration()
}

func addInput(device: AVCaptureDevice) {
    do {
        captureSession.addInput(try AVCaptureDeviceInput(device: device))
    } catch let err as NSError {
        print(err)
    }
}
I think you don't have to remove the preview layer when changing the input device. The layer is bound to the session; all you have to do is stop the session, remove the original input, add the new input, and then start the session again (a sketch of that swap follows). I build my capture view with custom rendering, but I think the process would be the same.
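
For completeness, a sketch of the swap that paragraph describes, in modern Swift (identifier names assumed; the preview layer is never touched, so the screen doesn't go white):

func switchInput(to device: AVCaptureDevice) {
    captureSession.stopRunning()
    // Remove all existing inputs; the previewLayer keeps its session reference.
    for input in captureSession.inputs {
        captureSession.removeInput(input)
    }
    do {
        captureSession.addInput(try AVCaptureDeviceInput(device: device))
    } catch {
        print(error)
    }
    captureSession.startRunning()
}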
The capture snippet:
for output in session.outputs {
    if let capture = output as? AVCaptureStillImageOutput {
        for connection in (capture.connections as! [AVCaptureConnection]) {
            for port in (connection.inputPorts as! [AVCaptureInputPort]) {
                if port.mediaType == AVMediaTypeVideo {
                    capture.captureStillImageAsynchronouslyFromConnection(connection, completionHandler: { (buffer, err) -> Void in
                        if err != nil {
                            print(err)
                        }
                        let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(buffer)
                        guard let image = CIImage(data: imageData) else {
                            completion(nil)
                            return
                        }
                        let rotatedImage = image.imageByApplyingTransform(CGAffineTransformMakeRotation(-CGFloat(M_PI_2)))
                    })
                }
            }
        }
    }
}