How to switch cameras using AVFoundation - iOS

I have implemented the camera preview using AVFoundation and it's working fine, but I'm having a hard time switching between the back and front cameras. I have added a switch button to the bottom bar. By default it uses the back camera; I want to switch it to the front. How can I do that?
import UIKit
import AVFoundation

class FifteenSecsViewController: UIViewController, AVCaptureFileOutputRecordingDelegate {

    @IBOutlet weak var camPreview: UIView!
    // Implied by the addTarget call below; this outlet was not shown in the original snippet.
    @IBOutlet weak var switchCameraButton: UIButton!

    let captureSession = AVCaptureSession()
    let movieOutput = AVCaptureMovieFileOutput()
    var previewLayer: AVCaptureVideoPreviewLayer!
    var activeInput: AVCaptureDeviceInput!
    var outputURL: URL!

    override func viewDidLoad() {
        super.viewDidLoad()
        if setupSession() {
            setupPreview()
            startSession()
        }
        self.switchCameraButton.addTarget(self, action: #selector(switchButtonTapped), for: .touchUpInside)
    }

    func setupSession() -> Bool {
        captureSession.sessionPreset = AVCaptureSession.Preset.high

        // Setup Camera
        let camera = AVCaptureDevice.default(for: .video)
        do {
            let input = try AVCaptureDeviceInput(device: camera!)
            if captureSession.canAddInput(input) {
                captureSession.addInput(input)
                activeInput = input
            }
        } catch {
            print("Error setting device video input: \(error)")
            return false
        }

        // Setup Microphone
        let microphone = AVCaptureDevice.default(for: .audio)
        do {
            let micInput = try AVCaptureDeviceInput(device: microphone!)
            if captureSession.canAddInput(micInput) {
                captureSession.addInput(micInput)
            }
        } catch {
            print("Error setting device audio input: \(error)")
            return false
        }

        // Movie output
        let seconds: Int64 = 3
        let maxDuration = CMTime(seconds: Double(seconds), preferredTimescale: 1)
        movieOutput.maxRecordedDuration = maxDuration
        if captureSession.canAddOutput(movieOutput) {
            captureSession.addOutput(movieOutput)
        }
        return true
    }

    func setupPreview() {
        // Configure previewLayer
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.frame = camPreview.bounds
        previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
        camPreview.layer.addSublayer(previewLayer)
    }

    // MARK: - Camera Session
    func videoQueue() -> DispatchQueue {
        // videoQueue() was called but not defined in the original snippet;
        // a background queue keeps startRunning() off the main thread.
        return DispatchQueue.global(qos: .userInitiated)
    }

    func startSession() {
        if !captureSession.isRunning {
            videoQueue().async {
                self.captureSession.startRunning()
            }
        }
    }

    @objc func switchButtonTapped() {
        // what to write here??
    }

    // Required by AVCaptureFileOutputRecordingDelegate; body omitted in the question.
    func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL,
                    from connections: [AVCaptureConnection], error: Error?) {
    }
}
The function switchButtonTapped is an action target of the UIButton. If I add this code to the button:
@objc func switchButtonTapped() {
    if setupSession() {
        setupPreview()
        startSession()
    }
}
the camera preview shows a white screen and gets stuck.

Try this code:
func switchCamera() {
    session?.beginConfiguration()
    let currentInput = session?.inputs.first as? AVCaptureDeviceInput
    session?.removeInput(currentInput!)
    let newCameraDevice = currentInput?.device.position == .back ? getCamera(with: .front) : getCamera(with: .back)
    let newVideoInput = try? AVCaptureDeviceInput(device: newCameraDevice!)
    session?.addInput(newVideoInput!)
    session?.commitConfiguration()
}

func getCamera(with position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    let devices = AVCaptureDevice.devices(for: AVMediaType.video)
    return devices.filter {
        $0.position == position
    }.first
}

To begin, create a device input for the front camera:
let frontDevice: AVCaptureDevice? = {
    for device in AVCaptureDevice.devices(for: AVMediaType.video) {
        if device.position == .front {
            return device
        }
    }
    return nil
}()

lazy var frontDeviceInput: AVCaptureDeviceInput? = {
    if let _frontDevice = self.frontDevice {
        return try? AVCaptureDeviceInput(device: _frontDevice)
    }
    return nil
}()
Then in your switchButtonTapped, if there is a front camera you can do the switch between the front camera and the back one:
@objc func switchButtonTapped() { // @objc so it can be used with #selector
    if let _frontDeviceInput = frontDeviceInput {
        captureSession.beginConfiguration()
        if let _currentInput = captureSession.inputs.first as? AVCaptureDeviceInput {
            captureSession.removeInput(_currentInput)
            let newDeviceInput = (_currentInput.device.position == .front) ? activeInput : _frontDeviceInput
            captureSession.addInput(newDeviceInput!)
        }
        captureSession.commitConfiguration()
    }
}
If you need more details, don't hesitate.

Related

Adding a Switch Camera Button to Custom Camera View

I got this code off of GitHub as the most basic custom camera view controller. I have implemented a flash, which works on both the front and back cameras. But how would I implement a button that switches cameras while in the app? I have created a cameraButtonPressed action which runs when the camera switch button is tapped.
import UIKit
import AVFoundation

class CameraViewController: UIViewController {

    @IBOutlet weak var flashButton: UIButton!
    @IBOutlet weak var cameraButton: UIButton!
    var flashOn = false
    @IBOutlet weak var previewView: PreviewView!

    let captureSession = AVCaptureSession()
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    let capturePhotoOutput = AVCapturePhotoOutput()
    let capturePhotoDelegate = CapturePhotoDelegate()
    private var deviceInput: AVCaptureDeviceInput?
    private var cameraPosition: CameraPosition = .back

    enum CameraPosition {
        case front
        case back
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        checkCameraUsagePermission()
        flashButton.setTitle("OFF", for: .normal)
        cameraButton.setTitle("BACK", for: .normal)
    }

    func initialiseCaptureSession() {
        let captureDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .unspecified)
        guard let input = try? AVCaptureDeviceInput(device: captureDevice!),
              captureSession.canAddInput(input)
        else { return }
        captureSession.addInput(input)
        self.previewView.videoPreviewLayer.session = self.captureSession
        self.previewView.videoPreviewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
        capturePhotoOutput.isHighResolutionCaptureEnabled = true
        captureSession.addOutput(capturePhotoOutput)
        captureSession.startRunning()
    }

    @IBAction func onTapTakePhoto(_ sender: UIButton) {
        let photoSettings = AVCapturePhotoSettings()
        photoSettings.isAutoStillImageStabilizationEnabled = true
        photoSettings.isHighResolutionPhotoEnabled = true
        photoSettings.flashMode = flashOn ? .on : .off
        capturePhotoOutput.capturePhoto(with: photoSettings, delegate: capturePhotoDelegate)
    }

    func checkCameraUsagePermission() {
        switch AVCaptureDevice.authorizationStatus(for: .video) {
        case .authorized:
            self.initialiseCaptureSession()
        case .notDetermined:
            AVCaptureDevice.requestAccess(for: .video) { granted in
                if granted {
                    self.initialiseCaptureSession()
                }
            }
        case .denied:
            return
        case .restricted:
            return
        }
    }

    @IBAction func flashButtonPressed(_ sender: UIButton) {
        if flashOn == false {
            flashOn = true
            flashButton.setTitle("ON", for: .normal)
        } else {
            flashOn = false
            flashButton.setTitle("OFF", for: .normal)
        }
    }

    func addVideoInput(position: AVCaptureDevice.Position) {
        guard let device: AVCaptureDevice = AVCaptureDevice.default(.builtInWideAngleCamera,
                                                                    for: .video, position: position) else { return }
        if let currentInput = self.deviceInput {
            self.captureSession.removeInput(currentInput)
            self.deviceInput = nil
        }
        do {
            let input = try AVCaptureDeviceInput(device: device)
            if self.captureSession.canAddInput(input) {
                self.captureSession.addInput(input)
                self.deviceInput = input
            }
        } catch {
            print(error)
        }
    }

    @IBAction func cameraButtonPressed(_ sender: UIButton) {
        switch self.cameraPosition {
        case .front:
            self.cameraPosition = .back
            self.addVideoInput(position: .back)
        case .back:
            self.cameraPosition = .front
            self.addVideoInput(position: .front)
        }
        // configure your session here
        DispatchQueue.main.async {
            self.captureSession.beginConfiguration()
            if self.captureSession.canAddOutput(self.capturePhotoOutput) {
                self.captureSession.addOutput(self.capturePhotoOutput)
            }
            self.captureSession.commitConfiguration()
        }
    }
}
First of all, store your AVCaptureDeviceInput in a property:
private var deviceInput: AVCaptureDeviceInput?
Next, create an enum that will indicate which camera is active:
enum CameraPosition {
    case front
    case back
}
and a property for the current camera mode:
private var cameraPosition: CameraPosition = .back
Now create a function that will be responsible for switching cameras:
func addVideoInput(position: AVCaptureDevice.Position) {
    guard let device: AVCaptureDevice = AVCaptureDevice.default(.builtInWideAngleCamera,
                                                                for: .video, position: position) else { return }
    if let currentInput = self.deviceInput {
        self.captureSession.removeInput(currentInput)
        self.deviceInput = nil
    }
    do {
        let input = try AVCaptureDeviceInput(device: device)
        if self.captureSession.canAddInput(input) {
            self.captureSession.addInput(input)
            self.deviceInput = input
        }
    } catch {
        print(error)
    }
}
And now in the cameraButtonPressed method you can switch cameras:
@IBAction func cameraButtonPressed(_ sender: UIButton) {
    switch self.cameraPosition {
    case .front:
        self.cameraPosition = .back
        self.addVideoInput(position: .back)
    case .back:
        self.cameraPosition = .front
        self.addVideoInput(position: .front)
    }
    // configure your session here
    DispatchQueue.main.async {
        self.captureSession.beginConfiguration()
        if self.captureSession.canAddOutput(self.capturePhotoOutput) {
            self.captureSession.addOutput(self.capturePhotoOutput)
        }
        self.captureSession.commitConfiguration()
    }
}
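One caveat: addVideoInput(position:) swaps the input outside of a configuration block, and the photo output is then re-added separately on the main queue. AVCaptureSession lets you batch the whole change so the session reconfigures only once; a sketch of the same switch done atomically, reusing the property names above:

@IBAction func cameraButtonPressed(_ sender: UIButton) {
    let newPosition: AVCaptureDevice.Position = (cameraPosition == .back) ? .front : .back
    guard let device = AVCaptureDevice.default(.builtInWideAngleCamera,
                                               for: .video, position: newPosition) else { return }
    captureSession.beginConfiguration()
    // Remove the old input and add the new one inside a single
    // begin/commit pair so the session reconfigures only once.
    if let currentInput = deviceInput {
        captureSession.removeInput(currentInput)
    }
    if let input = try? AVCaptureDeviceInput(device: device),
       captureSession.canAddInput(input) {
        captureSession.addInput(input)
        deviceInput = input
        cameraPosition = (newPosition == .front) ? .front : .back
    }
    captureSession.commitConfiguration()
}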

'No active and enabled video connection' error when capturing photo with TrueDepth cam

I am trying to record depth data from the TrueDepth camera along with a photo. But when calling
AVCapturePhotoOutput.capturePhoto(with:delegate:)
I get an exception stating:
No active and enabled video connection
I configure the camera and outputs like so (basically following Apple's guide on capturing photos and depth):
func configurePhotoOutput() throws {
    self.captureSession = AVCaptureSession()
    guard self.captureSession != nil else {
        return
    }

    // Select a depth-capable capture device.
    guard let videoDevice = AVCaptureDevice.default(.builtInTrueDepthCamera,
                                                    for: .video, position: .unspecified)
    else { fatalError("No dual camera.") }

    // Select a depth (not disparity) format that works with the active color format.
    let availableFormats = videoDevice.activeFormat.supportedDepthDataFormats
    let depthFormat = availableFormats.first(where: { format in
        let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
        return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
                pixelFormatType == kCVPixelFormatType_DepthFloat32)
    })

    do {
        try videoDevice.lockForConfiguration()
        videoDevice.activeDepthDataFormat = depthFormat
        videoDevice.unlockForConfiguration()
    } catch {
        print("Could not lock device for configuration: \(error)")
        return
    }

    self.captureSession!.beginConfiguration()

    // add video input
    guard let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
          self.captureSession!.canAddInput(videoDeviceInput)
    else { fatalError("Can't add video input.") }
    self.captureSession!.addInput(videoDeviceInput)

    // add video output
    if self.captureSession!.canAddOutput(videoOutput) {
        self.captureSession!.addOutput(videoOutput)
        videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    } else { fatalError("Can't add video output.") }

    // Set up photo output for depth data capture.
    let photoOutput = AVCapturePhotoOutput()
    photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
    guard self.captureSession!.canAddOutput(photoOutput)
    else { fatalError("Can't add photo output.") }
    self.captureSession!.addOutput(photoOutput)
    self.captureSession!.sessionPreset = .photo
    self.captureSession!.commitConfiguration()

    self.captureSession!.startRunning()
}
And the code responsible for capturing the photo:
func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
    let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
    photoSettings.isDepthDataDeliveryEnabled =
        self.photoOutput.isDepthDataDeliverySupported
    photoSettings.isDepthDataFiltered = false
    self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate) // <---- error is thrown on this call
    self.photoCaptureCompletionBlock = completion
}
What am I doing wrong in this configuration?

I solved it with the following implementation. (The likely culprit above: configurePhotoOutput() adds a local photoOutput constant to the session, while captureImage calls capturePhoto on the separate self.photoOutput property, which was never attached to the session.) Any comments / remarks are highly appreciated!
import AVFoundation
import UIKit

class CameraController: NSObject {
    var captureSession: AVCaptureSession?
    var videoDevice: AVCaptureDevice?
    var previewLayer: AVCaptureVideoPreviewLayer?
    var videoOutput = AVCaptureVideoDataOutput()
    var photoOutput = AVCapturePhotoOutput()

    func prepare(completionHandler: @escaping (Error?) -> Void) {
        func createCaptureSession() {
            captureSession = AVCaptureSession()
        }

        func configureCaptureDevices() throws {
            // Select a depth-capable capture device.
            guard let vd = AVCaptureDevice.default(.builtInTrueDepthCamera,
                                                   for: .video, position: .unspecified)
            else { fatalError("No dual camera.") }
            videoDevice = vd

            // Select a depth (not disparity) format that works with the active color format.
            let availableFormats = videoDevice!.activeFormat.supportedDepthDataFormats
            let depthFormat = availableFormats.first(where: { format in
                let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
                return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
                        pixelFormatType == kCVPixelFormatType_DepthFloat32)
            })

            do {
                try videoDevice!.lockForConfiguration()
                videoDevice!.activeDepthDataFormat = depthFormat
                videoDevice!.unlockForConfiguration()
            } catch {
                print("Could not lock device for configuration: \(error)")
                return
            }
        }

        func configureDeviceInputs() throws {
            if captureSession == nil {
                throw CameraControllerError.captureSessionIsMissing
            }
            captureSession?.beginConfiguration()

            // add video input
            guard let videoDeviceInput = try? AVCaptureDeviceInput(device: self.videoDevice!),
                  captureSession!.canAddInput(videoDeviceInput)
            else { fatalError("Can't add video input.") }
            captureSession!.addInput(videoDeviceInput)

            captureSession?.commitConfiguration()
        }

        func configurePhotoOutput() throws {
            guard let captureSession = self.captureSession else { throw CameraControllerError.captureSessionIsMissing }
            captureSession.beginConfiguration()

            // Set up photo output for depth data capture.
            photoOutput = AVCapturePhotoOutput()
            photoOutput.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])], completionHandler: nil)
            guard captureSession.canAddOutput(photoOutput)
            else { fatalError("Can't add photo output.") }
            captureSession.addOutput(photoOutput)

            // must be set after photoOutput is added to captureSession. Why???
            photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported

            captureSession.sessionPreset = .photo
            captureSession.commitConfiguration()
            captureSession.startRunning()
        }

        DispatchQueue(label: "prepare").async {
            do {
                createCaptureSession()
                try configureCaptureDevices()
                try configureDeviceInputs()
                try configurePhotoOutput()
            } catch {
                DispatchQueue.main.async {
                    completionHandler(error)
                }
                return
            }

            DispatchQueue.main.async {
                completionHandler(nil)
            }
        }
    }

    func displayPreview(on view: UIView) throws {
        guard let captureSession = self.captureSession, captureSession.isRunning else { throw CameraControllerError.captureSessionIsMissing }

        self.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        self.previewLayer?.connection?.videoOrientation = .portrait

        view.layer.insertSublayer(self.previewLayer!, at: 0)
        self.previewLayer?.frame = view.frame
    }

    func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
        let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
        photoSettings.isDepthDataDeliveryEnabled = true
        photoSettings.isDepthDataFiltered = false
        self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate)
        self.photoCaptureCompletionBlock = completion
    }

    var photoCaptureCompletionBlock: ((UIImage?, Error?) -> Void)?
}

extension CameraController {
    public enum CameraPosition {
        case front
        case rear
    }

    enum CameraControllerError: Swift.Error {
        case captureSessionAlreadyRunning
        case captureSessionIsMissing
        case inputsAreInvalid
        case invalidOperation
        case noCamerasAvailable
        case unknown
    }
}
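For completeness, the delegate passed into captureImage(delegate:completion:) is where the photo and its depth data arrive; a minimal sketch of such a delegate (this class and its names are illustrative, not part of the original answer):

import AVFoundation
import UIKit

class DepthPhotoDelegate: NSObject, AVCapturePhotoCaptureDelegate {
    // Called with the decoded image and depth data once processing finishes.
    var onCapture: ((UIImage?, AVDepthData?, Error?) -> Void)?

    func photoOutput(_ output: AVCapturePhotoOutput,
                     didFinishProcessingPhoto photo: AVCapturePhoto,
                     error: Error?) {
        // photo.depthData is non-nil only when depth delivery was enabled
        // on both the photo output and the capture settings.
        let image = photo.fileDataRepresentation().flatMap { UIImage(data: $0) }
        onCapture?(image, photo.depthData, error)
    }
}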

How can I improve my custom camera view quality + code

I followed a quick tutorial on YouTube on how to create a custom camera view and it's working fine... but for some reason the camera is zoomed in (not like the regular iPhone camera) and the quality seems downgraded. Plus, I don't know if the code is very optimized, so I thought I should ask some professionals online like you guys :) How can I improve/maximize my camera quality and performance?
Here is my code:
Custom camera view code:
import UIKit
import AVFoundation

class CustomCameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, UIImagePickerControllerDelegate {

    @IBOutlet var CameraView: UIView!

    var audioPlayer = AVAudioPlayer()
    let captureSession = AVCaptureSession()
    var captureDevice: AVCaptureDevice?
    var previewLayer: AVCaptureVideoPreviewLayer?
    var frontCamera: Bool = false
    var stilledImageOutput: AVCaptureStillImageOutput = AVCaptureStillImageOutput()

    func beginSession() {
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.view.layer.addSublayer(previewLayer!)
        previewLayer?.frame = self.view.layer.bounds
        previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
        captureSession.startRunning()
        stilledImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
        if captureSession.canAddOutput(stilledImageOutput) {
            captureSession.addOutput(stilledImageOutput)
        }
    }

    func frontCamera(_ front: Bool) {
        let devices = AVCaptureDevice.devices()
        do {
            try captureSession.removeInput(AVCaptureDeviceInput(device: captureDevice))
        } catch {
            print("Error")
        }
        for device in devices! {
            if (device as AnyObject).hasMediaType(AVMediaTypeVideo) {
                if front {
                    if (device as AnyObject).position == AVCaptureDevicePosition.front {
                        captureDevice = device as? AVCaptureDevice
                        do {
                            try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
                        } catch {
                        }
                        break
                    }
                } else {
                    if (device as AnyObject).position == AVCaptureDevicePosition.back {
                        captureDevice = device as? AVCaptureDevice
                        do {
                            try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
                        } catch {
                        }
                        break
                    }
                }
            }
        }
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        if #available(iOS 10.0, *) {
            let photoSettings = AVCapturePhotoSettings()
            photoSettings.isHighResolutionPhotoEnabled = true
            photoSettings.isAutoStillImageStabilizationEnabled = true
        } else {
            // Fallback on earlier versions.
        }
        frontCamera(frontCamera)
        if captureDevice != nil {
            beginSession()
            let music = Bundle.main.path(forResource: "CameraShutterSFX", ofType: "mp3")
            // copy this syntax, it tells the compiler what to do when action is received
            do {
                audioPlayer = try AVAudioPlayer(contentsOf: URL(fileURLWithPath: music!))
                try AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryAmbient)
                try AVAudioSession.sharedInstance().setActive(true)
            } catch {
                print("error playing sound")
            }
        }
    }

    func TakePhoto(_ sender: UIButton) {
        audioPlayer.play()
        audioPlayer.volume = 0.1
        if #available(iOS 10.0, *) {
            let photoSettings = AVCapturePhotoSettings()
            photoSettings.isHighResolutionPhotoEnabled = true
            photoSettings.isAutoStillImageStabilizationEnabled = true
        } else {
            // Fallback on earlier versions.
        }
    }

    func ActivateFlash(_ sender: UIButton) {
        let FlashValue = !sender.isSelected
        sender.isSelected = FlashValue
        if captureDevice!.hasTorch {
            do {
                try captureDevice!.lockForConfiguration()
                captureDevice!.torchMode = captureDevice!.isTorchActive ? AVCaptureTorchMode.off : AVCaptureTorchMode.on
                captureDevice!.unlockForConfiguration()
            } catch {
            }
        }
    }

    func DismissAction(_ sender: UIButton) {
        performSegue(withIdentifier: "Segue", sender: nil)
    }

    func SwitchCameraDirectionsButton(_ sender: Any) {
        // Switch Camera to Front:
        frontCamera = !frontCamera
        captureSession.beginConfiguration()
        let inputs = captureSession.inputs as! [AVCaptureInput]
        for oldInput: AVCaptureInput in inputs {
            captureSession.removeInput(oldInput)
        }
        frontCamera(frontCamera)
        captureSession.commitConfiguration()
    }
}
It doesn't take photos yet... but I would really love to maximize my camera performance and quality before I continue forward. I hope you understand - thank you for helping :)
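Two things in this code commonly cause a zoomed-in, lower-quality preview: the session preset is never set (so it stays at the default), and AVLayerVideoGravityResizeAspectFill crops the preview to fill the screen. A sketch of the usual adjustments to beginSession(), in the same Swift 3-era API as the question (output setup omitted):

func beginSession() {
    // A photo preset captures full-sensor stills instead of video-sized frames.
    if captureSession.canSetSessionPreset(AVCaptureSessionPresetPhoto) {
        captureSession.sessionPreset = AVCaptureSessionPresetPhoto
    }
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    self.view.layer.addSublayer(previewLayer!)
    previewLayer?.frame = self.view.layer.bounds
    // ResizeAspect letterboxes instead of cropping, so the preview
    // shows the full field of view rather than a "zoomed" crop.
    previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
    captureSession.startRunning()
}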

Double Tap or Use Button To Switch Camera From Back to Front (Xcode 8, Swift 3)

So, lately I have been trying to implement switching from the back to the front camera in Swift 3 - however, with no luck.
Currently, my default view is from the back camera - I can take pictures with it and then retake. But can anyone help me and show how to either double-tap the screen to switch cameras or simply use the assigned button to switch them? Thank you!
import UIKit
import AVFoundation
import FirebaseDatabase

class CameraView: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    override var prefersStatusBarHidden: Bool {
        return true
    }

    var captureSession: AVCaptureSession!
    var stillImageOutput: AVCaptureStillImageOutput!
    var previewLayer: AVCaptureVideoPreviewLayer!

    @IBOutlet var cameraView: UIView!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view.
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        previewLayer?.frame = cameraView.bounds
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)

        captureSession = AVCaptureSession()
        captureSession?.sessionPreset = AVCaptureSessionPreset1920x1080

        var backCamera = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
        var error: NSError?

        do {
            var input = try! AVCaptureDeviceInput(device: backCamera)
            if (error == nil && captureSession?.canAddInput(input) != nil) {
                captureSession?.addInput(input)

                stillImageOutput = AVCaptureStillImageOutput()
                stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]

                if (captureSession?.canAddOutput(stillImageOutput) != nil) {
                    captureSession?.addOutput(stillImageOutput)

                    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
                    previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
                    previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.portrait
                    cameraView.layer.addSublayer(previewLayer)
                    captureSession?.startRunning()
                }
            }
        } catch {
        }
    }

    @IBOutlet var tempImageView: UIImageView!

    @IBAction func didPressTakePhoto(_ sender: UIButton) {
        if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
            videoConnection.videoOrientation = AVCaptureVideoOrientation.portrait
            stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: {
                (sampleBuffer, error) in
                if sampleBuffer != nil {
                    var imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
                    var dataProvider = CGDataProvider.init(data: imageData as! CFData)
                    var cgImageRef = CGImage.init(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
                    var image = UIImage(cgImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.right)
                    self.tempImageView.image = image
                    self.tempImageView.isHidden = false
                }
            })
        }
    }

    var didTakePhoto = Bool()

    @IBAction func didPressTakeAnother(_ sender: UIButton) {
        if didTakePhoto == true {
            tempImageView.isHidden = true
            didTakePhoto = false
        } else {
            captureSession?.startRunning()
            didTakePhoto = true
        }
    }
}
I don't see any problems here - here is a working solution:
import Foundation
import UIKit
import AVFoundation

class MainViewController: UIViewController {

    var tempImage: UIImageView?
    var captureSession: AVCaptureSession?
    var stillImageOutput: AVCaptureStillImageOutput?
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?
    var currentCaptureDevice: AVCaptureDevice?
    var usingFrontCamera = false

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
        loadCamera()
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)
        videoPreviewLayer!.frame = self.cameraPreviewSurface.bounds
    }

    @IBAction func switchButtonAction(_ sender: Any) {
        usingFrontCamera = !usingFrontCamera
        loadCamera()
    }

    func getFrontCamera() -> AVCaptureDevice? {
        let videoDevices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo)
        for device in videoDevices! {
            let device = device as! AVCaptureDevice
            if device.position == AVCaptureDevicePosition.front {
                return device
            }
        }
        return nil
    }

    func getBackCamera() -> AVCaptureDevice {
        return AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
    }

    func loadCamera() {
        if captureSession == nil {
            captureSession = AVCaptureSession()
            captureSession!.sessionPreset = AVCaptureSessionPresetPhoto
        }

        var error: NSError?
        var input: AVCaptureDeviceInput!

        currentCaptureDevice = (usingFrontCamera ? getFrontCamera() : getBackCamera())

        do {
            input = try AVCaptureDeviceInput(device: currentCaptureDevice)
        } catch let error1 as NSError {
            error = error1
            input = nil
            print(error!.localizedDescription)
        }

        for i: AVCaptureDeviceInput in (self.captureSession?.inputs as! [AVCaptureDeviceInput]) {
            self.captureSession?.removeInput(i)
        }

        if error == nil && captureSession!.canAddInput(input) {
            captureSession!.addInput(input)

            stillImageOutput = AVCaptureStillImageOutput()
            stillImageOutput?.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]

            if captureSession!.canAddOutput(stillImageOutput) {
                captureSession!.addOutput(stillImageOutput)

                videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
                videoPreviewLayer!.videoGravity = AVLayerVideoGravityResizeAspectFill
                videoPreviewLayer!.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
                //self.cameraPreviewSurface.layer.sublayers?.forEach { $0.removeFromSuperlayer() }
                self.cameraPreviewSurface.layer.addSublayer(videoPreviewLayer!)
                DispatchQueue.main.async {
                    self.captureSession!.startRunning()
                }
            }
        }
    }
}
Some notes:
cameraPreviewSurface - this is your UIView where the camera preview will show.
Don't reassign the session, and don't just add the new input - remove the existing inputs before adding the new one.
P.S. Code done with Swift 3.0.1 / Xcode 8.1.
Cheers )
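For the double-tap part of the question, a UITapGestureRecognizer can drive the same action as the button; a sketch extending the viewDidLoad above:

override func viewDidLoad() {
    super.viewDidLoad()
    loadCamera()
    // A double-tap anywhere in the view toggles the camera,
    // reusing the same handler as the switch button.
    let doubleTap = UITapGestureRecognizer(target: self,
                                           action: #selector(switchButtonAction(_:)))
    doubleTap.numberOfTapsRequired = 2
    view.addGestureRecognizer(doubleTap)
}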
Xcode Version: 10.1 (10B61), Swift Version: 4.2.
Changing the AVCaptureSession capture source, based on Stepan Maksymov's solution - we can simplify it by working with captureSession.inputs directly.
First, create an IBAction outlet and connect it to the view controller to change the camera view:
@IBAction private func changeCamera(_ cameraButton: UIButton) {
    usingFrontCamera = !usingFrontCamera
    do {
        captureSession.removeInput(captureSession.inputs.first!)
        if usingFrontCamera {
            captureDevice = getFrontCamera()
        } else {
            captureDevice = getBackCamera()
        }
        let captureDeviceInput1 = try AVCaptureDeviceInput(device: captureDevice)
        captureSession.addInput(captureDeviceInput1)
    } catch {
        print(error.localizedDescription)
    }
}
Second, copy the simplified AVCaptureDevice setup (compare Stepan Maksymov's versions):
func getFrontCamera() -> AVCaptureDevice? {
    return AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .front).devices.first
}

func getBackCamera() -> AVCaptureDevice? {
    return AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices.first
}
You can replace the @IBAction with a plain function like so:
func changeCamera() {
    usingFrontCamera = !usingFrontCamera
    do {
        captureSession.removeInput(captureSession.inputs.first!)
        if usingFrontCamera {
            captureDevice = getFrontCamera()
        } else {
            captureDevice = getBackCamera()
        }
        let captureDeviceInput1 = try AVCaptureDeviceInput(device: captureDevice)
        captureSession.addInput(captureDeviceInput1)
    } catch {
        print(error.localizedDescription)
    }
}
In addition to Stepan Maksymov's post, I suggest adding this function:
func stopCaptureSession() {
    self.captureSession.stopRunning()
    if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
        for input in inputs {
            self.captureSession.removeInput(input)
        }
    }
}
And call it in place of these lines from his post:
for i: AVCaptureDeviceInput in (self.captureSession?.inputs as! [AVCaptureDeviceInput]) {
    self.captureSession?.removeInput(i)
}
This way the cameras will switch more quickly.
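As a further simplification, on iOS 10+ AVCaptureDevice.default(_:for:position:) does the same job as the discovery-session helpers in a single call:

func getFrontCamera() -> AVCaptureDevice? {
    return AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)
}

func getBackCamera() -> AVCaptureDevice? {
    return AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
}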

AVCaptureFileOutputRecordingDelegate not writing to file, Swift 2

My view controller contains a preview layer, which projects the live image from my camera. When pressing and holding a button, my code is supposed to record a video and write it to a temporary file locally. This worked well with Swift 1.2 and Xcode 6, but stopped working after I converted the code to Swift 2 when updating to Xcode 7.
When I let go of the button, the captureOutput doesn't get called, and there is no file written to the given path.
Some relevant code follows.
I would appreciate any help!
import UIKit
import MobileCoreServices
import AVFoundation
import AVKit

class ViewControllerPhoto: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate, UIPickerViewDelegate, UIGestureRecognizerDelegate, ACEDrawingViewDelegate, UITextViewDelegate, AVCaptureFileOutputRecordingDelegate, UITableViewDelegate, UITableViewDataSource {

    @IBOutlet weak var captureButton: UIButton!

    var videoCheck: Bool = false
    let captureSession = AVCaptureSession()
    var previewLayer: AVCaptureVideoPreviewLayer?
    var captureDevice: AVCaptureDevice?
    var movieFileOutput = AVCaptureMovieFileOutput()
    // Used in beginSession() but not declared in the original snippet.
    var stillImageOutput = AVCaptureStillImageOutput()
    var imageData: NSData!
    var outputPath: NSString!
    var outputURL: NSURL!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view, typically from a nib.
        if captureSession.canSetSessionPreset(AVCaptureSessionPresetMedium) {
            captureSession.sessionPreset = AVCaptureSessionPresetMedium
        }
        let devices = AVCaptureDevice.devices()
        // Loop through all the capture devices on this phone
        for device in devices {
            // Make sure this particular device supports video
            if device.hasMediaType(AVMediaTypeVideo) {
                // Finally check the position and confirm we've got the back camera
                if device.position == AVCaptureDevicePosition.Back {
                    captureDevice = device as? AVCaptureDevice
                    if captureDevice != nil {
                        print("Capture device found")
                        beginSession()
                    }
                }
            }
        }
        self.videoCheck = false
    }

    func beginSession() {
        stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
        if captureSession.canAddOutput(stillImageOutput) {
            captureSession.addOutput(stillImageOutput)
        }
        configureDevice()
        var err: NSError? = nil
        // captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err))
        do {
            try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
        } catch {
            print("error: \(err?.localizedDescription)")
        }
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.view.layer.addSublayer(previewLayer!)
        previewLayer?.frame = self.view.layer.frame
        previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill

        if captureSession.canAddOutput(movieFileOutput) {
            self.captureSession.addOutput(movieFileOutput)
        }

        // SET CONNECTION PROPERTIES
        var captureConnection: AVCaptureConnection = movieFileOutput.connectionWithMediaType(AVMediaTypeVideo)
        if captureConnection.supportsVideoOrientation {
            captureConnection.videoOrientation = AVCaptureVideoOrientation.Portrait
        }
        var audioDevice: AVCaptureDevice = AVCaptureDevice.devicesWithMediaType(AVMediaTypeAudio)[0] as! AVCaptureDevice
        do {
            let audioDeviceInput: AVCaptureDeviceInput = try AVCaptureDeviceInput(device: audioDevice)
            if captureSession.canAddInput(audioDeviceInput) {
                captureSession.addInput(audioDeviceInput)
            }
        } catch {
            print("error")
        }
        captureSession.startRunning()
    }

    func captureVideo() {
        outputPath = (NSURL(fileURLWithPath: NSTemporaryDirectory())).URLByAppendingPathComponent("movie.mov").absoluteString as NSString
        outputURL = NSURL(fileURLWithPath: outputPath as String)

        let fileManager: NSFileManager = NSFileManager.defaultManager()
        if outputURL.path != nil {
            if fileManager.fileExistsAtPath(outputURL.path!) {
                do {
                    try fileManager.removeItemAtPath(outputPath as String)
                } catch {
                    print(error)
                }
            }
        }
        self.movieFileOutput.startRecordingToOutputFileURL(outputURL, recordingDelegate: self)
    }

    func cameraWithPosition(position: AVCaptureDevicePosition) -> AVCaptureDevice {
        let devices: NSArray = AVCaptureDevice.devicesWithMediaType(AVMediaTypeVideo)
        for device in devices {
            if device.position == position {
                return device as! AVCaptureDevice
            }
        }
        return AVCaptureDevice()
    }

    @IBAction func captureButtonIsLongPressed(sender: UILongPressGestureRecognizer) {
        if sender.state == UIGestureRecognizerState.Began {
            videoCheck = true
            captureVideo()
        } else if sender.state == UIGestureRecognizerState.Ended {
            self.movieFileOutput.stopRecording()
        }
    }

    func captureOutput(captureOutput: AVCaptureFileOutput!, didFinishRecordingToOutputFileAtURL outputFileURL: NSURL!, fromConnections connections: [AnyObject]!, error: NSError!) {
        print("Output")
        playVideo()
    }

    func playVideo() {
        let path = outputPath
        let url = outputURL
        let player = AVPlayer(URL: url)
        let playerLayer = AVPlayerLayer(player: player)
        playerLayer.frame = self.view.bounds
        player.play()
    }
}
Figured it out after a lot of hassle. My mistake was in the line:
outputPath = (NSURL(fileURLWithPath: NSTemporaryDirectory())).URLByAppendingPathComponent("movie.mov").absoluteString as NSString
Of course this is a URL string (including the file:// scheme), not a file path, so the file checks and the recording destination never pointed at a real location. Changing to these lines fixed my problem:
outputURL = NSURL(fileURLWithPath: NSTemporaryDirectory(), isDirectory: true).URLByAppendingPathComponent("movie.mov")
outputPath = outputURL.path
Hope this helps anyone!
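For reference, the same temporary-file setup in current Swift, where URL makes the path/URL distinction harder to get wrong (a modernized sketch, not part of the original answer; movieFileOutput as above):

let outputURL = URL(fileURLWithPath: NSTemporaryDirectory(), isDirectory: true)
    .appendingPathComponent("movie.mov")
// Remove any leftover file from a previous recording before reusing the URL.
try? FileManager.default.removeItem(at: outputURL)
movieFileOutput.startRecording(to: outputURL, recordingDelegate: self)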
