I am working on a custom camera app, and the tutorial uses AVCaptureStillImageOutput, which is deprecated as of iOS 10. I have set up the camera and am now stuck on how to take the photo.
Here is my full view controller where I have the camera:
import UIKit
import AVFoundation
var cameraPos = "back"
class View3: UIViewController,UIImagePickerControllerDelegate,UINavigationControllerDelegate {
@IBOutlet weak var clickButton: UIButton!
@IBOutlet var cameraView: UIView!
var session: AVCaptureSession?
var stillImageOutput: AVCapturePhotoOutput?
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
override func viewDidLoad() {
super.viewDidLoad()
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
clickButton.center.x = cameraView.bounds.width/2
loadCamera()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
}
@IBAction func clickCapture(_ sender: UIButton) {
if let videoConnection = stillImageOutput!.connection(withMediaType: AVMediaTypeVideo) {
// This is where I need help
}
}
@IBAction func changeDevice(_ sender: UIButton) {
if cameraPos == "back"
{cameraPos = "front"}
else
{cameraPos = "back"}
loadCamera()
}
func loadCamera()
{
session?.stopRunning()
videoPreviewLayer?.removeFromSuperlayer()
session = AVCaptureSession()
session!.sessionPreset = AVCaptureSessionPresetPhoto
var backCamera = AVCaptureDevice.defaultDevice(withDeviceType: .builtInWideAngleCamera, mediaType: AVMediaTypeVideo, position: .front)
if cameraPos == "back"
{
backCamera = AVCaptureDevice.defaultDevice(withDeviceType: .builtInWideAngleCamera, mediaType: AVMediaTypeVideo, position: .back)
}
var error: NSError?
var input: AVCaptureDeviceInput!
do {
input = try AVCaptureDeviceInput(device: backCamera)
} catch let error1 as NSError {
error = error1
input = nil
print(error!.localizedDescription)
}
if error == nil && session!.canAddInput(input) {
session!.addInput(input)
stillImageOutput = AVCapturePhotoOutput()
if session!.canAddOutput(stillImageOutput) {
session!.addOutput(stillImageOutput)
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: session)
videoPreviewLayer?.frame = cameraView.bounds
videoPreviewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
videoPreviewLayer?.connection.videoOrientation = AVCaptureVideoOrientation.portrait
cameraView.layer.addSublayer(videoPreviewLayer!)
session!.startRunning()
} }
}
}
This is where I need help:
@IBAction func clickCapture(_ sender: UIButton) {
if let videoConnection = stillImageOutput!.connection(withMediaType: AVMediaTypeVideo) {
// This is where I need help
}
}
I have gone through the answer here: How to use AVCapturePhotoOutput. But I do not understand how to incorporate that code into this code, as it involves declaring a new class.
You are almost there.
For Output as AVCapturePhotoOutput
Check out AVCapturePhotoOutput documentation for more help.
These are the steps to capture a photo.
Create an AVCapturePhotoOutput object. Use its properties to determine supported capture settings and to enable certain features (for example, whether to capture Live Photos).
Create and configure an AVCapturePhotoSettings object to choose features and settings for a specific capture (for example, whether to enable image stabilization or flash).
Capture an image by passing your photo settings object to the capturePhoto(with:delegate:) method along with a delegate object implementing the AVCapturePhotoCaptureDelegate protocol. The photo capture output then calls your delegate to notify you of significant events during the capture process.
Have the code below in your clickCapture method, and don't forget to conform to and implement the delegate in your class.
let settings = AVCapturePhotoSettings()
let previewPixelType = settings.availablePreviewPhotoPixelFormatTypes.first!
let previewFormat = [kCVPixelBufferPixelFormatTypeKey as String: previewPixelType,
kCVPixelBufferWidthKey as String: 160,
kCVPixelBufferHeightKey as String: 160,
]
settings.previewPhotoFormat = previewFormat
self.cameraOutput.capturePhoto(with: settings, delegate: self)
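To receive the result, your class also needs to conform to AVCapturePhotoCaptureDelegate. Here is a minimal sketch of what that could look like on iOS 10 / Swift 3 for your View3 class (note that cameraOutput in the snippet above corresponds to your stillImageOutput property, and what you do with the resulting image is up to you):
extension View3: AVCapturePhotoCaptureDelegate {
    // iOS 10 / Swift 3 callback; iOS 11 adds an AVCapturePhoto-based variant
    func capture(_ captureOutput: AVCapturePhotoOutput,
                 didFinishProcessingPhotoSampleBuffer photoSampleBuffer: CMSampleBuffer?,
                 previewPhotoSampleBuffer: CMSampleBuffer?,
                 resolvedSettings: AVCaptureResolvedPhotoSettings,
                 bracketSettings: AVCaptureBracketedStillImageSettings?,
                 error: Error?) {
        guard error == nil, let sampleBuffer = photoSampleBuffer else {
            print("Photo capture failed: \(String(describing: error))")
            return
        }
        // Convert the JPEG sample buffer to Data, then to a UIImage
        if let imageData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewPhotoSampleBuffer),
            let image = UIImage(data: imageData) {
            // Use the image here, e.g. show it in an image view or save it
            print("Captured image with size \(image.size)")
        }
    }
}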
For Output as AVCaptureStillImageOutput
If you intend to snap a photo from the video connection, you can follow the steps below.
Step 1: Get the connection
if let videoConnection = stillImageOutput!.connection(withMediaType: AVMediaTypeVideo) {
// ...
// Code for photo capture goes here...
}
Step 2: Capture the photo
Call the captureStillImageAsynchronously(from:completionHandler:) function on the stillImageOutput.
The sampleBuffer represents the data that is captured.
stillImageOutput?.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (sampleBuffer, error) -> Void in
// ...
// Process the image data (sampleBuffer) here to get an image file we can put in our captureImageView
})
Step 3: Process the Image Data
We will need to take a few steps to process the image data found in sampleBuffer in order to end up with a UIImage that we can insert into our captureImageView and easily use elsewhere in our app.
if sampleBuffer != nil {
let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
let dataProvider = CGDataProvider(data: imageData! as CFData)
let cgImageRef = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
let image = UIImage(cgImage: cgImageRef!, scale: 1.0, orientation: .right)
// ...
// Add the image to captureImageView here...
}
Step 4: Save the image
Based on your needs, either save the image to the photo gallery or show it in an image view, for example:
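A rough sketch: captureImageView is the image view from the guide, and writing to the photo library requires the photo library usage description in Info.plist.
// Show the captured image in the image view
captureImageView.image = image
// Or save it to the photo library
UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)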
For more details, check out the Create custom camera view guide under Snap a Photo.
Related
I am using WebRTC to capture video from the user's camera. At some point I want to get the current picture as a UIImage to save it in the photo library. I am using localVideoView to show the video from the local camera, but when I try to take a screenshot of that view, it is empty (just a blue background).
This is my code to take the screenshot:
func screenShotMethod() {
DispatchQueue.main.async {
//Create the UIImage
UIGraphicsBeginImageContext(self.localVideoView!.frame.size)
self.localVideoView?.layer.render(in: UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
//Save it to the camera roll
UIImageWriteToSavedPhotosAlbum(image!, nil, nil, nil)
}
}
Here is sample code to capture a photo without a camera preview during a WebRTC video call. You have to invoke the TakePicture() method through a signal or a button tap.
import AVFoundation
import WebRTC
import UIKit
import Foundation
class CallViewController: UIViewController{
var captureSession : AVCaptureSession?
func TakePicture() {
DispatchQueue.main.async { [self] in
captureSession = AVCaptureSession()
captureSession!.beginConfiguration()
let photoOutput = AVCapturePhotoOutput()
photoOutput.isHighResolutionCaptureEnabled = true
photoOutput.isLivePhotoCaptureEnabled = false
if let captureDevice = AVCaptureDevice.default(for: .video){
do
{
let input = try AVCaptureDeviceInput(device: captureDevice)
if captureSession!.canAddInput(input){
captureSession!.addInput(input)
}
} catch {
print("Could not add the camera input: \(error)")
}
if captureSession!.canAddOutput(photoOutput){
captureSession!.addOutput(photoOutput)
}
let cameraLayer = AVCaptureVideoPreviewLayer()
cameraLayer.session = captureSession
captureSession!.commitConfiguration()
captureSession!.startRunning()
let photoSettings = AVCapturePhotoSettings()
//photoSettings.flashMode = .auto //check device properties before turning on flash
photoSettings.photoQualityPrioritization = .balanced
photoOutput.capturePhoto(with: photoSettings, delegate: self)
}
}
}
}
extension CallViewController: AVCapturePhotoCaptureDelegate{
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
captureSession!.stopRunning()
captureSession = nil
let imageData = photo.fileDataRepresentation()
//Do the rest with image bytes
}
}
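As a rough sketch of "the rest", assuming you just want a UIImage written to the photo library (which needs the photo library add-usage description in Info.plist), inside photoOutput(_:didFinishProcessingPhoto:error:):
if let imageData = photo.fileDataRepresentation(),
   let image = UIImage(data: imageData) {
    // Save the captured frame to the photo library
    UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
}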
I am making a camera view controller like the Snapchat camera. My code below works perfectly on iOS 11 or above. To be honest, I don't really grasp my code, since I just followed along with a tutorial for this Snapchat-like camera view controller.
import UIKit
import AVFoundation
import SVProgressHUD
class CameraVC: UIViewController {
@IBOutlet weak var timeLabel: UILabel!
@IBOutlet weak var dateLabel: UILabel!
@IBOutlet weak var cameraButton: DesignableButton!
@IBOutlet weak var retryButton: DesignableButton!
// to receive data from MainMenuVC
var employeeData : Employee?
var checkinData = CheckIn()
var captureSession = AVCaptureSession()
// which camera input do we want to use
var backCamera: AVCaptureDevice?
var frontCamera: AVCaptureDevice?
// to keep track which camera do we use currently
var currentDevice: AVCaptureDevice?
var photoOutput: AVCapturePhotoOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
var toggleCameraGestureRecognizer = UISwipeGestureRecognizer()
var zoomInGestureRecognizer = UISwipeGestureRecognizer()
var zoomOutGestureRecognizer = UISwipeGestureRecognizer()
var thereIsAnError : Bool = false {
didSet {
if thereIsAnError {
cameraButton.isHidden = true
cameraButton.isEnabled = false
retryButton.isHidden = false
retryButton.isEnabled = true
} else {
cameraButton.isHidden = false
cameraButton.isEnabled = true
retryButton.isHidden = true
retryButton.isEnabled = false
}
}
}
override func viewDidLoad() {
super.viewDidLoad()
getDateTimeFromServer()
// initial value
thereIsAnError = false
timeLabel.text = ""
dateLabel.text = ""
cameraButton.isEnabled = false
cameraButton.alpha = 0.4
setupCaptureSession()
setupDevice()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
setGestureRecognizer()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
if checkinData.dateTime != nil {
SVProgressHUD.dismiss()
}
}
@IBAction func shutterButtonDidPressed(_ sender: Any) {
// when the button is pressed, we capture the image and set the photoOutput
let settings = AVCapturePhotoSettings()
photoOutput?.capturePhoto(with: settings, delegate: self)
// perform segue is below in the AVCapturePhotoCaptureDelegate
}
@IBAction func retryButtonDidPressed(_ sender: Any) {
if checkinData.dateTime == nil {
getDateTimeFromServer()
}
}
}
extension CameraVC {
// MARK: - Helper Methods
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
if segue.identifier == "goToCheckinDetail" {
let checkinDetailTVC = segue.destination as! CheckinDetailVC
checkinDetailTVC.dataOfCheckin = checkinData
checkinDetailTVC.dataOfEmployee = employeeData
// to set the navbar back button title in the checkinDetailVC
navigationItem.backBarButtonItem = UIBarButtonItem(title: "", style: .plain, target: nil, action: nil)
}
}
func getDateTimeFromServer() {
SVProgressHUD.show(withStatus: "Loading Data")
NetworkingService.getCurrentTimeFromServer { (result) in
switch result {
case .failure:
self.thereIsAnError = true
SVProgressHUD.dismiss()
self.showAlert(alertTitle: "Sorry", alertMessage: "Internet connection issue, please tap the retry button.", actionTitle: "Back")
case .success(let timeFromServer) :
guard let stringDateTimeServer = timeFromServer as? String else {return}
self.checkinData.dateTime = stringDateTimeServer
let dateTimeService = DateTimeService(fromDateTimeString: stringDateTimeServer)
let time = dateTimeService.parsingDateAndTime()?.timeOnly
self.timeLabel.text = "\(time ?? "-")"
self.dateLabel.text = DateTimeService.changeFormat(of: stringDateTimeServer, toFormat: "dd MMM yyyy")
self.cameraButton.isEnabled = true
self.cameraButton.alpha = 1
self.thereIsAnError = false
SVProgressHUD.dismiss()
}
}
}
func setGestureRecognizer() {
// change camera from front to back
toggleCameraGestureRecognizer.direction = .up
toggleCameraGestureRecognizer.addTarget(self, action: #selector(self.switchCamera))
view.addGestureRecognizer(toggleCameraGestureRecognizer)
// Zoom In recognizer
zoomInGestureRecognizer.direction = .right
zoomInGestureRecognizer.addTarget(self, action: #selector(zoomIn))
view.addGestureRecognizer(zoomInGestureRecognizer)
// Zoom Out recognizer
zoomOutGestureRecognizer.direction = .left
zoomOutGestureRecognizer.addTarget(self, action: #selector(zoomOut))
view.addGestureRecognizer(zoomOutGestureRecognizer)
}
func setupCaptureSession() {
// to specify image resolution and quality we want, we set to the highest resolution possible
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}
func setupDevice() {
// to decide whether we use the front or back camera
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
let devices = deviceDiscoverySession.devices
for device in devices {
if device.position == AVCaptureDevice.Position.back {
backCamera = device
} else if device.position == AVCaptureDevice.Position.front {
frontCamera = device
}
}
// default device
currentDevice = frontCamera
}
func setupInputOutput() {
// after the camera capture that image (input), we generate the image DATA (output)
// put the input and output to capture Session
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentDevice!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
captureSession.addOutput(photoOutput!)
} catch {
print(error)
}
}
func setupPreviewLayer() {
// to display image data on the screen
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewLayer?.frame = self.view.frame
self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}
@objc func switchCamera() {
captureSession.beginConfiguration()
// Change the device based on the current camera
let newDevice = (currentDevice?.position == AVCaptureDevice.Position.back) ? frontCamera : backCamera
// Remove all inputs from the session
for input in captureSession.inputs {
captureSession.removeInput(input as! AVCaptureDeviceInput)
}
// Change to the new input
let cameraInput:AVCaptureDeviceInput
do {
cameraInput = try AVCaptureDeviceInput(device: newDevice!)
} catch {
print(error)
return
}
if captureSession.canAddInput(cameraInput) {
captureSession.addInput(cameraInput)
}
currentDevice = newDevice
captureSession.commitConfiguration()
}
@objc func zoomIn() {
if let zoomFactor = currentDevice?.videoZoomFactor {
if zoomFactor < 5.0 {
let newZoomFactor = min(zoomFactor + 1.0, 5.0)
do {
try currentDevice?.lockForConfiguration()
currentDevice?.ramp(toVideoZoomFactor: newZoomFactor, withRate: 1.0)
currentDevice?.unlockForConfiguration()
} catch {
print(error)
}
}
}
}
@objc func zoomOut() {
if let zoomFactor = currentDevice?.videoZoomFactor {
if zoomFactor > 1.0 {
let newZoomFactor = max(zoomFactor - 1.0, 1.0)
do {
try currentDevice?.lockForConfiguration()
currentDevice?.ramp(toVideoZoomFactor: newZoomFactor, withRate: 1.0)
currentDevice?.unlockForConfiguration()
} catch {
print(error)
}
}
}
}
func startRunningCaptureSession() {
// to start capturing the data
captureSession.startRunning()
}
}
extension CameraVC: AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
if let imageData = photo.fileDataRepresentation() {
checkinData.photo = UIImage(data: imageData)
performSegue(withIdentifier: "goToCheckinDetail", sender: nil)
}
}
}
but when I set my deployment target to iOS 10.3, I got errors saying that some methods are only available on iOS 11 or newer.
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
if let imageData = photo.fileDataRepresentation() {
checkinData.photo = UIImage(data: imageData)
performSegue(withIdentifier: "goToCheckinDetail", sender: nil)
}
}
'AVCapturePhoto' is only available on iOS 11.0 or newer
'fileDataRepresentation()' is only available on iOS 11.0 or newer
and
func setupInputOutput() {
// after the camera capture that image (input), we generate the image DATA (output)
// put the input and output to capture Session
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentDevice!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
captureSession.addOutput(photoOutput!)
} catch {
print(error)
}
}
'jpeg' is only available on iOS 11.0 or newer
Please help me, I need equivalent functions that work on iOS 10 (at least) or below.
Create an AVCapturePhotoOutput object. Use its properties to determine supported capture settings and to enable certain features (for example, whether to capture Live Photos).
fileprivate var photoOutput: AVCapturePhotoOutput!
Create and configure an AVCapturePhotoSettings object to choose
features and settings for a specific capture (for example, whether to enable image stabilization or flash).
photoOutput = AVCapturePhotoOutput()
if self.session.canAddOutput(photoOutput) {
self.session.addOutput(photoOutput)
}
Capture an image by passing your photo settings object to the
capturePhoto(with:delegate:) method along with a delegate object implementing the AVCapturePhotoCaptureDelegate protocol. The photo capture output then calls your delegate to notify you of significant events during the capture process.
queue.async { self.photoOutput.capturePhoto(with: AVCapturePhotoSettings(), delegate: self) }
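If you still need a 10.3 deployment target, one possible approach (a sketch against the CameraVC code above, not a drop-in replacement; the method names below are the Swift 4 / iOS 11 SDK renderings, so double-check them against your SDK) is to branch on availability: keep the AVCapturePhoto-based delegate method for iOS 11+, add the older CMSampleBuffer-based callback for iOS 10, and fall back to the pre-iOS 11 AVVideoCodecJPEG string constant instead of AVVideoCodecType.jpeg.
func setupInputOutput() {
    do {
        let captureDeviceInput = try AVCaptureDeviceInput(device: currentDevice!)
        captureSession.addInput(captureDeviceInput)
        photoOutput = AVCapturePhotoOutput()
        if #available(iOS 11.0, *) {
            photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
        } else {
            // AVVideoCodecJPEG is the pre-iOS 11 string constant (deprecated on iOS 11, but still available)
            photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecJPEG])], completionHandler: nil)
        }
        captureSession.addOutput(photoOutput!)
    } catch {
        print(error)
    }
}

extension CameraVC: AVCapturePhotoCaptureDelegate {

    // iOS 11 and newer
    @available(iOS 11.0, *)
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation() {
            checkinData.photo = UIImage(data: imageData)
            performSegue(withIdentifier: "goToCheckinDetail", sender: nil)
        }
    }

    // iOS 10 fallback: the sample-buffer based callback (deprecated in iOS 11)
    func photoOutput(_ output: AVCapturePhotoOutput,
                     didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?,
                     previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?,
                     resolvedSettings: AVCaptureResolvedPhotoSettings,
                     bracketSettings: AVCaptureBracketedStillImageSettings?,
                     error: Error?) {
        guard let sampleBuffer = photoSampleBuffer,
              let imageData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewPhotoSampleBuffer) else { return }
        checkinData.photo = UIImage(data: imageData)
        performSegue(withIdentifier: "goToCheckinDetail", sender: nil)
    }
}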
I am trying to figure out how to record a video using AVFoundation in Swift. I have got as far as creating a custom camera but I only figured out how to take still pictures with it and I can't figure out how to record video. Hope you can help me figure this one out.
I want to hold the takePhotoButton to record the video and then it will be previewed where I preview my current still photos. Your help will really help me continuing my project. Thanks a lot!
import UIKit
import AVFoundation
@available(iOS 10.0, *)
class CameraViewController: UIViewController,AVCaptureVideoDataOutputSampleBufferDelegate {
let photoSettings = AVCapturePhotoSettings()
var audioPlayer = AVAudioPlayer()
var captureSession = AVCaptureSession()
var videoDeviceInput: AVCaptureDeviceInput!
var previewLayer = AVCaptureVideoPreviewLayer()
var frontCamera: Bool = false
var captureDevice:AVCaptureDevice!
var takePhoto = false
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
prepareCamera()
}
func prepareCamera() {
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
if let availableDevices = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .back).devices {
captureDevice = availableDevices.first
beginSession()
}
}
func frontCamera(_ front: Bool){
let devices = AVCaptureDevice.devices()
do{
try captureSession.removeInput(AVCaptureDeviceInput(device:captureDevice!))
}catch{
print("Error")
}
for device in devices!{
if((device as AnyObject).hasMediaType(AVMediaTypeVideo)){
if front{
if (device as AnyObject).position == AVCaptureDevicePosition.front {
captureDevice = device as? AVCaptureDevice
do{
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice!))
}catch{}
break
}
}else{
if (device as AnyObject).position == AVCaptureDevicePosition.back {
captureDevice = device as? AVCaptureDevice
do{
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice!))
}catch{}
break
}
}
}
}
}
func beginSession () {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
self.previewLayer = previewLayer
containerView.layer.addSublayer(previewLayer as? CALayer ?? CALayer())
self.previewLayer.frame = self.view.layer.frame
self.previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.portrait
captureSession.startRunning()
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString):NSNumber(value:kCVPixelFormatType_32BGRA)]
dataOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(dataOutput) {
captureSession.addOutput(dataOutput)
photoSettings.isHighResolutionPhotoEnabled = true
photoSettings.isAutoStillImageStabilizationEnabled = true
}
captureSession.commitConfiguration()
let queue = DispatchQueue(label: "com.NightOut.captureQueue")
dataOutput.setSampleBufferDelegate(self, queue: queue)
}
}
@IBAction func takePhoto(_ sender: Any) {
takePhoto = true
photoSettings.isHighResolutionPhotoEnabled = true
photoSettings.isAutoStillImageStabilizationEnabled = true
}
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
if takePhoto {
takePhoto = false
if let image = self.getImageFromSampleBuffer(buffer: sampleBuffer) {
let photoVC = UIStoryboard(name: "Main", bundle: nil).instantiateViewController(withIdentifier: "PhotoVC") as! PhotoPreviewViewController
photoVC.takenPhoto = image
DispatchQueue.main.async {
self.present(photoVC, animated: true, completion: {
self.stopCaptureSession()
})
}
}
}
}
func getImageFromSampleBuffer (buffer:CMSampleBuffer) -> UIImage? {
if let pixelBuffer = CMSampleBufferGetImageBuffer(buffer) {
let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
let context = CIContext()
let imageRect = CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer))
if let image = context.createCGImage(ciImage, from: imageRect) {
return UIImage(cgImage: image, scale: UIScreen.main.scale, orientation: .leftMirrored)
}
}
return nil
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
self.captureSession.stopRunning()
}
func stopCaptureSession () {
self.captureSession.stopRunning()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
self.captureSession.removeInput(input)
}
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
@IBAction func DismissButtonAction(_ sender: UIButton) {
UIView.animate(withDuration: 0.1, animations: {
self.DismissButton.transform = CGAffineTransform.identity.scaledBy(x: 0.8, y: 0.8)
}, completion: { (finish) in
UIView.animate(withDuration: 0.1, animations: {
self.DismissButton.transform = CGAffineTransform.identity
})
})
performSegue(withIdentifier: "Segue", sender: nil)
}
}
Identifying when the button is held down and released can be done in different ways. The easiest way would be to add targets for the UIControlEvents.touchUpInside and UIControlEvents.touchDown control events on the capture button, like below.
aButton.addTarget(self, action: #selector(holdRelease(_:)), for: .touchUpInside)
aButton.addTarget(self, action: #selector(holdDown(_:)), for: .touchDown)
// target functions
@objc func holdDown(_ sender: UIButton)
{
// Start recording the video
}
@objc func holdRelease(_ sender: UIButton)
{
// Stop recording the video
}
There are other ways as well, like adding a long-press gesture recognizer to the button or view and starting/stopping based on the recognizer state; see the sketch below. More info can be found in another SO answer: UIButton with hold down action and release action.
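For example, a rough sketch with a long-press recognizer (recordButton, startRecording() and stopRecording() are placeholders for your own button and recording code):
// e.g. in viewDidLoad
let longPress = UILongPressGestureRecognizer(target: self, action: #selector(handleLongPress(_:)))
recordButton.addGestureRecognizer(longPress)

@objc func handleLongPress(_ recognizer: UILongPressGestureRecognizer) {
    switch recognizer.state {
    case .began:
        startRecording()   // start capturing when the press begins
    case .ended, .cancelled, .failed:
        stopRecording()    // stop when the finger lifts or the gesture is interrupted
    default:
        break
    }
}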
Video Recording
You need to add an AVCaptureMovieFileOutput to your capture session and use the startRecording(toOutputFileURL:recordingDelegate:) method to start the video recording.
Things to notice:
Implement the AVCaptureFileOutputRecordingDelegate methods to identify when recording starts and finishes.
The file path should be meaningful, which means you should give a correct file path that your app has access to.
Have the code below inside the holdDown() method to start recording:
let videoFileOutput = AVCaptureMovieFileOutput()
self.captureSession?.addOutput(videoFileOutput)
let documentsURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0] as URL
let filePath = documentsURL.appendingPathComponent("tempMovie")
videoFileOutput.startRecording(toOutputFileURL: filePath, recordingDelegate: self)
To stop recording, use videoFileOutput.stopRecording().
You need to use AVCaptureMovieFileOutput. Add AVCaptureMovieFileOutput to a capture session using addOutput(_:)
Starting a Recording
You start recording a QuickTime movie using
startRecording(to:recordingDelegate:). You need to supply a
file-based URL and a delegate. The URL must not identify an existing
file, because the movie file output does not overwrite existing
resources. You must also have permission to write to the specified
location. The delegate must conform to the
AVCaptureFileOutputRecordingDelegate protocol, and must implement the
fileOutput(_:didFinishRecordingTo:from:error:)
method.
See docs for more info.
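Here is a minimal sketch of that delegate conformance, using the method names from the documentation quoted above (CameraViewController is the class from the question):
@available(iOS 10.0, *)
extension CameraViewController: AVCaptureFileOutputRecordingDelegate {

    // Optional: called when recording actually starts
    func fileOutput(_ output: AVCaptureFileOutput,
                    didStartRecordingTo fileURL: URL,
                    from connections: [AVCaptureConnection]) {
        print("Started recording to \(fileURL)")
    }

    // Required: called when the movie file has been written out
    func fileOutput(_ output: AVCaptureFileOutput,
                    didFinishRecordingTo outputFileURL: URL,
                    from connections: [AVCaptureConnection],
                    error: Error?) {
        if let error = error {
            print("Recording finished with error: \(error)")
            return
        }
        // Use the movie at outputFileURL here, e.g. preview it or save it to the photo library
    }
}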
I'm trying to integrate a custom camera view, following some slightly outdated code while doing so. I've had several errors, but I believe I've fixed them all bar two.
Here is the current code so far:
import Foundation
import AVFoundation
import UIKit
class setupView : UIViewController {
@IBOutlet var cameraView: UIView!
@IBOutlet var nameTextField: UITextField!
var captureSession = AVCaptureSession()
var stillImageOutput = AVCapturePhotoOutput()
var previewLayer = AVCaptureVideoPreviewLayer()
override func viewDidLoad() {
let session = AVCaptureDeviceDiscoverySession.init(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .back)
if let device = session?.devices[0] {
if device.position == AVCaptureDevicePosition.back {
do {
let input = try AVCaptureDeviceInput(device: device )
if captureSession.canAddInput(input){
captureSession.addInput(input)
stillImageOutput.outputSettings = [AVVideoCodecKey : AVVideoCodecJPEG]
if captureSession.canAddOutput(stillImageOutput) {
captureSession.addOutput(stillImageOutput)
captureSession.startRunning()
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.AVLayerVideoGravityResizeAspectFill
previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.portrait
cameraView.layer.addSublayer(previewLayer)
previewLayer.bounds = cameraView.frame
previewLayer.position = CGPoint(x: cameraView.frame.width / 2, y:cameraView.frame.height / 2)
}
}
} catch {
}
}
}
}
@IBAction func takePhoto(_ sender: Any) {
}
@IBAction func submitAction(_ sender: Any) {
}
}
I'm currently getting 2 errors:
"Value of type AVCapturePhotoOutput" has no member "outputSettings"
"Value of type "AVCaptureVideoPreviewLayer" has no member
"AVLayerVideoGravityResizeAspectFill"
You are almost there. The problem is that some of the AVFoundation classes are deprecated and there is more than one way to take a photo now. Here are the issues with your code.
"Value of type AVCapturePhotoOutput" has no member "outputSettings"
This is because AVCapturePhotoOutput doesn't have any member named outputSettings. Check out the full documentation of AVCapturePhotoOutput.
Actually, outputSettings is a member of AVCaptureStillImageOutput, which is deprecated as of iOS 10.0.
"Value of type "AVCaptureVideoPreviewLayer" has no member
"AVLayerVideoGravityResizeAspectFill"
Again the same kind of mistake: AVCaptureVideoPreviewLayer has no such member. If you want to set the video gravity of the preview layer, set it like below.
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
As you mentioned, the code is outdated and it is using the deprecated AVCaptureStillImageOutput.
If you really want to use AVCapturePhotoOutput, then you should follow the steps below.
These are the steps to capture a photo.
Create an AVCapturePhotoOutput object. Use its properties to determine supported capture settings and to enable certain features (for example, whether to capture Live Photos).
Create and configure an AVCapturePhotoSettings object to choose features and settings for a specific capture (for example, whether to enable image stabilization or flash).
Capture an image by passing your photo settings object to the capturePhoto(with:delegate:) method along with a delegate object implementing the AVCapturePhotoCaptureDelegate protocol. The photo capture output then calls your delegate to notify you of significant events during the capture process.
Have the code below in your takePhoto action, and don't forget to conform to and implement the delegate in your class.
let settings = AVCapturePhotoSettings()
let previewPixelType = settings.availablePreviewPhotoPixelFormatTypes.first!
let previewFormat = [kCVPixelBufferPixelFormatTypeKey as String: previewPixelType,
kCVPixelBufferWidthKey as String: 160,
kCVPixelBufferHeightKey as String: 160,
]
settings.previewPhotoFormat = previewFormat
self.cameraOutput.capturePhoto(with: settings, delegate: self)
If you would like to know a different way of capturing a photo with AVFoundation, check out my previous SO answer.
Also, the Apple documentation explains very clearly how to use AVCapturePhotoOutput.
import AVFoundation
import Foundation
@IBOutlet weak var mainimage: UIImageView!
let captureSession = AVCaptureSession()
let stillImageOutput = AVCaptureStillImageOutput()
var previewLayer : AVCaptureVideoPreviewLayer?
var captureDevice : AVCaptureDevice?
override func viewDidLoad() {
super.viewDidLoad()
captureSession.sessionPreset = AVCaptureSessionPresetHigh
if let devices = AVCaptureDevice.devices() as? [AVCaptureDevice] {
// Loop through all the capture devices on this phone
for device in devices {
// Make sure this particular device supports video
if (device.hasMediaType(AVMediaTypeVideo)) {
// Finally check the position and confirm we've got the back camera
if(device.position == AVCaptureDevicePosition.front) {
captureDevice = device
if captureDevice != nil {
print("Capture device found")
beginSession()
}
}
}
}
}
}
func beginSession() {
do {
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
stillImageOutput.outputSettings = [AVVideoCodecKey:AVVideoCodecJPEG]
if captureSession.canAddOutput(stillImageOutput) {
captureSession.addOutput(stillImageOutput)
}
}
catch {
print("error: \(error.localizedDescription)")
}
guard let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) else {
print("no preview layer")
return
}
self.view.layer.addSublayer(previewLayer)
previewLayer.frame = self.view.layer.frame
captureSession.startRunning()
self.view.addSubview(mainimage)
}
This code is working in my app
I have an application that uses RSBarcodes_Swift to scan for barcodes.
When the scan is successful I need to segue to a camera view controller to take pictures. The problem I have is that after the barcode scanner, AVFoundation touch to focus stops working. What I mean by this is that whenever I touch to focus, the camera just briefly tries to focus and then resets to the default lens position. When I don't use the barcode scanner and go directly to the camera, everything works fine. I have also tried a couple of other barcode scanners, but the result is the same. Is there any way that I can somehow reset the camera usage, or dispose of the barcode scanner when I'm done with it?
This is the code I use to present the camera and have touch-to-focus capability:
import Foundation
import AVFoundation
public class CameraViewController : UIViewController {
var backCamera: AVCaptureDevice?
var captureSession: AVCaptureSession?
var stillImageOutput: AVCaptureStillImageOutput?
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var previewView: UIView?
public override func viewWillAppear(animated: Bool) {
super.viewWillAppear(animated)
previewView = UIView()
previewView!.frame = UIScreen.mainScreen().bounds
let shortTap = UITapGestureRecognizer(target: self, action: #selector(shortTapRecognize))
shortTap.numberOfTapsRequired = 1
shortTap.numberOfTouchesRequired = 1
previewView!.addGestureRecognizer(shortTap)
self.view.addSubview(previewView!)
captureSession = AVCaptureSession()
captureSession!.sessionPreset = AVCaptureSessionPresetPhoto
backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
var error: NSError?
var input: AVCaptureDeviceInput!
do {
input = try AVCaptureDeviceInput(device: backCamera!)
} catch let error1 as NSError{
error = error1
input = nil
print(error!.localizedDescription)
}
if error == nil && captureSession!.canAddInput(input){
captureSession!.addInput(input)
stillImageOutput = AVCaptureStillImageOutput()
stillImageOutput?.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
if captureSession!.canAddOutput(stillImageOutput){
captureSession!.addOutput(stillImageOutput)
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer!.frame = previewView!.bounds
videoPreviewLayer!.videoGravity = AVLayerVideoGravityResizeAspect
videoPreviewLayer!.connection?.videoOrientation = AVCaptureVideoOrientation.LandscapeRight
previewView!.layer.addSublayer(videoPreviewLayer!)
captureSession!.startRunning()
}
}
}
public override func viewDidAppear(animated: Bool) {
super.viewDidAppear(animated)
}
public override func viewDidLoad() {
super.viewDidLoad()
}
public override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
func shortTapRecognize(tap: UITapGestureRecognizer){
if tap.state == UIGestureRecognizerState.Ended {
let pointInPreview = tap.locationInView(tap.view)
let pointInCamera = videoPreviewLayer!.captureDevicePointOfInterestForPoint(pointInPreview)
if backCamera!.focusPointOfInterestSupported{
do {
try backCamera!.lockForConfiguration()
} catch let error as NSError{
print(error.localizedDescription)
}
backCamera!.focusPointOfInterest = pointInCamera
backCamera!.focusMode = .AutoFocus
backCamera!.unlockForConfiguration()
}
}
}
}