I'm creating a custom view for the camera, which works fine. However, I'm now working on switching from the back camera to the front camera, and at the moment I do it as shown below. This creates a bad user experience: the preview layer is removed (the screen goes white for about a second) and only then is the front camera shown correctly. Is there a way to create a better user experience, without everything going white for a second before the new session appears?
switchCamera
func switchCamera() {
    if usingbackCamera == true {
        endSession()
        beginSession(frontCamera!)
        usingbackCamera = false
        self.cameraView.bringSubviewToFront(actionView)
    } else {
        endSession()
        beginSession(backCamera!)
        usingbackCamera = true
        self.cameraView.bringSubviewToFront(actionView)
    }
}
beginSession
func beginSession(device: AVCaptureDevice) {
    do {
        captureSession.addInput(try AVCaptureDeviceInput(device: device))
        self.previewLayer?.removeFromSuperlayer()
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.cameraView.layer.addSublayer(previewLayer!)
        previewLayer?.frame = self.cameraView.bounds
        captureSession.startRunning()
        stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
        if captureSession.canAddOutput(stillImageOutput) {
            captureSession.addOutput(stillImageOutput)
        }
        if captureSession.canAddOutput(videoOutput) {
            captureSession.addOutput(videoOutput)
        }
    } catch let err as NSError {
        print(err)
    }
}
endSession
func endSession() {
    self.previewLayer?.removeFromSuperlayer()
    captureSession.stopRunning()
    captureSession = AVCaptureSession()
}
Take Picture
func takePicture() {
    if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo) {
        videoConnection.videoOrientation = AVCaptureVideoOrientation.Portrait
        stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: {
            (sampleBuffer, error) in
            let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
            let dataProvider = CGDataProviderCreateWithCFData(imageData)
            let cgImageRef = CGImageCreateWithJPEGDataProvider(dataProvider, nil, true, CGColorRenderingIntent.RenderingIntentDefault)
            let image = UIImage(CGImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.Right)
            self.previewImageView.image = image
            self.previewImageView.hidden = false
            self.cameraView.bringSubviewToFront(self.previewImageView)
        })
    }
}
You don't need to stop the capture session and start it again when switching from the back camera to the front camera and vice versa.
All you need to do is remove the old capture session inputs and add the new one, all of that between a beginConfiguration()/commitConfiguration() block.
Here is a rough example:
func switchCamera() {
    // Begin configuration changes.
    captureSession.beginConfiguration()

    // Remove the previous inputs.
    let inputs = captureSession.inputs as! [AVCaptureInput]
    for oldInput: AVCaptureInput in inputs {
        captureSession.removeInput(oldInput)
    }

    // Add the new input.
    if usingbackCamera == true {
        addInput(frontCamera!)
        usingbackCamera = false
        self.cameraView.bringSubviewToFront(actionView)
    } else {
        addInput(backCamera!)
        usingbackCamera = true
        self.cameraView.bringSubviewToFront(actionView)
    }

    // End the configuration.
    captureSession.commitConfiguration()
}
func addInput(device: AVCaptureDevice) {
    do {
        captureSession.addInput(try AVCaptureDeviceInput(device: device))
    } catch let err as NSError {
        print(err)
    }
}
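If keeping the usingbackCamera flag in sync feels fragile, a variation (just a sketch, reusing the question's frontCamera/backCamera properties) is to derive the next device from the position of the input currently attached to the session:
// Sketch: pick the next camera based on what is currently attached.
// `frontCamera` and `backCamera` are the optional AVCaptureDevice
// properties from the question.
func nextCaptureDevice() -> AVCaptureDevice? {
    guard let currentInput = captureSession.inputs.first as? AVCaptureDeviceInput else {
        return backCamera
    }
    return currentInput.device.position == .Back ? frontCamera : backCamera
}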
I don't think you have to remove the preview layer when changing the input device.
The layer is bound to the session; all you have to do is stop the session, remove the original input, add the new input, and then start the session again.
I build my capture view with custom rendering, but the process should be the same.
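A minimal sketch of that flow, reusing the captureSession, frontCamera, backCamera and usingbackCamera properties from the question (so the existing previewLayer is left untouched), might look like this:
// Sketch only: swap the camera input while keeping the existing preview layer.
func swapInput() {
    captureSession.stopRunning()
    // Remove whatever input is currently attached.
    for input in captureSession.inputs as! [AVCaptureInput] {
        captureSession.removeInput(input)
    }
    // Attach the other camera.
    let newDevice = usingbackCamera ? frontCamera! : backCamera!
    do {
        captureSession.addInput(try AVCaptureDeviceInput(device: newDevice))
    } catch let err as NSError {
        print(err)
    }
    usingbackCamera = !usingbackCamera
    captureSession.startRunning()
}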
The capture snippet:
for output in session.outputs {
    if let capture = output as? AVCaptureStillImageOutput {
        for connection in (capture.connections as! [AVCaptureConnection]) {
            for port in (connection.inputPorts as! [AVCaptureInputPort]) {
                if port.mediaType == AVMediaTypeVideo {
                    capture.captureStillImageAsynchronouslyFromConnection(connection, completionHandler: { (buffer, err) -> Void in
                        if err != nil {
                            print(err)
                            completion(nil)
                            return
                        }
                        let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(buffer)
                        // Build a CIImage from the JPEG data; bail out if that fails.
                        guard let image = CIImage(data: imageData) else {
                            completion(nil)
                            return
                        }
                        // Rotate the image to compensate for the sensor orientation.
                        let rotatedImage = image.imageByApplyingTransform(CGAffineTransformMakeRotation(-CGFloat(M_PI_2)))
                        // `completion` is the enclosing method's (CIImage?) -> Void callback.
                        completion(rotatedImage)
                    })
                }
            }
        }
    }
}
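If the resulting CIImage should end up in the question's previewImageView, a small helper along these lines could consume it (previewImageView and cameraView are the question's properties; the helper name is made up):
// Sketch: display a captured CIImage in the question's UIImageView.
func showCapturedImage(ciImage: CIImage) {
    let picture = UIImage(CIImage: ciImage)
    previewImageView.image = picture
    previewImageView.hidden = false
    cameraView.bringSubviewToFront(previewImageView)
}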
Related
I'm trying to understand what I'm doing wrong in my project.
I'm trying to draw a box over a detected face using the Vision framework.
I first set up the back camera with the following method.
func configureSession() {
    // Check whether camera permission was granted, otherwise return.
    if setupResult != .success { return }
    var defaultVideoDevice: AVCaptureDevice?
    session.beginConfiguration() // so the configuration can be set
    session.sessionPreset = .vga640x480 // Model image size is smaller.
    do {
        // Pick the best device to use as input.
        if let dualCameraDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back) {
            print("select input tripleCamera")
            defaultVideoDevice = dualCameraDevice
        }
        guard let defaultVideoDevice = defaultVideoDevice else {
            print("Error: cannot find any camera in configureSession")
            return
        }
        let videoDeviceInput = try AVCaptureDeviceInput(device: defaultVideoDevice)
        // Add the input to the session.
        if session.canAddInput(videoDeviceInput) {
            session.addInput(videoDeviceInput)
            self.videoDeviceInput = videoDeviceInput
        } else {
            print("Could not add video device input to the session")
            setupResult = .configurationFailed
            session.commitConfiguration()
            return
        } // end of adding input
    } catch let error {
        print("Could not set input device on the session, error: \(error.localizedDescription)")
        setupResult = .configurationFailed
        session.commitConfiguration()
        return
    }
    // ----- Add output
    if session.canAddOutput(videoDataOutput) {
        session.addOutput(videoDataOutput)
        // Add a video data output
        videoDataOutput.alwaysDiscardsLateVideoFrames = true
        videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)]
        videoDataOutput.setSampleBufferDelegate(self, queue: sessionQueue)
    } else {
        print("Could not add video data output to the session")
        session.commitConfiguration()
        return
    }
    guard let captureConnection = videoDataOutput.connection(with: .video) else { return }
    captureConnection.videoOrientation = .portrait // <- DO I NEED TO CHANGE THIS?? ----------
    captureConnection.isEnabled = true
    if captureConnection.isVideoOrientationSupported {
        print("capture connection orient \(captureConnection.videoOrientation.rawValue) / 3 landscape right")
    }
    // Get the buffer size.
    do {
        try defaultVideoDevice!.lockForConfiguration()
        let dimensions = CMVideoFormatDescriptionGetDimensions((defaultVideoDevice?.activeFormat.formatDescription)!)
        bufferSize.width = CGFloat(dimensions.width)
        bufferSize.height = CGFloat(dimensions.height)
        defaultVideoDevice!.unlockForConfiguration()
    } catch {
        print("// get the buffer size ERROR \(error.localizedDescription)")
    }
    let tapGesture = UITapGestureRecognizer(target: self, action: #selector(tapAction))
    cameraView.addGestureRecognizer(tapGesture)
    // Set up the view to show.
    cameraView.videoPreviewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
    session.commitConfiguration()
    cameraView.session = session
    rootLayer = cameraView.videoPreviewLayer
    guard let conn = self.cameraView.videoPreviewLayer.connection else { return }
    print("cameraView conn video orient \(conn.videoOrientation.rawValue)")
}
First question:
How do I need to set captureConnection.videoOrientation? I can't understand how this needs to be set.
My idea is to use the phone in both portrait and landscape.
Second question:
When I use Vision, how do I need to set the orientation in the handler?
I tried to use a method from an Apple example, exifOrientationFromDeviceOrientation(),
but it is completely wrong in my case.
It only works correctly if I set the orientation to leftMirrored.
But why leftMirrored, since I'm using the back camera as input? All the other settings give me the wrong box position.
var faceLayersArray: [CAShapeLayer] = []

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    self.sessionQueue.async {
        let faceRequest = VNDetectFaceLandmarksRequest { req, err in
            DispatchQueue.main.async {
                self.faceLayersArray.forEach { layer in
                    layer.removeFromSuperlayer()
                }
                if let result = req.results as? [VNFaceObservation], result.count > 0 {
                    self.handleFace(observation: result)
                } else {
                }
            }
        }
        let exifOrientation = self.exifOrientationFromDeviceOrientation()
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            return
        }
        let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: exifOrientation, options: [:])
        do {
            try imageRequestHandler.perform([faceRequest])
        } catch {
            print("Error sequence handler \(error)")
        }
    }
}

func handleFace(observation: [VNFaceObservation]) {
    for observation in observation {
        let boundBoxFace = observation.boundingBox
        let faceRectConverted = self.cameraView.videoPreviewLayer.layerRectConverted(fromMetadataOutputRect: boundBoxFace)
        let faceRectPath = CGPath(rect: faceRectConverted, transform: nil)
        let faceLayer = CAShapeLayer()
        faceLayer.path = faceRectPath
        faceLayer.fillColor = UIColor.clear.cgColor
        faceLayer.strokeColor = UIColor.yellow.cgColor
        faceLayersArray.append(faceLayer)
        self.cameraView.videoPreviewLayer.addSublayer(faceLayer)
    }
}
// from Apple
public func exifOrientationFromDeviceOrientation() -> CGImagePropertyOrientation {
    let curDeviceOrientation = UIDevice.current.orientation
    let exifOrientation: CGImagePropertyOrientation
    switch curDeviceOrientation {
    case UIDeviceOrientation.portraitUpsideDown: // Device oriented vertically, home button on the top
        exifOrientation = .left
    case UIDeviceOrientation.landscapeLeft: // Device oriented horizontally, home button on the right
        exifOrientation = .upMirrored
    case UIDeviceOrientation.landscapeRight: // Device oriented horizontally, home button on the left
        exifOrientation = .down
    case UIDeviceOrientation.portrait: // Device oriented vertically, home button on the bottom
        exifOrientation = .up
    default:
        exifOrientation = .up
    }
    return exifOrientation
}
I am trying to record depth data from the TrueDepth camera along with a photo. But when calling
AVCapturePhotoOutput.capturePhoto(with:delegate:)
I get an exception stating:
No active and enabled video connection
I configure the camera and outputs like so (basically following Apple's guides on photo capture and capturing depth):
func configurePhotoOutput() throws {
    self.captureSession = AVCaptureSession()
    guard self.captureSession != nil else {
        return
    }

    // Select a depth-capable capture device.
    guard let videoDevice = AVCaptureDevice.default(.builtInTrueDepthCamera,
                                                    for: .video, position: .unspecified)
        else { fatalError("No dual camera.") }

    // Select a depth (not disparity) format that works with the active color format.
    let availableFormats = videoDevice.activeFormat.supportedDepthDataFormats
    let depthFormat = availableFormats.first(where: { format in
        let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
        return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
                pixelFormatType == kCVPixelFormatType_DepthFloat32)
    })
    do {
        try videoDevice.lockForConfiguration()
        videoDevice.activeDepthDataFormat = depthFormat
        videoDevice.unlockForConfiguration()
    } catch {
        print("Could not lock device for configuration: \(error)")
        return
    }

    self.captureSession!.beginConfiguration()

    // Add video input.
    guard let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
        self.captureSession!.canAddInput(videoDeviceInput)
        else { fatalError("Can't add video input.") }
    self.captureSession!.addInput(videoDeviceInput)

    // Add video output.
    if self.captureSession!.canAddOutput(videoOutput) {
        self.captureSession!.addOutput(videoOutput)
        videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
    } else { fatalError("Can't add video output.") }

    // Set up photo output for depth data capture.
    let photoOutput = AVCapturePhotoOutput()
    photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
    guard self.captureSession!.canAddOutput(photoOutput)
        else { fatalError("Can't add photo output.") }
    self.captureSession!.addOutput(photoOutput)
    self.captureSession!.sessionPreset = .photo
    self.captureSession!.commitConfiguration()

    self.captureSession!.startRunning()
}
And the code responsible for capturing the photo:
func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
    let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
    photoSettings.isDepthDataDeliveryEnabled =
        self.photoOutput.isDepthDataDeliverySupported
    photoSettings.isDepthDataFiltered = false
    self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate) // <---- error is being thrown on this call
    self.photoCaptureCompletionBlock = completion
}
What am I doing wrong in this configuration?
I solved it with the following implementation:
Any comments / remarks are highly appreciated!
import AVFoundation
import UIKit

class CameraController: NSObject {
    var captureSession: AVCaptureSession?
    var videoDevice: AVCaptureDevice?
    var previewLayer: AVCaptureVideoPreviewLayer?
    var videoOutput = AVCaptureVideoDataOutput()
    var photoOutput = AVCapturePhotoOutput()

    func prepare(completionHandler: @escaping (Error?) -> Void) {
        func createCaptureSession() {
            captureSession = AVCaptureSession()
        }

        func configureCaptureDevices() throws {
            // Select a depth-capable capture device.
            guard let vd = AVCaptureDevice.default(.builtInTrueDepthCamera,
                                                   for: .video, position: .unspecified)
                else { fatalError("No dual camera.") }
            videoDevice = vd
            // Select a depth (not disparity) format that works with the active color format.
            let availableFormats = videoDevice!.activeFormat.supportedDepthDataFormats
            let depthFormat = availableFormats.first(where: { format in
                let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
                return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
                        pixelFormatType == kCVPixelFormatType_DepthFloat32)
            })
            do {
                try videoDevice!.lockForConfiguration()
                videoDevice!.activeDepthDataFormat = depthFormat
                videoDevice!.unlockForConfiguration()
            } catch {
                print("Could not lock device for configuration: \(error)")
                return
            }
        }

        func configureDeviceInputs() throws {
            if captureSession == nil {
                throw CameraControllerError.captureSessionIsMissing
            }
            captureSession?.beginConfiguration()
            // Add video input.
            guard let videoDeviceInput = try? AVCaptureDeviceInput(device: self.videoDevice!),
                captureSession!.canAddInput(videoDeviceInput)
                else { fatalError("Can't add video input.") }
            captureSession!.addInput(videoDeviceInput)
            captureSession?.commitConfiguration()
        }

        func configurePhotoOutput() throws {
            guard let captureSession = self.captureSession else { throw CameraControllerError.captureSessionIsMissing }
            captureSession.beginConfiguration()
            // Set up photo output for depth data capture.
            photoOutput = AVCapturePhotoOutput()
            photoOutput.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])], completionHandler: nil)
            guard captureSession.canAddOutput(photoOutput)
                else { fatalError("Can't add photo output.") }
            captureSession.addOutput(photoOutput)
            // must be set after photoOutput is added to captureSession. Why???
            photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
            captureSession.sessionPreset = .photo
            captureSession.commitConfiguration()
            captureSession.startRunning()
        }

        DispatchQueue(label: "prepare").async {
            do {
                createCaptureSession()
                try configureCaptureDevices()
                try configureDeviceInputs()
                try configurePhotoOutput()
            } catch {
                DispatchQueue.main.async {
                    completionHandler(error)
                }
                return
            }
            DispatchQueue.main.async {
                completionHandler(nil)
            }
        }
    }

    func displayPreview(on view: UIView) throws {
        guard let captureSession = self.captureSession, captureSession.isRunning else { throw CameraControllerError.captureSessionIsMissing }
        self.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        self.previewLayer?.connection?.videoOrientation = .portrait
        view.layer.insertSublayer(self.previewLayer!, at: 0)
        self.previewLayer?.frame = view.frame
    }

    func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
        let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
        photoSettings.isDepthDataDeliveryEnabled = true
        photoSettings.isDepthDataFiltered = false
        self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate)
        self.photoCaptureCompletionBlock = completion
    }

    var photoCaptureCompletionBlock: ((UIImage?, Error?) -> Void)?
}

extension CameraController {
    public enum CameraPosition {
        case front
        case rear
    }

    enum CameraControllerError: Swift.Error {
        case captureSessionAlreadyRunning
        case captureSessionIsMissing
        case inputsAreInvalid
        case invalidOperation
        case noCamerasAvailable
        case unknown
    }
}
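For completeness, here is a rough usage sketch from a view controller (the CameraViewController class and the previewView outlet are assumptions, not part of the original code):
import UIKit

class CameraViewController: UIViewController {
    // Assumed outlet that hosts the camera preview.
    @IBOutlet weak var previewView: UIView!
    let cameraController = CameraController()

    override func viewDidLoad() {
        super.viewDidLoad()
        cameraController.prepare { error in
            if let error = error {
                print("Camera setup failed: \(error)")
                return
            }
            // prepare() calls its completion on the main queue, so the preview can be attached here.
            do {
                try self.cameraController.displayPreview(on: self.previewView)
            } catch {
                print("Could not show preview: \(error)")
            }
        }
    }
}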
I'm trying to use the AVFoundation framework to take a picture and analyze it in my app. I want it to take a picture every second, automatically. How do I do that?
Here is my current code; right now it takes a picture only when capturePhoto() is called.
func setupSession() {
    session = AVCaptureSession()
    session.sessionPreset = AVCaptureSessionPresetPhoto

    let camera = AVCaptureDevice
        .defaultDeviceWithMediaType(AVMediaTypeVideo)
    do { input = try AVCaptureDeviceInput(device: camera) } catch { return }

    output = AVCaptureStillImageOutput()
    output.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]

    guard session.canAddInput(input)
        && session.canAddOutput(output) else { return }

    session.addInput(input)
    session.addOutput(output)

    previewLayer = AVCaptureVideoPreviewLayer(session: session)
    previewLayer!.videoGravity = AVLayerVideoGravityResizeAspect
    previewLayer!.connection?.videoOrientation = .Portrait
    view.layer.addSublayer(previewLayer!)

    session.startRunning()
}
func capturePhoto() {
    guard let connection = output.connectionWithMediaType(AVMediaTypeVideo) else { return }
    connection.videoOrientation = .Portrait
    output.captureStillImageAsynchronouslyFromConnection(connection) { (sampleBuffer, error) in
        guard sampleBuffer != nil && error == nil else { return }
        let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
        guard let image = UIImage(data: imageData) else { return }
        // do stuff with image
    }
}
What should I change?
Create an NSTimer that fires once a second, and in that timer's method call capturePhoto().
Create the timer like this:
var cameraTimer = NSTimer.scheduledTimerWithTimeInterval(1.0,
                                                         target: self,
                                                         selector: #selector(timerCalled(_:)),
                                                         userInfo: nil,
                                                         repeats: true)
Your timerCalled function might look like this:
func timerCalled(timer: NSTimer) {
    capturePhoto()
}
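One follow-up worth noting: if cameraTimer is stored as a property on the view controller (an assumption here), invalidate it when the screen goes away so the capture loop stops:
override func viewWillDisappear(animated: Bool) {
    super.viewWillDisappear(animated)
    // Stop the once-a-second capture loop.
    cameraTimer.invalidate()
}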
Hi, I am getting this error.
It should be caused by the code below (it should switch between the front and back camera in my custom camera). I am able to take a picture and everything works fine except this code...
@IBAction func switchCamera(sender: UIButton) {
    var session: AVCaptureSession!
    let currentCameraInput: AVCaptureInput = session.inputs[0] as! AVCaptureInput
    session.removeInput(currentCameraInput)
    do {
        let newCamera: AVCaptureDevice?
        if (captureDevice!.position == AVCaptureDevicePosition.Back) {
            print("Setting new camera with Front")
            newCamera = self.cameraWithPosition(AVCaptureDevicePosition.Front)
        } else {
            print("Setting new camera with Back")
            newCamera = self.cameraWithPosition(AVCaptureDevicePosition.Back)
        }
        let error = NSError?()
        let newVideoInput = try AVCaptureDeviceInput(device: newCamera)
        if (error == nil && captureSession?.canAddInput(newVideoInput) != nil) {
            session.addInput(newVideoInput)
        } else {
            print("Error creating capture device input")
        }
        session.commitConfiguration()
        captureDevice! = newCamera!
    } catch let error as NSError {
        // Handle any errors
        print(error)
    }
}
Thanks.
I use AVCamSwift to take pictures. But when I take a picture with the front (internal) camera, it saves the image reversed (mirrored). The owner of the code has confirmed that this is a bug. This is the code I use:
@IBAction func snapStillImage(sender: AnyObject) {
    print("snapStillImage")
    dispatch_async(self.sessionQueue, {
        // Update the orientation on the still image output video connection before capturing.
        let videoOrientation = (self.previewView.layer as! AVCaptureVideoPreviewLayer).connection.videoOrientation
        self.stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo).videoOrientation = videoOrientation

        // Flash set to Auto for Still Capture
        takePhotoScreen.setFlashMode(AVCaptureFlashMode.Auto, device: self.videoDeviceInput!.device)

        self.stillImageOutput!.captureStillImageAsynchronouslyFromConnection(self.stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo), completionHandler: {
            (imageDataSampleBuffer: CMSampleBuffer!, error: NSError!) in
            if error == nil {
                let data: NSData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(imageDataSampleBuffer)
                let image: UIImage = UIImage(data: data)!
                let library: ALAssetsLibrary = ALAssetsLibrary()
                let orientation: ALAssetOrientation = ALAssetOrientation(rawValue: image.imageOrientation.rawValue)!
                library.writeImageToSavedPhotosAlbum(image.CGImage, orientation: orientation, completionBlock: nil)
                print("save to album")
            } else {
                print("Did not capture still image")
                print(error)
            }
        })
    })
}
@IBAction func changeCamera(sender: AnyObject) {
    print("change camera")
    self.cameraButton.enabled = false
    self.snapButton.enabled = false

    dispatch_async(self.sessionQueue, {
        let currentVideoDevice: AVCaptureDevice = self.videoDeviceInput!.device
        let currentPosition: AVCaptureDevicePosition = currentVideoDevice.position
        var preferredPosition: AVCaptureDevicePosition = AVCaptureDevicePosition.Unspecified

        switch currentPosition {
        case AVCaptureDevicePosition.Front:
            preferredPosition = AVCaptureDevicePosition.Back
        case AVCaptureDevicePosition.Back:
            preferredPosition = AVCaptureDevicePosition.Front
        case AVCaptureDevicePosition.Unspecified:
            preferredPosition = AVCaptureDevicePosition.Back
        }

        let device: AVCaptureDevice = takePhotoScreen.deviceWithMediaType(AVMediaTypeVideo, preferringPosition: preferredPosition)
        var videoDeviceInput: AVCaptureDeviceInput?
        do {
            videoDeviceInput = try AVCaptureDeviceInput(device: device)
        } catch _ as NSError {
            videoDeviceInput = nil
        } catch {
            fatalError()
        }

        self.session!.beginConfiguration()
        self.session!.removeInput(self.videoDeviceInput)
        if self.session!.canAddInput(videoDeviceInput) {
            NSNotificationCenter.defaultCenter().removeObserver(self, name: AVCaptureDeviceSubjectAreaDidChangeNotification, object: currentVideoDevice)
            takePhotoScreen.setFlashMode(AVCaptureFlashMode.Auto, device: device)
            NSNotificationCenter.defaultCenter().addObserver(self, selector: "subjectAreaDidChange:", name: AVCaptureDeviceSubjectAreaDidChangeNotification, object: device)
            self.session!.addInput(videoDeviceInput)
            self.videoDeviceInput = videoDeviceInput
        } else {
            self.session!.addInput(self.videoDeviceInput)
        }
        self.session!.commitConfiguration()

        dispatch_async(dispatch_get_main_queue(), {
            self.snapButton.enabled = true
            self.cameraButton.enabled = true
        })
    })
}
I tried to fix it with the following code, but it failed for the front camera. Do you have a recommendation for a workaround?
let reversedImage = UIImage(CGImage: pickedPhoto!.CGImage!, scale: 1.0, orientation: .LeftMirrored)
I solved the bug with the following solution:
let currentVideoDevice: AVCaptureDevice = self.videoDeviceInput!.device
let currentPosition: AVCaptureDevicePosition = currentVideoDevice.position
if currentPosition == AVCaptureDevicePosition.Front {
    let reversedImage = UIImage(CGImage: image.CGImage!, scale: 1.0, orientation: .LeftMirrored)
    sp.pickedPhoto = reversedImage
}
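One way to package that fix so it can be reused from snapStillImage is a small helper like this (the normalizedImage name is made up; videoDeviceInput is the existing property from the code above):
// Sketch: mirror a captured UIImage only when it came from the front camera.
func normalizedImage(image: UIImage) -> UIImage {
    if self.videoDeviceInput!.device.position == AVCaptureDevicePosition.Front {
        return UIImage(CGImage: image.CGImage!, scale: 1.0, orientation: .LeftMirrored)
    }
    return image
}
In snapStillImage, the image passed to writeImageToSavedPhotosAlbum would then be normalizedImage(image) instead of image.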