AVCaptureSession code extraction error - iOS

I'm trying to extract some code out of my view controller to clean it up and keep the code as DRY as possible. It works fine as follows:
class InitialRegistrationViewController: UIViewController, UINavigationControllerDelegate, UIImagePickerControllerDelegate, NVActivityIndicatorViewable {
var session: AVCaptureSession?
var stillImageOutput: AVCaptureStillImageOutput?
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
if Platform.isPhone {
session = AVCaptureSession()
session!.sessionPreset = AVCaptureSessionPresetPhoto
var frontCamera = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
let availableCameraDevices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo)
for device in availableCameraDevices as! [AVCaptureDevice] {
if device.position == .front {
frontCamera = device
}
}
var error: NSError?
var input: AVCaptureDeviceInput!
do {
input = try AVCaptureDeviceInput(device: frontCamera)
} catch let error1 as NSError {
error = error1
input = nil
print(error!.localizedDescription)
}
if error == nil && session!.canAddInput(input) {
session!.addInput(input)
stillImageOutput = AVCaptureStillImageOutput()
stillImageOutput?.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
if session!.canAddOutput(stillImageOutput) {
session!.addOutput(stillImageOutput)
session!.startRunning()
}
}
}
}
func capturePhoto() {
if let videoConnection = stillImageOutput!.connection(withMediaType: AVMediaTypeVideo) {
stillImageOutput?.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (sampleBuffer, error) -> Void in
if sampleBuffer != nil {
let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
let dataProvider = CGDataProvider(data: imageData as! CFData)
let cgImageRef = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.defaultIntent)
let image = UIImage(cgImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.right)
self.profileImage.image = image
}
})
}
}
}
but when I extract it to a helper like below:
import UIKit
import AVFoundation
class ProfilePhoto {
var session: AVCaptureSession?
var stillImageOutput: AVCaptureStillImageOutput?
func startSession() {
if Platform.isPhone {
session = AVCaptureSession()
session!.sessionPreset = AVCaptureSessionPresetPhoto
var frontCamera = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
let availableCameraDevices = AVCaptureDevice.devices(withMediaType: AVMediaTypeVideo)
for device in availableCameraDevices as! [AVCaptureDevice] {
if device.position == .front {
frontCamera = device
}
}
var error: NSError?
var input: AVCaptureDeviceInput!
do {
input = try AVCaptureDeviceInput(device: frontCamera)
} catch let error1 as NSError {
error = error1
input = nil
print(error!.localizedDescription)
}
if error == nil && session!.canAddInput(input) {
session!.addInput(input)
stillImageOutput = AVCaptureStillImageOutput()
stillImageOutput?.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
if session!.canAddOutput(stillImageOutput) {
session!.addOutput(stillImageOutput)
session!.startRunning()
}
}
}
}
func capture() -> UIImage {
var image: UIImage!
if let videoConnection = stillImageOutput!.connection(withMediaType: AVMediaTypeVideo) {
stillImageOutput?.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (sampleBuffer, error) -> Void in
if sampleBuffer != nil {
let cgImageRef = self.setBufferData(sampleBuffer: sampleBuffer!)
image = UIImage(cgImage: cgImageRef, scale: 1.0, orientation: UIImageOrientation.right)
}
})
}
return image
}
func setBufferData(sampleBuffer: CMSampleBuffer ) -> CGImage {
let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
let dataProvider = CGDataProvider(data: imageData as! CFData)
let cgImageRef = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.defaultIntent)
return cgImageRef!
}
}
where in InitialRegistrationViewController I call:
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
profilePhoto.startSession()
}
func capturePhoto() {
profileImage.image = profilePhoto.capture()
}
I get fatal error: unexpectedly found nil while unwrapping an Optional value when returning the image in profilePhoto.capture().
I don't understand how the session works as I'm new to iOS, but I think it's because the session is ending(?) when I try to capture the image. Any insight would be great. Thanks.
UPDATE: I upvoted the answer given as it's close enough; below is what worked for me.
func capture(completion: @escaping (UIImage?) -> Void) {
if let videoConnection = stillImageOutput!.connection(withMediaType: AVMediaTypeVideo) {
stillImageOutput?.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (sampleBuffer, error) -> Void in
if sampleBuffer != nil {
let cgImageRef = self.setBufferData(sampleBuffer: sampleBuffer!)
let image: UIImage! = UIImage(cgImage: cgImageRef, scale: 1.0, orientation: UIImageOrientation.right)
completion(image)
} else {
completion(nil)
}
})
} else {
completion(nil)
}
}
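For reference, calling the completion-based capture() from the view controller then looks roughly like this (a sketch, assuming the same profilePhoto and profileImage properties shown earlier, and dispatching to the main queue since the capture completion handler isn't guaranteed to run there):
func capturePhoto() {
    profilePhoto.capture { [weak self] image in
        DispatchQueue.main.async {
            // update the UI on the main thread
            self?.profileImage.image = image
        }
    }
}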

Your capture() method makes an asynchronous call to get the UIImage, therefore when it returns [immediately] the value returned is always nil.
The article @dan suggested shows a callback pattern that can be used to return the image to the caller; make sure you understand this mechanism before proceeding.
func capture(result: @escaping (UIImage?) -> Void)
{
    if let videoConnection = stillImageOutput!.connection(withMediaType: AVMediaTypeVideo)
    {
        stillImageOutput?.captureStillImageAsynchronously(from: videoConnection, completionHandler:
        { (sampleBuffer, error) -> Void in
            if sampleBuffer != nil
            {
                let cgImageRef = self.setBufferData(sampleBuffer: sampleBuffer!)
                let image = UIImage(cgImage: cgImageRef, scale: 1.0, orientation: UIImageOrientation.right)
                result(image)
            }
            else
            {
                result(nil)
            }
        })
    }
    else
    {
        result(nil)
    }
}
And to call it, you could use
capture(){(image: UIImage?) -> Void in
//use the image that was just retrieved
}
You've now made your capture() method asynchronous, and report its return value via a callback.

Related

'No active and enabled video connection' error when capturing photo with TrueDepth cam

I am trying to record depth data from the TrueDepth camera along with a photo. But when calling
AVCapturePhotoOutput.capturePhoto(with:delegate:)
I get an exception stating:
No active and enabled video connection
I configure the camera and outputs like so (basically following the guide from Apple about photo capturing and capturing depth):
func configurePhotoOutput() throws {
self.captureSession = AVCaptureSession()
guard self.captureSession != nil else {
return
}
// Select a depth-capable capture device.
guard let videoDevice = AVCaptureDevice.default(.builtInTrueDepthCamera,
for: .video, position: .unspecified)
else { fatalError("No dual camera.") }
// Select a depth (not disparity) format that works with the active color format.
let availableFormats = videoDevice.activeFormat.supportedDepthDataFormats
let depthFormat = availableFormats.first(where: { format in
let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
pixelFormatType == kCVPixelFormatType_DepthFloat32)
})
do {
try videoDevice.lockForConfiguration()
videoDevice.activeDepthDataFormat = depthFormat
videoDevice.unlockForConfiguration()
} catch {
print("Could not lock device for configuration: \(error)")
return
}
self.captureSession!.beginConfiguration()
// add video input
guard let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
self.captureSession!.canAddInput(videoDeviceInput)
else { fatalError("Can't add video input.") }
self.captureSession!.addInput(videoDeviceInput)
// add video output
if self.captureSession!.canAddOutput(videoOutput) {
self.captureSession!.addOutput(videoOutput)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
} else { fatalError("Can't add video output.") }
// Set up photo output for depth data capture.
let photoOutput = AVCapturePhotoOutput()
photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
guard self.captureSession!.canAddOutput(photoOutput)
else { fatalError("Can't add photo output.") }
self.captureSession!.addOutput(photoOutput)
self.captureSession!.sessionPreset = .photo
self.captureSession!.commitConfiguration()
self.captureSession!.startRunning()
}
And the code responsible for capturing the photo:
func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
photoSettings.isDepthDataDeliveryEnabled =
self.photoOutput.isDepthDataDeliverySupported
photoSettings.isDepthDataFiltered = false
self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate) // <---- error is being thrown on this call
self.photoCaptureCompletionBlock = completion
}
What am I doing wrong in this configuration?
I solved it with the following implementation. Any comments/remarks are highly appreciated!
import AVFoundation
import UIKit
class CameraController: NSObject {
var captureSession: AVCaptureSession?
var videoDevice: AVCaptureDevice?
var previewLayer: AVCaptureVideoPreviewLayer?
var videoOutput = AVCaptureVideoDataOutput()
var photoOutput = AVCapturePhotoOutput()
func prepare(completionHandler: @escaping (Error?) -> Void) {
func createCaptureSession() {
captureSession = AVCaptureSession()
}
func configureCaptureDevices() throws {
// Select a depth-capable capture device.
guard let vd = AVCaptureDevice.default(.builtInTrueDepthCamera,
for: .video, position: .unspecified)
else { fatalError("No dual camera.") }
videoDevice = vd
// Select a depth (not disparity) format that works with the active color format.
let availableFormats = videoDevice!.activeFormat.supportedDepthDataFormats
let depthFormat = availableFormats.first(where: { format in
let pixelFormatType = CMFormatDescriptionGetMediaSubType(format.formatDescription)
return (pixelFormatType == kCVPixelFormatType_DepthFloat16 ||
pixelFormatType == kCVPixelFormatType_DepthFloat32)
})
do {
try videoDevice!.lockForConfiguration()
videoDevice!.activeDepthDataFormat = depthFormat
videoDevice!.unlockForConfiguration()
} catch {
print("Could not lock device for configuration: \(error)")
return
}
}
func configureDeviceInputs() throws {
if( captureSession == nil) {
throw CameraControllerError.captureSessionIsMissing
}
captureSession?.beginConfiguration()
// add video input
guard let videoDeviceInput = try? AVCaptureDeviceInput(device: self.videoDevice!),
captureSession!.canAddInput(videoDeviceInput)
else { fatalError("Can't add video input.") }
captureSession!.addInput(videoDeviceInput)
captureSession?.commitConfiguration()
}
func configurePhotoOutput() throws {
guard let captureSession = self.captureSession else { throw CameraControllerError.captureSessionIsMissing }
captureSession.beginConfiguration()
// Set up photo output for depth data capture.
photoOutput = AVCapturePhotoOutput()
photoOutput.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])], completionHandler: nil)
guard captureSession.canAddOutput(photoOutput)
else { fatalError("Can't add photo output.") }
captureSession.addOutput(photoOutput)
// must be set after photoOutput is added to captureSession. Why???
photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
captureSession.sessionPreset = .photo
captureSession.commitConfiguration()
captureSession.startRunning()
}
DispatchQueue(label: "prepare").async {
do {
createCaptureSession()
try configureCaptureDevices()
try configureDeviceInputs()
try configurePhotoOutput()
}
catch {
DispatchQueue.main.async {
completionHandler(error)
}
return
}
DispatchQueue.main.async {
completionHandler(nil)
}
}
}
func displayPreview(on view: UIView) throws {
guard let captureSession = self.captureSession, captureSession.isRunning else { throw CameraControllerError.captureSessionIsMissing }
self.previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.previewLayer?.connection?.videoOrientation = .portrait
view.layer.insertSublayer(self.previewLayer!, at: 0)
self.previewLayer?.frame = view.frame
}
func captureImage(delegate: AVCapturePhotoCaptureDelegate, completion: @escaping (UIImage?, Error?) -> Void) {
let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
photoSettings.isDepthDataDeliveryEnabled = true
photoSettings.isDepthDataFiltered = false
self.photoOutput.capturePhoto(with: photoSettings, delegate: delegate)
self.photoCaptureCompletionBlock = completion
}
var photoCaptureCompletionBlock: ((UIImage?, Error?) -> Void)?
}
extension CameraController {
public enum CameraPosition {
case front
case rear
}
enum CameraControllerError: Swift.Error {
case captureSessionAlreadyRunning
case captureSessionIsMissing
case inputsAreInvalid
case invalidOperation
case noCamerasAvailable
case unknown
}
}

Recording videos with real-time filters in Swift

I am new to Swift and trying to build a camera app which can apply real-time filters and save the video with the filters applied.
So far I can preview in real time with the applied filters, but when I save the video it's all black.
import UIKit
import AVFoundation
import AssetsLibrary
import CoreMedia
import Photos
class ViewController: UIViewController , AVCaptureVideoDataOutputSampleBufferDelegate {
var captureSession: AVCaptureSession!
@IBOutlet weak var previewView: UIView!
@IBOutlet weak var recordButtton: UIButton!
@IBOutlet weak var imageView: UIImageView!
var assetWriter: AVAssetWriter?
var assetWriterPixelBufferInput: AVAssetWriterInputPixelBufferAdaptor?
var isWriting = false
var currentSampleTime: CMTime?
var currentVideoDimensions: CMVideoDimensions?
override func viewDidLoad() {
super.viewDidLoad()
FilterVendor.register()
setupCaptureSession()
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
func setupCaptureSession() {
let captureSession = AVCaptureSession()
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
guard let captureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo), let input = try? AVCaptureDeviceInput(device: captureDevice) else {
print("Can't access the camera")
return
}
if captureSession.canAddInput(input) {
captureSession.addInput(input)
}
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
if captureSession.canAddOutput(videoOutput) {
captureSession.addOutput(videoOutput)
}
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
if((previewLayer) != nil) {
view.layer.addSublayer(previewLayer!)
}
captureSession.startRunning()
}
@IBAction func record(_ sender: Any) {
if isWriting {
print("stop record")
self.isWriting = false
assetWriterPixelBufferInput = nil
assetWriter?.finishWriting(completionHandler: {[unowned self] () -> Void in
self.saveMovieToCameraRoll()
})
} else {
print("start record")
createWriter()
assetWriter?.startWriting()
assetWriter?.startSession(atSourceTime: currentSampleTime!)
isWriting = true
}
}
func saveMovieToCameraRoll() {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: self.movieURL() as URL)
}) { saved, error in
if saved {
print("saved")
}
}
}
func movieURL() -> NSURL {
let tempDir = NSTemporaryDirectory()
let url = NSURL(fileURLWithPath: tempDir).appendingPathComponent("tmpMov.mov")
return url! as NSURL
}
func checkForAndDeleteFile() {
let fm = FileManager.default
let url = movieURL()
let exist = fm.fileExists(atPath: url.path!)
if exist {
do {
try fm.removeItem(at: url as URL)
} catch let error as NSError {
print(error.localizedDescription)
}
}
}
func createWriter() {
self.checkForAndDeleteFile()
do {
assetWriter = try AVAssetWriter(outputURL: movieURL() as URL, fileType: AVFileTypeQuickTimeMovie)
} catch let error as NSError {
print(error.localizedDescription)
return
}
let outputSettings = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : Int(currentVideoDimensions!.width),
AVVideoHeightKey : Int(currentVideoDimensions!.height)
] as [String : Any]
let assetWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings as? [String : AnyObject])
assetWriterVideoInput.expectsMediaDataInRealTime = true
assetWriterVideoInput.transform = CGAffineTransform(rotationAngle: CGFloat(M_PI / 2.0))
let sourcePixelBufferAttributesDictionary = [
String(kCVPixelBufferPixelFormatTypeKey) : Int(kCVPixelFormatType_32BGRA),
String(kCVPixelBufferWidthKey) : Int(currentVideoDimensions!.width),
String(kCVPixelBufferHeightKey) : Int(currentVideoDimensions!.height),
String(kCVPixelFormatOpenGLESCompatibility) : kCFBooleanTrue
] as [String : Any]
assetWriterPixelBufferInput = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: assetWriterVideoInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if assetWriter!.canAdd(assetWriterVideoInput) {
assetWriter!.add(assetWriterVideoInput)
} else {
print("no way\(assetWriterVideoInput)")
}
}
func captureOutput(_ captureOutput: AVCaptureOutput, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection) {
autoreleasepool {
connection.videoOrientation = AVCaptureVideoOrientation.landscapeLeft;
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
let cameraImage = CIImage(cvPixelBuffer: pixelBuffer)
let filter = CIFilter(name: "Fİlter")!
filter.setValue(cameraImage, forKey: kCIInputImageKey)
let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)!
self.currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDescription)
self.currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer)
if self.isWriting {
if self.assetWriterPixelBufferInput?.assetWriterInput.isReadyForMoreMediaData == true {
var newPixelBuffer: CVPixelBuffer? = nil
CVPixelBufferPoolCreatePixelBuffer(nil, self.assetWriterPixelBufferInput!.pixelBufferPool!, &newPixelBuffer)
let success = self.assetWriterPixelBufferInput?.append(newPixelBuffer!, withPresentationTime: self.currentSampleTime!)
if success == false {
print("Pixel Buffer failed")
}
}
}
DispatchQueue.main.async {
if let outputValue = filter.value(forKey: kCIOutputImageKey) as? CIImage {
let filteredImage = UIImage(ciImage: outputValue)
self.imageView.image = filteredImage
}
}
}
}
}
I've added some comments to the critical part below:
func captureOutput(_ captureOutput: AVCaptureOutput, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection) {
autoreleasepool {
connection.videoOrientation = AVCaptureVideoOrientation.landscapeLeft;
// COMMENT: This line makes sense - this is your pixelbuffer from the camera.
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
// COMMENT: OK, so you turn pixelBuffer into a CIImage...
let cameraImage = CIImage(cvPixelBuffer: pixelBuffer)
// COMMENT: And now you've create a CIImage with a Filter instruction...
let filter = CIFilter(name: "Fİlter")!
filter.setValue(cameraImage, forKey: kCIInputImageKey)
let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)!
self.currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDescription)
self.currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer)
if self.isWriting {
if self.assetWriterPixelBufferInput?.assetWriterInput.isReadyForMoreMediaData == true {
// COMMENT: Here's where it gets weird. You've declared a new, empty pixelBuffer... but you already have one (pixelBuffer) that contains the image you want to write...
var newPixelBuffer: CVPixelBuffer? = nil
// COMMENT: And you grabbed memory from the pool.
CVPixelBufferPoolCreatePixelBuffer(nil, self.assetWriterPixelBufferInput!.pixelBufferPool!, &newPixelBuffer)
// COMMENT: And now you wrote an empty pixelBuffer back <-- this is what's causing the black frame.
let success = self.assetWriterPixelBufferInput?.append(newPixelBuffer!, withPresentationTime: self.currentSampleTime!)
if success == false {
print("Pixel Buffer failed")
}
}
}
// COMMENT: And now you're sending the filtered image back to the screen.
DispatchQueue.main.async {
if let outputValue = filter.value(forKey: kCIOutputImageKey) as? CIImage {
let filteredImage = UIImage(ciImage: outputValue)
self.imageView.image = filteredImage
}
}
}
}
It looks to me like you're basically getting the screen image, creating a filtered copy, then making a NEW pixel buffer which is empty and writing that out.
If you write the pixelBuffer you grabbed instead of the new one you're creating, you should successfully write the image.
What you need to successfully write out the filtered video is to create a new CVPixelBuffer from a CIImage - that solution exists here on StackOverflow already, I know because I needed that step myself!
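For what it's worth, here is a rough sketch of that step (assuming a CIContext created once outside the capture callback, the same assetWriterPixelBufferInput and currentSampleTime properties as in the question, and filter.outputImage as the filtered CIImage):
let ciContext = CIContext() // create this once, not per frame

if self.isWriting,
    let adaptor = self.assetWriterPixelBufferInput,
    adaptor.assetWriterInput.isReadyForMoreMediaData,
    let pool = adaptor.pixelBufferPool,
    let filteredImage = filter.outputImage {
    var newPixelBuffer: CVPixelBuffer? = nil
    CVPixelBufferPoolCreatePixelBuffer(nil, pool, &newPixelBuffer)
    if let outputBuffer = newPixelBuffer {
        // Render the filtered CIImage into the buffer instead of appending it empty.
        ciContext.render(filteredImage, to: outputBuffer)
        if adaptor.append(outputBuffer, withPresentationTime: self.currentSampleTime!) == false {
            print("Pixel Buffer failed")
        }
    }
}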

AVCamSwift front camera save image vertically reversed

I use AVCamSwift to take pictures. But when I take a picture with the front camera, it saves the image vertically reversed. The owner of the code confirmed that this is a bug. This is the code I use:
@IBAction func snapStillImage(sender: AnyObject) {
print("snapStillImage")
dispatch_async(self.sessionQueue, {
// Update the orientation on the still image output video connection before capturing.
let videoOrientation = (self.previewView.layer as! AVCaptureVideoPreviewLayer).connection.videoOrientation
self.stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo).videoOrientation = videoOrientation
// Flash set to Auto for Still Capture
takePhotoScreen.setFlashMode(AVCaptureFlashMode.Auto, device: self.videoDeviceInput!.device)
self.stillImageOutput!.captureStillImageAsynchronouslyFromConnection(self.stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo), completionHandler: {
(imageDataSampleBuffer: CMSampleBuffer!, error: NSError!) in
if error == nil {
let data:NSData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(imageDataSampleBuffer)
let image:UIImage = UIImage( data: data)!
let libaray:ALAssetsLibrary = ALAssetsLibrary()
let orientation: ALAssetOrientation = ALAssetOrientation(rawValue: image.imageOrientation.rawValue)!
libaray.writeImageToSavedPhotosAlbum(image.CGImage, orientation: orientation, completionBlock: nil)
print("save to album")
}else{
print("Did not capture still image")
print(error)
}
})
})
}
@IBAction func changeCamera(sender: AnyObject) {
print("change camera")
self.cameraButton.enabled = false
self.snapButton.enabled = false
dispatch_async(self.sessionQueue, {
let currentVideoDevice:AVCaptureDevice = self.videoDeviceInput!.device
let currentPosition: AVCaptureDevicePosition = currentVideoDevice.position
var preferredPosition: AVCaptureDevicePosition = AVCaptureDevicePosition.Unspecified
switch currentPosition{
case AVCaptureDevicePosition.Front:
preferredPosition = AVCaptureDevicePosition.Back
case AVCaptureDevicePosition.Back:
preferredPosition = AVCaptureDevicePosition.Front
case AVCaptureDevicePosition.Unspecified:
preferredPosition = AVCaptureDevicePosition.Back
}
let device:AVCaptureDevice = takePhotoScreen.deviceWithMediaType(AVMediaTypeVideo, preferringPosition: preferredPosition)
var videoDeviceInput: AVCaptureDeviceInput?
do {
videoDeviceInput = try AVCaptureDeviceInput(device: device)
} catch _ as NSError {
videoDeviceInput = nil
} catch {
fatalError()
}
self.session!.beginConfiguration()
self.session!.removeInput(self.videoDeviceInput)
if self.session!.canAddInput(videoDeviceInput){
NSNotificationCenter.defaultCenter().removeObserver(self, name:AVCaptureDeviceSubjectAreaDidChangeNotification, object:currentVideoDevice)
takePhotoScreen.setFlashMode(AVCaptureFlashMode.Auto, device: device)
NSNotificationCenter.defaultCenter().addObserver(self, selector: "subjectAreaDidChange:", name: AVCaptureDeviceSubjectAreaDidChangeNotification, object: device)
self.session!.addInput(videoDeviceInput)
self.videoDeviceInput = videoDeviceInput
}else{
self.session!.addInput(self.videoDeviceInput)
}
self.session!.commitConfiguration()
dispatch_async(dispatch_get_main_queue(), {
//
self.snapButton.enabled = true
self.cameraButton.enabled = true
})
})
}
I tried to find a solution with the following code, but it failed for the front camera. Do you have a recommendation for a workaround?
let reversedImage = UIImage(CGImage: pickedPhoto!.CGImage!, scale: 1.0, orientation: .LeftMirrored)
I solved the bug with the following solution:
let currentVideoDevice:AVCaptureDevice = self.videoDeviceInput!.device
let currentPosition: AVCaptureDevicePosition = currentVideoDevice.position
if currentPosition == AVCaptureDevicePosition.Front {
    let reversedImage = UIImage(CGImage: image.CGImage!, scale: 1.0, orientation: .LeftMirrored)
    sp.pickedPhoto = reversedImage
}

Begin new captureSession without removing previewLayer

I'm creating a custom view for the camera, which works fine; however, I'm now working on switching from the back camera to the front camera. At the moment I've done it as shown below. However, this seems to create a bad user experience where it removes the previewLayer (the screen becomes white) and then shows the front camera correctly. Is there a way to create a better user experience by not making everything white for a second before showing the new session?
switchCamera
func switchCamera() {
if usingbackCamera == true {
endSession()
beginSession(frontCamera!)
usingbackCamera = false
self.cameraView.bringSubviewToFront(actionView)
} else {
endSession()
beginSession(backCamera!)
usingbackCamera = true
self.cameraView.bringSubviewToFront(actionView)
}
}
beginSession
func beginSession(device: AVCaptureDevice) {
do {
captureSession.addInput(try AVCaptureDeviceInput(device: device))
self.previewLayer?.removeFromSuperlayer()
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.cameraView.layer.addSublayer(previewLayer!)
previewLayer?.frame = self.cameraView.bounds
captureSession.startRunning()
stillImageOutput.outputSettings = [AVVideoCodecKey:AVVideoCodecJPEG]
if captureSession.canAddOutput(stillImageOutput) {
captureSession.addOutput(stillImageOutput)
}
if captureSession.canAddOutput(videoOutput) {
captureSession.addOutput(videoOutput)
}
} catch let err as NSError {
print(err)
}
}
endSession
func endSession() {
self.previewLayer?.removeFromSuperlayer()
captureSession.stopRunning()
captureSession = AVCaptureSession()
}
Take Picture
func takePicture() {
if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo){
videoConnection.videoOrientation = AVCaptureVideoOrientation.Portrait
stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: {
(sampleBuffer, error) in
let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
let dataProvider = CGDataProviderCreateWithCFData(imageData)
let cgImageRef = CGImageCreateWithJPEGDataProvider(dataProvider, nil, true, CGColorRenderingIntent.RenderingIntentDefault)
let image = UIImage(CGImage: cgImageRef!, scale: 1.0, orientation: UIImageOrientation.Right)
self.previewImageView.image = image
self.previewImageView.hidden = false
self.cameraView.bringSubviewToFront(self.previewImageView)
})
}
}
You don't need to stop the captureSession and start it again when switching from the back to the front camera and vice versa.
All you need to do is remove the old capture session inputs and add the new one, all between a begin/commit configuration block.
Here is a rough example:
func switchCamera() {
//begin configuration changes
captureSession.beginConfiguration()
//remove the previous inputs
let inputs = captureSession.inputs as! [AVCaptureInput]
for oldInput:AVCaptureInput in inputs {
captureSession.removeInput(oldInput)
}
//add the new input
if usingbackCamera == true {
addInput(frontCamera!)
usingbackCamera = false
self.cameraView.bringSubviewToFront(actionView)
}
else {
addInput(backCamera!)
usingbackCamera = true
self.cameraView.bringSubviewToFront(actionView)
}
//end the configuration
captureSession.commitConfiguration()
}
func addInput(device: AVCaptureDevice) {
do {
captureSession.addInput(try AVCaptureDeviceInput(device: device))
} catch let err as NSError {
print(err)
}
}
I think you don't have to remove the preview layer when changing the input device.
The layer is bound to the session; all you have to do is stop the session, remove the original input, add the new input, and then start the session again.
I build my capture view with custom rendering, but I think the process will be the same.
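A rough sketch of that approach, assuming the captureSession, frontCamera and backCamera properties from the question (the existing previewLayer keeps rendering the same session, so it never has to be removed):
func switchInput(device: AVCaptureDevice) {
    captureSession.stopRunning()
    // Swap inputs; previewLayer stays attached to this same session.
    for oldInput in captureSession.inputs as! [AVCaptureInput] {
        captureSession.removeInput(oldInput)
    }
    do {
        captureSession.addInput(try AVCaptureDeviceInput(device: device))
    } catch let err as NSError {
        print(err)
    }
    captureSession.startRunning()
}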
The capture snippet:
for output in session.outputs {
if let capture = output as? AVCaptureStillImageOutput{
for connection in (capture.connections as! [AVCaptureConnection]){
for port in (connection.inputPorts as! [AVCaptureInputPort]){
if port.mediaType == AVMediaTypeVideo{
capture.captureStillImageAsynchronouslyFromConnection(connection, completionHandler: {(buffer, err) -> Void in
if err != nil{
print(err)
}
let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(buffer)
guard let image = CIImage(data: imageData) else{
completion(nil)
return
}
let rotatedImage = image.imageByApplyingTransform(CGAffineTransformMakeRotation(-CGFloat(M_PI_2)))
})
}
}
}
}
}

Capture current camera image using AVFoundation

I'm trying to capture the image and save it to a variable when I press "myButton". What should I do?
My code is as follows:
import UIKit
import AVFoundation
import MobileCoreServices
class ViewController: UIViewController {
let captureSession = AVCaptureSession()
var previewLayer : AVCaptureVideoPreviewLayer?
var captureDevice : AVCaptureDevice?
@IBOutlet var myTap: UITapGestureRecognizer!
@IBOutlet weak var myButton: UIButton!
@IBAction func shotPress(sender: UIButton) {
//Save image to variable somehow
}
var stillImageOutput = AVCaptureStillImageOutput()
stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
if captureSession.canAddOutput(stillImageOutput) {
captureSession.addOutput(stillImageOutput)
}
}
override func viewDidLoad() {
super.viewDidLoad()
captureSession.sessionPreset = AVCaptureSessionPresetHigh
let devices = AVCaptureDevice.devices()
for device in devices {
if (device.hasMediaType(AVMediaTypeVideo)) {
if(device.position == AVCaptureDevicePosition.Back) {
captureDevice = device as? AVCaptureDevice
if captureDevice != nil {
beginSession()
}
}
}
}
}
func updateDeviceSettings(focusValue : Float, isoValue : Float) {
if let device = captureDevice {
if(device.lockForConfiguration(nil)) {
device.focusMode = AVCaptureFocusMode.ContinuousAutoFocus
device.unlockForConfiguration()
}
}
}
func beginSession() {
var err : NSError? = nil
captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err))
if err != nil {
println("error: \(err?.localizedDescription)")
}
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.view.layer.addSublayer(previewLayer)
self.view.bringSubviewToFront(myButton)
previewLayer?.frame = self.view.layer.frame
captureSession.startRunning()
}
}
This is what I am using in one of the apps I am working on. It should be helpful for your problem as well.
func capturePicture(){
println("Capturing image")
stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
captureSession.addOutput(stillImageOutput)
if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo){
stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: {
(sampleBuffer, error) in
var imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
var dataProvider = CGDataProviderCreateWithCFData(imageData)
var cgImageRef = CGImageCreateWithJPEGDataProvider(dataProvider, nil, true, CGColorRenderingIntent.RenderingIntentDefault)
var image = UIImage(CGImage: cgImageRef, scale: 1.0, orientation: UIImageOrientation.Right)
var imageView = UIImageView(image: image)
imageView.frame = CGRect(x:0, y:0, width:self.screenSize.width, height:self.screenSize.height)
//Show the captured image to the user
self.view.addSubview(imageView)
//Save the captured preview to image
UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
})
}
}
Updated for Swift 3:
var stillImageOutput = AVCaptureStillImageOutput.init()
stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
self.cameraSession.addOutput(stillImageOutput)
if let videoConnection = stillImageOutput.connection(withMediaType:AVMediaTypeVideo){
stillImageOutput.captureStillImageAsynchronously(from:videoConnection, completionHandler: {
(sampleBuffer, error) in
var imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
var dataProvider = CGDataProvider.init(data: imageData as! CFData)
var cgImageRef = CGImage.init(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
var image = UIImage.init(cgImage: cgImageRef!, scale: 1.0, orientation: .right)
// do something with image
})
}
