iOS Swift - AVCaptureSession - Capture frames respecting frame rate - ios

I'm trying to build an app which will capture frames from the camera and process them with OpenCV before saving those files to the device, but at a specific frame rate.
What I'm stuck on at the moment is the fact that AVCaptureVideoDataOutputSampleBufferDelegate doesn't appear to respect the AVCaptureDevice.activeVideoMinFrameDuration, or AVCaptureDevice.activeVideoMaxFrameDuration settings.
captureOutput runs far quicker than 2 frames per second as the above settings would indicate.
Do you happen to know how one could achieve this, with or without the delegate?
ViewController:
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewDidAppear(animated: Bool) {
setupCaptureSession()
}
func setupCaptureSession() {
let session : AVCaptureSession = AVCaptureSession()
session.sessionPreset = AVCaptureSessionPreset1280x720
let videoDevices : [AVCaptureDevice] = AVCaptureDevice.devices() as! [AVCaptureDevice]
for device in videoDevices {
if device.position == AVCaptureDevicePosition.Back {
let captureDevice : AVCaptureDevice = device
do {
try captureDevice.lockForConfiguration()
captureDevice.activeVideoMinFrameDuration = CMTimeMake(1, 2)
captureDevice.activeVideoMaxFrameDuration = CMTimeMake(1, 2)
captureDevice.unlockForConfiguration()
let input : AVCaptureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
if session.canAddInput(input) {
try session.addInput(input)
}
let output : AVCaptureVideoDataOutput = AVCaptureVideoDataOutput()
let dispatch_queue : dispatch_queue_t = dispatch_queue_create("streamoutput", nil)
output.setSampleBufferDelegate(self, queue: dispatch_queue)
session.addOutput(output)
session.startRunning()
let previewLayer = AVCaptureVideoPreviewLayer(session: session)
previewLayer.connection.videoOrientation = .LandscapeRight
let previewBounds : CGRect = CGRectMake(0,0,self.view.frame.width/2,self.view.frame.height+20)
previewLayer.backgroundColor = UIColor.blackColor().CGColor
previewLayer.frame = previewBounds
previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
self.imageView.layer.addSublayer(previewLayer)
self.previewMat.frame = CGRectMake(previewBounds.width, 0, previewBounds.width, previewBounds.height)
} catch _ {
}
break
}
}
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
self.wrapper.processBuffer(self.getUiImageFromBuffer(sampleBuffer), self.previewMat)
}

So I've figured out the problem.
In the comments section for AVCaptureDevice.h above the activeVideoMinFrameDuration property it states:
On iOS, the receiver's activeVideoMinFrameDuration resets to its
default value under the following conditions:
The receiver's activeFormat changes
The receiver's AVCaptureDeviceInput's session's sessionPreset changes
The receiver's AVCaptureDeviceInput is added to a session
The last bullet point was causing my problem, so doing the following solved the problem for me:
do {
let input : AVCaptureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
if session.canAddInput(input) {
try session.addInput(input)
}
try captureDevice.lockForConfiguration()
captureDevice.activeVideoMinFrameDuration = CMTimeMake(value: 1, timescale: 2)
captureDevice.activeVideoMaxFrameDuration = CMTimeMake(value: 1, timescale: 2)
captureDevice.unlockForConfiguration()
let output : AVCaptureVideoDataOutput = AVCaptureVideoDataOutput()
let dispatch_queue : dispatch_queue_t = dispatch_queue_create("streamoutput", nil)
output.setSampleBufferDelegate(self, queue: dispatch_queue)
session.addOutput(output)

Related

captureOutput() function is never called swift4

I'm trying to capture camera frames in realtime to be processed using Firebase ML KIT. I've successfully displayed the camera view but I can't seem to get the captureOutput delegate function to be called.
P.s I'm new to iOS development.
private func startLiveVideo() {
self.session.sessionPreset = AVCaptureSession.Preset.photo
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
let deviceInput = try! AVCaptureDeviceInput(device: captureDevice!)
self.session.addInput(deviceInput)
let deviceOutput = AVCaptureVideoDataOutput()
deviceOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
deviceOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
self.session.addOutput(AVCaptureVideoDataOutput())
let imageLayer = AVCaptureVideoPreviewLayer(session: session)
imageLayer.frame = CGRect(x: 0, y: 0, width: self.imageView.frame.size.width + 100, height: self.imageView.frame.size.height)
imageLayer.videoGravity = .resizeAspectFill
imageView.layer.addSublayer(imageLayer)
self.session.startRunning()
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
print("Frame captured")
}
You add the delegate for
let deviceOutput = AVCaptureVideoDataOutput()
deviceOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
deviceOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
but add another instance here
self.session.addOutput(AVCaptureVideoDataOutput())
so replace it with
self.session.addOutput(deviceOutput)
It worked just fine after converting to Swift 5.

How to enable Stabilization and HDR in IOS Camera?

What I Did:-
I have tried to enable stabilization and HDR but it's not working.I think I was in right path.When I was trying to check that the current device supports stabilization and HDR in that both case I had got only false case in all devices.
Please guide me if any mistakes had done in the below code snippet.
Thanks in advance!!
My Code Snippet:-
func createAVSession() throws -> AVCaptureSession {
AppLog.LogFunction(object: LOG_Start)
// Start out with low quality
let session = AVCaptureSession()
session.sessionPreset = AVCaptureSessionPresetPhoto
// Input from video camera
let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
let currentFormat = device?.activeFormat.isVideoHDRSupported
try device?.lockForConfiguration()
if device?.activeFormat.isVideoHDRSupported == true {
device?.automaticallyAdjustsVideoHDREnabled = false
device?.isVideoHDREnabled = true
print("device?.isVideoHDREnabled\(device?.isVideoHDREnabled)")
}
if (device?.isFocusModeSupported(.continuousAutoFocus))! {
device?.focusMode = AVCaptureFocusMode.continuousAutoFocus
print("device?.focusMode\(device?.focusMode.rawValue)")
}
if (device?.isSmoothAutoFocusSupported)! {
device?.isSmoothAutoFocusEnabled = true
print("device?.isSmoothAutoFocusEnabled\(device?.isSmoothAutoFocusEnabled)")
}
if (device?.isExposureModeSupported(.continuousAutoExposure))! {
device?.exposureMode = .continuousAutoExposure
print("device?.exposureMode\(device?.exposureMode.rawValue)")
}
device?.unlockForConfiguration()
let input = try AVCaptureDeviceInput(device: device)
do {
try input.device.lockForConfiguration()
input.device.activeVideoMaxFrameDuration = CMTimeMake(1, 30)
input.device.activeVideoMinFrameDuration = CMTimeMake(1, 30)
input.device.unlockForConfiguration()
}
catch {
print("Failed to set FPS")
}
// Output
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.videoSettings = [ kCVPixelBufferPixelFormatTypeKey as AnyHashable: kCVPixelFormatType_32BGRA]
videoOutput.alwaysDiscardsLateVideoFrames = true
videoOutput.setSampleBufferDelegate(self, queue: sessionQueue)
let stillImageOutput: AVCaptureStillImageOutput = AVCaptureStillImageOutput()
stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
//stillImageOutput.isHighResolutionStillImageOutputEnabled = true
if stillImageOutput.isStillImageStabilizationSupported {
stillImageOutput.automaticallyEnablesStillImageStabilizationWhenAvailable = true
print("stillImageOutput.isStillImageStabilizationActive\(stillImageOutput.isStillImageStabilizationActive)")
}
// Join it all together
session.addInput(input)
session.addOutput(videoOutput)
if session.canAddOutput(stillImageOutput) {
session.addOutput(stillImageOutput)
self.stillImageOutput = stillImageOutput
}
if let connection = videoOutput.connection(withMediaType: AVMediaTypeVideo) {
if connection.isVideoOrientationSupported {
connection.videoOrientation = .portrait
}
if connection.isVideoStabilizationSupported {
connection.preferredVideoStabilizationMode = .standard
print("connection.activeVideoStabilizationMode\(connection.activeVideoStabilizationMode.rawValue)")
}
}
AppLog.LogFunction(object: LOG_End)
return session
}
What worked for me on the stabilization issue was to test for it in the delegate. In my project, I use the AVCaptureVideoDataOutputSampleBufferDelegate to write to my file as I test for certain things in the pixel buffer before I decide to write. It was the one place I found where it would say stabilization is allowed. Anyway, here is how I did it for the stabilization issue. Hope it helps.
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!){
self.lockQueue.sync {
if !self.isCapturing || self.isPaused {
return
}
let isVideo = captureOutput is AVCaptureVideoDataOutput
if isVideo && self.videoWriter == nil {
// testing to make sure dealing with video and not audio
let connection = captureOutput.connection(withMediaType: AVMediaTypeVideo)
if (connection?.isVideoStabilizationSupported)! {
connection?.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.cinematic
}
//other work excluded as irrelevant
}
}
}

How to implement 2x zoom from camera app with AVCaptureVideoPreviewLayer

I have a AVCaptureVideoPreviewLayerin my app that works well and is showing the same preview video as the camera app. I would like to implement the 2x zoom functionality of the camera app. How do I do this?
Basically I want my previewlayer to change the video feed to same scale as what you see in the camera app when you tap on the 1x icon to change it to 2x.
setting up preview layer
func startSession(){
captureSession = AVCaptureSession()
captureSession?.sessionPreset = AVCaptureSessionPresetPhoto
let backCamera = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
// Catch error using the do catch block
do {
let input = try AVCaptureDeviceInput(device: backCamera)
if (captureSession?.canAddInput(input) != nil){
captureSession?.addInput(input)
// Setup the preview layer
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
previewLayer?.connection.videoOrientation = AVCaptureVideoOrientation.portrait
tempImageView.layer.addSublayer(previewLayer!)
captureSession?.startRunning()
// Set up AVCaptureVideoDataOutput
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(value: kCVPixelFormatType_32BGRA as UInt32)]
dataOutput.alwaysDiscardsLateVideoFrames = true
if (captureSession?.canAddOutput(dataOutput) == true) {
captureSession?.addOutput(dataOutput)
}
let queue = DispatchQueue(label: "com.bigbob.videoQueue")
dataOutput.setSampleBufferDelegate(self, queue: queue)
}
} catch _ {
print("Error setting up camera!")
}
Set the videoZoomFactor of your AVCaptureDevice.defaultDevice and the preview layer's zoom will follow suit. Note Swift 4 it is now called AVCaptureDevice.default.
do {
try backCamera?.lockForConfiguration()
let zoomFactor:CGFloat = 2
backCamera?.videoZoomFactor = zoomFactor
backCamera?.unlockForConfiguration()
} catch {
//Catch error from lockForConfiguration
}
Here's a bit of an updated answer that fist checks to make sure the zoom factor is available before you even attempt to set it. The will prevent possibly unneeded exception catches and you can adjust the zoom check and set easily with one variable.
if let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) {
let zoomFactor : CGFloat = 2
if (captureDevice.maxAvailableVideoZoomFactor >= zoomFactor) {
try? captureDevice.lockForConfiguration()
captureDevice.videoZoomFactor = zoomFactor
captureDevice.unlockForConfiguration()
}
}

Why is the video output ALWAYS at 30 fps although it's set otherwise?

I configured the rear camera to be 120 fps. However, when I checked the sample output with captureOutput() by printing the time such function is called (see below), the difference is roughly 33ms (30fps). No matter what fps I set with activeVideoMinFrameDuration and activeVideoMaxFrameDuration, the resulting fps observed in captureOutput() is always 30 fps.
I've tested this on a iPhone 6 which can handle slow-motion video. I've read the Apple official doc at https://developer.apple.com/library/ios/documentation/AudioVideo/Conceptual/AVFoundationPG/Articles/04_MediaCapture.html. Any clue?
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate
{
var captureDevice: AVCaptureDevice?
let captureSession = AVCaptureSession()
let videoCaptureOutput = AVCaptureVideoDataOutput()
var startTime = NSDate.timeIntervalSinceReferenceDate()
// press button to start the video session
#IBAction func startPressed() {
if captureSession.inputs.count > 0 && captureSession.outputs.count > 0 {
startTime = NSDate.timeIntervalSinceReferenceDate()
captureSession.startRunning()
}
}
override func viewDidLoad() {
super.viewDidLoad()
// set capture session resolution
captureSession.sessionPreset = AVCaptureSessionPresetLow
let devices = AVCaptureDevice.devices()
var avFormat: AVCaptureDeviceFormat? = nil
for device in devices {
if (device.hasMediaType(AVMediaTypeVideo)) {
if (device.position == AVCaptureDevicePosition.Back) {
for vFormat in device.formats {
let ranges = vFormat.videoSupportedFrameRateRanges as! [AVFrameRateRange]
let filtered: Array<Double> = ranges.map({ $0.maxFrameRate } ).filter( {$0 >= 119.0} )
if !filtered.isEmpty {
// found a good device with good format!
captureDevice = device as? AVCaptureDevice
avFormat = vFormat as? AVCaptureDeviceFormat
}
}
}
}
}
// use the found capture device and format to set things up
if let dv = captureDevice {
// configure
do {
try dv.lockForConfiguration()
} catch _ {
print("failed locking device")
}
dv.activeFormat = avFormat
dv.activeVideoMinFrameDuration = CMTimeMake(1, 120)
dv.activeVideoMaxFrameDuration = CMTimeMake(1, 120)
dv.unlockForConfiguration()
// input -> session
do {
let input = try AVCaptureDeviceInput(device: dv)
if captureSession.canAddInput(input) {
captureSession.addInput(input)
}
} catch _ {
print("failed adding capture device as input to capture session")
}
}
// output -> session
let videoQueue = dispatch_queue_create("videoQueue", DISPATCH_QUEUE_SERIAL)
videoCaptureOutput.setSampleBufferDelegate(self, queue: videoQueue)
videoCaptureOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey: Int(kCVPixelFormatType_32BGRA)]
videoCaptureOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(videoCaptureOutput) {
captureSession.addOutput(videoCaptureOutput)
}
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
{
print( "\(NSDate.timeIntervalSinceReferenceDate() - startTime)" )
// More pixel/frame processing here
}
}
Answer found. Swapping orders of the two blocks "configure" and "input -> session".

Front Camera to Fill Circular UIView

In an app I'm developing the user is required to take a "selfie" (Yes, I know, but the app is for private use only).
I've got everything working with the camera showing in the circular UIView region, however I cannot get it to scale and fill the circle properly. Here's what it's doing now:
And here's what I want it to be doing:
Here's the code for my UIView:
var cameraView = UIView()
cameraView.frame = CGRectMake(100, self.view.center.y-260, 568, 568)
cameraView.backgroundColor = UIColor(red:26/255, green:188/255, blue:156/255, alpha:1)
cameraView.layer.cornerRadius = 284
cameraView.layer.borderColor = UIColor.whiteColor().CGColor
cameraView.layer.borderWidth = 15
cameraView.contentMode = UIViewContentMode.ScaleToFill
cameraView.layer.masksToBounds = true
I have tried a few different contentMode options, include ScaleToFill, ScaleAspectFill, and ScaleAspectFit. They all generate the same exact result.
As it turns out, the camera's "self.previewLayer" has a property that determines how the camera's content fills a View.
In the following code I changed "self.previewLayer.videoGravity = AVLayerVideoGravityResizeAspect" to "self.previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill"
extension SelfieViewController: AVCaptureVideoDataOutputSampleBufferDelegate{
func setupAVCapture(){
session.sessionPreset = AVCaptureSessionPreset640x480
let devices = AVCaptureDevice.devices();
// Loop through all the capture devices on this phone
for device in devices {
// Make sure this particular device supports video
if (device.hasMediaType(AVMediaTypeVideo)) {
// Finally check the position and confirm we've got the front camera
if(device.position == AVCaptureDevicePosition.Front) {
captureDevice = device as? AVCaptureDevice
if captureDevice != nil {
beginSession()
break
}
}
}
}
}
func beginSession(){
var err : NSError? = nil
var deviceInput:AVCaptureDeviceInput = AVCaptureDeviceInput(device: captureDevice, error: &err)
if err != nil {
println("error: \(err?.localizedDescription)")
}
if self.session.canAddInput(deviceInput){
self.session.addInput(deviceInput)
}
self.videoDataOutput = AVCaptureVideoDataOutput()
var rgbOutputSettings = [NSNumber(integer: kCMPixelFormat_32BGRA):kCVPixelBufferPixelFormatTypeKey]
self.videoDataOutput.alwaysDiscardsLateVideoFrames=true
self.videoDataOutputQueue = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL)
self.videoDataOutput.setSampleBufferDelegate(self, queue:self.videoDataOutputQueue)
if session.canAddOutput(self.videoDataOutput){
session.addOutput(self.videoDataOutput)
}
self.videoDataOutput.connectionWithMediaType(AVMediaTypeVideo).enabled = true
self.previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
self.previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
var rootLayer :CALayer = self.cameraView.layer
rootLayer.masksToBounds=true
self.previewLayer.frame = rootLayer.bounds
rootLayer.addSublayer(self.previewLayer)
session.startRunning()
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
// do stuff here
}
// clean up AVCapture
func stopCamera(){
session.stopRunning()
}
}

Resources