Screen capture during video preview fails - iOS

I'm trying to capture the screen while doing a video preview with AVFoundation (AVCaptureDeviceInput and AVCaptureVideoDataOutput).
Initiating the preview:
func startCamera() {
    let screenSize = UIScreen.mainScreen().bounds.size
    self.previewView = UIView(frame: CGRectMake(0, 0, screenSize.width, screenSize.height))
    self.previewView.contentMode = UIViewContentMode.ScaleAspectFit
    self.view.addSubview(previewView)
    session.sessionPreset = AVCaptureSessionPresetHigh
    let devices = AVCaptureDevice.devices()
    // Loop through all the capture devices on this phone
    for device in devices {
        // Make sure this particular device supports video
        if (device.hasMediaType(AVMediaTypeVideo)) {
            // Finally check the position and confirm we've got the back camera
            if (device.position == AVCaptureDevicePosition.Back) {
                captureDevice = device as? AVCaptureDevice
                if captureDevice != nil {
                    beginSession()
                    break
                }
            }
        }
    }
}
func beginSession() {
    var err: NSError? = nil
    let deviceInput = AVCaptureDeviceInput(device: captureDevice!, error: &err)
    if err != nil {
        println("error: \(err?.localizedDescription)")
    }
    if session.canAddInput(deviceInput) {
        session.addInput(deviceInput)
    }
    videoDataOutput = AVCaptureVideoDataOutput()
    if let videoDataOutput = videoDataOutput {
        // Note: key and value were reversed in the original; the pixel format key maps to the format value.
        let rgbOutputSettings = [kCVPixelBufferPixelFormatTypeKey as String: NSNumber(integer: kCMPixelFormat_32BGRA)]
        videoDataOutput.alwaysDiscardsLateVideoFrames = true
        videoDataOutput.setSampleBufferDelegate(self, queue: self.videoDataOutputQueue)
        if session.canAddOutput(videoDataOutput) {
            session.addOutput(videoDataOutput)
        }
        videoDataOutput.connectionWithMediaType(AVMediaTypeVideo).enabled = true
        if let previewLayer = AVCaptureVideoPreviewLayer(session: self.session) {
            self.previewLayer = previewLayer
            previewLayer.videoGravity = AVLayerVideoGravityResizeAspect
            previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.LandscapeRight
            let rootLayer: CALayer = self.previewView.layer
            rootLayer.masksToBounds = true
            previewLayer.frame = rootLayer.bounds
            rootLayer.addSublayer(self.previewLayer)
            session.startRunning()
            delay(8, closure: { () -> () in
                self.processImage()
            })
        }
    }
}
Code to capture the screen:
func processImage() {
    UIGraphicsBeginImageContextWithOptions(view.bounds.size, false, 0)
    previewLayer!.renderInContext(UIGraphicsGetCurrentContext())
    // tried previewView!.layer.render... to no avail
    let previewImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    savePDFImage(previewImage, name: "front.pdf")
}
The image returned is just all white. How do I grab a screenshot of what's on the screen while doing a video preview?

Don't capture the screen. Instead, capture a frame from the buffer and use that.
Implement AVCaptureVideoDataOutputSampleBufferDelegate.
On the AVCaptureVideoDataOutput, set the sample buffer delegate with setSampleBufferDelegate.
Implement the captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) method.
When you store the image to the device, play the shutter sound yourself.
In the end, your code looks more like this:
var videoDataOutput: AVCaptureVideoDataOutput?
var videoDataOutputQueue: dispatch_queue_t = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL)
var stillImageOutput: AVCaptureStillImageOutput?
var previewLayer: AVCaptureVideoPreviewLayer?
var captureDevice: AVCaptureDevice?
let session = AVCaptureSession()

func beginSession() {
    var err: NSError? = nil
    let deviceInput = AVCaptureDeviceInput(device: captureDevice!, error: &err)
    if err != nil {
        println("error: \(err?.localizedDescription)")
    }
    if session.canAddInput(deviceInput) {
        session.addInput(deviceInput)
    }
    stillImageOutput = AVCaptureStillImageOutput()
    videoDataOutput = AVCaptureVideoDataOutput()
    if let videoDataOutput = videoDataOutput, stillImageOutput = stillImageOutput {
        videoDataOutput.alwaysDiscardsLateVideoFrames = true
        videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey: Int(kCVPixelFormatType_32BGRA)]
        videoDataOutput.setSampleBufferDelegate(self, queue: self.videoDataOutputQueue)
        if session.canAddOutput(videoDataOutput) {
            session.addOutput(videoDataOutput)
        }
        stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
        if session.canAddOutput(stillImageOutput) {
            session.addOutput(stillImageOutput)
        }
        videoDataOutput.connectionWithMediaType(AVMediaTypeVideo).enabled = true
        if let previewLayer = AVCaptureVideoPreviewLayer(session: self.session) {
            self.previewLayer = previewLayer
            previewLayer.videoGravity = AVLayerVideoGravityResizeAspect
            previewLayer.connection.videoOrientation = AVCaptureVideoOrientation.LandscapeRight
            let rootLayer: CALayer = self.previewView.layer
            rootLayer.masksToBounds = true
            previewLayer.frame = rootLayer.bounds
            rootLayer.addSublayer(self.previewLayer)
            session.startRunning()
        }
    }
}
// this gets called periodically with an image
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    if let image = CheckResponse.imageFromSampleBuffer(sampleBuffer) {
        if keepImage(image) {
            AudioServicesPlaySystemSound(1108)
            session.stopRunning()
        }
    }
}
// This is in the Objective-C CheckResponse class to get an image from the buffer:
+ (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer {
    CVPixelBufferRef pb = CMSampleBufferGetImageBuffer(sampleBuffer);
    CIImage *ciimg = [CIImage imageWithCVPixelBuffer:pb];
    // show result
    CIContext *context = [CIContext contextWithOptions:nil];
    CGImageRef ref = [context createCGImage:ciimg fromRect:ciimg.extent];
    UIImage *image = [UIImage imageWithCGImage:ref scale:1.0 orientation:UIImageOrientationUp];
    CFRelease(ref);
    return image;
}

Related

iOS: Capture image during camera preview without action

I'm trying to capture an image from the camera preview, but I can't get an image from the preview layer. What I want to do is similar to the iOS 15 OCR mode in the Photos app, which processes the image during the camera preview; it doesn't require the user to take a shot or start recording video, it just processes the image in the preview. I looked into the docs and searched the net but could not find any useful info.
What I tried was saving the previewLayer and calling previewLayer.draw(in: context) periodically, but the image drawn in the context is blank. Now I wonder if it is possible at all.
There might be a security restriction that limits processing the camera preview to system apps, in which case I'd probably need to find another way.
Please enlighten me if there is any workaround.
Thanks!
OK. With MadProgrammer's help I got things working properly. Anurag Ajwani's site is very helpful.
Here is my simple snippet to capture video frames. You need to ensure camera permissions before CameraView gets instantiated.
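For example, a permission check along these lines could gate the creation of CameraView (a minimal sketch; the helper name is mine):
import AVFoundation

// Hypothetical helper: call this before instantiating CameraView.
func ensureCameraAccess(_ completion: @escaping (Bool) -> Void) {
    switch AVCaptureDevice.authorizationStatus(for: .video) {
    case .authorized:
        completion(true)
    case .notDetermined:
        AVCaptureDevice.requestAccess(for: .video) { granted in
            DispatchQueue.main.async { completion(granted) }
        }
    default:
        completion(false) // .denied or .restricted
    }
}
The capture snippet itself follows: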
class VideoCapture: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    //private var previewLayer: AVCaptureVideoPreviewLayer? = nil
    private var session: AVCaptureSession? = nil
    private var videoOutput: AVCaptureVideoDataOutput? = nil
    private var videoHandler: ((UIImage) -> Void)?

    override init() {
        super.init()
        let deviceSession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualWideCamera, .builtInWideAngleCamera], mediaType: .video, position: .back)
        guard deviceSession.devices.count > 0 else { return }
        if let input = try? AVCaptureDeviceInput(device: deviceSession.devices.first!) {
            let session = AVCaptureSession()
            session.beginConfiguration() // must be paired with commitConfiguration() below
            session.addInput(input)
            let videoOutput = AVCaptureVideoDataOutput()
            videoOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String: Any]
            videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "my.image.handling.queue"))
            videoOutput.alwaysDiscardsLateVideoFrames = true
            if session.canAddOutput(videoOutput) {
                session.sessionPreset = .high
                session.addOutput(videoOutput)
                self.videoOutput = videoOutput
            }
            for connection in videoOutput.connections {
                if connection.isVideoOrientationSupported {
                    connection.videoOrientation = .portrait
                }
            }
            session.commitConfiguration()
            self.session = session
            /*
            self.previewLayer = AVCaptureVideoPreviewLayer(session: session)
            if let previewLayer = self.previewLayer {
                previewLayer.videoGravity = .resizeAspectFill
                layer.insertSublayer(previewLayer, at: 0)
                CameraPreviewView.initialized = true
            }
            */
        }
    }

    func startCapturing(_ videoHandler: @escaping (UIImage) -> Void) {
        // Install the handler before starting the session so no early frames are dropped.
        self.videoHandler = videoHandler
        if let session = session {
            session.startRunning()
        }
    }

    // AVCaptureVideoDataOutputSampleBufferDelegate
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            debugPrint("unable to get video frame")
            return
        }
        //print("got video frame")
        if let videoHandler = self.videoHandler {
            let rect = CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(imageBuffer), height: CVPixelBufferGetHeight(imageBuffer))
            let ciImage = CIImage(cvImageBuffer: imageBuffer)
            let ciContext = CIContext()
            guard let cgImage = ciContext.createCGImage(ciImage, from: rect) else { return }
            videoHandler(UIImage(cgImage: cgImage))
        }
    }
}
struct CameraView: View {
    @State var capturedVideo: UIImage? = nil
    let videoCapture = VideoCapture()

    var body: some View {
        VStack {
            ZStack(alignment: .center) {
                if let capturedVideo = self.capturedVideo {
                    Image(uiImage: capturedVideo)
                        .resizable()
                        .scaledToFill()
                }
            }
        }
        .background(Color.black)
        .onAppear {
            self.videoCapture.startCapturing { uiImage in
                self.capturedVideo = uiImage
            }
        }
    }
}
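One caveat worth noting (my addition, not part of the original answer): captureOutput, and therefore videoHandler, runs on the capture queue, while @State should only be mutated on the main thread. Hopping to the main queue in the handler avoids that:
self.videoCapture.startCapturing { uiImage in
    DispatchQueue.main.async {
        self.capturedVideo = uiImage
    }
}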

How to enable Stabilization and HDR in iOS Camera?

What I did:
I have tried to enable stabilization and HDR, but it's not working. I think I'm on the right path, but when I check whether the current device supports stabilization and HDR, I get false in both cases on every device I test.
Please guide me if I've made any mistakes in the code snippet below.
Thanks in advance!!
My code snippet:
func createAVSession() throws -> AVCaptureSession {
    AppLog.LogFunction(object: LOG_Start)
    // Start out with low quality
    let session = AVCaptureSession()
    session.sessionPreset = AVCaptureSessionPresetPhoto
    // Input from video camera
    let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
    let currentFormat = device?.activeFormat.isVideoHDRSupported
    try device?.lockForConfiguration()
    if device?.activeFormat.isVideoHDRSupported == true {
        device?.automaticallyAdjustsVideoHDREnabled = false
        device?.isVideoHDREnabled = true
        print("device?.isVideoHDREnabled \(device?.isVideoHDREnabled)")
    }
    if (device?.isFocusModeSupported(.continuousAutoFocus))! {
        device?.focusMode = AVCaptureFocusMode.continuousAutoFocus
        print("device?.focusMode \(device?.focusMode.rawValue)")
    }
    if (device?.isSmoothAutoFocusSupported)! {
        device?.isSmoothAutoFocusEnabled = true
        print("device?.isSmoothAutoFocusEnabled \(device?.isSmoothAutoFocusEnabled)")
    }
    if (device?.isExposureModeSupported(.continuousAutoExposure))! {
        device?.exposureMode = .continuousAutoExposure
        print("device?.exposureMode \(device?.exposureMode.rawValue)")
    }
    device?.unlockForConfiguration()
    let input = try AVCaptureDeviceInput(device: device)
    do {
        try input.device.lockForConfiguration()
        input.device.activeVideoMaxFrameDuration = CMTimeMake(1, 30)
        input.device.activeVideoMinFrameDuration = CMTimeMake(1, 30)
        input.device.unlockForConfiguration()
    } catch {
        print("Failed to set FPS")
    }
    // Output
    let videoOutput = AVCaptureVideoDataOutput()
    videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as AnyHashable: kCVPixelFormatType_32BGRA]
    videoOutput.alwaysDiscardsLateVideoFrames = true
    videoOutput.setSampleBufferDelegate(self, queue: sessionQueue)
    let stillImageOutput: AVCaptureStillImageOutput = AVCaptureStillImageOutput()
    stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
    //stillImageOutput.isHighResolutionStillImageOutputEnabled = true
    if stillImageOutput.isStillImageStabilizationSupported {
        stillImageOutput.automaticallyEnablesStillImageStabilizationWhenAvailable = true
        print("stillImageOutput.isStillImageStabilizationActive \(stillImageOutput.isStillImageStabilizationActive)")
    }
    // Join it all together
    session.addInput(input)
    session.addOutput(videoOutput)
    if session.canAddOutput(stillImageOutput) {
        session.addOutput(stillImageOutput)
        self.stillImageOutput = stillImageOutput
    }
    if let connection = videoOutput.connection(withMediaType: AVMediaTypeVideo) {
        if connection.isVideoOrientationSupported {
            connection.videoOrientation = .portrait
        }
        if connection.isVideoStabilizationSupported {
            connection.preferredVideoStabilizationMode = .standard
            print("connection.activeVideoStabilizationMode \(connection.activeVideoStabilizationMode.rawValue)")
        }
    }
    AppLog.LogFunction(object: LOG_End)
    return session
}
What worked for me on the stabilization issue was to test for it in the delegate. In my project, I use AVCaptureVideoDataOutputSampleBufferDelegate to write to a file, checking certain things in the pixel buffer before I decide to write. That was the one place I found where stabilization was reported as supported. Anyway, here is how I handled the stabilization issue. Hope it helps.
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
    self.lockQueue.sync {
        if !self.isCapturing || self.isPaused {
            return
        }
        let isVideo = captureOutput is AVCaptureVideoDataOutput
        if isVideo && self.videoWriter == nil {
            // testing to make sure dealing with video and not audio
            let connection = captureOutput.connection(withMediaType: AVMediaTypeVideo)
            if (connection?.isVideoStabilizationSupported)! {
                connection?.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.cinematic
            }
            //other work excluded as irrelevant
        }
    }
}
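On the HDR half of the question: isVideoHDRSupported is a property of the device's activeFormat, and the format selected by a session preset may not support it. A sketch (modern Swift syntax, not from the original answer) that searches the device's formats for one that does before enabling HDR:
if let device = AVCaptureDevice.default(for: .video) {
    do {
        try device.lockForConfiguration()
        // Note: setting activeFormat directly overrides the sessionPreset.
        if let hdrFormat = device.formats.first(where: { $0.isVideoHDRSupported }) {
            device.activeFormat = hdrFormat
            // Automatic adjustment must be off before isVideoHDREnabled can be set.
            device.automaticallyAdjustsVideoHDREnabled = false
            device.isVideoHDREnabled = true
        }
        device.unlockForConfiguration()
    } catch {
        print("Could not lock device for configuration: \(error)")
    }
}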

How to set AVCaptureSession.sessionPreset to 720x1280 px (or a 1/1.77 scale of anything)

I am using Swift 3 and can't change the resolution to custom values. When I use AVCaptureSessionPresetMedium etc., it doesn't fit the screen scale (1/1.77).
let output = AVCaptureVideoDataOutput()
output.setSampleBufferDelegate(self, queue: sampleQueue)
let metaOutput = AVCaptureMetadataOutput()
metaOutput.setMetadataObjectsDelegate(self, queue: faceQueue)
session.beginConfiguration()
// Desired resolution: 720x1280 px
// session.sessionPreset = AVCaptureSessionPresetMedium
if session.canAddInput(input) {
    session.addInput(input)
}
if session.canAddOutput(output) {
    output.alwaysDiscardsLateVideoFrames = true
    session.addOutput(output)
    connection1 = output.connection(withMediaType: AVMediaTypeVideo)
    connection1?.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
    connection1?.videoOrientation = .portrait
    connection1?.isVideoMirrored = true
}
if session.canAddOutput(metaOutput) {
    output.alwaysDiscardsLateVideoFrames = true
    session.addOutput(metaOutput)
    connection2 = metaOutput.connection(withMediaType: AVMediaTypeMetadata)
    connection2?.preferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.auto
    connection2?.videoOrientation = .portrait
    connection2?.isVideoMirrored = true
}
You should use the AVCaptureSessionPreset1280x720 preset. The presets are denoted in landscape, but a capture setting of 1280x720 is the same as 720x1280; the only difference is the orientation. For example, with an app that supports rotation:
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    cameraImage = CIImage(cvPixelBuffer: pixelBuffer)
    print(cameraImage?.extent ?? "")
}
Will print (0.0, 0.0, 1280.0, 720.0) when in landscape, and (0.0, 0.0, 720.0, 1280.0) when in portrait.
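Applied to the asker's configuration block, the change is one line (a sketch, keeping the Swift 3 constants used in the question):
session.beginConfiguration()
// 1280x720 capture; with a portrait connection the buffers arrive as 720x1280.
session.sessionPreset = AVCaptureSessionPreset1280x720
if session.canAddInput(input) {
    session.addInput(input)
}
session.commitConfiguration()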

iOS Swift - AVCaptureSession - Capture frames respecting frame rate

I'm trying to build an app which will capture frames from the camera and process them with OpenCV before saving those files to the device, but at a specific frame rate.
What I'm stuck on at the moment is the fact that AVCaptureVideoDataOutputSampleBufferDelegate doesn't appear to respect the AVCaptureDevice.activeVideoMinFrameDuration or AVCaptureDevice.activeVideoMaxFrameDuration settings.
captureOutput runs far quicker than 2 frames per second, as the above settings would indicate.
Do you happen to know how one could achieve this, with or without the delegate?
ViewController:
override func viewDidLoad() {
    super.viewDidLoad()
}

override func viewDidAppear(animated: Bool) {
    setupCaptureSession()
}

func setupCaptureSession() {
    let session = AVCaptureSession()
    session.sessionPreset = AVCaptureSessionPreset1280x720
    let videoDevices = AVCaptureDevice.devices() as! [AVCaptureDevice]
    for device in videoDevices {
        if device.position == AVCaptureDevicePosition.Back {
            let captureDevice = device
            do {
                try captureDevice.lockForConfiguration()
                captureDevice.activeVideoMinFrameDuration = CMTimeMake(1, 2)
                captureDevice.activeVideoMaxFrameDuration = CMTimeMake(1, 2)
                captureDevice.unlockForConfiguration()
                let input = try AVCaptureDeviceInput(device: captureDevice)
                if session.canAddInput(input) {
                    session.addInput(input)
                }
                let output = AVCaptureVideoDataOutput()
                let dispatchQueue = dispatch_queue_create("streamoutput", nil)
                output.setSampleBufferDelegate(self, queue: dispatchQueue)
                session.addOutput(output)
                session.startRunning()
                let previewLayer = AVCaptureVideoPreviewLayer(session: session)
                previewLayer.connection.videoOrientation = .LandscapeRight
                let previewBounds = CGRectMake(0, 0, self.view.frame.width/2, self.view.frame.height+20)
                previewLayer.backgroundColor = UIColor.blackColor().CGColor
                previewLayer.frame = previewBounds
                previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
                self.imageView.layer.addSublayer(previewLayer)
                self.previewMat.frame = CGRectMake(previewBounds.width, 0, previewBounds.width, previewBounds.height)
            } catch _ {
            }
            break
        }
    }
}

func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    self.wrapper.processBuffer(self.getUiImageFromBuffer(sampleBuffer), self.previewMat)
}
So I've figured out the problem. In the header comments of AVCaptureDevice.h, above the activeVideoMinFrameDuration property, it states:
On iOS, the receiver's activeVideoMinFrameDuration resets to its default value under the following conditions:
- The receiver's activeFormat changes
- The receiver's AVCaptureDeviceInput's session's sessionPreset changes
- The receiver's AVCaptureDeviceInput is added to a session
The last bullet point was causing my problem, so doing the following solved it for me:
do {
    let input = try AVCaptureDeviceInput(device: captureDevice)
    if session.canAddInput(input) {
        session.addInput(input)
    }
    // Lock and set the frame durations only after the input has been added to the session.
    try captureDevice.lockForConfiguration()
    captureDevice.activeVideoMinFrameDuration = CMTimeMake(1, 2)
    captureDevice.activeVideoMaxFrameDuration = CMTimeMake(1, 2)
    captureDevice.unlockForConfiguration()
    let output = AVCaptureVideoDataOutput()
    let dispatchQueue = dispatch_queue_create("streamoutput", nil)
    output.setSampleBufferDelegate(self, queue: dispatchQueue)
    session.addOutput(output)
    session.startRunning()
} catch _ {
}
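If you ever need to cap the processing rate independently of the device's frame rate ("with or without the delegate", as the question asks), another option is to throttle inside the delegate using the buffer timestamps. A sketch of my own, not part of the original answer; lastCaptureTime is a hypothetical property on the view controller:
var lastCaptureTime = kCMTimeZero // hypothetical property

func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    let timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
    // Drop frames arriving less than 0.5 s after the last processed one (~2 fps).
    if CMTimeGetSeconds(CMTimeSubtract(timestamp, lastCaptureTime)) < 0.5 { return }
    lastCaptureTime = timestamp
    // ... process the frame ...
}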

Front Camera to Fill Circular UIView

In an app I'm developing, the user is required to take a "selfie" (yes, I know, but the app is for private use only).
I've got everything working with the camera showing in the circular UIView region; however, I cannot get it to scale and fill the circle properly. Here's what it's doing now:
And here's what I want it to be doing:
Here's the code for my UIView:
var cameraView = UIView()
cameraView.frame = CGRectMake(100, self.view.center.y-260, 568, 568)
cameraView.backgroundColor = UIColor(red:26/255, green:188/255, blue:156/255, alpha:1)
cameraView.layer.cornerRadius = 284
cameraView.layer.borderColor = UIColor.whiteColor().CGColor
cameraView.layer.borderWidth = 15
cameraView.contentMode = UIViewContentMode.ScaleToFill
cameraView.layer.masksToBounds = true
I have tried a few different contentMode options, including ScaleToFill, ScaleAspectFill, and ScaleAspectFit. They all produce exactly the same result.
As it turns out, the camera's self.previewLayer has a property that determines how the camera's content fills a view.
In the following code I changed self.previewLayer.videoGravity from AVLayerVideoGravityResizeAspect to AVLayerVideoGravityResizeAspectFill:
extension SelfieViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func setupAVCapture() {
        session.sessionPreset = AVCaptureSessionPreset640x480
        let devices = AVCaptureDevice.devices()
        // Loop through all the capture devices on this phone
        for device in devices {
            // Make sure this particular device supports video
            if (device.hasMediaType(AVMediaTypeVideo)) {
                // Finally check the position and confirm we've got the front camera
                if (device.position == AVCaptureDevicePosition.Front) {
                    captureDevice = device as? AVCaptureDevice
                    if captureDevice != nil {
                        beginSession()
                        break
                    }
                }
            }
        }
    }

    func beginSession() {
        var err: NSError? = nil
        let deviceInput = AVCaptureDeviceInput(device: captureDevice, error: &err)
        if err != nil {
            println("error: \(err?.localizedDescription)")
        }
        if self.session.canAddInput(deviceInput) {
            self.session.addInput(deviceInput)
        }
        self.videoDataOutput = AVCaptureVideoDataOutput()
        // Note: key and value were reversed in the original; the pixel format key maps to the format value.
        let rgbOutputSettings = [kCVPixelBufferPixelFormatTypeKey as String: NSNumber(integer: kCMPixelFormat_32BGRA)]
        self.videoDataOutput.alwaysDiscardsLateVideoFrames = true
        self.videoDataOutputQueue = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL)
        self.videoDataOutput.setSampleBufferDelegate(self, queue: self.videoDataOutputQueue)
        if session.canAddOutput(self.videoDataOutput) {
            session.addOutput(self.videoDataOutput)
        }
        self.videoDataOutput.connectionWithMediaType(AVMediaTypeVideo).enabled = true
        self.previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
        self.previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
        let rootLayer: CALayer = self.cameraView.layer
        rootLayer.masksToBounds = true
        self.previewLayer.frame = rootLayer.bounds
        rootLayer.addSublayer(self.previewLayer)
        session.startRunning()
    }

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
        // do stuff here
    }

    // clean up AVCapture
    func stopCamera() {
        session.stopRunning()
    }
}
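A related detail (my note, not from the original answer): previewLayer.frame is copied from rootLayer.bounds once, at setup time. If cameraView is resized later (rotation, Auto Layout), the preview layer keeps its stale frame. Keeping it in sync, for example in a hypothetical UIView subclass that hosts the preview layer, preserves the filled circle:
// Hypothetical UIView subclass hosting the preview layer.
override func layoutSubviews() {
    super.layoutSubviews()
    previewLayer?.frame = bounds
}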
