Swift, Firebase - Use CMSampleBufferRef with live feed of camera - iOS

I'm currently trying to implement ML Kit from Firebase to use text recognition.
So far, I've got the code for the camera, which shows its live feed inside a UIView. My intention is now to recognize text in this live feed, which I reckon is possible with the help of CMSampleBufferRef (let image = VisionImage(buffer: bufferRef) - see the linked Firebase tutorial, Step 2).
How can I create such a CMSampleBufferRef and make it hold the live feed of the camera (UIView)?
My code for the camera:
@IBOutlet weak var cameraView: UIView!
var session: AVCaptureSession?
var device: AVCaptureDevice?
var input: AVCaptureDeviceInput?
var output: AVCaptureMetadataOutput?
var prevLayer: AVCaptureVideoPreviewLayer?
override func viewDidLoad() {
super.viewDidLoad()
prevLayer?.frame.size = cameraView.frame.size
}
func createSession() {
session = AVCaptureSession()
device = AVCaptureDevice.default(for: AVMediaType.video)
do{
input = try AVCaptureDeviceInput(device: device!)
}
catch{
print(error)
}
if let input = input{
session?.addInput(input)
}
prevLayer = AVCaptureVideoPreviewLayer(session: session!)
prevLayer?.frame.size = cameraView.frame.size
prevLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
prevLayer?.connection?.videoOrientation = transformOrientation(orientation: UIInterfaceOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue)!)
cameraView.layer.addSublayer(prevLayer!)
session?.startRunning()
}
func cameraWithPosition(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera, .builtInTelephotoCamera, .builtInTrueDepthCamera, .builtInWideAngleCamera, ], mediaType: .video, position: position)
if let device = deviceDiscoverySession.devices.first {
return device
}
return nil
}
override func viewWillTransition(to size: CGSize, with coordinator: UIViewControllerTransitionCoordinator) {
coordinator.animate(alongsideTransition: { (context) -> Void in
self.prevLayer?.connection?.videoOrientation = self.transformOrientation(orientation: UIInterfaceOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue)!)
self.prevLayer?.frame.size = self.cameraView.frame.size
}, completion: { (context) -> Void in
})
super.viewWillTransition(to: size, with: coordinator)
}
func transformOrientation(orientation: UIInterfaceOrientation) -> AVCaptureVideoOrientation {
switch orientation {
case .landscapeLeft:
return .landscapeLeft
case .landscapeRight:
return .landscapeRight
case .portraitUpsideDown:
return .portraitUpsideDown
default:
return .portrait
}
}

Edit: I have added a functional Swift sample matching your language requirement:
import UIKit
import AVFoundation
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var cameraView: UIView!
var session: AVCaptureSession!
var device: AVCaptureDevice?
var input: AVCaptureDeviceInput?
var videoOutput: AVCaptureVideoDataOutput!
var output: AVCaptureMetadataOutput?
var prevLayer: AVCaptureVideoPreviewLayer!
override func viewDidLoad() {
super.viewDidLoad()
session = AVCaptureSession()
device = AVCaptureDevice.default(for: AVMediaType.video)
do{
input = try AVCaptureDeviceInput(device: device!)
}
catch{
print(error)
return
}
if let input = input {
if session.canAddInput(input) {
session.addInput(input)
}
}
videoOutput = AVCaptureVideoDataOutput()
videoOutput.videoSettings = [
String(kCVPixelBufferPixelFormatTypeKey): NSNumber(value: kCVPixelFormatType_32BGRA)
]
videoOutput.alwaysDiscardsLateVideoFrames = true
let queue = DispatchQueue(label: "video-frame-sampler")
videoOutput.setSampleBufferDelegate(self, queue: queue)
if session.canAddOutput(videoOutput) {
session.addOutput(videoOutput)
if let connection = videoOutput.connection(with: .video) {
connection.videoOrientation = videoOrientationFromInterfaceOrientation()
if connection.isVideoStabilizationSupported {
connection.preferredVideoStabilizationMode = .auto
}
}
}
prevLayer = AVCaptureVideoPreviewLayer(session: session)
prevLayer.frame.size = cameraView.frame.size
prevLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraView.layer.addSublayer(prevLayer!)
session.startRunning()
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
//pass your sampleBuffer to the Vision API
//I recommend not passing every frame, however; skip some frames until the camera is steady and focused
print("frame received")
}
func videoOrientationFromInterfaceOrientation() -> AVCaptureVideoOrientation {
return AVCaptureVideoOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue)!
}
}
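For the ML Kit part, here is a minimal sketch of what the body of captureOutput could look like with the legacy FirebaseMLVision pod from the tutorial you linked; the orientation value and the lack of frame throttling are assumptions you will want to adapt to your setup:
import FirebaseMLVision
// property on the view controller
lazy var textRecognizer = Vision.vision().onDeviceTextRecognizer()
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // Wrap the CMSampleBuffer for ML Kit
    let visionImage = VisionImage(buffer: sampleBuffer)
    let metadata = VisionImageMetadata()
    metadata.orientation = .rightTop // assumption: back camera in portrait; adjust to your connection's videoOrientation
    visionImage.metadata = metadata
    textRecognizer.process(visionImage) { result, error in
        guard error == nil, let result = result else { return }
        print(result.text) // recognized text for this frame
    }
}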
I see that you already have set up your input and preview layer but you need to set up your video capture output, as well, to capture your CMSampleBufferRef frames.
To do this, set up an object of type AVCaptureVideoDataOutput with the following steps:
Create an instance of AVCaptureVideoDataOutput and configure it:
AVCaptureVideoDataOutput* videoOutput = [[AVCaptureVideoDataOutput new] autorelease];
videoOutput.videoSettings = @{(id)kCVPixelBufferPixelFormatTypeKey: @(kCVPixelFormatType_32BGRA)};
videoOutput.alwaysDiscardsLateVideoFrames = YES;
Set up the frame capture (sample buffer) delegate of the configured output and add the output to the session:
dispatch_queue_t queue = dispatch_queue_create("video-frame-sampler", 0);
[videoOutput setSampleBufferDelegate:self queue:queue];
if ([self.session canAddOutput:videoOutput]) {
[self.session addOutput:videoOutput];
AVCaptureConnection* connection = [videoOutput connectionWithMediaType:AVMediaTypeVideo];
connection.videoOrientation = [self videoOrientationFromDeviceOrientation];
if (connection.supportsVideoStabilization) {
connection.preferredVideoStabilizationMode = AVCaptureVideoStabilizationModeAuto;
}
}
Implement the captureOutput:didOutputSampleBuffer:fromConnection: method, where you will get your required CMSampleBufferRef:
-(void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
//pass your sampleBuffer to the Vision API
//I recommend not passing every frame, however; skip some frames until the camera is steady and focused
}
I'm a plain old Objective-C developer, but you can easily convert the code to Swift as per your need.
Additionally, here is the code for videoOrientationFromDeviceOrientation method:
-(AVCaptureVideoOrientation)videoOrientationFromDeviceOrientation {
UIDeviceOrientation orientation = [UIDevice currentDevice].orientation;
AVCaptureVideoOrientation result = (AVCaptureVideoOrientation)orientation;
if ( orientation == UIDeviceOrientationLandscapeLeft )
result = AVCaptureVideoOrientationLandscapeRight;
else if ( orientation == UIDeviceOrientationLandscapeRight )
result = AVCaptureVideoOrientationLandscapeLeft;
return result;
}

Related

captureOutput from AVCaptureVideoDataOutputSampleBufferDelegate is not being called

I am seeking to leverage the device camera as a light sensor as described in this post. Unfortunately, the captureOutput function is never called on the AVCaptureVideoDataOutputSampleBufferDelegate. It may be relevant that I am attempting this inside a SwiftUI app; I have not seen this problem posted about or resolved in the context of a SwiftUI app.
class VideoStream: NSObject, ObservableObject,
AVCaptureVideoDataOutputSampleBufferDelegate {
@Published var luminosityReading : Double = 0.0
var session : AVCaptureSession!
override init() {
super.init()
authorizeCapture()
}
func authorizeCapture() {
// request camera permissions and call beginCapture()
...
}
func beginCapture() {
print("beginCapture entered") // prints
session = AVCaptureSession()
session.beginConfiguration()
let videoDevice = bestDevice() // func def omitted for readability
print("Device: \(videoDevice)") // prints a valid device
guard
let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
session.canAddInput(videoDeviceInput)
else {
print("Camera selection failed")
return
}
let videoOutput = AVCaptureVideoDataOutput()
guard
session.canAddOutput(videoOutput)
else {
print("Error creating video output")
return
}
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label:
"VideoQueue"))
session.addOutput(videoOutput)
session.sessionPreset = .medium
session.commitConfiguration()
session.startRunning()
}
// From: https://stackoverflow.com/questions/41921326/how-to-get-light-value-from-avfoundation/46842115#46842115
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer,
from connection: AVCaptureConnection) {
print("captureOutput entered") // never printed
// Retrieving EXIF data of camara frame buffer
let rawMetadata = CMCopyDictionaryOfAttachments(allocator: nil, target: sampleBuffer, attachmentMode: CMAttachmentMode(kCMAttachmentMode_ShouldPropagate))
let metadata = CFDictionaryCreateMutableCopy(nil, 0, rawMetadata) as NSMutableDictionary
let exifData = metadata.value(forKey: "{Exif}") as? NSMutableDictionary
let FNumber : Double = exifData?["FNumber"] as! Double
let ExposureTime : Double = exifData?["ExposureTime"] as! Double
let ISOSpeedRatingsArray = exifData!["ISOSpeedRatings"] as? NSArray
let ISOSpeedRatings : Double = ISOSpeedRatingsArray![0] as! Double
let CalibrationConstant : Double = 50
//Calculating the luminosity
let luminosity : Double = (CalibrationConstant * FNumber * FNumber ) / ( ExposureTime * ISOSpeedRatings )
luminosityReading = luminosity
}
}
Lastly, I instantiate VideoStream as a StateObject in my ContentView and attempt to read the updated luminosityReading:
struct ContentView: View {
@StateObject var videoStream = VideoStream()
var body: some View {
Text(String(format: "%.2f Lux", videoStream.luminosityReading))
.padding()
}
}
I have read and implemented the solutions described in these similar posts:
Using AVCaptureVideoDataOutputSampleBufferDelegate without a preview window
captureOutput not being called
captureOutput not being called from delegate
captureOutput not being called by AVCaptureAudioDataOutputSampleBufferDelegate
In Swift, adapted AVCaptureVideoDataOutputSampleBufferDelegate, but captureOutput never getting called
AVCaptureVideoDataOutput captureOutput not being called
Swift - captureOutput is not being executed
Why AVCaptureVideoDataOutputSampleBufferDelegate method is not called
Why captureOutput is never called?
func captureOutput is never called
captureOutput() function is never called swift4
Minimal Reproducible Example:
import SwiftUI
import AVKit
struct ContentView: View {
@StateObject var videoStream = VideoStream()
var body: some View {
Text(String(format: "%.2f Lux", videoStream.luminosityReading))
}
}
class VideoStream: NSObject, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
@Published var luminosityReading : Double = 0.0
var session : AVCaptureSession!
override init() {
super.init()
authorizeCapture()
}
func authorizeCapture() {
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized: // The user has previously granted access to the camera.
beginCapture()
case .notDetermined: // The user has not yet been asked for camera access.
AVCaptureDevice.requestAccess(for: .video) { granted in
if granted {
self.beginCapture()
}
}
case .denied: // The user has previously denied access.
return
case .restricted: // The user can't grant access due to restrictions.
return
}
}
func beginCapture() {
print("beginCapture entered")
let testDevice = AVCaptureDevice.default(for: .video)
print("Image Capture Device: \(testDevice)")
guard
let videoDeviceInput = try? AVCaptureDeviceInput(device: testDevice!),
session.canAddInput(videoDeviceInput)
else {
print("Camera selection failed")
return
}
let videoOutput = AVCaptureVideoDataOutput()
guard
session.canAddOutput(videoOutput)
else {
print("Error creating video output")
return
}
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "VideoQueue"))
session.addOutput(videoOutput)
session.sessionPreset = .medium
session.commitConfiguration()
session.startRunning()
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
print("captureOutput entered") // never printed
// light meter logic to update luminosityReading
}
}
You are missing adding the input to the session:
if session.canAddInput(videoDeviceInput){
session.addInput(videoDeviceInput)
}
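For reference, here is a sketch of beginCapture() with the session created, configured, and the input added before the output; this assumes the same VideoStream class and properties as above:
func beginCapture() {
    session = AVCaptureSession()
    session.beginConfiguration()
    guard
        let device = AVCaptureDevice.default(for: .video),
        let videoDeviceInput = try? AVCaptureDeviceInput(device: device),
        session.canAddInput(videoDeviceInput)
    else {
        print("Camera selection failed")
        return
    }
    session.addInput(videoDeviceInput) // the call that was missing
    let videoOutput = AVCaptureVideoDataOutput()
    guard session.canAddOutput(videoOutput) else {
        print("Error creating video output")
        return
    }
    videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "VideoQueue"))
    session.addOutput(videoOutput)
    session.sessionPreset = .medium
    session.commitConfiguration()
    session.startRunning()
}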

How to get camera frame to process using swiftUI?

I'm trying to get frames, like VideoCapture in OpenCV, using SwiftUI.
Here is my code, which is not working.
I expected "Got a frame!" to be printed to the console whenever a frame from the camera is available.
At least I can see the preview on the screen.
How can I solve this, or is there another approach?
If you have some reference that does the same thing, it would be helpful.
// For camera streaming
struct DemoVideoStreaming: View {
var body: some View {
VStack {
PreviewHolder()
}.frame(minWidth: 0, maxWidth: .infinity, minHeight: 0, maxHeight: .infinity, alignment: .center)
}
}
struct PreviewHolder: UIViewRepresentable {
func makeUIView(context: UIViewRepresentableContext<PreviewHolder>) -> PreviewView {
PreviewView()
}
func updateUIView(_ uiView: PreviewView, context: UIViewRepresentableContext<PreviewHolder>) {
}
typealias UIViewType = PreviewView
}
class PreviewView: UIView, AVCaptureVideoDataOutputSampleBufferDelegate {
private var captureSession: AVCaptureSession?
init() {
super.init(frame: .zero)
var allowedAccess = false
let blocker = DispatchGroup()
blocker.enter()
AVCaptureDevice.requestAccess(for: .video) { flag in
allowedAccess = flag
blocker.leave()
}
blocker.wait()
if !allowedAccess {
print("!!! NO ACCESS TO CAMERA")
return
}
// setup session
// Step 3. AVCaptureSession
let session = AVCaptureSession()
session.beginConfiguration()
let videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera,
for : .video, position: .front) //alternate AVCaptureDevice.default(for: .video)
guard videoDevice != nil, let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice!), session.canAddInput(videoDeviceInput) else {
print("!!! NO CAMERA DETECTED")
return
}
session.addInput(videoDeviceInput)
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "sample buffer"))
guard session.canAddOutput(videoOutput) else { return }
session.addOutput(videoOutput)
session.commitConfiguration()
self.captureSession = session
}
override class var layerClass: AnyClass {
AVCaptureVideoPreviewLayer.self
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
var videoPreviewLayer: AVCaptureVideoPreviewLayer {
return layer as! AVCaptureVideoPreviewLayer
}
// wrapping dispatch
override func didMoveToSuperview() {
super.didMoveToSuperview()
if nil != self.superview {
self.videoPreviewLayer.session = self.captureSession
self.videoPreviewLayer.videoGravity = .resizeAspect
self.captureSession?.startRunning()
} else {
self.captureSession?.stopRunning()
}
}
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
print("Got a frame!")
}
}
That is because your captureOutput method doesn't match the AVCaptureVideoDataOutputSampleBufferDelegate signature, so it is never called on your class.
Here is how it should look:
import UIKit
import AVFoundation
final class PreviewView: UIView ,AVCaptureVideoDataOutputSampleBufferDelegate{
private var captureSession: AVCaptureSession?
var imageBufferHandler: ImageBufferHandler?
private var videoConnection: AVCaptureConnection!
init() {
super.init(frame: .zero)
var allowedAccess = false
let blocker = DispatchGroup()
blocker.enter()
AVCaptureDevice.requestAccess(for: .video) { flag in
allowedAccess = flag
blocker.leave()
}
blocker.wait()
if !allowedAccess {
print("!!! NO ACCESS TO CAMERA")
return
}
// setup session
let session = AVCaptureSession()
session.beginConfiguration()
let videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera,
for: .video, position: .unspecified) //alternate AVCaptureDevice.default(for: .video)
guard videoDevice != nil, let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice!), session.canAddInput(videoDeviceInput) else {
print("!!! NO CAMERA DETECTED")
return
}
session.addInput(videoDeviceInput)
session.commitConfiguration()
self.captureSession = session
// setup video output
let videoDataOutput = AVCaptureVideoDataOutput()
videoDataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey : NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
videoDataOutput.alwaysDiscardsLateVideoFrames = true
let queue = DispatchQueue(label: "com.queue.videosamplequeue")
videoDataOutput.setSampleBufferDelegate(self, queue: queue)
guard captureSession!.canAddOutput(videoDataOutput) else {
fatalError()
}
captureSession!.addOutput(videoDataOutput)
videoConnection = videoDataOutput.connection(with: .video)
}
override class var layerClass: AnyClass {
AVCaptureVideoPreviewLayer.self
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
var videoPreviewLayer: AVCaptureVideoPreviewLayer {
return layer as! AVCaptureVideoPreviewLayer
}
override func didMoveToSuperview() {
super.didMoveToSuperview()
if nil != self.superview {
self.videoPreviewLayer.session = self.captureSession
self.videoPreviewLayer.videoGravity = .resize
self.captureSession?.startRunning()
}
else {
self.captureSession?.stopRunning()
}
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
print("Got a frame!")
if connection.videoOrientation != .portrait {
connection.videoOrientation = .portrait
return
}
if let imageBufferHandler = imageBufferHandler
{
imageBufferHandler(sampleBuffer)
}
}}
Don't forget to add the image buffer handler:
typealias ImageBufferHandler = ((_ imageBuffer: CMSampleBuffer) -> ())
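A possible way to hand frames back to the SwiftUI side is to pass a handler through the UIViewRepresentable wrapper; this is an illustrative sketch, not part of the original answer, and the handler is called on the video sample queue, so dispatch to the main queue before touching any UI:
import SwiftUI
struct PreviewHolder: UIViewRepresentable {
    // Called once per frame on the "com.queue.videosamplequeue" queue
    var onFrame: ImageBufferHandler?
    func makeUIView(context: Context) -> PreviewView {
        let view = PreviewView()
        view.imageBufferHandler = onFrame
        return view
    }
    func updateUIView(_ uiView: PreviewView, context: Context) {}
}
// Usage:
// PreviewHolder(onFrame: { sampleBuffer in
//     // process the CMSampleBuffer here
// })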

Record depth map from iPhone as sequence

I want to create an application on iOS that can record and save RGB+Depth data. I have been able to capture both from the dual camera and preview them on the screen in real time. Now I want to save them as two sequences in the library (one RGB sequence and one depth-map sequence).
So my question is: how can I save this depth information to the iPhone gallery as a video or sequence, saving the RGB info at the same time, for later processing?
I am working with Xcode 10.2, Swift 5 and an iPhone XS.
import UIKit
import AVFoundation
class ViewController: UIViewController {
@IBOutlet weak var previewView: UIImageView!
@IBOutlet weak var previewModeControl: UISegmentedControl!
var previewMode = PreviewMode.original //Original(RGB) or Depth
let session = AVCaptureSession()
let dataOutputQueue = DispatchQueue(label: "video data queue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
var background: CIImage?
var depthMap: CIImage?
var scale: CGFloat = 0.0
override func viewDidLoad() {
super.viewDidLoad()
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
configureCaptureSession()
session.startRunning()
}
override var shouldAutorotate: Bool {
return false
}
func configureCaptureSession() {
session.beginConfiguration()
//Add input to the session
guard let camera = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .unspecified) else {
fatalError("No depth video camera available")
}
session.sessionPreset = .photo
do{
let cameraInput = try AVCaptureDeviceInput(device: camera)
if session.canAddInput(cameraInput){
session.addInput(cameraInput)
}else{
fatalError("Error adding input device to session")
}
}catch{
fatalError(error.localizedDescription)
}
//Add output to the session
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
if session.canAddOutput(videoOutput){
session.addOutput(videoOutput)
}else{
fatalError("Error adding output to session")
}
let videoConnection = videoOutput.connection(with: .video)
videoConnection?.videoOrientation = .portrait
//Add output to the session DEPTH
let depthOutput = AVCaptureDepthDataOutput()
//Set the current view controller as the delegate for the new object
depthOutput.setDelegate(self, callbackQueue: dataOutputQueue)
depthOutput.isFilteringEnabled = true //filtering fills in holes in the data
if session.canAddOutput(depthOutput){
session.addOutput(depthOutput)
}else{
fatalError("Error adding output to session")
}
let depthConnection = depthOutput.connection(with: .depthData)
depthConnection?.videoOrientation = .portrait
let outputRect = CGRect(x: 0, y: 0, width: 1, height: 1)
let videoRect = videoOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
let depthRect = depthOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
scale = max(videoRect.width, videoRect.height) / max(depthRect.width, depthRect.height)
do{
try camera.lockForConfiguration()
if let frameDuration = camera.activeDepthDataFormat?.videoSupportedFrameRateRanges.first?.minFrameDuration{
camera.activeVideoMinFrameDuration = frameDuration
}
camera.unlockForConfiguration()
}catch{
fatalError(error.localizedDescription)
}
session.commitConfiguration()
}
@IBAction func previewModeChanged(_ sender: UISegmentedControl) {
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
}
}
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate{
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let image = CIImage(cvPixelBuffer: pixelBuffer!)
let previewImage: CIImage
switch previewMode {
case .original:
previewImage = image
case .depth:
previewImage = depthMap ?? image
//default:
//previewImage = image
}
let displayImage = UIImage(ciImage: previewImage)
DispatchQueue.main.async {
[weak self] in self?.previewView.image = displayImage
}
}
}
extension ViewController: AVCaptureDepthDataOutputDelegate{
func depthDataOutput(_ output: AVCaptureDepthDataOutput, didOutput depthData: AVDepthData, timestamp: CMTime, connection: AVCaptureConnection) {
if previewMode == .original{
return
}
var convertedDepth: AVDepthData
if depthData.depthDataType != kCVPixelFormatType_DisparityFloat32{
convertedDepth = depthData.converting(toDepthDataType: kCVPixelFormatType_DisparityFloat32)
}else{
convertedDepth = depthData
}
let pixelBuffer = convertedDepth.depthDataMap
pixelBuffer.clamp()
let depthMap = CIImage(cvPixelBuffer: pixelBuffer)
DispatchQueue.main.async {
[weak self] in self?.depthMap = depthMap
}
}
}
Actual result: depending on the UI selection, the chosen CIImage (original image or depth map) is previewed on screen in real time.
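One possible direction, sketched and untested: write each stream to its own file with an AVAssetWriter, appending frames through an AVAssetWriterInputPixelBufferAdaptor. Everything below is an assumption rather than code from this question, and the depth maps would first need converting to a codec-friendly pixel format (for example rendering the disparity map into a BGRA or grayscale buffer) before appending:
// Hypothetical helper owning one writer per stream (one instance for RGB, one for depth)
final class SequenceWriter {
    private let writer: AVAssetWriter
    private let input: AVAssetWriterInput
    private let adaptor: AVAssetWriterInputPixelBufferAdaptor
    private var started = false
    init(outputURL: URL, width: Int, height: Int) throws {
        writer = try AVAssetWriter(outputURL: outputURL, fileType: .mov)
        input = AVAssetWriterInput(mediaType: .video, outputSettings: [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: width,
            AVVideoHeightKey: height
        ])
        input.expectsMediaDataInRealTime = true
        adaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: input, sourcePixelBufferAttributes: nil)
        writer.add(input)
        writer.startWriting()
    }
    // Append a displayable pixel buffer with its presentation timestamp
    func append(_ pixelBuffer: CVPixelBuffer, at time: CMTime) {
        if !started {
            writer.startSession(atSourceTime: time)
            started = true
        }
        if input.isReadyForMoreMediaData {
            adaptor.append(pixelBuffer, withPresentationTime: time)
        }
    }
    func finish(completion: @escaping () -> Void) {
        input.markAsFinished()
        writer.finishWriting(completionHandler: completion)
    }
}
The RGB frames could be appended from captureOutput using CMSampleBufferGetPresentationTimeStamp(sampleBuffer), and the converted depth maps from depthDataOutput using its timestamp parameter; once both .mov files are finished they can be saved to the gallery with PHPhotoLibrary.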

CMSampleBufferGetImageBuffer(sampleBuffer) return nil

I use this code to capture video from the camera, but CMSampleBufferGetImageBuffer(sampleBuffer) always returns nil. What is the problem? Here is the code; I modified the code from this source to adapt it for Swift 4: https://github.com/FlexMonkey/CoreImageHelpers/blob/master/CoreImageHelpers/coreImageHelpers/CameraCaptureHelper.swift
import AVFoundation
import CoreMedia
import CoreImage
import UIKit
class CameraCaptureHelper: NSObject
{
let captureSession = AVCaptureSession()
let cameraPosition: AVCaptureDevice.Position
weak var delegate: CameraCaptureHelperDelegate?
required init(cameraPosition: AVCaptureDevice.Position)
{
self.cameraPosition = cameraPosition
super.init()
initialiseCaptureSession()
}
fileprivate func initialiseCaptureSession()
{
captureSession.sessionPreset = AVCaptureSession.Preset.photo
guard let camera = AVCaptureDevice.default(.builtInWideAngleCamera,
for: .video, position: cameraPosition)
else {
fatalError("Unable to access camera")
}
do
{
let input = try AVCaptureDeviceInput(device: camera)
captureSession.addInput(input)
}
catch
{
fatalError("Unable to access back camera")
}
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self,
queue: DispatchQueue(label: "sample buffer delegate", attributes: []))
if captureSession.canAddOutput(videoOutput)
{
captureSession.addOutput(videoOutput)
}
captureSession.startRunning()
}
}
extension CameraCaptureHelper: AVCaptureVideoDataOutputSampleBufferDelegate
{
func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
connection.videoOrientation = .landscapeRight //AVCaptureVideoOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue)!
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else
{
return
}
DispatchQueue.main.async
{
self.delegate?.newCameraImage(self,
image: CIImage(cvPixelBuffer: pixelBuffer))
}
}
}
protocol CameraCaptureHelperDelegate: class
{
func newCameraImage(_ cameraCaptureHelper: CameraCaptureHelper, image: CIImage)
}
You're trying to access the pixel buffer from the "just dropped a sample buffer" callback. The header file says:
CMSampleBuffer object passed to this delegate method will contain metadata about the dropped video frame, such as its duration and presentation time stamp, but will contain no actual video data.
You should be doing that from the captureOutput(_:didOutput:from:) delegate callback instead.
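For example, moving the body into the didOutput callback (a sketch based on the code above):
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    connection.videoOrientation = .landscapeRight
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    let image = CIImage(cvPixelBuffer: pixelBuffer)
    DispatchQueue.main.async {
        self.delegate?.newCameraImage(self, image: image)
    }
}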

Swift iOS, wondering if someone could explain why the output is rotated 90 degrees?

I am following this link here, How to apply filter to Video real-time using Swift, and for whatever reason the UIImageOrientation is rotated 90 degrees to the left. I have tried to rectify this by setting the orientation to Up, but it still appears the same. Does anyone have any idea why this is? I'm not sure if it is the image, the preview layer or the image view that is causing this.
Here is the code:
import UIKit
import AVFoundation
import CoreMedia
let noirFilter = CIFilter(name: "CIPhotoEffectNoir")!
let sepiaFilter = CIFilter(name: "CISepiaTone")!
let vignetteEffect = CIFilter(name: "CIVignetteEffect")!
let Filters = [noirFilter,sepiaFilter,vignetteEffect]
class TestViewController:UIViewController,AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var imageView: UIImageView!
@IBOutlet weak var Photo: UIButton!
var sessionQueue: dispatch_queue_t!
var stillImageOutput: AVCaptureStillImageOutput?
var videoDeviceInput: AVCaptureDeviceInput?
var buttonTapped = false
override func viewDidLoad() {
super.viewDidLoad()
sessionQueue = dispatch_queue_create("com.bradleymackey.Backchat.sessionQueue",DISPATCH_QUEUE_SERIAL)
let captureSession = AVCaptureSession()
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
var videoDeviceInput: AVCaptureDeviceInput?
do
{
let input = try AVCaptureDeviceInput(device: backCamera)
captureSession.addInput(input)
self.videoDeviceInput = videoDeviceInput
}
catch
{
print("can't access camera")
return
}
// although we don't use this, it's required to get captureOutput invoked
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
imageView.contentMode = UIViewContentMode.ScaleAspectFill
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: dispatch_queue_create("sample buffer delegate", DISPATCH_QUEUE_SERIAL))
if captureSession.canAddOutput(videoOutput)
{
captureSession.addOutput(videoOutput)
}
captureSession.startRunning()
let stillImageOutput: AVCaptureStillImageOutput = AVCaptureStillImageOutput()
if captureSession.canAddOutput(stillImageOutput) {
stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
captureSession.addOutput(stillImageOutput)
self.stillImageOutput = stillImageOutput
}
}
override func viewWillLayoutSubviews() {
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
{
let filter = Filters[1]
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let cameraImage = CIImage(CVPixelBuffer: pixelBuffer!)
filter.setValue(cameraImage, forKey: kCIInputImageKey)
let filteredImage = UIImage(CIImage: filter.valueForKey(kCIOutputImageKey) as! CIImage!)
var newImage = UIImage(CIImage: filter.valueForKey(kCIOutputImageKey) as! CIImage!, scale:1.0, orientation: .Up)
dispatch_async(dispatch_get_main_queue())
{
self.imageView.image = newImage
}
}
@IBAction func snapStill(sender: UIButton) {
print("snapStillImage")
let previewController = PreviewViewController(nibName: "PreviewViewController", bundle: nil)
previewController.media = Media.Photo(image: imageView.image!)
previewController.isFrontCamera = false
self.presentViewController(previewController, animated: true, completion: nil)
}
override func viewDidLayoutSubviews()
{
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
The reason for this is that the orientation is not set.
You need to set the orientation in captureOutput()
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
{
connection.videoOrientation = .portrait
...
This should solve your problem.
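For reference, with the current AVFoundation delegate signature the same fix would look roughly like this:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    connection.videoOrientation = .portrait
    // ... filter and display the frame as before
}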
