Record depth map from iPhone as a sequence - iOS

I want to create an iOS application that can record and save RGB+Depth data. I have been able to capture both streams from the dual camera and preview them on screen in real time. Now I want to save them to the library as two sequences (one RGB sequence and one depth-map sequence).
So my question is: how can I save this depth information to the iPhone gallery as a video or image sequence, saving the RGB data at the same time, for later depth processing?
I am working with Xcode 10.2, Swift 5 and an iPhone XS.
import UIKit
import AVFoundation
class ViewController: UIViewController {
@IBOutlet weak var previewView: UIImageView!
@IBOutlet weak var previewModeControl: UISegmentedControl!
var previewMode = PreviewMode.original //Original(RGB) or Depth
let session = AVCaptureSession()
let dataOutputQueue = DispatchQueue(label: "video data queue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
var background: CIImage?
var depthMap: CIImage?
var scale: CGFloat = 0.0
override func viewDidLoad() {
super.viewDidLoad()
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
configureCaptureSession()
session.startRunning()
}
override var shouldAutorotate: Bool {
return false
}
func configureCaptureSession() {
session.beginConfiguration()
//Add input to the session
guard let camera = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .unspecified) else {
fatalError("No depth video camera available")
}
session.sessionPreset = .photo
do{
let cameraInput = try AVCaptureDeviceInput(device: camera)
if session.canAddInput(cameraInput){
session.addInput(cameraInput)
}else{
fatalError("Error adding input device to session")
}
}catch{
fatalError(error.localizedDescription)
}
//Add output to the session
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
if session.canAddOutput(videoOutput){
session.addOutput(videoOutput)
}else{
fatalError("Error adding output to session")
}
let videoConnection = videoOutput.connection(with: .video)
videoConnection?.videoOrientation = .portrait
//Add output to the session DEPTH
let depthOutput = AVCaptureDepthDataOutput()
//Set the current view controller as the delegate for the new object
depthOutput.setDelegate(self, callbackQueue: dataOutputQueue)
depthOutput.isFilteringEnabled = true //smooth the depth data and fill holes
if session.canAddOutput(depthOutput){
session.addOutput(depthOutput)
}else{
fatalError("Error adding output to session")
}
let depthConnection = depthOutput.connection(with: .depthData)
depthConnection?.videoOrientation = .portrait
let outputRect = CGRect(x: 0, y: 0, width: 1, height: 1)
let videoRect = videoOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
let depthRect = depthOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
scale = max(videoRect.width, videoRect.height) / max(depthRect.width, depthRect.height)
do{
try camera.lockForConfiguration()
if let frameDuration = camera.activeDepthDataFormat?.videoSupportedFrameRateRanges.first?.minFrameDuration{
camera.activeVideoMinFrameDuration = frameDuration
}
camera.unlockForConfiguration()
}catch{
fatalError(error.localizedDescription)
}
session.commitConfiguration()
}
@IBAction func previewModeChanged(_ sender: UISegmentedControl) {
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
}
}
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate{
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let image = CIImage(cvPixelBuffer: pixelBuffer!)
let previewImage: CIImage
switch previewMode {
case .original:
previewImage = image
case .depth:
previewImage = depthMap ?? image
//default:
//previewImage = image
}
let displayImage = UIImage(ciImage: previewImage)
DispatchQueue.main.async {
[weak self] in self?.previewView.image = displayImage
}
}
}
extension ViewController: AVCaptureDepthDataOutputDelegate{
func depthDataOutput(_ output: AVCaptureDepthDataOutput, didOutput depthData: AVDepthData, timestamp: CMTime, connection: AVCaptureConnection) {
if previewMode == .original{
return
}
var convertedDepth: AVDepthData
if depthData.depthDataType != kCVPixelFormatType_DisparityFloat32{
convertedDepth = depthData.converting(toDepthDataType: kCVPixelFormatType_DisparityFloat32)
}else{
convertedDepth = depthData
}
let pixelBuffer = convertedDepth.depthDataMap
pixelBuffer.clamp()
let depthMap = CIImage(cvPixelBuffer: pixelBuffer)
DispatchQueue.main.async {
[weak self] in self?.depthMap = depthMap
}
}
}
Actual result: the app previews on screen, in real time, whichever CIImage is selected in the UI (the RGB image or the depth map).
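One way to approach the saving part, sketched below under some assumptions, is to run one AVAssetWriter per stream and append pixel buffers from the delegate callbacks you already have. The class below is a hypothetical helper for the depth stream only (the names DepthRecorder, start, append and finish are mine, not an existing API); it renders the converted disparity map into a 32BGRA buffer with a CIContext before encoding, because H.264 cannot store DisparityFloat32 directly.
import AVFoundation
import CoreImage

// Hypothetical sketch: records one stream (here, the depth maps) to a .mov file.
final class DepthRecorder {
    private var writer: AVAssetWriter?
    private var input: AVAssetWriterInput?
    private var adaptor: AVAssetWriterInputPixelBufferAdaptor?
    private let ciContext = CIContext()
    private var didStartSession = false

    func start(outputURL: URL, width: Int, height: Int) throws {
        let writer = try AVAssetWriter(outputURL: outputURL, fileType: .mov)
        let input = AVAssetWriterInput(mediaType: .video, outputSettings: [
            AVVideoCodecKey: AVVideoCodecType.h264,
            AVVideoWidthKey: width,
            AVVideoHeightKey: height])
        input.expectsMediaDataInRealTime = true
        adaptor = AVAssetWriterInputPixelBufferAdaptor(
            assetWriterInput: input,
            sourcePixelBufferAttributes: [
                kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
                kCVPixelBufferWidthKey as String: width,
                kCVPixelBufferHeightKey as String: height])
        writer.add(input)
        writer.startWriting()
        self.writer = writer
        self.input = input
    }

    // Call from depthDataOutput(_:didOutput:timestamp:connection:) with the clamped depth CIImage.
    func append(_ image: CIImage, at time: CMTime) {
        guard let writer = writer, let input = input, let adaptor = adaptor,
              writer.status == .writing, input.isReadyForMoreMediaData,
              let pool = adaptor.pixelBufferPool else { return }
        if !didStartSession {
            writer.startSession(atSourceTime: time)
            didStartSession = true
        }
        var buffer: CVPixelBuffer?
        CVPixelBufferPoolCreatePixelBuffer(nil, pool, &buffer)
        guard let pixelBuffer = buffer else { return }
        ciContext.render(image, to: pixelBuffer)   // disparity rendered as grayscale BGRA
        adaptor.append(pixelBuffer, withPresentationTime: time)
    }

    func finish(completion: @escaping () -> Void) {
        input?.markAsFinished()
        writer?.finishWriting(completionHandler: completion)
    }
}
With a second writer fed from captureOutput(_:didOutput:from:) for the RGB frames, you end up with two temporary .mov files, and each can then be saved to the gallery with PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL:) inside PHPhotoLibrary.shared().performChanges. Note that encoding disparity as 8-bit video is lossy; if you need the raw float values for later processing, write them to a custom file instead.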

Related

Process frames of video from AVFoundation in multiple threads in Swift

I have an app where I get all the frames from the camera using AVFoundation and process them using the code below. I was wondering if there is a way to make this part multi-threaded so it runs faster: maybe put each frame on a queue in one thread, have another thread process that queue, and a third show the output of each processed frame. I don't know if this can be done, but the reason I ask is that processing each frame can take longer than a frame interval, and the output image freezes as a result.
This is the code for CaptureManager class:
class CaptureManager: NSObject {
internal static let shared = CaptureManager()
weak var delegate: CaptureManagerDelegate?
var session: AVCaptureSession?
var isBackCamera = true
override init() {
super.init()
session = AVCaptureSession()
session?.sessionPreset = .high
//setup input
var device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
let defaults = UserDefaults.standard
if let stringOne = defaults.string(forKey: defaultsKeys.rememberCamera) {
if(stringOne != "back"){
device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front)
}
}else{
defaults.set("back", forKey: defaultsKeys.rememberCamera)
}
if(device != nil){
device?.set(frameRate: 30)
let input = try! AVCaptureDeviceInput(device: device!)
session?.addInput(input)
//setup output
let output = AVCaptureVideoDataOutput()
output.alwaysDiscardsLateVideoFrames = true
output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as AnyHashable as! String: kCVPixelFormatType_32BGRA]
output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
session?.addOutput(output)
}else{
print("no camera")
}
}
func startSession() {
session?.startRunning()
}
func stopSession() {
session?.stopRunning()
}
func switchCamera(){
//Remove existing input
guard let currentCameraInput: AVCaptureInput = session?.inputs.first else {
return
}
//Indicate that some changes will be made to the session
session?.beginConfiguration()
session?.removeInput(currentCameraInput)
let defaults = UserDefaults.standard
if let stringOne = defaults.string(forKey: defaultsKeys.rememberCamera) {
if(stringOne == "back"){
defaults.set("front", forKey: defaultsKeys.rememberCamera)
}else{
defaults.set("back", forKey: defaultsKeys.rememberCamera)
}
}
//Get new input
var newCamera: AVCaptureDevice! = nil
if let input = currentCameraInput as? AVCaptureDeviceInput {
if (input.device.position == .back) {
newCamera = cameraWithPosition(position: .front)
} else {
newCamera = cameraWithPosition(position: .back)
}
}
newCamera.set(frameRate: 30)
//Add input to session
var err: NSError?
var newVideoInput: AVCaptureDeviceInput!
do {
newVideoInput = try AVCaptureDeviceInput(device: newCamera)
} catch let err1 as NSError {
err = err1
newVideoInput = nil
}
if newVideoInput == nil || err != nil {
print("Error creating capture device input: \(err?.localizedDescription)")
} else {
session?.addInput(newVideoInput)
}
isBackCamera.toggle()
//Commit all the configuration changes at once
session?.commitConfiguration()
}
func cameraWithPosition(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .unspecified)
for device in discoverySession.devices {
if device.position == position {
return device
}
}
return nil
}
func getImageFromSampleBuffer(sampleBuffer: CMSampleBuffer) ->UIImage? {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return nil
}
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else {
return nil
}
guard let cgImage = context.makeImage() else {
return nil
}
var image: UIImage
let defaults = UserDefaults.standard
if let stringOne = defaults.string(forKey: defaultsKeys.rememberCamera) {
if(stringOne == "back"){
image = UIImage(cgImage: cgImage, scale: 1, orientation:.right)
}else{
image = UIImage(cgImage: cgImage, scale: 1, orientation:.leftMirrored)
}
}else{
image = UIImage(cgImage: cgImage, scale: 1, orientation:.right)
}
CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
return image
}
}
This is the extension that processes each frame:
extension CaptureManager: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else {
return
}
delegate?.processCapturedImage(image: outputImage)
}
}
Process function:
extension ViewController: CaptureManagerDelegate {
func processCapturedImage(image: UIImage) {
self.imageView.image = ...
//process image
}
}
And this is how it's called in the ViewController:
CaptureManager.shared.startSession()
I fear that your question mentions queues more often than your code samples do.. but you don't need to fear anymore, we've got this!
Before we modify any code, let's agree on this: the camera itself deserves its own queue. Not DispatchQueue.main, never.
Let's create a queue for our camera, something like:
var ourCameraQueue = DispatchQueue(label: "our-camera-queue-label")
Then use this queue throughout the code you shared and wrap the body of each function in it:
func oneOfTheFuncs() {
ourCameraQueue.async {
...
}
}
and this should make things a tiny bit faster.
One note: you might want to initialize (or better, inject, but we'll come to that later, maybe..) ourCameraQueue as the first thing in the init method. After initialization, make sure to wrap all the remaining code in the init method in ourCameraQueue.async {} as well.
Also, skip the ViewController when wrapping, and then read about dependency injection; that will help you as you take this implementation further.
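As a concrete illustration (my sketch, not code from the question or the answer above), the two changes that usually matter most are delivering sample buffers on the camera queue instead of the main queue, and handing the expensive per-frame work to a separate processing queue so that only the final UI update touches the main queue. processingQueue is a hypothetical extra queue; ourCameraQueue is the one suggested above.
// Hypothetical sketch building on the answer above.
let processingQueue = DispatchQueue(label: "frame-processing-queue")

// In CaptureManager's init, deliver frames on the camera queue instead of DispatchQueue.main:
// output.setSampleBufferDelegate(self, queue: ourCameraQueue)

extension CaptureManager: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // Runs on ourCameraQueue: convert the frame quickly, then hand it off so this
        // callback returns fast and alwaysDiscardsLateVideoFrames can drop what we miss.
        guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else { return }
        processingQueue.async { [weak self] in
            // ...expensive per-frame processing goes here...
            DispatchQueue.main.async {
                self?.delegate?.processCapturedImage(image: outputImage)
            }
        }
    }
}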

How can I call captureOutput at 240fps?

I want to save images at a high frame rate.
I/O is expensive, so maybe not exactly 240 fps, but hopefully more than 120 fps.
What I do is set up the capture device in viewDidLoad,
then log the timestamp in captureOutput and check the rate.
But I notice that captureOutput always runs at 30 fps.
Could you please tell me what I got wrong?
Thanks for your time and answers!
Here is my code and result:
//
// ViewController.swift
// CustomCamera
//
// Created by chunibyo on 2021/3/8.
//
import UIKit
import AVFoundation
import Vision
import VideoToolbox
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var captureButton: UIButton!
let sessionQueue = DispatchQueue(label: "Session Queue")
var status = false
private var MyCaptureDevice: AVCaptureDevice?
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view.
captureButton.layer.cornerRadius = captureButton.frame.width / 2
captureButton.layer.masksToBounds = true
captureButton.layer.zPosition = 10
guard let captureDevice = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back) else {return}
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else {return}
// 1
for vFormat in captureDevice.formats {
// 2
let ranges = vFormat.videoSupportedFrameRateRanges as [AVFrameRateRange]
let frameRates = ranges[0]
// 3
if frameRates.maxFrameRate == 240 {
// 4
try? captureDevice.lockForConfiguration()
captureDevice.activeFormat = vFormat as AVCaptureDevice.Format
captureDevice.activeVideoMinFrameDuration = CMTimeMake(value: 1, timescale: Int32(240))
captureDevice.activeVideoMaxFrameDuration = CMTimeMake(value: 1, timescale: Int32(240))
captureDevice.videoZoomFactor = captureDevice.minAvailableVideoZoomFactor
captureDevice.unlockForConfiguration()
}
}
let captureSession = AVCaptureSession();
// captureSession.sessionPreset = .photo
captureSession.addInput(input)
captureSession.startRunning();
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
dataOutput.alwaysDiscardsLateVideoFrames = true;
captureSession.addOutput(dataOutput)
print(captureDevice.minAvailableVideoZoomFactor)
print(captureDevice.maxAvailableVideoZoomFactor)
MyCaptureDevice = captureDevice
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
print(CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)))
// if !status { return }
// guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
// guard let uiImage = UIImage(pixelBuffer: pixelBuffer) else { return }
//
// sessionQueue.async {
// guard let captureDevice = self.MyCaptureDevice else { return }
// if captureDevice.videoZoomFactor >= (captureDevice.maxAvailableVideoZoomFactor - 0.2) { return }
// UIImageWriteToSavedPhotosAlbum(uiImage, nil, nil, nil)
// try? captureDevice.lockForConfiguration()
// captureDevice.videoZoomFactor += 0.1
// captureDevice.unlockForConfiguration()
// }
}
@IBAction func captureControl(_ sender: UIButton) {
DispatchQueue.main.async {
if self.status {
self.captureButton.backgroundColor = .white
print("stop")
self.status = !self.status
}
else {
self.captureButton.backgroundColor = .red
print("recording...")
self.status = !self.status
}
}
}
}
extension UIImage {
public convenience init?(pixelBuffer: CVPixelBuffer) {
var cgImage: CGImage?
VTCreateCGImageFromCVPixelBuffer(pixelBuffer, options: nil, imageOut: &cgImage)
guard let _cgImage = cgImage else { return nil }
self.init(cgImage: _cgImage)
}
}
Console log (Stack Overflow does not allow me to post an image, sorry):
captureOutput with logging only runs at 240 fps.
captureOutput while also saving photos to the album runs at about 70~100 fps.
The code below does get 240 fps in the logs. The key difference appears to be that the input is added to the session before the 240 fps format is locked, so adding the input no longer resets the device's active format back to the default 30 fps configuration.
//
// ViewController.swift
// CustomCamera
//
// Created by chunibyo on 2021/3/8.
//
import UIKit
import AVFoundation
import Vision
import VideoToolbox
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var captureButton: UIButton!
let sessionQueue = DispatchQueue(label: "Session Queue")
var status = false
var zoomStatus = 1
private var MyCaptureDevice: AVCaptureDevice?
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view.
captureButton.layer.cornerRadius = captureButton.frame.width / 2
captureButton.layer.masksToBounds = true
captureButton.layer.zPosition = 10
guard let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) else {return}
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else {return}
let captureSession = AVCaptureSession();
// captureSession.sessionPreset = .photo
captureSession.addInput(input)
// 1
for vFormat in captureDevice.formats {
// 2
let ranges = vFormat.videoSupportedFrameRateRanges as [AVFrameRateRange]
let frameRates = ranges[0]
// 3
if frameRates.maxFrameRate == 240 {
// 4
try? captureDevice.lockForConfiguration()
captureDevice.activeFormat = vFormat as AVCaptureDevice.Format
captureDevice.activeVideoMinFrameDuration = CMTimeMake(value: 1, timescale: Int32(240))
captureDevice.activeVideoMaxFrameDuration = CMTimeMake(value: 1, timescale: Int32(240))
captureDevice.videoZoomFactor = captureDevice.minAvailableVideoZoomFactor
captureDevice.unlockForConfiguration()
}
}
captureSession.startRunning();
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
dataOutput.alwaysDiscardsLateVideoFrames = true;
captureSession.addOutput(dataOutput)
print(captureDevice.minAvailableVideoZoomFactor)
print(captureDevice.maxAvailableVideoZoomFactor)
MyCaptureDevice = captureDevice
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
print(CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)))
// if !status { return }
// guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
// guard let uiImage = UIImage(pixelBuffer: pixelBuffer) else { return }
// UIImageWriteToSavedPhotosAlbum(uiImage, nil, nil, nil)
//
// guard let captureDevice = self.MyCaptureDevice else { return }
// if self.zoomStatus == 1 && captureDevice.videoZoomFactor >= CGFloat(Int32(captureDevice.maxAvailableVideoZoomFactor * 0.6)) { self.zoomStatus = -1
// }
// else if self.zoomStatus == -1 && captureDevice.videoZoomFactor <= (captureDevice.minAvailableVideoZoomFactor + 1.0) {
// self.zoomStatus = 1
// }
// UIImageWriteToSavedPhotosAlbum(uiImage, nil, nil, nil)
// try? captureDevice.lockForConfiguration()
// captureDevice.videoZoomFactor += (0.1 * CGFloat(self.zoomStatus))
// captureDevice.unlockForConfiguration()
}
@IBAction func captureControl(_ sender: UIButton) {
DispatchQueue.main.async {
if self.status {
self.captureButton.backgroundColor = .white
print("stop")
self.status = !self.status
}
else {
self.captureButton.backgroundColor = .red
print("recording...")
self.status = !self.status
}
}
}
}
extension UIImage {
public convenience init?(pixelBuffer: CVPixelBuffer) {
var cgImage: CGImage?
VTCreateCGImageFromCVPixelBuffer(pixelBuffer, options: nil, imageOut: &cgImage)
guard let _cgImage = cgImage else { return nil }
self.init(cgImage: _cgImage)
}
}
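As a further hedged suggestion (not part of the original post), you can also wrap the input addition and the format selection in a single configuration transaction, so the session cannot renegotiate the format in between:
// Hypothetical sketch: select the 240 fps format as one configuration transaction.
captureSession.beginConfiguration()
captureSession.addInput(input)
if let fastFormat = captureDevice.formats.first(where: { format in
    format.videoSupportedFrameRateRanges.contains { $0.maxFrameRate == 240 }
}) {
    try? captureDevice.lockForConfiguration()
    captureDevice.activeFormat = fastFormat
    captureDevice.activeVideoMinFrameDuration = CMTimeMake(value: 1, timescale: 240)
    captureDevice.activeVideoMaxFrameDuration = CMTimeMake(value: 1, timescale: 240)
    captureDevice.unlockForConfiguration()
}
captureSession.commitConfiguration()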

The label does not appear over the camera preview when I print to it

I share my code below. When I turn on the camera I try to print to the label, but the label does not appear. My program detects the object, and at the end I need to show the result in the label, but unfortunately I cannot get the label to show. What is the problem?
@IBOutlet weak var label: UILabel!
override func viewDidLoad() {
super.viewDidLoad()
let captureSession = AVCaptureSession()
captureSession.sessionPreset = .photo
guard let captureDevice = AVCaptureDevice.default(for: .video) else{ return }
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
captureSession.addInput(input)
captureSession.startRunning()
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "CameraQueue"))
captureSession.addOutput(dataOutput)
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
guard let model = try? VNCoreMLModel(for: Resnet50().model) else { return }
let request = VNCoreMLRequest(model : model) {(finishedReq,error) in
guard let results = finishedReq.results as? [VNClassificationObservation] else{ return }
guard let firstObservation = results.first else { return }
print(firstObservation.identifier , firstObservation.confidence)
DispatchQueue.main.async {
self.label.text = "\(firstObservation.identifier) ,\(firstObservation.confidence)"
}
}
try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
You need to call self.view.bringSubviewToFront(self.label) after you add the preview layer to the view's layer. When you add the preview layer, it sits on top of the existing subviews, so it covers the label.
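For clarity, a minimal sketch of that fix at the end of viewDidLoad (assuming the current bringSubviewToFront(_:) API name):
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
view.bringSubviewToFront(label)   // without this, the preview layer covers the label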

Recording videos with real-time filters in Swift

I am new to Swift and trying to build a camera app that can apply real-time filters and save the video with the filters applied.
So far I can preview the filtered video in real time, but when I save the video it is all black.
import UIKit
import AVFoundation
import AssetsLibrary
import CoreMedia
import Photos
class ViewController: UIViewController , AVCaptureVideoDataOutputSampleBufferDelegate {
var captureSession: AVCaptureSession!
@IBOutlet weak var previewView: UIView!
@IBOutlet weak var recordButtton: UIButton!
@IBOutlet weak var imageView: UIImageView!
var assetWriter: AVAssetWriter?
var assetWriterPixelBufferInput: AVAssetWriterInputPixelBufferAdaptor?
var isWriting = false
var currentSampleTime: CMTime?
var currentVideoDimensions: CMVideoDimensions?
override func viewDidLoad() {
super.viewDidLoad()
FilterVendor.register()
setupCaptureSession()
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
func setupCaptureSession() {
let captureSession = AVCaptureSession()
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
guard let captureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo), let input = try? AVCaptureDeviceInput(device: captureDevice) else {
print("Can't access the camera")
return
}
if captureSession.canAddInput(input) {
captureSession.addInput(input)
}
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
if captureSession.canAddOutput(videoOutput) {
captureSession.addOutput(videoOutput)
}
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
if((previewLayer) != nil) {
view.layer.addSublayer(previewLayer!)
}
captureSession.startRunning()
}
@IBAction func record(_ sender: Any) {
if isWriting {
print("stop record")
self.isWriting = false
assetWriterPixelBufferInput = nil
assetWriter?.finishWriting(completionHandler: {[unowned self] () -> Void in
self.saveMovieToCameraRoll()
})
} else {
print("start record")
createWriter()
assetWriter?.startWriting()
assetWriter?.startSession(atSourceTime: currentSampleTime!)
isWriting = true
}
}
func saveMovieToCameraRoll() {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: self.movieURL() as URL)
}) { saved, error in
if saved {
print("saved")
}
}
}
func movieURL() -> NSURL {
let tempDir = NSTemporaryDirectory()
let url = NSURL(fileURLWithPath: tempDir).appendingPathComponent("tmpMov.mov")
return url! as NSURL
}
func checkForAndDeleteFile() {
let fm = FileManager.default
let url = movieURL()
let exist = fm.fileExists(atPath: url.path!)
if exist {
do {
try fm.removeItem(at: url as URL)
} catch let error as NSError {
print(error.localizedDescription)
}
}
}
func createWriter() {
self.checkForAndDeleteFile()
do {
assetWriter = try AVAssetWriter(outputURL: movieURL() as URL, fileType: AVFileTypeQuickTimeMovie)
} catch let error as NSError {
print(error.localizedDescription)
return
}
let outputSettings = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : Int(currentVideoDimensions!.width),
AVVideoHeightKey : Int(currentVideoDimensions!.height)
] as [String : Any]
let assetWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings as? [String : AnyObject])
assetWriterVideoInput.expectsMediaDataInRealTime = true
assetWriterVideoInput.transform = CGAffineTransform(rotationAngle: CGFloat(M_PI / 2.0))
let sourcePixelBufferAttributesDictionary = [
String(kCVPixelBufferPixelFormatTypeKey) : Int(kCVPixelFormatType_32BGRA),
String(kCVPixelBufferWidthKey) : Int(currentVideoDimensions!.width),
String(kCVPixelBufferHeightKey) : Int(currentVideoDimensions!.height),
String(kCVPixelFormatOpenGLESCompatibility) : kCFBooleanTrue
] as [String : Any]
assetWriterPixelBufferInput = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: assetWriterVideoInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if assetWriter!.canAdd(assetWriterVideoInput) {
assetWriter!.add(assetWriterVideoInput)
} else {
print("no way\(assetWriterVideoInput)")
}
}
func captureOutput(_ captureOutput: AVCaptureOutput, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection) {
autoreleasepool {
connection.videoOrientation = AVCaptureVideoOrientation.landscapeLeft;
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
let cameraImage = CIImage(cvPixelBuffer: pixelBuffer)
let filter = CIFilter(name: "Fİlter")!
filter.setValue(cameraImage, forKey: kCIInputImageKey)
let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)!
self.currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDescription)
self.currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer)
if self.isWriting {
if self.assetWriterPixelBufferInput?.assetWriterInput.isReadyForMoreMediaData == true {
var newPixelBuffer: CVPixelBuffer? = nil
CVPixelBufferPoolCreatePixelBuffer(nil, self.assetWriterPixelBufferInput!.pixelBufferPool!, &newPixelBuffer)
let success = self.assetWriterPixelBufferInput?.append(newPixelBuffer!, withPresentationTime: self.currentSampleTime!)
if success == false {
print("Pixel Buffer failed")
}
}
}
DispatchQueue.main.async {
if let outputValue = filter.value(forKey: kCIOutputImageKey) as? CIImage {
let filteredImage = UIImage(ciImage: outputValue)
self.imageView.image = filteredImage
}
}
}
}
}
I've added some comments to the critical part below:
func captureOutput(_ captureOutput: AVCaptureOutput, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection) {
autoreleasepool {
connection.videoOrientation = AVCaptureVideoOrientation.landscapeLeft;
// COMMENT: This line makes sense - this is your pixelbuffer from the camera.
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
// COMMENT: OK, so you turn pixelBuffer into a CIImage...
let cameraImage = CIImage(cvPixelBuffer: pixelBuffer)
// COMMENT: And now you've set up a filter with that CIImage as its input...
let filter = CIFilter(name: "Fİlter")!
filter.setValue(cameraImage, forKey: kCIInputImageKey)
let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)!
self.currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDescription)
self.currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer)
if self.isWriting {
if self.assetWriterPixelBufferInput?.assetWriterInput.isReadyForMoreMediaData == true {
// COMMENT: Here's where it gets weird. You've declared a new, empty pixelBuffer... but you already have one (pixelBuffer) that contains the image you want to write...
var newPixelBuffer: CVPixelBuffer? = nil
// COMMENT: And you grabbed memory from the pool.
CVPixelBufferPoolCreatePixelBuffer(nil, self.assetWriterPixelBufferInput!.pixelBufferPool!, &newPixelBuffer)
// COMMENT: And now you wrote an empty pixelBuffer back <-- this is what's causing the black frame.
let success = self.assetWriterPixelBufferInput?.append(newPixelBuffer!, withPresentationTime: self.currentSampleTime!)
if success == false {
print("Pixel Buffer failed")
}
}
}
// COMMENT: And now you're sending the filtered image back to the screen.
DispatchQueue.main.async {
if let outputValue = filter.value(forKey: kCIOutputImageKey) as? CIImage {
let filteredImage = UIImage(ciImage: outputValue)
self.imageView.image = filteredImage
}
}
}
}
It looks to me like you're basically getting the camera image, creating a filtered copy, then making a NEW pixel buffer, which is empty, and writing that out.
If you write the pixelBuffer you grabbed instead of the new one you're creating, you will successfully write the image (although unfiltered).
What you need in order to write out the filtered video is to render the filtered CIImage into a new CVPixelBuffer - that solution exists here on Stack Overflow already, I know because I needed that step myself!
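A sketch of that missing step, under my own assumptions rather than the answer's exact code: render the filter's output into the pixel buffer taken from the adaptor's pool before appending it. It assumes a ciContext property (let ciContext = CIContext()) created once on the view controller rather than per frame.
// Hypothetical sketch of the writing branch inside captureOutput.
if self.isWriting,
   self.assetWriterPixelBufferInput?.assetWriterInput.isReadyForMoreMediaData == true,
   let pool = self.assetWriterPixelBufferInput?.pixelBufferPool,
   let filteredImage = filter.outputImage {
    var newPixelBuffer: CVPixelBuffer? = nil
    CVPixelBufferPoolCreatePixelBuffer(nil, pool, &newPixelBuffer)
    if let newPixelBuffer = newPixelBuffer {
        // The step that was missing: fill the buffer with the filtered frame.
        self.ciContext.render(filteredImage, to: newPixelBuffer)
        let success = self.assetWriterPixelBufferInput?.append(newPixelBuffer, withPresentationTime: self.currentSampleTime!)
        if success == false {
            print("Pixel Buffer failed")
        }
    }
}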

Swift iOS: wondering if someone could explain why the output is rotated 90 degrees?

I am following this link here, How to apply filter to Video real-time using Swift, and for whatever reason the image is rotated 90 degrees to the left. I have tried to rectify this by setting the orientation to .Up, but it still appears the same. Does anyone have any idea why this is? I'm not sure if it is the image, the preview layer, or the image view that is causing it.
Here is the code:
import UIKit
import AVFoundation
import CoreMedia
let noirFilter = CIFilter(name: "CIPhotoEffectNoir")!
let sepiaFilter = CIFilter(name: "CISepiaTone")!
let vignetteEffect = CIFilter(name: "CIVignetteEffect")!
let Filters = [noirFilter,sepiaFilter,vignetteEffect]
class TestViewController:UIViewController,AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var imageView: UIImageView!
@IBOutlet weak var Photo: UIButton!
var sessionQueue: dispatch_queue_t!
var stillImageOutput: AVCaptureStillImageOutput?
var videoDeviceInput: AVCaptureDeviceInput?
var buttonTapped = false
override func viewDidLoad() {
super.viewDidLoad()
sessionQueue = dispatch_queue_create("com.bradleymackey.Backchat.sessionQueue",DISPATCH_QUEUE_SERIAL)
let captureSession = AVCaptureSession()
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
var videoDeviceInput: AVCaptureDeviceInput?
do
{
let input = try AVCaptureDeviceInput(device: backCamera)
captureSession.addInput(input)
self.videoDeviceInput = videoDeviceInput
}
catch
{
print("can't access camera")
return
}
// although we don't use this, it's required to get captureOutput invoked
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
imageView.contentMode = UIViewContentMode.ScaleAspectFill
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: dispatch_queue_create("sample buffer delegate", DISPATCH_QUEUE_SERIAL))
if captureSession.canAddOutput(videoOutput)
{
captureSession.addOutput(videoOutput)
}
captureSession.startRunning()
let stillImageOutput: AVCaptureStillImageOutput = AVCaptureStillImageOutput()
if captureSession.canAddOutput(stillImageOutput) {
stillImageOutput.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
captureSession.addOutput(stillImageOutput)
self.stillImageOutput = stillImageOutput
}
}
override func viewWillLayoutSubviews() {
}
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
{
let filter = Filters[1]
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let cameraImage = CIImage(CVPixelBuffer: pixelBuffer!)
filter.setValue(cameraImage, forKey: kCIInputImageKey)
let filteredImage = UIImage(CIImage: filter.valueForKey(kCIOutputImageKey) as! CIImage!)
var newImage = UIImage(CIImage: filter.valueForKey(kCIOutputImageKey) as! CIImage!, scale:1.0, orientation: .Up)
dispatch_async(dispatch_get_main_queue())
{
self.imageView.image = newImage
}
}
@IBAction func snapStill(sender: UIButton) {
print("snapStillImage")
let previewController = PreviewViewController(nibName: "PreviewViewController", bundle: nil)
previewController.media = Media.Photo(image: imageView.image!)
previewController.isFrontCamera = false
self.presentViewController(previewController, animated: true, completion: nil)
}
override func viewDidLayoutSubviews()
{
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
The reason for this is that the orientation is not set.
You need to set the orientation in captureOutput()
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
{
connection.videoOrientation = .portrait
...
This should solve your problem.
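An alternative, which is my own suggestion rather than part of the answer, is to set the orientation once on the output's connection when you configure the session, instead of on every frame (shown here with the current Swift API names):
// Hypothetical sketch: configure the orientation once, right after adding the output.
if captureSession.canAddOutput(videoOutput) {
    captureSession.addOutput(videoOutput)
    if let connection = videoOutput.connection(with: .video) {
        connection.videoOrientation = .portrait   // frames now arrive upright
    }
}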
