Variables show up as nil - Swift 4, iOS

For some reason my variables stringy and stringie print to the console just fine, but when I try to set them on a label they show up as nil.
My goal is to display the string and the float in the app's view controller, but this just isn't working.
I think it has something to do with viewDidLoad, as if it's hiding the variables declared at class level. However, if I try to set my label outside viewDidLoad I get a declaration error.
// ViewController.swift
// Intellicam
//
import UIKit
import AVKit
import Vision
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
var stringy:String!
var stringie:Float!
override func viewDidLoad() {
super.viewDidLoad()
//here we start the camera
let captureSession = AVCaptureSession()
captureSession.sessionPreset = .photo
guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else {return}
captureSession.addInput(input)
captureSession.startRunning()
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
captureSession.addOutput(dataOutput)
// let request = VNCoreMLModel(model: VNCoreMLModel, completionHandler: VNRequestCompletionHandler)
// VNImageRequestHandler(cgImage: <#T##CGImage#>, options: <#T##[VNImageOption : Any]#>)
self.Labele.text = "Guess: \(stringy) + Certainty: \(stringie)"
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
//print("Camera was able to capture a frame:", Date())
guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {return}
guard let model = try? VNCoreMLModel(for: Resnet50().model) else {return}
let request = VNCoreMLRequest(model: model){
(finishedReq, err) in
//print(finishedReq.results)
guard let results = finishedReq.results as? [VNClassificationObservation] else {return}
guard let firstObservastion = results.first else {return}
//print("Guess: \(firstObservastion.identifier) Certainty: \(firstObservastion.confidence)%")
self.stringy = firstObservastion.identifier
self.stringie = firstObservastion.confidence
print(self.stringy)
print(self.stringie)
}
try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
@IBOutlet weak var Labele: UILabel!
}

First, never force-unwrap until you are sure the value exists. In your case the VNCoreMLRequest can fail, leaving both variables unassigned, and force-unwrapping them will definitely crash your app.
Also, make sure you use a proper naming convention for your label.
Your actual issue is that you are never updating the label with the result you get back.
To fix this, use a property observer:
var stringy:String? {
didSet {
DispatchQueue.main.async {
self.Labele.text = self.stringy
}
}
}
OR
self.stringy = firstObservastion.identifier
self.stringie = firstObservastion.confidence
DispatchQueue.main.async {
self.Labele.text = "Guess: \(self.stringy ?? "") + Certainty: \(self.stringie ?? 0)"
}
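Putting the two suggestions together, a minimal sketch of the property-observer approach for both values could look like this (updateLabel() is a helper name introduced for this sketch, not something from the original code; the UI update is dispatched to the main queue because the Vision completion handler runs on a background queue):

var stringy: String? {
    didSet { updateLabel() }
}
var stringie: Float? {
    didSet { updateLabel() }
}

// Helper assumed for this sketch: formats both values and updates the label on the main queue.
private func updateLabel() {
    DispatchQueue.main.async {
        let guess = self.stringy ?? "-"
        let certainty = self.stringie ?? 0
        self.Labele.text = "Guess: \(guess) Certainty: \(certainty)"
    }
}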

Related

How to get depth data from camera images

I am writing an app to get the depth data and disparity data from pictures taken with the camera. I can get the disparity data, but the depth data always returns nil. I need to get the depth information
and save it as a JPG.
I have tried the code below, where the user can switch between the front and back cameras and take pictures; the picture taken is then processed.
import UIKit
import AVFoundation
class ViewController: UIViewController {
@IBOutlet weak var ImageView: UIView!
var img:UIImage?
var rgbImage:UIImage?
var captureSession: AVCaptureSession?
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var backCamera = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back)
var frontCamera = AVCaptureDevice.default(.builtInTrueDepthCamera, for: .video, position: .front)
var capturePhotoOut : AVCapturePhotoOutput?
override func viewDidLoad() {
super.viewDidLoad()
if #available(iOS 10.2, *){
let captureDevice = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back)
do{
let input = try AVCaptureDeviceInput(device: captureDevice!)
captureSession = AVCaptureSession()
captureSession?.addInput(input)
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession!)
videoPreviewLayer?.frame = view.layer.bounds
ImageView.layer.addSublayer(videoPreviewLayer!)
captureSession?.startRunning()
}catch{
print("error")
}
}
capturePhotoOut = AVCapturePhotoOutput()
capturePhotoOut?.isHighResolutionCaptureEnabled = true
captureSession?.sessionPreset = .photo
captureSession?.addOutput(capturePhotoOut!)
capturePhotoOut!.isDepthDataDeliveryEnabled = capturePhotoOut!.isDepthDataDeliverySupported
capturePhotoOut!.isPortraitEffectsMatteDeliveryEnabled = capturePhotoOut!.isPortraitEffectsMatteDeliverySupported
}
@IBAction func imageCapture(_ sender: Any) {
guard let capturePhotoOutput = self.capturePhotoOut else {return}
let photoSettings = AVCapturePhotoSettings()
photoSettings.isAutoStillImageStabilizationEnabled = true
photoSettings.isHighResolutionPhotoEnabled = true
photoSettings.isDepthDataDeliveryEnabled = true
photoSettings.isPortraitEffectsMatteDeliveryEnabled = true
capturePhotoOut?.capturePhoto(with: photoSettings, delegate: self)
}
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
let vc = segue.destination as! DepthImageView
vc.img = img
vc.rgbImg = rgbImage
}
}
extension ViewController : AVCapturePhotoCaptureDelegate {
public func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard error == nil else{return}
guard let imageData = photo.fileDataRepresentation() else {return}
let detailImage = UIImage.init(data: imageData,scale: 1.0)
rgbImage = detailImage
let nsData = imageData as NSData
let ptr = nsData.bytes.assumingMemoryBound(to: UInt8.self)
let cfDataset = CFDataCreate(nil,ptr,imageData.count)
guard let source = CGImageSourceCreateWithData(cfDataset!,nil) else {return}
guard let auxDataInfo = CGImageSourceCopyAuxiliaryDataInfoAtIndex(source, 0, kCGImageAuxiliaryDataTypeDepth) as? [String : AnyObject] else {
return
}
var depthData: AVDepthData
do {
depthData = try AVDepthData(fromDictionaryRepresentation: auxDataInfo)
if depthData.depthDataType != kCVPixelFormatType_DepthFloat32 {
depthData = depthData.converting(toDepthDataType: kCVPixelFormatType_DepthFloat32)
}
let depthDataMap = depthData.depthDataMap
let ciImage = CIImage(cvPixelBuffer: depthDataMap)
let depthDataMapImage = UIImage(ciImage: ciImage,scale: 1.0,orientation: .down)
img = depthDataMapImage
self.performSegue(withIdentifier: "ImageViewScreen", sender: self)
} catch {
print("Error")
}
}
}
I always get nil at the auxDataInfo guard.
AVCapturePhoto already contains the AVDepthData. Try getting the depth data directly from the photo:
let depthData = photo.depthData
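For example, a rough sketch of the delegate method using photo.depthData instead of parsing the auxiliary data dictionary (this assumes depth delivery was enabled on the output and in the photo settings, as in the code above):

public func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
    guard error == nil else { return }

    // The depth data is attached directly to the AVCapturePhoto when depth delivery is enabled.
    guard var depthData = photo.depthData else {
        print("No depth data delivered with this photo")
        return
    }

    // Normalize to 32-bit float depth before turning it into an image.
    if depthData.depthDataType != kCVPixelFormatType_DepthFloat32 {
        depthData = depthData.converting(toDepthDataType: kCVPixelFormatType_DepthFloat32)
    }

    let ciImage = CIImage(cvPixelBuffer: depthData.depthDataMap)
    img = UIImage(ciImage: ciImage, scale: 1.0, orientation: .down)

    // Keep the RGB image as well, as in the original code.
    if let imageData = photo.fileDataRepresentation() {
        rgbImage = UIImage(data: imageData)
    }

    DispatchQueue.main.async {
        self.performSegue(withIdentifier: "ImageViewScreen", sender: self)
    }
}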

Record depth map from iPhone as sequence

I want to create an iOS application that can record and save RGB + depth data. I have been able to capture both from the dual camera and preview them on screen in real time. Now I want to save them to the library as two sequences (one RGB sequence and one depth-map sequence).
So my question is: how can I save this depth information to the iPhone's photo library as a video or image sequence, saving the RGB information at the same time, for further processing later?
I am working with Xcode 10.2, Swift 5 and an iPhone XS.
import UIKit
import AVFoundation
class ViewController: UIViewController {
@IBOutlet weak var previewView: UIImageView!
@IBOutlet weak var previewModeControl: UISegmentedControl!
var previewMode = PreviewMode.original //Original(RGB) or Depth
let session = AVCaptureSession()
let dataOutputQueue = DispatchQueue(label: "video data queue", qos: .userInitiated, attributes: [], autoreleaseFrequency: .workItem)
var background: CIImage?
var depthMap: CIImage?
var scale: CGFloat = 0.0
override func viewDidLoad() {
super.viewDidLoad()
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
configureCaptureSession()
session.startRunning()
}
override var shouldAutorotate: Bool {
return false
}
func configureCaptureSession() {
session.beginConfiguration()
//Add input to the session
guard let camera = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .unspecified) else {
fatalError("No depth video camera available")
}
session.sessionPreset = .photo
do{
let cameraInput = try AVCaptureDeviceInput(device: camera)
if session.canAddInput(cameraInput){
session.addInput(cameraInput)
}else{
fatalError("Error adding input device to session")
}
}catch{
fatalError(error.localizedDescription)
}
//Add output to the session
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
if session.canAddOutput(videoOutput){
session.addOutput(videoOutput)
}else{
fatalError("Error adding output to session")
}
let videoConnection = videoOutput.connection(with: .video)
videoConnection?.videoOrientation = .portrait
//Add output to the session DEPTH
let depthOutput = AVCaptureDepthDataOutput()
//Set the current view controller as the delegate for the new object
depthOutput.setDelegate(self, callbackQueue: dataOutputQueue)
depthOutput.isFilteringEnabled = true // filtering smooths the depth data and fills in holes
if session.canAddOutput(depthOutput){
session.addOutput(depthOutput)
}else{
fatalError("Error adding output to session")
}
let depthConnection = depthOutput.connection(with: .depthData)
depthConnection?.videoOrientation = .portrait
let outputRect = CGRect(x: 0, y: 0, width: 1, height: 1)
let videoRect = videoOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
let depthRect = depthOutput.outputRectConverted(fromMetadataOutputRect: outputRect)
scale = max(videoRect.width, videoRect.height) / max(depthRect.width, depthRect.height)
do{
try camera.lockForConfiguration()
if let frameDuration = camera.activeDepthDataFormat?.videoSupportedFrameRateRanges.first?.minFrameDuration{
camera.activeVideoMinFrameDuration = frameDuration
}
camera.unlockForConfiguration()
}catch{
fatalError(error.localizedDescription)
}
session.commitConfiguration()
}
@IBAction func previewModeChanged(_ sender: UISegmentedControl) {
previewMode = PreviewMode(rawValue: previewModeControl.selectedSegmentIndex) ?? .original
}
}
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate{
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let image = CIImage(cvPixelBuffer: pixelBuffer!)
let previewImage: CIImage
switch previewMode {
case .original:
previewImage = image
case .depth:
previewImage = depthMap ?? image
//default:
//previewImage = image
}
let displayImage = UIImage(ciImage: previewImage)
DispatchQueue.main.async {
[weak self] in self?.previewView.image = displayImage
}
}
}
extension ViewController: AVCaptureDepthDataOutputDelegate{
func depthDataOutput(_ output: AVCaptureDepthDataOutput, didOutput depthData: AVDepthData, timestamp: CMTime, connection: AVCaptureConnection) {
if previewMode == .original{
return
}
var convertedDepth: AVDepthData
if depthData.depthDataType != kCVPixelFormatType_DisparityFloat32{
convertedDepth = depthData.converting(toDepthDataType: kCVPixelFormatType_DisparityFloat32)
}else{
convertedDepth = depthData
}
let pixelBuffer = convertedDepth.depthDataMap
pixelBuffer.clamp()
let depthMap = CIImage(cvPixelBuffer: pixelBuffer)
DispatchQueue.main.async {
[weak self] in self?.depthMap = depthMap
}
}
}
Actual result: the screen previews, in real time, whichever CIImage is selected in the UI (the RGB image or the depth map).

The Vision API in Xcode 10/Swift doesn't detect anything. Am I doing something obviously wrong?

I have been tinkering with this for a long time, and as I'm new to Swift I'm struggling. I am trying to detect a rectangle in the camera's live feed (the eventual goal is to detect when a crossword puzzle is in view), but the code below picks up nothing. I'm looking for the "Rectangle Detected" string in the console, but it seems that code is never reached. Can anyone see why? Here is my ViewController code:
Many thanks in advance.
@IBOutlet var cameraView: UIView!
var rootLayer: CALayer! = nil
private lazy var captureSession: AVCaptureSession = {
let session = AVCaptureSession()
session.sessionPreset = AVCaptureSession.Preset.photo
guard let backCamera = AVCaptureDevice.default(for: .video),
let input = try? AVCaptureDeviceInput(device: backCamera) else {
return session
}
session.addInput(input)
return session
}()
private lazy var cameraLayer: AVCaptureVideoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
private let handler = VNSequenceRequestHandler()
fileprivate var lastObservation: VNDetectedObjectObservation?
lazy var highlightView: UIView = {
let view = UIView()
view.layer.borderColor = UIColor.red.cgColor
view.layer.borderWidth = 4
view.backgroundColor = .clear
return view
}()
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
/*cameraView.layer.addSublayer(cameraLayer)
cameraView.addSubview(highlightView)*/
rootLayer = cameraView.layer
cameraLayer.frame = rootLayer.bounds
rootLayer.insertSublayer(cameraLayer, at: 0)
cameraView.addSubview(highlightView)
let output = AVCaptureVideoDataOutput()
output.setSampleBufferDelegate(self, queue: DispatchQueue(label: "queue"))
captureSession.addOutput(output)
captureSession.startRunning()
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer),
let observation = lastObservation else {
return
}
let request = VNDetectRectanglesRequest(completionHandler: self.handleRectangle)
do {
try handler.perform([request], on: pixelBuffer)
}
catch {
print(error)
}
}
fileprivate func handleRectangle(request: VNRequest, error: Error?) {
DispatchQueue.main.async {
guard let newObservation = request.results?.first as? VNDetectedObjectObservation else {
return
}
self.lastObservation = newObservation
print("Rectangle Detected")
var transformedRect = newObservation.boundingBox
transformedRect.origin.y = 1 - transformedRect.origin.y
let convertedRect = self.cameraLayer.layerRectConverted(fromMetadataOutputRect: transformedRect)
self.highlightView.frame = convertedRect
}
}
}
fileprivate var lastObservation: VNDetectedObjectObservation?
This property is Optional, so it starts as nil.
Every call to captureOutput(_:didOutput:from:) checks whether it is nil and exits early, so it never gets set. Remove let observation = lastObservation from your guard statement and this will fix your initial issue. You may then need to change the logic slightly.
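For illustration, a sketch of the corrected delegate method with that guard condition removed (lastObservation is only assigned later, inside the completion handler, so the capture callback no longer needs it):

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // Only the pixel buffer is required here; lastObservation is set by handleRectangle once a rectangle is found.
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return
    }
    let request = VNDetectRectanglesRequest(completionHandler: self.handleRectangle)
    do {
        try handler.perform([request], on: pixelBuffer)
    } catch {
        print(error)
    }
}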

The label does not appear over the camera preview

I'm sharing the code below. When I turn on the camera, the preview appears, but the label I try to print to never does. What is the problem?
My program detects the object, and at the end I need to show the result in the label, but unfortunately I cannot get the label to display.
@IBOutlet weak var label: UILabel!
override func viewDidLoad() {
super.viewDidLoad()
let captureSession = AVCaptureSession()
captureSession.sessionPreset = .photo
guard let captureDevice = AVCaptureDevice.default(for: .video) else{ return }
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
captureSession.addInput(input)
captureSession.startRunning()
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "CameraQueue"))
captureSession.addOutput(dataOutput)
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
guard let model = try? VNCoreMLModel(for: Resnet50().model) else { return }
let request = VNCoreMLRequest(model : model) {(finishedReq,error) in
guard let results = finishedReq.results as? [VNClassificationObservation] else{ return }
guard let firstObservation = results.first else { return }
print(firstObservation.identifier , firstObservation.confidence)
DispatchQueue.main.async {
self.label.text = "\(firstObservation.identifier) ,\(firstObservation.confidence)"
}
}
try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
You need to call self.view.bringSubview(toFront: self.label) after you add the preview layer to the view's layer. When you add the preview layer, it ends up on top of the existing subviews, including the label.
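In the question's viewDidLoad that would look roughly like the snippet below (in Swift 4.2 and later the method is named bringSubviewToFront(_:)):

override func viewDidLoad() {
    super.viewDidLoad()
    let captureSession = AVCaptureSession()
    captureSession.sessionPreset = .photo
    guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
    guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
    captureSession.addInput(input)
    captureSession.startRunning()

    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    view.layer.addSublayer(previewLayer)
    previewLayer.frame = view.frame

    // The preview layer was just added on top of everything, so move the label back above it.
    view.bringSubview(toFront: label)

    let dataOutput = AVCaptureVideoDataOutput()
    dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "CameraQueue"))
    captureSession.addOutput(dataOutput)
}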

Recording videos with real-time filters in Swift

I am new to Swift and trying to build a camera app that can apply real-time filters and save the video with the filters applied.
So far I can preview in real time with the filters applied, but when I save the video it comes out all black.
import UIKit
import AVFoundation
import AssetsLibrary
import CoreMedia
import Photos
class ViewController: UIViewController , AVCaptureVideoDataOutputSampleBufferDelegate {
var captureSession: AVCaptureSession!
@IBOutlet weak var previewView: UIView!
@IBOutlet weak var recordButtton: UIButton!
@IBOutlet weak var imageView: UIImageView!
var assetWriter: AVAssetWriter?
var assetWriterPixelBufferInput: AVAssetWriterInputPixelBufferAdaptor?
var isWriting = false
var currentSampleTime: CMTime?
var currentVideoDimensions: CMVideoDimensions?
override func viewDidLoad() {
super.viewDidLoad()
FilterVendor.register()
setupCaptureSession()
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
func setupCaptureSession() {
let captureSession = AVCaptureSession()
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
guard let captureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo), let input = try? AVCaptureDeviceInput(device: captureDevice) else {
print("Can't access the camera")
return
}
if captureSession.canAddInput(input) {
captureSession.addInput(input)
}
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
if captureSession.canAddOutput(videoOutput) {
captureSession.addOutput(videoOutput)
}
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
if((previewLayer) != nil) {
view.layer.addSublayer(previewLayer!)
}
captureSession.startRunning()
}
@IBAction func record(_ sender: Any) {
if isWriting {
print("stop record")
self.isWriting = false
assetWriterPixelBufferInput = nil
assetWriter?.finishWriting(completionHandler: {[unowned self] () -> Void in
self.saveMovieToCameraRoll()
})
} else {
print("start record")
createWriter()
assetWriter?.startWriting()
assetWriter?.startSession(atSourceTime: currentSampleTime!)
isWriting = true
}
}
func saveMovieToCameraRoll() {
PHPhotoLibrary.shared().performChanges({
PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: self.movieURL() as URL)
}) { saved, error in
if saved {
print("saved")
}
}
}
func movieURL() -> NSURL {
let tempDir = NSTemporaryDirectory()
let url = NSURL(fileURLWithPath: tempDir).appendingPathComponent("tmpMov.mov")
return url! as NSURL
}
func checkForAndDeleteFile() {
let fm = FileManager.default
let url = movieURL()
let exist = fm.fileExists(atPath: url.path!)
if exist {
do {
try fm.removeItem(at: url as URL)
} catch let error as NSError {
print(error.localizedDescription)
}
}
}
func createWriter() {
self.checkForAndDeleteFile()
do {
assetWriter = try AVAssetWriter(outputURL: movieURL() as URL, fileType: AVFileTypeQuickTimeMovie)
} catch let error as NSError {
print(error.localizedDescription)
return
}
let outputSettings = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : Int(currentVideoDimensions!.width),
AVVideoHeightKey : Int(currentVideoDimensions!.height)
] as [String : Any]
let assetWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings as? [String : AnyObject])
assetWriterVideoInput.expectsMediaDataInRealTime = true
assetWriterVideoInput.transform = CGAffineTransform(rotationAngle: CGFloat(M_PI / 2.0))
let sourcePixelBufferAttributesDictionary = [
String(kCVPixelBufferPixelFormatTypeKey) : Int(kCVPixelFormatType_32BGRA),
String(kCVPixelBufferWidthKey) : Int(currentVideoDimensions!.width),
String(kCVPixelBufferHeightKey) : Int(currentVideoDimensions!.height),
String(kCVPixelFormatOpenGLESCompatibility) : kCFBooleanTrue
] as [String : Any]
assetWriterPixelBufferInput = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: assetWriterVideoInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
if assetWriter!.canAdd(assetWriterVideoInput) {
assetWriter!.add(assetWriterVideoInput)
} else {
print("no way\(assetWriterVideoInput)")
}
}
func captureOutput(_ captureOutput: AVCaptureOutput, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection) {
autoreleasepool {
connection.videoOrientation = AVCaptureVideoOrientation.landscapeLeft;
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
let cameraImage = CIImage(cvPixelBuffer: pixelBuffer)
let filter = CIFilter(name: "Fİlter")!
filter.setValue(cameraImage, forKey: kCIInputImageKey)
let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)!
self.currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDescription)
self.currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer)
if self.isWriting {
if self.assetWriterPixelBufferInput?.assetWriterInput.isReadyForMoreMediaData == true {
var newPixelBuffer: CVPixelBuffer? = nil
CVPixelBufferPoolCreatePixelBuffer(nil, self.assetWriterPixelBufferInput!.pixelBufferPool!, &newPixelBuffer)
let success = self.assetWriterPixelBufferInput?.append(newPixelBuffer!, withPresentationTime: self.currentSampleTime!)
if success == false {
print("Pixel Buffer failed")
}
}
}
DispatchQueue.main.async {
if let outputValue = filter.value(forKey: kCIOutputImageKey) as? CIImage {
let filteredImage = UIImage(ciImage: outputValue)
self.imageView.image = filteredImage
}
}
}
}
}
I've added some comments to the critical part below:
func captureOutput(_ captureOutput: AVCaptureOutput, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection) {
autoreleasepool {
connection.videoOrientation = AVCaptureVideoOrientation.landscapeLeft;
// COMMENT: This line makes sense - this is your pixelbuffer from the camera.
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
// COMMENT: OK, so you turn pixelBuffer into a CIImage...
let cameraImage = CIImage(cvPixelBuffer: pixelBuffer)
// COMMENT: And now you've created a filter with the camera image as its input...
let filter = CIFilter(name: "Fİlter")!
filter.setValue(cameraImage, forKey: kCIInputImageKey)
let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)!
self.currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDescription)
self.currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer)
if self.isWriting {
if self.assetWriterPixelBufferInput?.assetWriterInput.isReadyForMoreMediaData == true {
// COMMENT: Here's where it gets weird. You've declared a new, empty pixelBuffer... but you already have one (pixelBuffer) that contains the image you want to write...
var newPixelBuffer: CVPixelBuffer? = nil
// COMMENT: And you grabbed memory from the pool.
CVPixelBufferPoolCreatePixelBuffer(nil, self.assetWriterPixelBufferInput!.pixelBufferPool!, &newPixelBuffer)
// COMMENT: And now you wrote an empty pixelBuffer back <-- this is what's causing the black frame.
let success = self.assetWriterPixelBufferInput?.append(newPixelBuffer!, withPresentationTime: self.currentSampleTime!)
if success == false {
print("Pixel Buffer failed")
}
}
}
// COMMENT: And now you're sending the filtered image back to the screen.
DispatchQueue.main.async {
if let outputValue = filter.value(forKey: kCIOutputImageKey) as? CIImage {
let filteredImage = UIImage(ciImage: outputValue)
self.imageView.image = filteredImage
}
}
}
}
It looks to me like you're grabbing the camera image, creating a filtered copy, then making a NEW pixel buffer which is empty and writing that out, which is what produces the black video.
If you write the pixelBuffer you grabbed from the camera instead of the new, empty one, you will at least write the unfiltered image successfully.
What you need in order to write out the filtered video is to render the filtered CIImage into a new CVPixelBuffer - that solution already exists here on Stack Overflow; I know because I needed that step myself!
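As a rough sketch of that last step (assumptions: a CIContext stored as a property, and the filter's output taken via outputImage; this is not the exact code from the linked solution), the writing branch could render the filtered CIImage into the pool buffer before appending it:

// Assumed to be a property so it isn't recreated for every frame.
let ciContext = CIContext()

// Inside captureOutput, replacing the original `if self.isWriting { ... }` block:
if self.isWriting,
    let adaptor = self.assetWriterPixelBufferInput,
    adaptor.assetWriterInput.isReadyForMoreMediaData,
    let pool = adaptor.pixelBufferPool,
    let filteredImage = filter.outputImage {

    var newPixelBuffer: CVPixelBuffer? = nil
    CVPixelBufferPoolCreatePixelBuffer(nil, pool, &newPixelBuffer)

    if let buffer = newPixelBuffer {
        // Render the filtered image into the freshly created buffer so the
        // appended frame is no longer empty (black).
        ciContext.render(filteredImage, to: buffer)

        if adaptor.append(buffer, withPresentationTime: self.currentSampleTime!) == false {
            print("Pixel Buffer failed")
        }
    }
}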
