iOS 11: How can I use the Vision framework to track a face across video?

I can track an object across video, but I can't track a face.
When I use the camera to track a face, the code prints [].
extension FaceTrackingViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        let request = VNDetectFaceLandmarksRequest { [unowned self] request, error in
            if let error = error {
                self.presentAlertController(withTitle: self.title,
                                            message: error.localizedDescription)
            } else {
                print("\(request.results!)")
            }
        }
        do {
            // `handler` is presumably a VNSequenceRequestHandler property defined elsewhere on this view controller.
            try handler.perform([request], on: pixelBuffer!)
        } catch {
            print(error)
        }
    }
}
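
One common reason the results come back empty is that the request handler is never told the camera frame's orientation; using VNImageRequestHandler(cvPixelBuffer:orientation:options:) (or the matching VNSequenceRequestHandler.perform variant) often helps. For tracking a face across frames, the usual pattern is to detect once and then feed the observation to a VNTrackObjectRequest on every following frame. The sketch below uses made-up names (FaceTracker, process(_:)) and is not the poster's code:

import Vision

final class FaceTracker {
    private let sequenceHandler = VNSequenceRequestHandler()
    private var lastObservation: VNDetectedObjectObservation?

    // Call this for every captured pixel buffer.
    func process(_ pixelBuffer: CVPixelBuffer) {
        if let observation = lastObservation {
            // A face was already found: track it into the new frame.
            let request = VNTrackObjectRequest(detectedObjectObservation: observation) { [weak self] request, _ in
                guard let result = request.results?.first as? VNDetectedObjectObservation else { return }
                self?.lastObservation = result
                print("tracked face at \(result.boundingBox)")
            }
            try? sequenceHandler.perform([request], on: pixelBuffer)
        } else {
            // No face yet: run a one-off detection to seed the tracker.
            let request = VNDetectFaceRectanglesRequest { [weak self] request, _ in
                self?.lastObservation = (request.results as? [VNFaceObservation])?.first
            }
            try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
        }
    }
}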

Related

How to change pixel colors for CGImage created from CVPixelBuffer?

I have a simple function that takes a CVPixelBuffer, turns it into a CGImage, and assigns it to a CALayer displayed on the screen.
// Note: VTCreateCGImageFromCVPixelBuffer requires `import VideoToolbox`.
func processResults(_ results: [Any]) {
    if let observation = (results as? [VNPixelBufferObservation])?.first {
        var cgImage: CGImage?
        VTCreateCGImageFromCVPixelBuffer(observation.pixelBuffer, options: nil, imageOut: &cgImage)
        caLayer.contents = cgImage
        if caLayer.superlayer == nil {
            segmentationOverlay.addSublayer(caLayer)
        }
    }
}
The result is a black-and-gray segmentation mask drawn over the video. Here is how I obtain it:
lazy var request: VNCoreMLRequest = {
    let model = try! VNCoreMLModel(for: espnetv2_fp16_new().model)
    let request = VNCoreMLRequest(model: model) { request, error in
        DispatchQueue.main.async {
            if let results = request.results {
                self.processResults(results)
            }
        }
    }
    request.imageCropAndScaleOption = .scaleFill
    return request
}()

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    if let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
        let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:])
        try! handler.perform([request])
    }
}
I would like to make the black color fully transparent and change the gray to green. Is this possible?
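
One way to do this (a sketch, not the poster's code; the helper name makeOverlayImage is made up) is to run the mask through Core Image's CIColorMatrix filter before creating the CGImage: route the grayscale value into the green and alpha channels, so black pixels come out fully transparent and gray pixels come out green.

import CoreImage

// Hypothetical helper: recolor a segmentation mask so that black becomes
// transparent and brighter (gray) pixels become green.
func makeOverlayImage(from maskBuffer: CVPixelBuffer) -> CGImage? {
    let mask = CIImage(cvPixelBuffer: maskBuffer)

    // CIColorMatrix computes each output channel as a dot product of the input
    // pixel with a vector. Feeding the red (== gray) value into green and alpha
    // zeroes out black pixels and tints everything else green.
    let filter = CIFilter(name: "CIColorMatrix", parameters: [
        kCIInputImageKey: mask,
        "inputRVector": CIVector(x: 0, y: 0, z: 0, w: 0),
        "inputGVector": CIVector(x: 1, y: 0, z: 0, w: 0),
        "inputBVector": CIVector(x: 0, y: 0, z: 0, w: 0),
        "inputAVector": CIVector(x: 1, y: 0, z: 0, w: 0)
    ])
    guard let output = filter?.outputImage else { return nil }
    return CIContext().createCGImage(output, from: output.extent)
}

The resulting CGImage could then be assigned to caLayer.contents in place of the one produced by VTCreateCGImageFromCVPixelBuffer.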

Swift 5 not reading EAN-13 barcodes

In Swift 5 I can capture QR codes fine, but this fails to detect EAN-13 barcodes. Can anyone point me in the right direction? Thanks.
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    if metadataObjects.count != 0 {
        if let object = metadataObjects[0] as? AVMetadataMachineReadableCodeObject {
            if object.type == AVMetadataObject.ObjectType.qr {
                // do something
            } else if object.type == AVMetadataObject.ObjectType.ean13 {
                // do something else
            }
        }
    }
}
Did you add .ean13 to the list of metadata object types when initialising your capture session?
For example:
let metadataOutput = AVCaptureMetadataOutput()
if captureSession.canAddOutput(metadataOutput) {
    captureSession.addOutput(metadataOutput)
    metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
    metadataOutput.metadataObjectTypes = [.qr, .ean13]
}

Is there any way to do object detection in iOS without classifying the objects?

Purpose: to detect an object in iOS without classifying it.
I have a TFLite model to use in Xcode, but all the approaches I have found work as classifiers. I also tried converting the model to Core ML, but it doesn't work properly.
Below is the code that is called every time a frame is captured; it loads the model:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // Note: creating the VNCoreMLModel on every frame is expensive; it is usually cached.
    guard let model = try? VNCoreMLModel(for: Resnet50().model) else { return }
    let request = VNCoreMLRequest(model: model) { (finishedRequest, error) in
        guard let results = finishedRequest.results as? [VNClassificationObservation] else { return }
        guard let observation = results.first else { return }
        DispatchQueue.main.async(execute: {
            self.label.text = "\(observation.identifier)"
            print(observation.confidence)
        })
    }
    guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    // Executes the request.
    try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
Can anyone help me out with this?
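
If the goal is really "find objects without naming them", Vision's objectness-based saliency request (iOS 13+) can be used instead of a classification model; it returns bounding boxes with no class labels attached. A minimal sketch, with a made-up helper name detectObjects(in:):

import Vision

// Returns bounding boxes for "object-like" regions without classifying them.
// detectObjects(in:) is a hypothetical helper, not part of the poster's code.
func detectObjects(in pixelBuffer: CVPixelBuffer) {
    let request = VNGenerateObjectnessBasedSaliencyImageRequest { request, _ in
        guard let observation = request.results?.first as? VNSaliencyImageObservation else { return }
        for object in observation.salientObjects ?? [] {
            // boundingBox is normalized (0...1) with the origin in the lower left.
            print("object at \(object.boundingBox), confidence \(object.confidence)")
        }
    }
    try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}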

CMSampleBufferGetImageBuffer(sampleBuffer) returns nil

I use this code to capture video from the camera, but CMSampleBufferGetImageBuffer(sampleBuffer) always returns nil. What is the problem? Here is the code; I adapted it to Swift 4 from this source: https://github.com/FlexMonkey/CoreImageHelpers/blob/master/CoreImageHelpers/coreImageHelpers/CameraCaptureHelper.swift
import AVFoundation
import CoreMedia
import CoreImage
import UIKit

class CameraCaptureHelper: NSObject {
    let captureSession = AVCaptureSession()
    let cameraPosition: AVCaptureDevice.Position

    weak var delegate: CameraCaptureHelperDelegate?

    required init(cameraPosition: AVCaptureDevice.Position) {
        self.cameraPosition = cameraPosition
        super.init()
        initialiseCaptureSession()
    }

    fileprivate func initialiseCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo

        guard let camera = AVCaptureDevice.default(.builtInWideAngleCamera,
                                                   for: .video, position: cameraPosition)
        else {
            fatalError("Unable to access camera")
        }

        do {
            let input = try AVCaptureDeviceInput(device: camera)
            captureSession.addInput(input)
        } catch {
            fatalError("Unable to access back camera")
        }

        let videoOutput = AVCaptureVideoDataOutput()
        videoOutput.setSampleBufferDelegate(self,
                                            queue: DispatchQueue(label: "sample buffer delegate", attributes: []))
        if captureSession.canAddOutput(videoOutput) {
            captureSession.addOutput(videoOutput)
        }

        captureSession.startRunning()
    }
}

extension CameraCaptureHelper: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didDrop sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        connection.videoOrientation = .landscapeRight //AVCaptureVideoOrientation(rawValue: UIApplication.shared.statusBarOrientation.rawValue)!
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            return
        }
        DispatchQueue.main.async {
            self.delegate?.newCameraImage(self,
                                          image: CIImage(cvPixelBuffer: pixelBuffer))
        }
    }
}

protocol CameraCaptureHelperDelegate: class {
    func newCameraImage(_ cameraCaptureHelper: CameraCaptureHelper, image: CIImage)
}
You're trying to access the pixel buffer from the "just dropped a sample buffer" callback. The header file says:
CMSampleBuffer object passed to this delegate method will contain metadata about the dropped video frame, such as its duration and presentation time stamp, but will contain no actual video data.
You should be doing that from the didOutputSampleBuffer: delegate callback.
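
In Swift 4 terms, that means implementing the didOutput variant of the delegate method rather than didDrop. A sketch of the corrected callback, keeping the same body as the poster's code inside the existing extension:

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // This callback delivers frames that actually contain pixel data,
    // so CMSampleBufferGetImageBuffer no longer returns nil.
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    DispatchQueue.main.async {
        self.delegate?.newCameraImage(self, image: CIImage(cvPixelBuffer: pixelBuffer))
    }
}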

iOS didOutputSampleBuffer drops the first few frames

I am making an app which needs to record video and audio using AVCaptureVideoDataOutputSampleBufferDelegate.
The functions I use are:
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
    self.captureQueue.async {
        if !self.isCapturing {
            return
        }
        var isVideo = true
        if connection != self.videoConnection {
            isVideo = false
        }
        self.encoder!.encodeFrame(sampleBuffer: sampleBuffer, isVideo: isVideo)
    }
}
and the encode frame function :
func encodeFrame(sampleBuffer: CMSampleBuffer, isVideo: Bool) {
    if CMSampleBufferDataIsReady(sampleBuffer) {
        if self.writer.status == .unknown {
            print("INIT")
            let startTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
            self.writer.startWriting()
            self.writer.startSession(atSourceTime: startTime)
        }
        if self.writer.status == .failed {
            print("writer failed : \(self.writer.error!)")
        }
        if isVideo {
            if self.writerVideoInput.isReadyForMoreMediaData {
                if self.writerVideoInput.append(sampleBuffer) {
                    print("writing video")
                } else {
                    print("failed to append video")
                }
            } else {
                print("video input data isn't ready")
            }
        } else {
            if self.writerAudioInput.isReadyForMoreMediaData {
                if self.writerAudioInput.append(sampleBuffer) {
                    print("writing audio")
                } else {
                    print("failed to append audio")
                }
            } else {
                print("audio input isn't ready")
            }
        }
    } else {
        print("sample buffer isn't ready")
    }
}
The problem is that when I start recording (setting the isCapturing flag to true), the first few frames get dropped (the reason is FrameWasLate). Apple's documentation says this happens because the sample buffer isn't released fast enough, but all I do is initialize the AVAssetWriter, nothing more.
I tried putting the encoding function on a serial queue, but it didn't work.
What's wrong?
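
Two things are often pointed out for this pattern (offered as suggestions, not a confirmed diagnosis): dispatching the sample buffer to another queue keeps it alive outside the delegate callback, and the capture output only has a small pool of buffers, so any delay shows up as dropped frames; on top of that, the very first frame also pays for startWriting(). Below is a sketch of moving the expensive writer setup out of the per-frame path; the FrameEncoder class and all of its names are made up for illustration:

import AVFoundation

// Hypothetical encoder: startWriting() is done up front so the first captured
// frame is not held while the writer spins up, which is one common cause of
// early dropped frames.
final class FrameEncoder {
    private let writer: AVAssetWriter
    private let videoInput: AVAssetWriterInput
    private var sessionStarted = false

    init(outputURL: URL, videoSettings: [String: Any]?) throws {
        writer = try AVAssetWriter(outputURL: outputURL, fileType: .mp4)
        videoInput = AVAssetWriterInput(mediaType: .video, outputSettings: videoSettings)
        videoInput.expectsMediaDataInRealTime = true
        if writer.canAdd(videoInput) {
            writer.add(videoInput)
        }
        // Expensive part happens before any frames arrive.
        writer.startWriting()
    }

    func encode(_ sampleBuffer: CMSampleBuffer) {
        guard CMSampleBufferDataIsReady(sampleBuffer) else { return }
        if !sessionStarted {
            writer.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
            sessionStarted = true
        }
        if videoInput.isReadyForMoreMediaData {
            videoInput.append(sampleBuffer)
        }
    }
}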
