Live rectangle detection using Vision framework - iOS

I'm trying to detect rectangles from the live preview layer, but I'm not able to detect all of them.
What I'm doing
To set up the Vision request:
func setupVision() {
    let rectanglesDetectionRequest = VNDetectRectanglesRequest(completionHandler: self.handleRectangles)
    rectanglesDetectionRequest.maximumObservations = 0
    rectanglesDetectionRequest.quadratureTolerance = 45.0
    rectanglesDetectionRequest.minimumAspectRatio = 0.64
    self.requests = [rectanglesDetectionRequest]
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return
    }
    let exifOrientation = self.exifOrientationFromDeviceOrientation()
    DispatchQueue.main.asyncAfter(deadline: .now() + 2) {
        var requestOptions: [VNImageOption: Any] = [:]
        if let cameraIntrinsicData = CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, nil) {
            requestOptions = [.cameraIntrinsics: cameraIntrinsicData]
        }
        DispatchQueue.global(qos: .background).async {
            let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: exifOrientation, options: requestOptions)
            do {
                try imageRequestHandler.perform(self.requests)
            } catch {
                print(error)
            }
        }
    }
    var arr = Array<VNTrackRectangleRequest>()
    for obs in self.rectanglesss {
        let trackRequest = VNTrackRectangleRequest(rectangleObservation: obs, completionHandler: self.handleSequenceRequestUpdate)
        trackRequest.trackingLevel = .accurate
        arr.append(trackRequest)
    }
    do {
        try self.sequenceHandler.perform(arr, on: pixelBuffer, orientation: exifOrientation)
    } catch {
        print(error)
    }
}
Can someone help me figure out what I'm doing wrong?
When I shoot at a right angle it sometimes detects a few of them; at an acute angle it detects only the nearest 2-3 rectangles. Here I'm testing with SET cards, and I added 2 images of what I'm getting.
Result

Have you tried holding the iPhone directly above the cards (a bird's-eye view)? Also try a different table with a non-white color.

I suggest you hold your phone flat over the cards when shooting, and then try setting minimumConfidence.
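For reference, a minimal sketch of what setting minimumConfidence (together with the other thresholds from the question) could look like; the numeric values here are illustrative guesses, not tested settings:
let request = VNDetectRectanglesRequest(completionHandler: self.handleRectangles)
request.maximumObservations = 0        // as in the question's setup
request.minimumConfidence = 0.6        // discard low-confidence candidates
request.minimumSize = 0.05             // allow rectangles that are small relative to the frame
request.minimumAspectRatio = 0.64
request.quadratureTolerance = 45.0
self.requests = [request]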

Use this...
https://developer.apple.com/documentation/vision/vndetectrectanglesrequest/2875373-maximumobservations
let request = VNDetectRectanglesRequest { (request, error) in
    // Your completion handler code
}
request.maximumObservations = 2

Related

X and Y-axis swapped in Vision Framework Swift

I'm using the Vision framework to detect faces with the iPhone's front camera. My code looks like this:
func detect(_ cmSampleBuffer: CMSampleBuffer) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(cmSampleBuffer) else { return }
    var requests: [VNRequest] = []
    let requestLandmarks = VNDetectFaceLandmarksRequest { request, _ in
        DispatchQueue.main.async {
            guard let results = request.results as? [VNFaceObservation] else { return }
            print(results)
        }
    }
    requests.append(requestLandmarks)
    let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: .leftMirrored)
    do {
        try handler.perform(requests)
    } catch {
        print(error)
    }
}
However, I noticed that when I move my face horizontally, the coordinates change vertically, and vice versa. The image below helps to illustrate this.
If anyone can help me, I'd appreciate it; this is driving me crazy.
For some reason, removing
let connectionVideo = videoDataOutput.connection(with: AVMediaType.video)
connectionVideo?.videoOrientation = AVCaptureVideoOrientation.portrait
from my AVCaptureVideoDataOutput setup solved the problem 🤡
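If you do want to keep a fixed connection orientation, another option is to convert Vision's normalized, bottom-left-origin bounding box through the preview layer instead of using it directly. This is only a sketch, assuming you draw on an AVCaptureVideoPreviewLayer; the exact flip can depend on your orientation and mirroring settings:
// Vision's normalized rect has its origin in the bottom-left corner, while
// metadata output coordinates use a top-left origin, so flip the y-axis first.
func layerRect(for boundingBox: CGRect, in previewLayer: AVCaptureVideoPreviewLayer) -> CGRect {
    let metadataRect = CGRect(x: boundingBox.origin.x,
                              y: 1 - boundingBox.origin.y - boundingBox.height,
                              width: boundingBox.width,
                              height: boundingBox.height)
    return previewLayer.layerRectConverted(fromMetadataOutputRect: metadataRect)
}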

How do you create a new AVAsset video that consists of only frames from given `CMTimeRange`s of another video?

Apple's sample code Identifying Trajectories in Video contains the following delegate callback:
func cameraViewController(_ controller: CameraViewController, didReceiveBuffer buffer: CMSampleBuffer, orientation: CGImagePropertyOrientation) {
    let visionHandler = VNImageRequestHandler(cmSampleBuffer: buffer, orientation: orientation, options: [:])
    if gameManager.stateMachine.currentState is GameManager.TrackThrowsState {
        DispatchQueue.main.async {
            // Get the frame of rendered view
            let normalizedFrame = CGRect(x: 0, y: 0, width: 1, height: 1)
            self.jointSegmentView.frame = controller.viewRectForVisionRect(normalizedFrame)
            self.trajectoryView.frame = controller.viewRectForVisionRect(normalizedFrame)
        }
        // Perform the trajectory request in a separate dispatch queue.
        trajectoryQueue.async {
            do {
                try visionHandler.perform([self.detectTrajectoryRequest])
                if let results = self.detectTrajectoryRequest.results {
                    DispatchQueue.main.async {
                        self.processTrajectoryObservations(controller, results)
                    }
                }
            } catch {
                AppError.display(error, inViewController: self)
            }
        }
    }
}
However, instead of drawing UI whenever detectTrajectoryRequest.results exist (https://developer.apple.com/documentation/vision/vndetecttrajectoriesrequest/3675672-results), I'm interested in using the CMTimeRange provided by each result to construct a new video. In effect, this would filter down the original video to only frames with trajectories.
What would be a good approach to transferring only frames with trajectories from an AVAssetReader to an AVAssetWriter?
By the time you identify a trajectory in captured video frames, or in frames decoded from a file, you may no longer have the earlier frames in memory, so the easiest way to create a file containing only the trajectories is to keep the original file on hand and then insert its trajectory snippets into an AVComposition, which you then export using AVAssetExportSession.
This sample captures frames from the camera and encodes them to a file while analysing them for trajectories; after 20 seconds it closes the file and then creates a new file containing only the trajectory snippets.
If you're interested in detecting trajectories in a pre-existing file, it's not too hard to rewire this code; a rough sketch of a file-based reading loop follows the listing.
import UIKit
import AVFoundation
import Vision

class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    let session = AVCaptureSession()
    var assetWriter: AVAssetWriter!
    var assetWriterInput: AVAssetWriterInput!
    var assetWriterStartTime: CMTime = .zero
    var assetWriterStarted = false
    var referenceFileURL: URL!
    var timeRangesOfInterest: [Double: CMTimeRange] = [:]

    func startWritingFile(outputURL: URL, initialSampleBuffer: CMSampleBuffer) {
        try? FileManager.default.removeItem(at: outputURL)
        assetWriter = try! AVAssetWriter(outputURL: outputURL, fileType: .mov)
        let dimensions = initialSampleBuffer.formatDescription!.dimensions
        assetWriterInput = AVAssetWriterInput(mediaType: .video, outputSettings: [AVVideoCodecKey: AVVideoCodecType.h264, AVVideoWidthKey: dimensions.width, AVVideoHeightKey: dimensions.height])
        assetWriter.add(assetWriterInput)
        assetWriter.startWriting()
        self.assetWriterStartTime = CMSampleBufferGetPresentationTimeStamp(initialSampleBuffer)
        assetWriter.startSession(atSourceTime: self.assetWriterStartTime)
    }
    func stopWritingFile(completion: @escaping (() -> Void)) {
        let assetWriterToFinish = self.assetWriter!
        self.assetWriterInput = nil
        self.assetWriter = nil
        assetWriterToFinish.finishWriting {
            print("finished writing: \(assetWriterToFinish.status.rawValue)")
            completion()
        }
    }
    func exportVideoTimeRanges(inputFileURL: URL, outputFileURL: URL, timeRanges: [CMTimeRange]) {
        let inputAsset = AVURLAsset(url: inputFileURL)
        let inputVideoTrack = inputAsset.tracks(withMediaType: .video).first!
        let composition = AVMutableComposition()
        let compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid)!
        var insertionPoint: CMTime = .zero
        for timeRange in timeRanges {
            try! compositionTrack.insertTimeRange(timeRange, of: inputVideoTrack, at: insertionPoint)
            insertionPoint = insertionPoint + timeRange.duration
        }
        let exportSession = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality)!
        try? FileManager.default.removeItem(at: outputFileURL)
        exportSession.outputURL = outputFileURL
        exportSession.outputFileType = .mov
        exportSession.exportAsynchronously {
            print("export finished: \(exportSession.status.rawValue) - \(exportSession.error)")
        }
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        let inputDevice = AVCaptureDevice.default(for: .video)!
        let input = try! AVCaptureDeviceInput(device: inputDevice)
        let output = AVCaptureVideoDataOutput()
        output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
        session.addInput(input)
        session.addOutput(output)
        session.startRunning()
        DispatchQueue.main.asyncAfter(deadline: .now() + 20) {
            self.stopWritingFile {
                print("finished writing")
                let trajectoriesFileURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0].appendingPathComponent("trajectories.mov")
                self.exportVideoTimeRanges(inputFileURL: self.referenceFileURL, outputFileURL: trajectoriesFileURL, timeRanges: self.timeRangesOfInterest.map { $0.1 })
            }
        }
    }

    // Lazily create a single instance of VNDetectTrajectoriesRequest.
    private lazy var request: VNDetectTrajectoriesRequest = {
        return VNDetectTrajectoriesRequest(frameAnalysisSpacing: .zero,
                                           trajectoryLength: 10,
                                           completionHandler: completionHandler)
    }()
    // AVCaptureVideoDataOutputSampleBufferDelegate callback.
    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        if !assetWriterStarted {
            self.referenceFileURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0].appendingPathComponent("reference.mov")
            startWritingFile(outputURL: self.referenceFileURL, initialSampleBuffer: sampleBuffer)
            assetWriterStarted = true
        }
        if assetWriterInput != nil && assetWriterInput.isReadyForMoreMediaData {
            assetWriterInput.append(sampleBuffer)
        }
        do {
            let requestHandler = VNImageRequestHandler(cmSampleBuffer: sampleBuffer)
            try requestHandler.perform([request])
        } catch {
            // Handle the error.
        }
    }

    func completionHandler(request: VNRequest, error: Error?) {
        guard let request = request as? VNDetectTrajectoriesRequest else { return }
        if let results = request.results,
           results.count > 0 {
            NSLog("\(results)")
            for result in results {
                var fileRelativeTimeRange = result.timeRange
                fileRelativeTimeRange.start = fileRelativeTimeRange.start - self.assetWriterStartTime
                self.timeRangesOfInterest[fileRelativeTimeRange.start.seconds] = fileRelativeTimeRange
            }
        }
    }
}
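As mentioned above, if you want to analyze a pre-existing file rather than the live camera, a minimal sketch of the reading loop could look like the following. This assumes a local fileURL and reuses the same trajectory request; it's untested and only illustrates the rewiring:
func analyzeFile(at fileURL: URL, with request: VNDetectTrajectoriesRequest) throws {
    let asset = AVURLAsset(url: fileURL)
    guard let videoTrack = asset.tracks(withMediaType: .video).first else { return }

    let reader = try AVAssetReader(asset: asset)
    // Ask for decoded pixel buffers so Vision can work on the frames.
    let settings: [String: Any] = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_420YpCbCr8BiPlanarFullRange]
    let output = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: settings)
    reader.add(output)
    reader.startReading()

    // Feed the decoded frames to the trajectory request; its completion handler
    // then receives time ranges relative to the file's own timeline, so no
    // start-time offset is needed.
    while let sampleBuffer = output.copyNextSampleBuffer() {
        let handler = VNImageRequestHandler(cmSampleBuffer: sampleBuffer, orientation: .up, options: [:])
        try handler.perform([request])
    }
}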

How to overcome slowness of live camera view in iOS

I am trying to develop an image segmentation app and process the live camera view with my Core ML model. However, I see some slowness in the output: the camera view with the masked prediction lags. Below is my vision manager class that runs the prediction on the pixel buffer, and the function that calls this class to convert the result to colors before proceeding to the camera output. Has anyone faced this issue before? Do you see an error in my code that is causing the slowness?
Vision Manager Class:
class VisionManager: NSObject {
    static let shared = VisionManager()
    static let MODEL = ba_224_segm().model

    private lazy var predictionRequest: VNCoreMLRequest = {
        do {
            let model = try VNCoreMLModel(for: VisionManager.MODEL)
            let request = VNCoreMLRequest(model: model)
            request.imageCropAndScaleOption = VNImageCropAndScaleOption.centerCrop
            return request
        } catch {
            fatalError("can't load Vision ML Model")
        }
    }()

    func predict(pixelBuffer: CVImageBuffer, sampleBuffer: CMSampleBuffer, onResult: ((_ observations: [VNCoreMLFeatureValueObservation]) -> Void)) {
        var requestOptions: [VNImageOption: Any] = [:]
        if let cameraIntrinsicData = CMGetAttachment(sampleBuffer, key: kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, attachmentModeOut: nil) {
            requestOptions = [.cameraIntrinsics: cameraIntrinsicData]
        }
        let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: requestOptions)
        do {
            try handler.perform([predictionRequest])
        } catch {
            print("error handler")
        }
        guard let observations = predictionRequest.results as? [VNCoreMLFeatureValueObservation] else {
            fatalError("unexpected result type from VNCoreMLRequest")
        }
        onResult(observations)
    }
}
Predicted Camera Output function:
func handleCameraOutput(pixelBuffer: CVImageBuffer, sampleBuffer: CMSampleBuffer, onFinish: @escaping ((_ image: UIImage?) -> Void)) {
    VisionManager.shared.predict(pixelBuffer: pixelBuffer, sampleBuffer: sampleBuffer) { [weak self] (observations) in
        if let multiArray: MLMultiArray = observations[0].featureValue.multiArrayValue {
            mask = maskEdit.maskToRGBA(maskArray: MultiArray<Float32>(multiArray), rgba: (Float(r), Float(g), Float(b), Float(a)))!
            maskInverted = maskEdit.maskToRGBAInvert(maskArray: MultiArray<Float32>(multiArray), rgba: (r: 1.0, g: 1.0, b: 1.0, a: 0.4))!
            let image = maskEdit.mergeMaskAndBackground(invertedMask: maskInverted, mask: mask, background: pixelBuffer, size: Int(size))
            DispatchQueue.main.async {
                onFinish(image)
            }
        }
    }
}
I call these methods in viewDidAppear as below:
CameraManager.shared.setDidOutputHandler { [weak self] (output, pixelBuffer, sampleBuffer, connection) in
    self!.maskColor.getRed(&self!.r, green: &self!.g, blue: &self!.b, alpha: &self!.a)
    self!.a = 0.5
    self?.handleCameraOutput(pixelBuffer: pixelBuffer, sampleBuffer: sampleBuffer, onFinish: { (image) in
        self?.predictionView.image = image
    })
}
It takes time for your model to perform the segmentation, and then it takes time to convert the output into an image. There is not much you can do to make this delay shorter, except for making the model smaller and making sure the output -> image conversion code is as fast as possible.
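To see where the time actually goes, a small timing helper could be dropped around the two stages. This is only a sketch reusing names from the code above; profiling in Instruments would give more precise numbers:
// Measure a block of work and print its wall-clock duration.
func measure<T>(_ label: String, _ work: () throws -> T) rethrows -> T {
    let start = CFAbsoluteTimeGetCurrent()
    let result = try work()
    print("\(label): \((CFAbsoluteTimeGetCurrent() - start) * 1000) ms")
    return result
}

// Possible usage inside predict(pixelBuffer:sampleBuffer:onResult:):
// try measure("inference") { try handler.perform([predictionRequest]) }
// and around the maskEdit.mergeMaskAndBackground(...) call for the conversion step.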
I found out that my issue was not using a different thread. Since I am a new developer I didn't know such details, and I'm still learning thanks to the experts in the field and their shared knowledge. Please see my old and new captureOutput functions; using a different thread solved my problem.
old status:
public func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        else { return }
    self.handler?(output, pixelBuffer, sampleBuffer, connection)
    self.onCapture?(pixelBuffer, sampleBuffer)
    self.onCapture = nil
}
and new status:
public func captureOutput(_ output: AVCaptureOutput,
                          didOutput sampleBuffer: CMSampleBuffer,
                          from connection: AVCaptureConnection) {
    if currentBuffer == nil {
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        currentBuffer = pixelBuffer
        DispatchQueue.global(qos: .userInitiated).async {
            self.handler?(output, self.currentBuffer!, sampleBuffer, connection)
            self.currentBuffer = nil
        }
    }
}

Anyone know how to use Apple's vision framework for real-time text recognition?

I can't seem to find a way to avoid the document scanner and use AVFoundation instead. I'm trying to create a feature where the user can tap a button, scan text, and then save it to a text view, without having the user tap the camera button, then Keep Scan, then Save, and so on.
I've got it to work with object detection, but I can't get it to work for text recognition. So, is there any way to use Apple's Vision framework for real-time text recognition? Any help would be much appreciated.
For performance reasons, I'd prefer to not convert the CMSampleBuffer to a UIImage, and would instead use the following to create an AVCaptureVideoPreviewLayer for live video:
class CameraFeedView: UIView {
    private var previewLayer: AVCaptureVideoPreviewLayer!

    override class var layerClass: AnyClass {
        return AVCaptureVideoPreviewLayer.self
    }

    init(frame: CGRect, session: AVCaptureSession, videoOrientation: AVCaptureVideoOrientation) {
        super.init(frame: frame)
        previewLayer = layer as? AVCaptureVideoPreviewLayer
        previewLayer.session = session
        previewLayer.videoGravity = .resizeAspect
        previewLayer.connection?.videoOrientation = videoOrientation
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
}
Once you have this, you can work on the live video data using Vision:
class CameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
    private let videoDataOutputQueue = DispatchQueue(label: "CameraFeedDataOutput", qos: .userInitiated,
                                                     attributes: [], autoreleaseFrequency: .workItem)
    private var drawingView: UILabel = {
        let view = UILabel(frame: UIScreen.main.bounds)
        view.font = UIFont.boldSystemFont(ofSize: 30.0)
        view.textColor = .red
        view.translatesAutoresizingMaskIntoConstraints = false
        return view
    }()
    private var cameraFeedSession: AVCaptureSession?
    private var cameraFeedView: CameraFeedView! //Wrap

    override func viewDidLoad() {
        super.viewDidLoad()
        do {
            try setupAVSession()
        } catch {
            print("setup av session failed")
        }
    }
    func setupAVSession() throws {
        // Create a device discovery session for a wide-angle camera.
        let wideAngle = AVCaptureDevice.DeviceType.builtInWideAngleCamera
        let discoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [wideAngle], mediaType: .video, position: .back)
        // Select a video device and make an input.
        guard let videoDevice = discoverySession.devices.first else {
            print("Could not find a wide angle camera device.")
            return
        }
        guard let deviceInput = try? AVCaptureDeviceInput(device: videoDevice) else {
            print("Could not create video device input.")
            return
        }
        let session = AVCaptureSession()
        session.beginConfiguration()
        // We prefer 1080p video capture, but if the camera cannot provide it, fall back to the highest possible quality.
        if videoDevice.supportsSessionPreset(.hd1920x1080) {
            session.sessionPreset = .hd1920x1080
        } else {
            session.sessionPreset = .high
        }
        // Add a video input.
        guard session.canAddInput(deviceInput) else {
            print("Could not add video device input to the session")
            return
        }
        session.addInput(deviceInput)
        let dataOutput = AVCaptureVideoDataOutput()
        if session.canAddOutput(dataOutput) {
            session.addOutput(dataOutput)
            // Add a video data output.
            dataOutput.alwaysDiscardsLateVideoFrames = true
            dataOutput.videoSettings = [
                String(kCVPixelBufferPixelFormatTypeKey): Int(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)
            ]
            dataOutput.setSampleBufferDelegate(self, queue: videoDataOutputQueue)
        } else {
            print("Could not add video data output to the session")
        }
        let captureConnection = dataOutput.connection(with: .video)
        captureConnection?.preferredVideoStabilizationMode = .standard
        captureConnection?.videoOrientation = .portrait
        // Always process the frames.
        captureConnection?.isEnabled = true
        session.commitConfiguration()
        cameraFeedSession = session
        // Get the interface orientation from the window scene to set the proper video orientation on the capture connection.
        let videoOrientation: AVCaptureVideoOrientation
        switch view.window?.windowScene?.interfaceOrientation {
        case .landscapeRight:
            videoOrientation = .landscapeRight
        default:
            videoOrientation = .portrait
        }
        // Create and set up the video feed view.
        cameraFeedView = CameraFeedView(frame: view.bounds, session: session, videoOrientation: videoOrientation)
        setupVideoOutputView(cameraFeedView)
        cameraFeedSession?.startRunning()
    }
The key functions to implement once you've got an AVCaptureSession set up are the delegate and request handler:
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        let requestHandler = VNImageRequestHandler(cmSampleBuffer: sampleBuffer, orientation: .down)
        let request = VNRecognizeTextRequest(completionHandler: textDetectHandler)
        do {
            // Perform the text-recognition request.
            try requestHandler.perform([request])
        } catch {
            print("Unable to perform the request: \(error).")
        }
    }

    func textDetectHandler(request: VNRequest, error: Error?) {
        guard let observations = request.results as? [VNRecognizedTextObservation] else { return }
        // Process each observation and extract its recognized text.
        let recognizedStrings = observations.compactMap { observation in
            // Return the string of the top VNRecognizedText instance.
            return observation.topCandidates(1).first?.string
        }
        DispatchQueue.main.async {
            self.drawingView.text = recognizedStrings.first
        }
    }
}
Note, you will probably want to process each of the recognizedStrings in order to choose the one with the highest confidence, but this is a proof of concept. You could also add a bounding box, and the docs have an example of that.
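For example, a minimal sketch of picking the single highest-confidence candidate across all observations (a variant of the textDetectHandler above, not part of the original answer) could look like this:
func textDetectHandler(request: VNRequest, error: Error?) {
    guard let observations = request.results as? [VNRecognizedTextObservation] else { return }
    // Each observation can supply several candidates; keep the single best one overall.
    let bestCandidate = observations
        .compactMap { $0.topCandidates(1).first }   // best candidate per observation
        .max { $0.confidence < $1.confidence }      // highest confidence overall
    DispatchQueue.main.async {
        self.drawingView.text = bestCandidate?.string
    }
}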

Real time face detect from live camera (not from static image) using Vision & AVFoundation Framework

I need to detect a real face with the iPhone's front camera. I have used the Vision framework to achieve this, but it also detects the face in a static image (a photo of a person), which is not what I want. Here is my code snippet.
class ViewController {
    func sessionPrepare() {
        session = AVCaptureSession()
        guard let session = session, let captureDevice = frontCamera else { return }
        do {
            let deviceInput = try AVCaptureDeviceInput(device: captureDevice)
            session.beginConfiguration()
            if session.canAddInput(deviceInput) {
                session.addInput(deviceInput)
            }
            let output = AVCaptureVideoDataOutput()
            output.videoSettings = [
                String(kCVPixelBufferPixelFormatTypeKey): Int(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)
            ]
            output.alwaysDiscardsLateVideoFrames = true
            if session.canAddOutput(output) {
                session.addOutput(output)
            }
            session.commitConfiguration()
            let queue = DispatchQueue(label: "output.queue")
            output.setSampleBufferDelegate(self, queue: queue)
            print("setup delegate")
        } catch {
            print("can't setup session")
        }
    }
}
It also detects the face in a static image if I place one in front of the camera.
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        let attachments = CMCopyDictionaryOfAttachments(kCFAllocatorDefault, sampleBuffer, kCMAttachmentMode_ShouldPropagate)
        let ciImage = CIImage(cvImageBuffer: pixelBuffer!, options: attachments as! [String: Any]?)
        let ciImageWithOrientation = ciImage.applyingOrientation(Int32(UIImageOrientation.leftMirrored.rawValue))
        detectFace(on: ciImageWithOrientation)
    }
}

func detectFace(on image: CIImage) {
    try? faceDetectionRequest.perform([faceDetection], on: image)
    if let results = faceDetection.results as? [VNFaceObservation] {
        if !results.isEmpty {
            faceLandmarks.inputFaceObservations = results
            detectLandmarks(on: image)
            DispatchQueue.main.async {
                self.shapeLayer.sublayers?.removeAll()
            }
        }
    }
}

func detectLandmarks(on image: CIImage) {
    try? faceLandmarksDetectionRequest.perform([faceLandmarks], on: image)
    if let landmarksResults = faceLandmarks.results as? [VNFaceObservation] {
        for observation in landmarksResults {
            DispatchQueue.main.async {
                if let boundingBox = self.faceLandmarks.inputFaceObservations?.first?.boundingBox {
                    let faceBoundingBox = boundingBox.scaled(to: self.view.bounds.size)
                    // Different types of landmarks:
                    let faceContour = observation.landmarks?.faceContour
                    let leftEye = observation.landmarks?.leftEye
                    let rightEye = observation.landmarks?.rightEye
                    let nose = observation.landmarks?.nose
                    let lips = observation.landmarks?.innerLips
                    let leftEyebrow = observation.landmarks?.leftEyebrow
                    let rightEyebrow = observation.landmarks?.rightEyebrow
                    let noseCrest = observation.landmarks?.noseCrest
                    let outerLips = observation.landmarks?.outerLips
                }
            }
        }
    }
}
So is there any way to get this done using only real-time camera detection? I would be very grateful for your help and advice.
I needed to do the same thing, and after a lot of experiments I finally found this:
https://github.com/syaringan357/iOS-MobileFaceNet-MTCNN-FaceAntiSpoofing
It detects only live camera faces, but it does not use the Vision framework.
