I'm using the Vision framework to detect faces with the iPhone's front camera. My code looks like this:
func detect(_ cmSampleBuffer: CMSampleBuffer) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(cmSampleBuffer) else { return }

    var requests: [VNRequest] = []
    let requestLandmarks = VNDetectFaceLandmarksRequest { request, _ in
        DispatchQueue.main.async {
            guard let results = request.results as? [VNFaceObservation] else { return }
            print(results)
        }
    }
    requests.append(requestLandmarks)

    let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: .leftMirrored)
    do {
        try handler.perform(requests)
    } catch {
        print(error)
    }
}
However, I noticed that when I move my face horizontally, the detected coordinates change vertically, and vice versa. The image below may help illustrate the problem.
If anyone can help me, I'd appreciate it; I'm going crazy over this.
For some reason, removing
let connectionVideo = videoDataOutput.connection(with: AVMediaType.video)
connectionVideo?.videoOrientation = AVCaptureVideoOrientation.portrait
from my AVCaptureVideoDataOutput configuration solved the problem.
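Presumably the removal works because the frames then arrive in the sensor's native landscape orientation, which is exactly what .leftMirrored describes. If you prefer to keep the portrait connection, the orientation you pass to Vision has to change to match instead. A minimal sketch of that alternative, assuming the videoDataOutput and pixelBuffer from the posts above (the exact mapping should be verified against your own preview):

let connection = videoDataOutput.connection(with: .video)
let orientation: CGImagePropertyOrientation
if connection?.videoOrientation == .portrait {
    // The connection already rotates the buffer upright.
    orientation = (connection?.isVideoMirrored == true) ? .upMirrored : .up
} else {
    // Sensor-native landscape buffer from the front camera.
    orientation = .leftMirrored
}
let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: orientation)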
I'm trying to detect rectangles from the live preview layer, but I'm not able to detect all of them.
What I'm doing
To set up the Vision request:
func setupVision() {
    let rectanglesDetectionRequest = VNDetectRectanglesRequest(completionHandler: self.handleRectangles)
    rectanglesDetectionRequest.maximumObservations = 0
    rectanglesDetectionRequest.quadratureTolerance = 45.0
    rectanglesDetectionRequest.minimumAspectRatio = 0.64
    self.requests = [rectanglesDetectionRequest]
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
        return
    }
    let exifOrientation = self.exifOrientationFromDeviceOrientation()

    DispatchQueue.main.asyncAfter(deadline: .now() + 2) {
        var requestOptions: [VNImageOption: Any] = [:]
        if let cameraIntrinsicData = CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, nil) {
            requestOptions = [.cameraIntrinsics: cameraIntrinsicData]
        }
        DispatchQueue.global(qos: .background).async {
            let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer, orientation: exifOrientation, options: requestOptions)
            do {
                try imageRequestHandler.perform(self.requests)
            } catch {
                print(error)
            }
        }
    }

    var arr = Array<VNTrackRectangleRequest>()
    for obs in self.rectanglesss {
        let trackRequest = VNTrackRectangleRequest(rectangleObservation: obs, completionHandler: self.handleSequenceRequestUpdate)
        trackRequest.trackingLevel = .accurate
        arr.append(trackRequest)
    }
    do {
        try self.sequenceHandler.perform(arr, on: pixelBuffer, orientation: exifOrientation)
    } catch {
        print(error)
    }
}
Can someone help me figure out what I'm doing wrong?
When I shoot from a right angle it sometimes detects a few of them; from an acute angle it detects only the two or three nearest rectangles. I'm trying this with SET cards, and I've added two images of what I'm getting.
Result
Try shooting with the iPhone directly above the cards (bird's-eye view), and use a table with a non-white color.
I suggest you hold your phone flat above the cards when shooting, then tune minimumConfidence.
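If it helps, here is a rough sketch of how those knobs might be tuned on the request from setupVision(); the values are guesses to experiment with, not known-good settings for SET cards:

let request = VNDetectRectanglesRequest(completionHandler: self.handleRectangles)
request.maximumObservations = 0       // 0 = report every rectangle found, not just one
request.minimumConfidence = 0.6       // discard low-confidence candidates
request.minimumSize = 0.05            // smallest rectangle dimension, as a fraction of the image's smaller side
request.minimumAspectRatio = 0.5      // loosen the ratio to survive perspective distortion
request.maximumAspectRatio = 0.8
request.quadratureTolerance = 30      // degrees of skew from 90° corners to tolerate
self.requests = [request]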
Use this...
https://developer.apple.com/documentation/vision/vndetectrectanglesrequest/2875373-maximumobservations
let request = VNDetectRectanglesRequest { (request, error) in
    // Your completion handler code
}
request.maximumObservations = 2
I am trying to detect a barcode in a user-selected image. I can detect a QR code in the image, but I cannot find anything related to scanning other barcodes from an image. The code I am using to detect a QR code is below:
func detectQRCode(_ image: UIImage?) -> [CIFeature]? {
    if let image = image, let ciImage = CIImage.init(image: image) {
        var options: [String: Any]
        let context = CIContext()
        options = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
        let qrDetector = CIDetector(ofType: CIDetectorTypeQRCode, context: context, options: options)
        if ciImage.properties.keys.contains((kCGImagePropertyOrientation as String)) {
            options = [CIDetectorImageOrientation: ciImage.properties[(kCGImagePropertyOrientation as String)] ?? 1]
        } else {
            options = [CIDetectorImageOrientation: 1]
        }
        let features = qrDetector?.features(in: ciImage, options: options)
        return features
    }
    return nil
}
When I go into the documentation of CIDetectorTypeQRCode, it says:
/* Specifies a detector type for barcode detection. */
#available(iOS 8.0, *)
public let CIDetectorTypeQRCode: String
Although this is the QR code type, the documentation says it is a detector type for barcode detection.
Fine. But when I use the same function to decode a barcode, it returns an empty array of features. Even if it returned some features, how would I convert them to a barcode equivalent of CIQRCodeFeature? I don't see any barcode counterpart in the documentation of CIQRCodeFeature. I know you can do this with the ZBar SDK, but I'm trying not to use any third-party library here. Is it mandatory to use one for this?
Please help, Thanks a lot.
You can use the Vision framework.
Barcode detection request code
var vnBarCodeDetectionRequest: VNDetectBarcodesRequest {
    let request = VNDetectBarcodesRequest { (request, error) in
        if let error = error as NSError? {
            print("Error in detecting - \(error)")
            return
        } else {
            guard let observations = request.results as? [VNDetectedObjectObservation] else {
                return
            }
            print("Observations are \(observations)")
        }
    }
    return request
}
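A side note on the completion handler: the results of VNDetectBarcodesRequest are actually VNBarcodeObservation instances, which carry the decoded payload, so casting to that type gives you the Vision counterpart of CIQRCodeFeature.messageString. A minimal sketch:

let barcodeRequest = VNDetectBarcodesRequest { request, error in
    guard error == nil,
          let observations = request.results as? [VNBarcodeObservation] else { return }
    for barcode in observations {
        // symbology tells you which kind of barcode was found (EAN, Code 128, QR, ...)
        print("Symbology: \(barcode.symbology.rawValue), payload: \(barcode.payloadStringValue ?? "none")")
    }
}
// barcodeRequest.symbologies can optionally restrict detection to specific barcode types.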
The function to which you pass the image:
func createVisionRequest(image: UIImage) {
    guard let cgImage = image.cgImage else {
        return
    }
    let requestHandler = VNImageRequestHandler(cgImage: cgImage, orientation: image.cgImageOrientation, options: [:])
    let vnRequests = [vnBarCodeDetectionRequest]
    DispatchQueue.global(qos: .background).async {
        do {
            try requestHandler.perform(vnRequests)
        } catch let error as NSError {
            print("Error in performing Image request: \(error)")
        }
    }
}
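Note that cgImageOrientation used above is not a built-in UIImage property; a helper extension along these lines is assumed, mapping UIKit's orientation to the CGImagePropertyOrientation Vision expects:

import UIKit
import ImageIO

extension UIImage {
    // Hypothetical helper assumed by createVisionRequest(image:) above.
    var cgImageOrientation: CGImagePropertyOrientation {
        switch imageOrientation {
        case .up:            return .up
        case .down:          return .down
        case .left:          return .left
        case .right:         return .right
        case .upMirrored:    return .upMirrored
        case .downMirrored:  return .downMirrored
        case .leftMirrored:  return .leftMirrored
        case .rightMirrored: return .rightMirrored
        @unknown default:    return .up
        }
    }
}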
Reference Link
I need to detect a real face with the iPhone's front camera, so I have used the Vision framework to achieve it. But it also detects faces in static images (photos of people), which I don't want. Here is my code snippet.
class ViewController {
    func sessionPrepare() {
        session = AVCaptureSession()
        guard let session = session, let captureDevice = frontCamera else { return }
        do {
            let deviceInput = try AVCaptureDeviceInput(device: captureDevice)
            session.beginConfiguration()
            if session.canAddInput(deviceInput) {
                session.addInput(deviceInput)
            }
            let output = AVCaptureVideoDataOutput()
            output.videoSettings = [
                String(kCVPixelBufferPixelFormatTypeKey): Int(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)
            ]
            output.alwaysDiscardsLateVideoFrames = true
            if session.canAddOutput(output) {
                session.addOutput(output)
            }
            session.commitConfiguration()
            let queue = DispatchQueue(label: "output.queue")
            output.setSampleBufferDelegate(self, queue: queue)
            print("setup delegate")
        } catch {
            print("can't setup session")
        }
    }
}
It also detects a face in a static image if I place the photo in front of the camera.
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        let attachments = CMCopyDictionaryOfAttachments(kCFAllocatorDefault, sampleBuffer, kCMAttachmentMode_ShouldPropagate)
        let ciImage = CIImage(cvImageBuffer: pixelBuffer!, options: attachments as! [String: Any]?)
        let ciImageWithOrientation = ciImage.applyingOrientation(Int32(UIImageOrientation.leftMirrored.rawValue))
        detectFace(on: ciImageWithOrientation)
    }
}

func detectFace(on image: CIImage) {
    try? faceDetectionRequest.perform([faceDetection], on: image)
    if let results = faceDetection.results as? [VNFaceObservation] {
        if !results.isEmpty {
            faceLandmarks.inputFaceObservations = results
            detectLandmarks(on: image)
            DispatchQueue.main.async {
                self.shapeLayer.sublayers?.removeAll()
            }
        }
    }
}

func detectLandmarks(on image: CIImage) {
    try? faceLandmarksDetectionRequest.perform([faceLandmarks], on: image)
    if let landmarksResults = faceLandmarks.results as? [VNFaceObservation] {
        for observation in landmarksResults {
            DispatchQueue.main.async {
                if let boundingBox = self.faceLandmarks.inputFaceObservations?.first?.boundingBox {
                    let faceBoundingBox = boundingBox.scaled(to: self.view.bounds.size)
                    // Different types of landmarks
                    let faceContour = observation.landmarks?.faceContour
                    let leftEye = observation.landmarks?.leftEye
                    let rightEye = observation.landmarks?.rightEye
                    let nose = observation.landmarks?.nose
                    let lips = observation.landmarks?.innerLips
                    let leftEyebrow = observation.landmarks?.leftEyebrow
                    let rightEyebrow = observation.landmarks?.rightEyebrow
                    let noseCrest = observation.landmarks?.noseCrest
                    let outerLips = observation.landmarks?.outerLips
                }
            }
        }
    }
}
So is there any way to do this using only real-time camera detection? I would be very grateful for your help and advice.
I needed to do the same, and after a lot of experiments I finally found this:
https://github.com/syaringan357/iOS-MobileFaceNet-MTCNN-FaceAntiSpoofing
It detects only live camera faces, but it does not use the Vision framework.
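Vision itself only reports that a face is present, so distinguishing a live face from a printed photo needs an extra signal. One simple heuristic, sketched here purely as an assumption rather than anything Apple provides, is blink detection: track the height-to-width ratio of the eye landmarks over a few seconds and only accept the face once it has blinked, since a photo never blinks.

// Rough "eye openness" from an eye landmark region; a small value means the eye is closed.
func eyeOpenness(_ eye: VNFaceLandmarkRegion2D) -> CGFloat {
    let points = eye.normalizedPoints
    guard let minX = points.map({ $0.x }).min(),
          let maxX = points.map({ $0.x }).max(),
          let minY = points.map({ $0.y }).min(),
          let maxY = points.map({ $0.y }).max(),
          maxX > minX else { return 0 }
    return (maxY - minY) / (maxX - minX)
}

// Inside the landmarks completion handler, with a threshold you would need to tune:
// if let leftEye = observation.landmarks?.leftEye, eyeOpenness(leftEye) < 0.15 {
//     blinkDetected = true   // hypothetical flag checked before treating the face as live
// }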
While running ARKit on iPhone XS (with iOS 12.1.2 and Xcode 10.1), I'm getting errors and crashes/hangs while running Vision code to detect face bounds.
Errors I'm getting are:
2019-01-04 03:03:03.155867-0800 ARKit Vision Demo[12969:3307770] Execution of the command buffer was aborted due to an error during execution. Caused GPU Timeout Error (IOAF code 2)
2019-01-04 03:03:03.155786-0800 ARKit Vision Demo[12969:3307850] Execution of the command buffer was aborted due to an error during execution. Discarded (victim of GPU error/recovery) (IOAF code 5)
[SceneKit] Error: display link thread seems stuck
This happens on iPhone XS while running the following proof of concept code to reproduce the error (always happens within a few seconds of running the app) - https://github.com/xta/ARKit-Vision-Demo
The relevant ViewController.swift contains the problematic methods:
func classifyCurrentImage() {
    guard let buffer = currentBuffer else { return }
    let image = CIImage(cvPixelBuffer: buffer)
    let options: [VNImageOption: Any] = [:]
    let imageRequestHandler = VNImageRequestHandler(ciImage: image, orientation: self.imageOrientation, options: options)
    do {
        try imageRequestHandler.perform(self.requests)
    } catch {
        print(error)
    }
}

func handleFaces(request: VNRequest, error: Error?) {
    DispatchQueue.main.async {
        guard let results = request.results as? [VNFaceObservation] else { return }
        // TODO - something here with results
        print(results)
        self.currentBuffer = nil
    }
}
What is the correct way to use Apple's ARKit + Vision with VNDetectFaceRectanglesRequest? Getting mysterious IOAF code errors is not correct.
Ideally, I'd also like to use VNTrackObjectRequest & VNSequenceRequestHandler to track requests.
There is decent online documentation for using VNDetectFaceRectanglesRequest with Vision (and without ARKit). Apple has a page here (https://developer.apple.com/documentation/arkit/using_vision_in_real_time_with_arkit) which I've followed, but I'm still getting the errors/crashes.
For anyone else who goes through the pain I just did trying to fix this exact error for VNDetectRectanglesRequest, here was my solution:
It seems that using a CIImage:
let imageRequestHandler = VNImageRequestHandler(ciImage: image, orientation: self.imageOrientation, options: options)
caused Metal to retain a large amount of "Internal Functions" in my memory graph.
I noticed that Apple's example projects all use this instead:
let handler: VNImageRequestHandler! = VNImageRequestHandler(cvPixelBuffer: pixelBuffer,
                                                            orientation: orientation,
                                                            options: requestHandlerOptions)
Switching to use cvPixelBuffer instead of a CIImage fixed all of my random GPU timeout errors!
I used these functions to get the orientation (I'm using the back camera. I think you may have to mirror for the front camera depending on what you're trying to do):
func exifOrientationForDeviceOrientation(_ deviceOrientation: UIDeviceOrientation) -> CGImagePropertyOrientation {
    switch deviceOrientation {
    case .portraitUpsideDown:
        return .right
    case .landscapeLeft:
        return .down
    case .landscapeRight:
        return .up
    default:
        return .left
    }
}

func exifOrientationForCurrentDeviceOrientation() -> CGImagePropertyOrientation {
    return exifOrientationForDeviceOrientation(UIDevice.current.orientation)
}
and the following as the options:
var requestHandlerOptions: [VNImageOption: AnyObject] = [:]
let cameraIntrinsicData = CMGetAttachment(pixelBuffer, key: kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, attachmentModeOut: nil)
if cameraIntrinsicData != nil {
    requestHandlerOptions[VNImageOption.cameraIntrinsics] = cameraIntrinsicData
}
Hopefully this saves someone the week that I lost!
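Putting those pieces together, a sketch of classifyCurrentImage rewritten to hand the pixel buffer straight to Vision might look like this (currentBuffer and self.requests are assumed to exist as in the question):

func classifyCurrentImage() {
    guard let pixelBuffer = currentBuffer else { return }

    var requestHandlerOptions: [VNImageOption: AnyObject] = [:]
    if let cameraIntrinsicData = CMGetAttachment(pixelBuffer,
                                                 key: kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix,
                                                 attachmentModeOut: nil) {
        requestHandlerOptions[.cameraIntrinsics] = cameraIntrinsicData
    }

    let handler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer,
                                        orientation: exifOrientationForCurrentDeviceOrientation(),
                                        options: requestHandlerOptions)
    do {
        try handler.perform(self.requests)
    } catch {
        print(error)
    }
}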
You need to call the perform method asynchronously, just as it is done in the link you shared.
Try the code below:
func classifyCurrentImage() {
    guard let buffer = currentBuffer else { return }
    let image = CIImage(cvPixelBuffer: buffer)
    let options: [VNImageOption: Any] = [:]
    let imageRequestHandler = VNImageRequestHandler(ciImage: image, orientation: self.imageOrientation, options: options)
    DispatchQueue.global(qos: .userInteractive).async {
        do {
            try imageRequestHandler.perform(self.requests)
        } catch {
            print(error)
        }
    }
}
Update: from what I can tell, the issue was retain cycles (or rather the lack of [weak self]) in my demo repo. In Apple's sample project, they properly use [weak self] to avoid retain cycles, and the ARKit + Vision app runs on the iPhone XS.
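For reference, the [weak self] fix is just a capture-list change in the request's completion handler; a minimal sketch, assuming the same handleFaces(request:error:) as above:

let faceRequest = VNDetectFaceRectanglesRequest { [weak self] request, error in
    // Capturing self weakly breaks the retain cycle between the view
    // controller and the long-lived Vision request.
    self?.handleFaces(request: request, error: error)
}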
I am having a hard time with something I think shouldn't be so difficult, so I presume I must be looking at the problem from the wrong angle.
In order to understand how AVCaptureStillImageOutput and the camera work, I made a tiny app.
This app is able to take a picture and save it as a PNG file (I do not want JPEG). The next time the app is launched, it checks if a file is present and if it is, the image stored inside the file is used as the background view of the app. The idea is rather simple.
The problem is that it does not work. If someone can tell me what I am doing wrong that will be very helpful.
I would like the picture to appear as the background the same way it looked on the display when it was taken, but it comes out rotated or at the wrong scale, etc.
Here is the relevant code (I can provide more information if ever needed).
The viewDidLoad method:
override func viewDidLoad() {
    super.viewDidLoad()
    // For the photo capture:
    captureSession.sessionPreset = AVCaptureSessionPresetHigh
    // Select the appropriate capture devices:
    for device in AVCaptureDevice.devices() {
        // Make sure this particular device supports video.
        if (device.hasMediaType(AVMediaTypeVideo)) {
            // Finally check the position and confirm we've got the back camera.
            if (device.position == AVCaptureDevicePosition.Back) {
                captureDevice = device as? AVCaptureDevice
            }
        }
    }
    tapGesture = UITapGestureRecognizer(target: self, action: Selector("takePhoto"))
    self.view.addGestureRecognizer(tapGesture)

    let filePath = self.toolBox.getDocumentsDirectory().stringByAppendingPathComponent("BackGroundImage#2x.png")
    if !NSFileManager.defaultManager().fileExistsAtPath(filePath) { return }
    let bgImage = UIImage(contentsOfFile: filePath),
        bgView = UIImageView(image: bgImage)
    self.view.addSubview(bgView)
}
The method to handle the picture taking:
func takePhoto() {
    if !captureSession.running {
        beginPhotoCaptureSession()
        return
    }
    if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo) {
        stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection) {
            (imageDataSampleBuffer, error) -> Void in
            if error == nil {
                var localImage = UIImage(fromSampleBuffer: imageDataSampleBuffer)
                UIGraphicsBeginImageContext(localImage!.size)
                CGContextRotateCTM(UIGraphicsGetCurrentContext(), CGFloat(M_PI_2))
                //localImage!.drawAtPoint(CGPointZero)
                localImage!.drawAtPoint(CGPoint(x: -localImage!.size.height, y: -localImage!.size.width))
                //localImage!.drawAtPoint(CGPoint(x: -localImage!.size.width, y: -localImage!.size.height))
                localImage = UIGraphicsGetImageFromCurrentImageContext()
                UIGraphicsEndImageContext()
                localImage = resizeImage(localImage!, toSize: self.view.frame.size)
                if let data = UIImagePNGRepresentation(localImage!) {
                    let bitMapName = "BackGroundImage#2x"
                    let filename = self.toolBox.getDocumentsDirectory().stringByAppendingPathComponent("\(bitMapName).png")
                    data.writeToFile(filename, atomically: true)
                    print("Picture saved: \(bitMapName)\n\(filename)")
                }
            } else {
                print("Error on taking a picture:\n\(error)")
            }
        }
    }
    captureSession.stopRunning()
    previewLayer!.removeFromSuperlayer()
}
The method to start the AVCaptureSession:
func beginPhotoCaptureSession() {
    do {
        let input = try AVCaptureDeviceInput(device: captureDevice)
        captureSession.addInput(input)
    } catch let error as NSError {
        // Handle any errors:
        print(error)
    }
    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    previewLayer?.frame = self.view.layer.frame
    self.view.layer.addSublayer(previewLayer!)
    captureSession.startRunning()
    stillImageOutput.outputSettings = [kCVPixelBufferPixelFormatTypeKey: Int(kCVPixelFormatType_32BGRA)]
    if captureSession.canAddOutput(stillImageOutput) {
        captureSession.addOutput(stillImageOutput)
    }
}
As an example here is an image of a picture taken with the app:
Now here is what I get as the background of the app when it is relaunched:
If it was working correctly the 2 pictures would be similar.
I can't see any rotation in the screenshot, but the scale is a problem that comes from how you draw the image in takePhoto. You can try:
func takePhoto() {
    if !captureSession.running {
        beginPhotoCaptureSession()
        return
    }
    if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo) {
        stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection) {
            (imageDataSampleBuffer, error) -> Void in
            if error == nil {
                if let data = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(imageDataSampleBuffer) {
                    let bitMapName = "BackGroundImage#2x"
                    let filename = self.toolBox.getDocumentsDirectory().stringByAppendingPathComponent("\(bitMapName).png")
                    data.writeToFile(filename, atomically: true)
                    print("Picture saved: \(bitMapName)\n\(filename)")
                }
            } else {
                print("Error on taking a picture:\n\(error)")
            }
        }
    }
    captureSession.stopRunning()
    previewLayer!.removeFromSuperlayer()
}
For those who might hit the same issue at some point, I post below what I changed in the code to make it work. There may still be some improvements needed to support all possible orientations, but it's OK for a start.
if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo) {
    stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection) {
        (imageDataSampleBuffer, error) -> Void in
        if error == nil {
            var localImage = UIImage(fromSampleBuffer: imageDataSampleBuffer)
            var imageSize = CGSize(width: UIScreen.mainScreen().bounds.height * UIScreen.mainScreen().scale,
                                   height: UIScreen.mainScreen().bounds.width * UIScreen.mainScreen().scale)
            localImage = resizeImage(localImage!, toSize: imageSize)
            imageSize = CGSize(width: imageSize.height, height: imageSize.width)
            UIGraphicsBeginImageContext(imageSize)
            CGContextRotateCTM(UIGraphicsGetCurrentContext(), CGFloat(M_PI_2))
            localImage!.drawAtPoint(CGPoint(x: 0.0, y: -imageSize.width))
            localImage = UIGraphicsGetImageFromCurrentImageContext()
            UIGraphicsEndImageContext()
            if let data = UIImagePNGRepresentation(localImage!) {
                data.writeToFile(self.bmpFilePath, atomically: true)
            }
        } else {
            print("Error on taking a picture:\n\(error)")
        }
    }
}