Swift iOS Vision inaccurate rectangle detection result

Xcode: Version 12.5.1,
iOS: 14.7.1,
Device: iPhone 12 Pro
Hello, help needed!
The iOS Vision framework returns inaccurate rectangle results:
(screenshot: incorrect rect result)
The recognized rectangle (red) does not fit the real one (black).
It is not square and is narrower than the real one.
I don't understand why this happens, and I get the same inaccurate result if I draw the observed rectangle's bounding box instead.
Full code:
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
private let captureSession = AVCaptureSession()
private lazy var previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
private let videoDataOutput = AVCaptureVideoDataOutput()
private var maskLayer = CAShapeLayer()
override func viewDidLoad() {
super.viewDidLoad()
self.setCameraInput()
self.showCameraFeed()
self.setCameraOutput()
self.videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "camera_frame_processing_queue"))
self.captureSession.startRunning()
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
self.previewLayer.frame = self.view.frame
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let frame = CMSampleBufferGetImageBuffer(sampleBuffer) else {
debugPrint("unable to get image from sample buffer")
return
}
self.detectRectangle(in: frame)
}
private func setCameraInput() {
guard let device = AVCaptureDevice.DiscoverySession(
deviceTypes: [.builtInWideAngleCamera, .builtInDualCamera, .builtInTrueDepthCamera],
mediaType: .video,
position: .back).devices.first else {
fatalError("No back camera device found.")
}
let cameraInput = try! AVCaptureDeviceInput(device: device)
self.captureSession.addInput(cameraInput)
}
private func showCameraFeed() {
self.previewLayer.videoGravity = .resizeAspectFill
self.view.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.view.frame
}
private func setCameraOutput() {
self.videoDataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
self.videoDataOutput.alwaysDiscardsLateVideoFrames = true
self.videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "camera_frame_processing_queue"))
self.captureSession.addOutput(self.videoDataOutput)
guard let connection = self.videoDataOutput.connection(with: AVMediaType.video),
connection.isVideoOrientationSupported else { return }
connection.videoOrientation = .portrait
}
private func detectRectangle(in image: CVPixelBuffer) {
let request = VNDetectRectanglesRequest(completionHandler: { (request: VNRequest, error: Error?) in
DispatchQueue.main.async {
guard let results = request.results as? [VNRectangleObservation] else { return }
self.maskLayer.removeFromSuperlayer()
guard let rect = results.first else{return}
self.drawBoundingBox(rect: rect)
}
})
request.minimumAspectRatio = VNAspectRatio(1.3)
request.maximumAspectRatio = VNAspectRatio(1.6)
request.minimumSize = Float(0.5)
request.maximumObservations = 1
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: image, options: [:])
try? imageRequestHandler.perform([request])
}
func drawBoundingBox(rect : VNRectangleObservation) {
let transform = CGAffineTransform(scaleX: 1, y: -1).translatedBy(x: 0, y: -self.previewLayer.frame.height)
let scale = CGAffineTransform.identity.scaledBy(x: self.previewLayer.frame.width, y: self.previewLayer.frame.height)
let path = UIBezierPath()
path.move(to: CGPoint(x: rect.bottomLeft.x, y: rect.bottomLeft.y))
path.addLine(to: CGPoint(x: rect.bottomRight.x, y: rect.bottomRight.y))
path.addLine(to: CGPoint(x: rect.topRight.x, y: rect.topRight.y))
path.addLine(to: CGPoint(x: rect.topLeft.x, y: rect.topLeft.y))
path.addLine(to: CGPoint(x: rect.bottomLeft.x, y: rect.bottomLeft.y))
path.apply(scale)
path.apply(transform)
path.close()
maskLayer = CAShapeLayer()
maskLayer.fillColor = UIColor.clear.cgColor
maskLayer.lineWidth = 5
maskLayer.strokeColor = UIColor.red.cgColor
maskLayer.path = path.cgPath
previewLayer.insertSublayer(maskLayer, at: 1)
}
}
extension CGPoint {
func scaled(to size: CGSize) -> CGPoint {
return CGPoint(x: self.x * size.width,
y: self.y * size.height)
}
}
The code above is a modified version of the code from this tutorial: rectangle detection tutorial

Here is my code example.
///SET THE VALUE FOR THE DETECTED RECTANGLE
detectRectanglesRequest.minimumAspectRatio = VNAspectRatio(0.3)
detectRectanglesRequest.maximumAspectRatio = VNAspectRatio(0.9)
detectRectanglesRequest.minimumSize = Float(0.4)
detectRectanglesRequest.maximumObservations = 0
detectRectanglesRequest.minimumConfidence = 0.2
detectRectanglesRequest.quadratureTolerance = 2
detectRectanglesRequest.revision = VNDetectRectanglesRequestRevision1
detectRectanglesRequest.preferBackgroundProcessing = true
It's better to use "try" like this:
///SEND THE REQUESTS TO THE REQUEST HANDLER
DispatchQueue.global(qos: .userInteractive).async {
do {
try imageRequestHandler.perform([detectRectanglesRequest])
} catch let error as NSError {
print("Failed to perform image request: \(error)")
// self.presentAlert("Image Request Failed", error: error)
return
}
}
...and finally:
private func drawBoundingBox(rect: VNRectangleObservation) {
CATransaction.begin()
let transform = CGAffineTransform(scaleX: 1, y: -1)
.translatedBy(x: 0, y: -scanCam.videoPreviewLayer.bounds.height)
let scale = CGAffineTransform.identity
.scaledBy(x: scanCam.videoPreviewLayer.bounds.width,
y: scanCam.videoPreviewLayer.bounds.height)
let currentBounds = rect.boundingBox
.applying(scale).applying(transform)
createLayer(in: currentBounds)
CATransaction.commit()
//viewModel.cameraDetectRectFrame = currentBounds
}
private func createLayer(in rect: CGRect) {
maskLayer = CAShapeLayer()
maskLayer.frame = rect
maskLayer.opacity = 1
maskLayer.borderColor = UIColor.blue.cgColor ///for visual test
maskLayer.borderWidth = 2
scanCam.videoPreviewLayer.insertSublayer(maskLayer, at: 1)
}
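A side note beyond the answer above: a common reason the drawn rectangle looks narrower than the real one is that the preview layer uses .resizeAspectFill, so the visible preview is a cropped version of the camera buffer, while the code scales Vision's normalized coordinates by the full layer size. Below is a minimal sketch of a conversion that lets the preview layer do the aspect-fill math (assuming the same previewLayer name; the exact Y flip depends on your video orientation):
private func layerRect(for observation: VNRectangleObservation) -> CGRect {
    // Vision's boundingBox is normalized with the origin in the lower-left corner.
    // Flip it into the top-left-origin space the preview layer expects,
    // then let the layer account for .resizeAspectFill cropping.
    let box = observation.boundingBox
    let metadataRect = CGRect(x: box.origin.x,
                              y: 1 - box.origin.y - box.height,
                              width: box.width,
                              height: box.height)
    return previewLayer.layerRectConverted(fromMetadataOutputRect: metadataRect)
}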

Related

How to detect barcode using Apple Vision in CGRect only?

I have an app that uses a CGRect(x: 0, y: 0, width: 335, height: 150) to show the camera for barcode scanning. However, a barcode presented off camera (outside the CGRect) still gets scanned. How can I limit the scanning area to the CGRect in my preview layer? This is using Vision.
let captureSession = AVCaptureSession()
var previewLayer: AVCaptureVideoPreviewLayer!
var activeInput: AVCaptureDeviceInput!
lazy var detectBarcodeRequest = VNDetectBarcodesRequest { request, error in
guard error == nil else {
print("Barcode Error: \(String(describing: error?.localizedDescription))")
return
}
self.processBarCode(request)
}
override func viewDidLoad() {
super.viewDidLoad()
setupSession()
setupPreview()
startSession()
}
func setupSession() {
guard let camera = AVCaptureDevice.default(for: .video) else {
return
}
do {
let videoInput = try AVCaptureDeviceInput(device: camera)
for input in [videoInput] {
if captureSession.canAddInput(input) {
captureSession.addInput(input)
}
}
activeInput = videoInput
} catch {
print("Error setting device input: \(error)")
return
}
let captureOutput = AVCaptureVideoDataOutput()
captureOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_32BGRA)]
captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue.global(qos: DispatchQoS.QoSClass.default))
captureSession.addOutput(captureOutput)
}
func setupPreview() {
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.videoGravity = .resizeAspectFill
previewLayer.connection?.videoOrientation = .portrait
previewLayer.frame = CGRect(x: 0, y: 0, width: 335, height: 150)
view.layer.insertSublayer(previewLayer, at: 0)
}//setupPreview
func startSession() {
if !captureSession.isRunning {
DispatchQueue.global(qos: .default).async { [weak self] in
self?.captureSession.startRunning()
}
}
}
// MARK: - processBarCode
func processBarCode(_ request: VNRequest) {
DispatchQueue.main.async {
guard let results = request.results as? [VNBarcodeObservation] else {
return
}
if let payload = results.first?.payloadStringValue, let symbology = results.first?.symbology {
print("payload is \(payload) \(symbology) ")
}
}
}//processBarCode
Edit:
// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate
extension CameraViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
let imageRequestHandler = VNImageRequestHandler(
cvPixelBuffer: pixelBuffer,
orientation: .up)
do {
try imageRequestHandler.perform([detectBarcodeRequest])
} catch {
print(error)
}
}//captureOutput
}//extension
extension AVCaptureVideoPreviewLayer {
func rectOfInterestConverted(parentRect: CGRect, fromLayerRect: CGRect) -> CGRect {
let parentWidth = parentRect.width
let parentHeight = parentRect.height
let newX = (parentWidth - fromLayerRect.maxX)/parentWidth
let newY = 1 - (parentHeight - fromLayerRect.minY)/parentHeight
let width = 1 - (fromLayerRect.minX/parentWidth + newX)
let height = (fromLayerRect.maxY/parentHeight) - newY
return CGRect(x: newX, y: newY, width: width, height: height)
}
}
Usage:
if let rect = videoPreviewLayer?.rectOfInterestConverted(parentRect: self.view.frame, fromLayerRect: scanAreaView.frame) {
captureMetadataOutput.rectOfInterest = rect
}
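As an aside (not from the thread itself), AVCaptureVideoPreviewLayer also provides a built-in conversion that can replace the hand-rolled math above; a sketch assuming the same videoPreviewLayer / scanAreaView / captureMetadataOutput names:
if let rect = videoPreviewLayer?.metadataOutputRectConverted(fromLayerRect: scanAreaView.frame) {
    // Built-in equivalent of rectOfInterestConverted(parentRect:fromLayerRect:)
    captureMetadataOutput.rectOfInterest = rect
}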
In func captureOutput(_:didOutput:from:) you are most likely passing the whole image into VNImageRequestHandler.
Instead, you need to crop the image to your visible rect (see the sketch below).
Another approach would be to lock Vision to only the visible part of the image via regionOfInterest, as @HurricaneOnTheMoon proposed below.
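A rough sketch of that cropping idea (the names here, such as visibleBufferRect, are placeholders, not from the thread):
// Crop the frame to the part that is actually visible in the preview,
// then hand only that region to Vision.
let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
// visibleBufferRect must be expressed in the buffer's pixel coordinates,
// e.g. derived from previewLayer.metadataOutputRectConverted(fromLayerRect:)
// scaled by the buffer's width and height.
let croppedImage = ciImage.cropped(to: visibleBufferRect)
let handler = VNImageRequestHandler(ciImage: croppedImage, orientation: .up, options: [:])
try? handler.perform([detectBarcodeRequest])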
You can use the regionOfInterest property of VNDetectBarcodesRequest to set up a detection region.
The default value is { { 0, 0 }, { 1, 1 } }, according to the Apple documentation.
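If you go the regionOfInterest route, here is a minimal sketch (assuming the videoPreviewLayer, scanAreaView, and detectBarcodeRequest names from the snippets above; note that Vision's normalized coordinates have their origin in the lower-left corner):
// Convert the on-screen scan area to normalized metadata-output coordinates,
// then flip the Y axis for Vision.
if let metadataRect = videoPreviewLayer?.metadataOutputRectConverted(fromLayerRect: scanAreaView.frame) {
    detectBarcodeRequest.regionOfInterest = CGRect(
        x: metadataRect.origin.x,
        y: 1 - metadataRect.origin.y - metadataRect.height,
        width: metadataRect.width,
        height: metadataRect.height)
}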

How to make a draggable UIView snap to the corners over the screen?

I have a draggable UIView and I am trying to make it snap to four corners of the screen. I tried a few things, but none of them have worked. Here's the code that I have:
import UIKit
import AVKit
import Vision
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var crystalName: UILabel!
@IBOutlet weak var crystalInfoContainer: UIView!
@IBOutlet weak var accuracy: UILabel!
var model = IdenticrystClassification().model
override func viewDidLoad() {
super.viewDidLoad()
// This method starts the camera.
let captureSession = AVCaptureSession()
guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
captureSession.addInput(input)
captureSession.startRunning()
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.videoGravity = .resizeAspectFill
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
// This method defines sub view and defines it's properties.
view.addSubview(crystalInfoContainer)
crystalInfoContainer.clipsToBounds = true
crystalInfoContainer.layer.cornerRadius = 10.0
//crystalInfoContainer.layer.maskedCorners = [.layerMinXMinYCorner, .layerMaxXMinYCorner]
// This method defines torch functionality.
func toggleTorch(on: Bool) {
guard let device = AVCaptureDevice.default(for: .video) else { return }
if device.hasTorch {
do {
try device.lockForConfiguration()
if on == true {
device.torchMode = .on
} else {
device.torchMode = .off
}
device.unlockForConfiguration()
} catch {
print("Torch could not be used")
}
} else {
print("Torch is not available")
}
}
// This is the code that I am trying to work out.
func relativeVelocity(forVelocity velocity: CGFloat, from currentValue: CGFloat, to targetValue: CGFloat) -> CGFloat {
guard currentValue - targetValue != 0 else { return 0 }
return velocity / (targetValue - currentValue)
}
func nearestCorner(to point: CGPoint) -> CGPoint {
var minDistance = CGFloat.greatestFiniteMagnitude
var closestPosition = CGPoint.zero
for position in crystalInfoContainer { **Error1**
let distance = point.distance(to: position)
if distance < minDistance {
closestPosition = position
minDistance = distance
}
}
return closestPosition
let decelerationRate = UIScrollView.DecelerationRate.normal.rawValue
let velocity = UIPanGestureRecognizer.velocity(in: view)**Error2**
let projectedPosition = CGPoint(
x: crystalInfoContainer.center.x + project(initialVelocity: velocity.x, decelerationRate: decelerationRate),
y: crystalInfoContainer.center.y + project(initialVelocity: velocity.y, decelerationRate: decelerationRate)
)
let nearestCornerPosition = nearestCorner(to: projectedPosition)
let relativeInitialVelocity = CGVector(
dx: relativeVelocity(forVelocity: velocity.x, from: crystalInfoContainer.center.x, to: nearestCornerPosition.x),
dy: relativeVelocity(forVelocity: velocity.y, from: crystalInfoContainer.center.y, to: nearestCornerPosition.y)
)
let params = UISpringTimingParameters(damping: 1, response: 0.4, initialVelocity: relativeInitialVelocity)
let animator = UIViewPropertyAnimator(duration: 0, timingParameters: params)
animator.addAnimations {
self.crystalInfoContainer.center = nearestCornerPosition
}
animator.startAnimation()
}
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "video"))
captureSession.addOutput(dataOutput)
toggleTorch(on: true)
}
// Handles Vision output.
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
guard let model = try? VNCoreMLModel(for: model) else { return }
let request = VNCoreMLRequest(model: model)
{ (finishedReq, err) in
guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
guard let firstObservation = results.first else { return }
let name: String = firstObservation.identifier
let acc: Int = Int(firstObservation.confidence * 100)
DispatchQueue.main.async {
self.crystalName.text = name
self.accuracy.text = "Confidence: \(acc)%"
}
}
try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
override var prefersStatusBarHidden: Bool {
return true
}
}
Error1: For-in loop requires 'UIView?' to conform to 'Sequence'; did you mean to unwrap optional?
Error2: Instance member 'velocity' cannot be used on type 'UIPanGestureRecognizer'; did you mean to use a value of this type instead?
The problem is that your panView method is wrong. You need to switch on the gesture recognizer’s state — began, changed, or ended. Pan only when the gesture changes. When the gesture ends, then and only then, animate the view into the nearest corner.
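A minimal sketch of the pan handler that answer describes (assuming a working nearestCorner(to:) helper on the view controller and the crystalInfoContainer view; the velocity scaling and damping values are just examples):
@objc func handlePan(_ gesture: UIPanGestureRecognizer) {
    switch gesture.state {
    case .began, .changed:
        // While the gesture is in progress, just follow the finger.
        let translation = gesture.translation(in: view)
        crystalInfoContainer.center.x += translation.x
        crystalInfoContainer.center.y += translation.y
        gesture.setTranslation(.zero, in: view)
    case .ended:
        // Only when the gesture ends, pick the nearest corner and spring to it.
        let velocity = gesture.velocity(in: view)
        let target = nearestCorner(to: crystalInfoContainer.center)
        let initialVelocity = CGVector(dx: velocity.x / 500, dy: velocity.y / 500)
        let params = UISpringTimingParameters(dampingRatio: 0.8, initialVelocity: initialVelocity)
        let animator = UIViewPropertyAnimator(duration: 0.4, timingParameters: params)
        animator.addAnimations { self.crystalInfoContainer.center = target }
        animator.startAnimation()
    default:
        break
    }
}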

iOS swift memory issue during a Images to Video Convert Using AVAssetWriter

I have already done a lot of searching and many experiments for this, but I haven't found a proper solution.
I am trying to convert UIImages to a video. I have an array of 250+ images and I am trying to convert those images to a video at 60 FPS.
I wrapped the render code in autoreleasepool and added autoreleasepool in some other places as well, but it had no effect.
Code:
import AVFoundation
import UIKit
import Photos
import AVKit
var tempurl = ""
struct RenderSettings {
var width: CGFloat = UIScreen.main.bounds.width * UIScreen.main.scale
var height: CGFloat = UIScreen.main.bounds.width * UIScreen.main.scale
var fps: Int32 = 60 //frames per second
var avCodecKey = AVVideoCodecType.h264
var videoFilename = "ImageToVideo"
var videoFilenameExt = "mp4"
var size: CGSize {
return CGSize(width: width, height: height)
}
var outputURL: URL {
let fileManager = FileManager.default
if let tmpDirURL = try? fileManager.url(for: .cachesDirectory, in: .userDomainMask, appropriateFor: nil, create: true) {
return tmpDirURL.appendingPathComponent(videoFilename).appendingPathExtension(videoFilenameExt) as URL
}
fatalError("URLForDirectory() failed")
}
}
class VideoWriter {
let renderSettings: RenderSettings
var videoWriter: AVAssetWriter!
var videoWriterInput: AVAssetWriterInput!
var pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor!
var isReadyForData: Bool {
return videoWriterInput?.isReadyForMoreMediaData ?? false
}
class func pixelBufferFromImage(image: UIImage, pixelBufferPool: CVPixelBufferPool, size: CGSize) -> CVPixelBuffer {
autoreleasepool {
var pixelBufferOut: CVPixelBuffer?
let status = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool, &pixelBufferOut)
if status != kCVReturnSuccess {
fatalError("CVPixelBufferPoolCreatePixelBuffer() failed")
}
let pixelBuffer = pixelBufferOut!
CVPixelBufferLockBaseAddress(pixelBuffer, [])
let data = CVPixelBufferGetBaseAddress(pixelBuffer)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: data, width: Int(size.width), height: Int(size.height),
bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedFirst.rawValue)
context!.clear(CGRect(x: 0, y: 0, width: size.width, height: size.height))
let horizontalRatio = size.width / image.size.width
let verticalRatio = size.height / image.size.height
let aspectRatio = min(horizontalRatio, verticalRatio) // ScaleAspectFit
let newSize = CGSize(width: image.size.width * aspectRatio, height: image.size.height * aspectRatio)
let x = newSize.width < size.width ? (size.width - newSize.width) / 2 : 0
let y = newSize.height < size.height ? (size.height - newSize.height) / 2 : 0
context!.concatenate(CGAffineTransform.identity)
context!.draw(image.cgImage!, in: CGRect(x: x, y: y, width: newSize.width, height: newSize.height))
CVPixelBufferUnlockBaseAddress(pixelBuffer, [])
return pixelBuffer
}
}
init(renderSettings: RenderSettings) {
self.renderSettings = renderSettings
}
func start() {
let avOutputSettings: [String: AnyObject] = [
AVVideoCodecKey: renderSettings.avCodecKey as AnyObject,
AVVideoWidthKey: NSNumber(value: Float(renderSettings.width)),
AVVideoHeightKey: NSNumber(value: Float(renderSettings.height))
]
func createPixelBufferAdaptor() {
let sourcePixelBufferAttributesDictionary = [
kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32ARGB),
kCVPixelBufferWidthKey as String: NSNumber(value: Float(renderSettings.width)),
kCVPixelBufferHeightKey as String: NSNumber(value: Float(renderSettings.height))
]
pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoWriterInput,
sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
}
func createAssetWriter(outputURL: URL) -> AVAssetWriter {
guard let assetWriter = try? AVAssetWriter(outputURL: outputURL, fileType: AVFileType.mp4) else {
fatalError("AVAssetWriter() failed")
}
guard assetWriter.canApply(outputSettings: avOutputSettings, forMediaType: AVMediaType.video) else {
fatalError("canApplyOutputSettings() failed")
}
return assetWriter
}
videoWriter = createAssetWriter(outputURL: renderSettings.outputURL)
videoWriterInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: avOutputSettings)
if videoWriter.canAdd(videoWriterInput) {
videoWriter.add(videoWriterInput)
}
else {
fatalError("canAddInput() returned false")
}
createPixelBufferAdaptor()
if videoWriter.startWriting() == false {
fatalError("startWriting() failed")
}
videoWriter.startSession(atSourceTime: CMTime.zero)
precondition(pixelBufferAdaptor.pixelBufferPool != nil, "nil pixelBufferPool")
}
func render(appendPixelBuffers: @escaping (VideoWriter) -> Bool, completion: @escaping () -> Void) {
autoreleasepool {
precondition(videoWriter != nil, "Call start() to initialize the writer")
let queue = DispatchQueue(label: "mediaInputQueue")
videoWriterInput.requestMediaDataWhenReady(on: queue) {
let isFinished = appendPixelBuffers(self)
if isFinished {
self.videoWriterInput.markAsFinished()
self.videoWriter.finishWriting() {
DispatchQueue.main.async {
completion()
}
}
}
}
}
}
func addImage(image: UIImage, withPresentationTime presentationTime: CMTime) -> Bool {
autoreleasepool {
precondition(pixelBufferAdaptor != nil, "Call start() to initialize the writer")
let pixelBuffer = VideoWriter.pixelBufferFromImage(image: image, pixelBufferPool: pixelBufferAdaptor.pixelBufferPool!, size: renderSettings.size)
return pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
}
}
}
class ImageAnimator {
static let kTimescale: Int32 = 600
let settings: RenderSettings
let videoWriter: VideoWriter
var images: [UIImage]!
var frameNum = 0
class func removeFileAtURL(fileURL: URL) {
do {
try FileManager.default.removeItem(atPath: fileURL.path)
}
catch _ as NSError {
//
}
}
init(renderSettings: RenderSettings,imagearr: [UIImage]) {
settings = renderSettings
videoWriter = VideoWriter(renderSettings: settings)
images = imagearr
}
func render(completion: @escaping () -> Void) {
// The VideoWriter will fail if a file exists at the URL, so clear it out first.
ImageAnimator.removeFileAtURL(fileURL: settings.outputURL)
videoWriter.start()
videoWriter.render(appendPixelBuffers: appendPixelBuffers) {
let s: String = self.settings.outputURL.path
tempurl = s
completion()
}
}
func appendPixelBuffers(writer: VideoWriter) -> Bool {
let frameDuration = CMTimeMake(value: Int64(ImageAnimator.kTimescale / settings.fps), timescale: ImageAnimator.kTimescale)
while !images.isEmpty {
if writer.isReadyForData == false {
return false
}
let image = images.removeFirst()
let presentationTime = CMTimeMultiply(frameDuration, multiplier: Int32(frameNum))
let success = videoWriter.addImage(image: image, withPresentationTime: presentationTime)
if success == false {
fatalError("addImage() failed")
}
frameNum=frameNum+1
}
return true
}
}
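For reference, the timing math in appendPixelBuffers works out like this (a small sketch using the 600 timescale and 60 fps from RenderSettings):
// frameDuration = 600 / 60 = 10 ticks at a 600-ticks-per-second timescale, i.e. 1/60 s.
let frameDuration = CMTimeMake(value: 10, timescale: 600)
// Frame n is presented at n * 10 ticks, e.g. frame 120 at 1200/600 = 2.0 seconds.
let presentationTime = CMTimeMultiply(frameDuration, multiplier: 120)
print(CMTimeGetSeconds(presentationTime)) // 2.0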
Memory usage:
I get the images using this code:
@objc public class Recorder: NSObject {
public var view : UIView?
var displayLink : CADisplayLink?
var referenceDate : NSDate?
var imageArray = [UIImage]()
public func start() {
self.imageArray.removeAll()
if (view == nil) {
NSException(name: NSExceptionName(rawValue: "No view set"), reason: "You must set a view before calling start.", userInfo: nil).raise()
}else {
displayLink = CADisplayLink(target: self, selector: #selector(self.handleDisplayLink(displayLink:)))
displayLink!.add(to: RunLoop.main, forMode: RunLoop.Mode.common)
referenceDate = NSDate()
}
}
@objc func handleDisplayLink(displayLink: CADisplayLink) {
if (view != nil) {
createImageFromView(captureView: view!)
}
}
func createImageFromView(captureView : UIView) {
UIGraphicsBeginImageContextWithOptions(captureView.bounds.size, false, 0)
captureView.drawHierarchy(in: captureView.bounds, afterScreenUpdates: false)
let image = UIGraphicsGetImageFromCurrentImageContext();
if let img = image {
self.imageArray.append(img)
}
UIGraphicsEndImageContext();
}
public func stop(completion: @escaping (_ saveURL: String) -> Void) {
displayLink?.invalidate()
let seconds = referenceDate?.timeIntervalSinceNow
if (seconds != nil) {
print("Image Count : \(self.imageArray.count)")
DispatchQueue.main.async {
let settings = RenderSettings()
let imageAnimator = ImageAnimator(renderSettings: settings,imagearr: self.imageArray)
imageAnimator.render() {
let u: String = tempurl
completion(u)
//self.saveVideoInPhotos()
}
}
}
}
}
Thanks in advance.

Delete white background from UIImage in swift 5

I am trying to save two copies of a photo taken from the camera: one is the photo itself, and the other is the photo plus emojis hiding the faces of the people in it.
Right now I only get the original photo plus an image with a white background and the emoji faces, but without the photo behind them.
This is the code I use to put the emojis over the faces:
private func detectFace(in image: CVPixelBuffer) {
let faceDetectionRequest = VNDetectFaceLandmarksRequest(completionHandler: { (request: VNRequest, error: Error?) in
DispatchQueue.main.async {
if let results = request.results as? [VNFaceObservation] {
self.handleFaceDetectionResults(results)
} else {
self.clearDrawings()
}
}
})
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: image, orientation: .leftMirrored, options: [:])
try? imageRequestHandler.perform([faceDetectionRequest])
}
private func handleFaceDetectionResults(_ observedFaces: [VNFaceObservation]) {
self.clearDrawings()
let facesBoundingBoxes: [CAShapeLayer] = observedFaces.flatMap({ (observedFace: VNFaceObservation) -> [CAShapeLayer] in
let faceBoundingBoxOnScreen = self.previewLayer.layerRectConverted(fromMetadataOutputRect: observedFace.boundingBox)
let image = UIImage(named: "happy_emoji.png")
let imageView = UIImageView(image: image!)
imageView.frame = faceBoundingBoxOnScreen
showCamera.addSubview(imageView)
let newDrawings = [CAShapeLayer]()
return newDrawings
})
self.drawings = facesBoundingBoxes
}
private func clearDrawings() {
showCamera.subviews.forEach({ $0.removeFromSuperview() })
}
And this is the code I use to save the images:
@IBAction func onPhotoTaken(_ sender: Any) {
let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
self.photoOutput.capturePhoto(with: settings, delegate: self)
UIGraphicsBeginImageContextWithOptions(showCamera.frame.size, false, 0.0)
if let context = UIGraphicsGetCurrentContext() {
showCamera.layer.render(in: context)
}
let outputImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
let topImage = outputImage
let bottomImage = imageReciber
let size = CGSize(width: topImage!.size.width, height: topImage!.size.height + bottomImage.size.height)
UIGraphicsBeginImageContextWithOptions(size, false, 0.0)
topImage!.draw(in: CGRect(x: 0, y: 0, width: size.width, height: topImage!.size.height))
bottomImage.draw(in: CGRect(x: 0, y: topImage!.size.height, width: size.width, height: bottomImage.size.height))
let newImage:UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
UIImageWriteToSavedPhotosAlbum(newImage, nil, nil, nil)
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let imageData = photo.fileDataRepresentation()
else { return }
let image = UIImage(data: imageData)
showCamera.image = image
imageReciber = image!
UIImageWriteToSavedPhotosAlbum(showCamera.image!, nil, nil, nil)
}
I tried different solutions to delete the white background (or black, depending on whether I pass false or true in the "render" part), but I always get the emoji image with a white background.
Please help me get the emoji image without the white/black background, overlaid on the photo that was taken.
My full code is:
import UIKit
import AVFoundation
import Vision
class cameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, AVCapturePhotoCaptureDelegate {
private let captureSession = AVCaptureSession()
private lazy var previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
private let videoDataOutput = AVCaptureVideoDataOutput()
private var drawings: [CAShapeLayer] = []
private let photoOutput = AVCapturePhotoOutput()
var imageReciber = UIImage()
@IBOutlet weak var showCamera: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
self.addCameraInput()
self.showCameraFeed()
self.getCameraFrames()
self.captureSession.startRunning()
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
self.previewLayer.frame = self.showCamera.frame
}
func captureOutput(
_ output: AVCaptureOutput,
didOutput sampleBuffer: CMSampleBuffer,
from connection: AVCaptureConnection) {
guard let frame = CMSampleBufferGetImageBuffer(sampleBuffer) else {
debugPrint("unable to get image from sample buffer")
return
}
self.detectFace(in: frame)
}
private func addCameraInput() {
guard let device = AVCaptureDevice.DiscoverySession(
deviceTypes: [.builtInWideAngleCamera, .builtInDualCamera, .builtInTrueDepthCamera],
mediaType: .video,
position: .back).devices.first else {
fatalError("No back camera device found, please make sure to run SimpleLaneDetection in an iOS device and not a simulator")
}
let cameraInput = try! AVCaptureDeviceInput(device: device)
self.captureSession.addInput(cameraInput)
captureSession.addOutput(photoOutput)
}
private func showCameraFeed() {
self.previewLayer.videoGravity = .resizeAspectFill
self.showCamera.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.showCamera.frame
}
private func getCameraFrames() {
self.videoDataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
self.videoDataOutput.alwaysDiscardsLateVideoFrames = true
self.videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "camera_frame_processing_queue"))
self.captureSession.addOutput(self.videoDataOutput)
guard let connection = self.videoDataOutput.connection(with: AVMediaType.video),
connection.isVideoOrientationSupported else { return }
connection.videoOrientation = .portrait
}
private func detectFace(in image: CVPixelBuffer) {
let faceDetectionRequest = VNDetectFaceLandmarksRequest(completionHandler: { (request: VNRequest, error: Error?) in
DispatchQueue.main.async {
if let results = request.results as? [VNFaceObservation] {
self.handleFaceDetectionResults(results)
} else {
self.clearDrawings()
}
}
})
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: image, orientation: .leftMirrored, options: [:])
try? imageRequestHandler.perform([faceDetectionRequest])
}
private func handleFaceDetectionResults(_ observedFaces: [VNFaceObservation]) {
self.clearDrawings()
let facesBoundingBoxes: [CAShapeLayer] = observedFaces.flatMap({ (observedFace: VNFaceObservation) -> [CAShapeLayer] in
let faceBoundingBoxOnScreen = self.previewLayer.layerRectConverted(fromMetadataOutputRect: observedFace.boundingBox)
let image = UIImage(named: "happy_emoji.png")
let imageView = UIImageView(image: image!)
imageView.frame = faceBoundingBoxOnScreen
showCamera.addSubview(imageView)
let newDrawings = [CAShapeLayer]()
return newDrawings
})
self.drawings = facesBoundingBoxes
}
private func clearDrawings() {
showCamera.subviews.forEach({ $0.removeFromSuperview() })
}
@IBAction func onPhotoTaken(_ sender: Any) {
let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
self.photoOutput.capturePhoto(with: settings, delegate: self)
UIGraphicsBeginImageContextWithOptions(showCamera.frame.size, false, 0.0)
if let context = UIGraphicsGetCurrentContext() {
showCamera.layer.render(in: context)
}
let outputImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
let topImage = outputImage
let bottomImage = imageReciber
let size = CGSize(width: topImage!.size.width, height: topImage!.size.height + bottomImage.size.height)
UIGraphicsBeginImageContextWithOptions(size, false, 0.0)
topImage!.draw(in: CGRect(x: 0, y: 0, width: size.width, height: topImage!.size.height))
bottomImage.draw(in: CGRect(x: 0, y: topImage!.size.height, width: size.width, height: bottomImage.size.height))
let newImage:UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
UIImageWriteToSavedPhotosAlbum(newImage, nil, nil, nil)
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let imageData = photo.fileDataRepresentation()
else { return }
let image = UIImage(data: imageData)
// The image captured from the buffer is added to the image view and given a slightly rounded border so it looks good.
showCamera.image = image
imageReciber = image!
UIImageWriteToSavedPhotosAlbum(showCamera.image!, nil, nil, nil)
}
}
Thank you in advance.
After looking into the problem more calmly, I discovered how to solve it.
The problem is that I was trying to render the image to a file before getting it from the video stream. To solve this, I created a new function that executes after the image is taken, and now everything works flawlessly.
func saveEmoji() {
showCamera.backgroundColor = UIColor.clear
UIGraphicsBeginImageContextWithOptions(showCamera.frame.size, true, 0.0)
if let context = UIGraphicsGetCurrentContext() {
showCamera.layer.render(in: context)
}
let outputImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
var topImage = outputImage
UIImageWriteToSavedPhotosAlbum(topImage!, nil, nil, nil)
topImage = nil
}
The function is called after the first image is saved:
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let imageData = photo.fileDataRepresentation()
else { return }
let image = UIImage(data: imageData)
showCamera.image = image
imageReciber = image!
UIImageWriteToSavedPhotosAlbum(showCamera.image!, nil, nil, nil)
saveEmoji()
}

Custom Barcode Scanner, tap to focus

Referencing these questions here:
How to implement tap to focus on barcode scanner app using swift?
Set Camera Focus On Tap Point With Swift
https://stackoverflow.com/a/41796603/8272698
The links above are quite old and out of date. I have tried to use the answers provided there, but to no avail... Below is my attempt based on them.
I need to tap the screen while reading barcodes to focus on the object in the view.
Here is my code attempt:
var captureDevice: AVCaptureDevice? //capture device Is this right?
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
let screenSize = videoPreviewLayer!.bounds.size
if let touchPoint = touches.first {
let x = touchPoint.location(in: self.view).y / screenSize.height
let y = 1.0 - touchPoint.location(in: self.view).x / screenSize.width
let focusPoint = CGPoint(x: x, y: y)
if let device = captureDevice {
do {
try device.lockForConfiguration()
device.focusPointOfInterest = focusPoint
//device.focusMode = .continuousAutoFocus
device.focusMode = .autoFocus
//device.focusMode = .locked
device.exposurePointOfInterest = focusPoint
device.exposureMode = AVCaptureDevice.ExposureMode.continuousAutoExposure
device.unlockForConfiguration()
}
catch {
// just ignore
}
}
}
}
This code doesn't work: when I tap, no focusing happens.
Here is the rest of my camera code.
import UIKit
import AVFoundation
class BarcodeScanVC: UIViewController {
struct GlobalVariable{
static var senderTags = 0
}
var captureSession = AVCaptureSession()
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var qrCodeFrameView: UIView?
var row = 0
var senderTag = 0
var waybillData: String = ""
var diagnosticErrorCodeData: String = ""
var hddSerialNumberData: String = ""
var scanRectView: UIView?
var delegate: BarcodeScanDelegate?
var captureDevice: AVCaptureDevice?
private let supportedCodeTypes = [AVMetadataObject.ObjectType.upce,
AVMetadataObject.ObjectType.code39,
AVMetadataObject.ObjectType.code39Mod43,
AVMetadataObject.ObjectType.code93,
AVMetadataObject.ObjectType.code128,
AVMetadataObject.ObjectType.ean8,
AVMetadataObject.ObjectType.ean13,
AVMetadataObject.ObjectType.aztec,
AVMetadataObject.ObjectType.pdf417,
AVMetadataObject.ObjectType.itf14,
AVMetadataObject.ObjectType.dataMatrix,
AVMetadataObject.ObjectType.interleaved2of5,
AVMetadataObject.ObjectType.qr]
override func viewDidLoad() {
super.viewDidLoad()
// Get the back-facing camera for capturing videos
//let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .back)
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back)
guard let captureDevice = deviceDiscoverySession.devices.first else {
print("Failed to get the camera device")
return
}
do {
// Get an instance of the AVCaptureDeviceInput class using the previous device object.
let input = try AVCaptureDeviceInput(device: captureDevice)
// Set the input device on the capture session.
captureSession.addInput(input)
// Initialize a AVCaptureMetadataOutput object and set it as the output device to the capture session.
let captureMetadataOutput = AVCaptureMetadataOutput()
captureSession.addOutput(captureMetadataOutput)
// Set delegate and use the default dispatch queue to execute the call back
captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
captureMetadataOutput.metadataObjectTypes = supportedCodeTypes
// captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.qr]
} catch {
// If any error occurs, simply print it out and don't continue any more.
print(error)
return
}
captureSession.commitConfiguration()
// Initialize the video preview layer and add it as a sublayer to the viewPreview view's layer.
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer?.frame = view.layer.bounds
//videoPreviewLayer?.frame
// let height: CGFloat = ((videoPreviewLayer?.frame.size.width)!)/2
// let width: CGFloat = ((videoPreviewLayer?.frame.size.width)!)/2
let height: CGFloat = (view.frame.size.height)/2
let width: CGFloat = (view.frame.size.width) - 200
let path = UIBezierPath()
//Corner1
path.move(to: CGPoint(x: 5, y: 50))
path.addLine(to: CGPoint(x: 5, y: 5))
path.addLine(to: CGPoint(x: 50, y: 5))
//Corner2
path.move(to: CGPoint(x: height - 55, y: 5))
path.addLine(to: CGPoint(x: height - 5, y: 5))
path.addLine(to: CGPoint(x: height - 5, y: 55))
//Corner3
path.move(to: CGPoint(x: 5, y: width - 55))
path.addLine(to: CGPoint(x: 5, y: width - 5))
path.addLine(to: CGPoint(x: 55, y: width - 5))
//Corner4 -bottom right
path.move(to: CGPoint(x: height - 5, y: width - 55))
path.addLine(to: CGPoint(x: height - 5, y: width - 5))
path.addLine(to: CGPoint(x: height - 55, y: width - 5))
let shape = CAShapeLayer()
shape.path = path.cgPath
shape.strokeColor = UIColor.white.cgColor
shape.lineWidth = 5
shape.frame.origin.x = 20
shape.frame.origin.y = 180
shape.fillColor = UIColor.clear.cgColor
videoPreviewLayer?.addSublayer(shape)
view.layer.addSublayer(videoPreviewLayer!)
//videoPreviewLayer?.anchorPoint.centerXAnchor.constraint(equalTo: view.centerXAnchor).isActive = true
//view.layer.addSublayer(scanRectView)
// Start video capture.
captureSession.startRunning()
// Move the message label and top bar to the front
//view.bringSubview(toFront: messageLabel)
//view.bringSubview(toFront: topbar)
// Initialize QR Code Frame to highlight the QR code
qrCodeFrameView = UIView()
if let qrCodeFrameView = qrCodeFrameView {
qrCodeFrameView.layer.borderColor = UIColor.green.cgColor
qrCodeFrameView.layer.borderWidth = 2
view.addSubview(qrCodeFrameView)
view.bringSubviewToFront(qrCodeFrameView)
}
}
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
let screenSize = videoPreviewLayer!.bounds.size
if let touchPoint = touches.first {
let x = touchPoint.location(in: self.view).y / screenSize.height
let y = 1.0 - touchPoint.location(in: self.view).x / screenSize.width
let focusPoint = CGPoint(x: x, y: y)
if let device = captureDevice {
do {
try device.lockForConfiguration()
device.focusPointOfInterest = focusPoint
//device.focusMode = .continuousAutoFocus
device.focusMode = .autoFocus
//device.focusMode = .locked
device.exposurePointOfInterest = focusPoint
device.exposureMode = AVCaptureDevice.ExposureMode.continuousAutoExposure
device.unlockForConfiguration()
}
catch {
// just ignore
}
}
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
func launchApp(barcodeScan: String) {
// if presentedViewController != nil {
// return
// }
guard presentedViewController == nil else {
return
}
let alertPrompt = UIAlertController(title: "Barcode Found", message: "\(barcodeScan)", preferredStyle: .actionSheet)
let confirmAction = UIAlertAction(title: "Confirm", style: UIAlertAction.Style.default, handler: { (action) -> Void in
if self.senderTag == 1 {
GlobalVariable.senderTags = 1
self.delegate?.didScan(barcodeData: barcodeScan)
self.navigationController?.popViewController(animated: true)
}
if self.senderTag == 2 {
GlobalVariable.senderTags = 2
self.delegate?.didScan(barcodeData: barcodeScan)
self.navigationController?.popViewController(animated: true)
}
if self.senderTag == 3 {
GlobalVariable.senderTags = 3
self.delegate?.didScan(barcodeData: barcodeScan)
self.navigationController?.popViewController(animated: true)
}
if self.senderTag != 1 && self.senderTag != 2 && self.senderTag != 3 {
let indexPath = IndexPath(row: self.row, section: 0)
let cell: PartsOrderRequestTableCell = globalPartsOrderRequestTableVC?.tableView.cellForRow(at: indexPath) as! PartsOrderRequestTableCell
cell.diagnosticCodeLabel.text = barcodeScan
cell.diagnosticCodeLabel.endEditing(true)
self.navigationController?.popViewController(animated: true)
//return
}
})
let cancelAction = UIAlertAction(title: "Cancel", style: UIAlertAction.Style.cancel, handler: nil)
alertPrompt.addAction(confirmAction)
alertPrompt.addAction(cancelAction)
present(alertPrompt, animated: true, completion: nil)
}
}
extension BarcodeScanVC: AVCaptureMetadataOutputObjectsDelegate {
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
// Check if the metadataObjects array is not nil and it contains at least one object.
if metadataObjects.count == 0 {
qrCodeFrameView?.frame = CGRect.zero
//messageLabel.text = "No QR code is detected"
return
}
// Get the metadata object.
let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject
if supportedCodeTypes.contains(metadataObj.type) {
// If the found metadata is equal to the QR code metadata (or barcode) then update the status label's text and set the bounds
let barCodeObject = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
qrCodeFrameView?.frame = barCodeObject!.bounds
if metadataObj.stringValue != nil {
launchApp(barcodeScan: metadataObj.stringValue!)
//messageLabel.text = metadataObj.stringValue
}
}
}
private func updatePreviewLayer(layer: AVCaptureConnection, orientation: AVCaptureVideoOrientation) {
layer.videoOrientation = orientation
videoPreviewLayer?.frame = self.view.bounds
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
if let connection = self.videoPreviewLayer?.connection {
let currentDevice: UIDevice = UIDevice.current
let orientation: UIDeviceOrientation = currentDevice.orientation
let previewLayerConnection : AVCaptureConnection = connection
if previewLayerConnection.isVideoOrientationSupported {
switch (orientation) {
case .portrait: updatePreviewLayer(layer: previewLayerConnection, orientation: .portrait)
break
case .landscapeRight: updatePreviewLayer(layer: previewLayerConnection, orientation: .landscapeLeft)
break
case .landscapeLeft: updatePreviewLayer(layer: previewLayerConnection, orientation: .landscapeRight)
break
case .portraitUpsideDown: updatePreviewLayer(layer: previewLayerConnection, orientation: .portraitUpsideDown)
break
default: updatePreviewLayer(layer: previewLayerConnection, orientation: .portrait)
break
}
}
}
}
}
Am I missing something obvious?
Is there a simple answer to this without changing much of the code I already have?
Thanks!
You're on the right track. It's getting stuck at
if let device = captureDevice
captureDevice is always nil at that point. You're setting it locally in viewDidLoad, but nowhere that the touchesBegan method can access it.
guard let captureDevice = deviceDiscoverySession.devices.first else {
print("Failed to get the camera device")
return
}
Change your code from the guard let captureDevice to something like:
captureDevice = deviceDiscoverySession.devices.first
and when you use captureDevice, test for nil when you need to.
EDIT:
override func viewDidLoad() {
super.viewDidLoad()
// Get the back-facing camera for capturing videos
//let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .back)
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back)
captureDevice = deviceDiscoverySession.devices.first
if let captureDevice = captureDevice {
do {
// Get an instance of the AVCaptureDeviceInput class using the previous device object.
let input = try AVCaptureDeviceInput(device: captureDevice)
// Set the input device on the capture session.
captureSession.addInput(input)
// Initialize a AVCaptureMetadataOutput object and set it as the output device to the capture session.
let captureMetadataOutput = AVCaptureMetadataOutput()
captureSession.addOutput(captureMetadataOutput)
// Set delegate and use the default dispatch queue to execute the call back
captureMetadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
captureMetadataOutput.metadataObjectTypes = supportedCodeTypes
// captureMetadataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.qr]
} catch {
// If any error occurs, simply print it out and don't continue any more.
print(error)
return
}
}
..... Method cut short as no other changes.
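One further aside (not part of the original answer): setting focusPointOfInterest on a device that doesn't support it raises an exception, so it's worth guarding the configuration, and the preview layer can do the tap-point conversion for you. A hedged sketch of what touchesBegan could look like with those checks:
if let device = captureDevice,
   let layerPoint = touches.first?.location(in: view),
   let previewLayer = videoPreviewLayer {
    // Converts a tap in layer coordinates into the (0,0)-(1,1) device space.
    let devicePoint = previewLayer.captureDevicePointConverted(fromLayerPoint: layerPoint)
    do {
        try device.lockForConfiguration()
        if device.isFocusPointOfInterestSupported, device.isFocusModeSupported(.autoFocus) {
            device.focusPointOfInterest = devicePoint
            device.focusMode = .autoFocus
        }
        if device.isExposurePointOfInterestSupported,
           device.isExposureModeSupported(.continuousAutoExposure) {
            device.exposurePointOfInterest = devicePoint
            device.exposureMode = .continuousAutoExposure
        }
        device.unlockForConfiguration()
    } catch {
        print("Could not lock device for configuration: \(error)")
    }
}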
