func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [Any]!, from connection: AVCaptureConnection!) {
    captureSession.stopRunning()

    if let metadataObject = metadataObjects.first {
        let readableObject = metadataObject as! AVMetadataMachineReadableCodeObject

        AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
        found(code: readableObject.stringValue!)
    }
}
The function above works perfectly, but I tried to take the scanned result and save it as a photo on the device, and that seems impossible for now.
The way to do that is with Core Image. Once you have readableObject.stringValue, you generate the image with the CICode128BarcodeGenerator filter.
So, in your example:
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [Any]!, from connection: AVCaptureConnection!) {
    captureSession.stopRunning()

    if let metadataObject = metadataObjects.first {
        let readableObject = metadataObject as! AVMetadataMachineReadableCodeObject

        let barcodeFilter = CIFilter(name: "CICode128BarcodeGenerator")
        barcodeFilter?.setValue(readableObject.stringValue!.data(using: .ascii), forKey: "inputMessage")
        let image = UIImage(ciImage: (barcodeFilter?.outputImage)!)

        AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
        found(code: readableObject.stringValue!)
    }
}
Remember to import CoreImage at the beginning of the file.
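If you also want to save the generated barcode as a photo on the device, a minimal sketch could look like this. It assumes the app declares NSPhotoLibraryAddUsageDescription in Info.plist, and saveBarcodeImage(_:) is just a name I made up; the CIContext render step is there because a UIImage backed only by a CIImage may not write to the photo library directly:

import CoreImage
import UIKit

// Hypothetical helper: renders the CIImage-backed barcode into a CGImage
// and writes it to the user's photo library.
func saveBarcodeImage(_ barcode: CIImage) {
    let context = CIContext()
    guard let cgImage = context.createCGImage(barcode, from: barcode.extent) else { return }
    let image = UIImage(cgImage: cgImage)
    // Requires NSPhotoLibraryAddUsageDescription in Info.plist.
    UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
}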
In Swift 5 I can capture QR codes fine, but this fails to detect EAN-13 barcodes. Can anyone point me in the right direction? Thanks.
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    if metadataObjects.count != 0 {
        if let object = metadataObjects[0] as? AVMetadataMachineReadableCodeObject {
            if object.type == AVMetadataObject.ObjectType.qr {
                // do something
            }
            else if object.type == AVMetadataObject.ObjectType.ean13 {
                // do something else
            }
        }
    }
}
Did you add .ean13 to the list of metadata object types when initialising your capture session?
For example:
let metadataOutput = AVCaptureMetadataOutput()

if captureSession.canAddOutput(metadataOutput) {
    captureSession.addOutput(metadataOutput)

    metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
    metadataOutput.metadataObjectTypes = [.qr, .ean13]
}
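As a side note, and only as a sketch (desiredTypes is a name of my own), you could filter the requested types against availableMetadataObjectTypes after adding the output, so the app never asks for a type the current session can't deliver:

// After captureSession.addOutput(metadataOutput):
let desiredTypes: [AVMetadataObject.ObjectType] = [.qr, .ean13]
// Keep only the types this output can actually report for the current session.
metadataOutput.metadataObjectTypes = desiredTypes.filter {
    metadataOutput.availableMetadataObjectTypes.contains($0)
}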
'CMSampleBufferGetImageBuffer' has been replaced by property 'CMSampleBuffer.imageBuffer'
CMSampleBufferGetImageBuffer doesn't work :) It seems the parameters have also changed as of Swift 4.2.
guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
The entire function, just in case ...
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // print("Camera was able to capture a frame:", Date())

    guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
    guard let model = try? VNCoreMLModel(for: ARS().model) else { return }

    let request = VNCoreMLRequest(model: model) { (finishedReq, err) in
        guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
        guard let firstObservation = results.first else { return }

        print(firstObservation.identifier, firstObservation.confidence)

        DispatchQueue.main.async {
            self.identifierLabel.text = "\(firstObservation.identifier) \(firstObservation.confidence * 100)"
        }
    }

    try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
Did anyone manage to solve this, or does anyone have a reference for the new syntax?
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    connection.videoOrientation = AVCaptureVideoOrientation.portrait

    let imageBuffer: CVPixelBuffer = sampleBuffer.imageBuffer!
    let ciimage: CIImage = CIImage(cvPixelBuffer: imageBuffer)
    let image: UIImage = self.convert(cmage: ciimage)
}

// Convert CIImage to UIImage
func convert(cmage: CIImage) -> UIImage {
    let context: CIContext = CIContext(options: nil)
    let cgImage: CGImage = context.createCGImage(cmage, from: cmage.extent)!
    let image: UIImage = UIImage(cgImage: cgImage)
    return image
}
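Applied to the Core ML function from the question above, a minimal sketch using the new property could look like this (it reuses the ARS model and identifierLabel from that code, so treat it as a sketch rather than a verified drop-in):

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // New syntax: the imageBuffer property replaces CMSampleBufferGetImageBuffer(_:).
    guard let pixelBuffer = sampleBuffer.imageBuffer else { return }
    guard let model = try? VNCoreMLModel(for: ARS().model) else { return }

    let request = VNCoreMLRequest(model: model) { finishedReq, _ in
        guard let firstObservation = (finishedReq.results as? [VNClassificationObservation])?.first else { return }
        DispatchQueue.main.async {
            self.identifierLabel.text = "\(firstObservation.identifier) \(firstObservation.confidence * 100)"
        }
    }

    try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}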
I can track objects across a video, but I can't track faces.
When I use the camera to track a face, the code prints [].
extension FaceTrackingViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)

        let request = VNDetectFaceLandmarksRequest { [unowned self] request, error in
            if let error = error {
                self.presentAlertController(withTitle: self.title,
                                            message: error.localizedDescription)
            }
            else {
                print("\(request.results!)")
            }
        }

        do {
            try handler.perform([request], on: pixelBuffer!)
        }
        catch {
            print(error)
        }
    }
}
I have tried many things, but I am still not able to retrieve the stored PDF from a QR code. When I scan the code, I get the URL as output.
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputMetadataObjects metadataObjects: [Any]!, from connection: AVCaptureConnection!) {
    if metadataObjects == nil || metadataObjects.count == 0 {
        qrCodeFrameView?.frame = CGRect.zero
        messageLabel.text = "No barcode/QR code is detected"
        return
    }

    let metadataObj = metadataObjects[0] as! AVMetadataMachineReadableCodeObject

    if supportedBarCodes.contains(metadataObj.type) {
        // if metadataObj.type == AVMetadataObjectTypeQRCode {
        let barCodeObject = videoPreviewLayer?.transformedMetadataObject(for: metadataObj)
        qrCodeFrameView?.frame = barCodeObject!.bounds
    }
}
My goal is to take the video buffer and ultimately convert it to NSData, but I do not understand how to access the buffer properly. I have the captureOutput function, but I have not been successful in converting the buffer, and I'm not sure I am actually collecting anything in it. This is all in Swift; I have found some examples in Objective-C, but I cannot understand the Obj-C code well enough to figure it out.
var captureDevice: AVCaptureDevice?
var videoCaptureOutput = AVCaptureVideoDataOutput()
var bounds: CGRect = UIScreen.mainScreen().bounds
let captureSession = AVCaptureSession()
var captureConnection: AVCaptureMovieFileOutput?

override func viewDidLoad() {
    super.viewDidLoad()

    captureSession.sessionPreset = AVCaptureSessionPreset640x480
    let devices = AVCaptureDevice.devices()

    for device in devices {
        if (device.hasMediaType(AVMediaTypeVideo)) {
            if device.position == AVCaptureDevicePosition.Back {
                captureDevice = device as? AVCaptureDevice
                if captureDevice != nil {
                    beginSession()
                }
            }
        }
    }
}

func beginSession() {
    var screenWidth: CGFloat = bounds.size.width
    var screenHeight: CGFloat = bounds.size.height
    var err: NSError? = nil

    captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err)!)
    if err != nil {
        println("Error: \(err?.localizedDescription)")
    }

    videoCaptureOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey: kCVPixelFormatType_32BGRA]
    videoCaptureOutput.alwaysDiscardsLateVideoFrames = true
    videoCaptureOutput.setSampleBufferDelegate(self, queue: dispatch_queue_create("sample buffer delegate", DISPATCH_QUEUE_SERIAL))

    if captureSession.canAddOutput(self.videoCaptureOutput) {
        captureSession.addOutput(self.videoCaptureOutput)
    }
}

func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!) {
    // I think this is where I can get the buffer info.
}
In the AVCaptureVideoDataOutputSampleBufferDelegate method, captureOutput(captureOutput:didOutputSampleBuffer:fromConnection:), you can get the buffer info:
let formatDescription: CMFormatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)
let imageBuffer: CVImageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)

// Lock the base address before reading it, and unlock when done.
CVPixelBufferLockBaseAddress(imageBuffer, 0)
var imagePointer: UnsafeMutablePointer<Void> = CVPixelBufferGetBaseAddress(imageBuffer)

let bufferSize: (width: Int, height: Int) = (CVPixelBufferGetWidth(imageBuffer), CVPixelBufferGetHeight(imageBuffer))
println("Buffer Size: \(bufferSize.width):\(bufferSize.height)")

CVPixelBufferUnlockBaseAddress(imageBuffer, 0)