I want to "stream" the preview layer to my server, however, I only want specific frames to be sent. Basically, I want to take a snapshot of the AVCaptureVideoPreviewLayer, scale it down to 28*28, turn it into an intensity array, and send it to my socket layer where my python backend handles the rest.
Problem here is that AVCapturePhotoOutput's capture function is insanely slow. I can't repeatedly call the function. Not to mention it always makes a camera shutter sound haha.
The other problem is that taking a snapshot of AVCaptureVideoPreviewLayer is really difficult. Using UIGraphicsBeginImageContext almost always returns a blank/clear image.
Help a brother out, thanks!
Basically, instead of using AVCaptureVideoPreviewLayer to grab frames, you should use AVCaptureVideoDataOutputSampleBufferDelegate.
Here is an example:
import Foundation
import UIKit
import AVFoundation
protocol CaptureManagerDelegate: class {
func processCapturedImage(image: UIImage)
}
class CaptureManager: NSObject {
internal static let shared = CaptureManager()
weak var delegate: CaptureManagerDelegate?
var session: AVCaptureSession?
override init() {
super.init()
session = AVCaptureSession()
//setup input
let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
let input = try! AVCaptureDeviceInput(device: device)
session?.addInput(input)
//setup output
let output = AVCaptureVideoDataOutput()
output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as AnyHashable: kCVPixelFormatType_32BGRA]
output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
session?.addOutput(output)
}
func startSession() {
session?.startRunning()
}
func stopSession() {
session?.stopRunning()
}
func getImageFromSampleBuffer(sampleBuffer: CMSampleBuffer) -> UIImage? {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return nil
}
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
// Make sure the buffer is unlocked on every return path, including the early returns below.
defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else {
return nil
}
guard let cgImage = context.makeImage() else {
return nil
}
let image = UIImage(cgImage: cgImage, scale: 1, orientation: .right)
return image
}
}
extension CaptureManager: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else {
return
}
delegate?.processCapturedImage(image: outputImage)
}
}
Update: To process the captured images, implement the processCapturedImage method of the CaptureManagerDelegate protocol in whichever class you like, for example:
import UIKit
class ViewController: UIViewController {
@IBOutlet weak var imageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
CaptureManager.shared.startSession()
CaptureManager.shared.delegate = self
}
}
extension ViewController: CaptureManagerDelegate {
func processCapturedImage(image: UIImage) {
self.imageView.image = image
}
}
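Back to the original question's goal of a 28×28 intensity array: once the UIImage arrives in processCapturedImage, a minimal sketch of the conversion could look like the helper below. This is my own addition, assuming the Python side expects 8-bit grayscale values; it is not part of the answer above.
func intensityArray(from image: UIImage) -> [UInt8]? {
    let width = 28, height = 28
    guard let cgImage = image.cgImage else { return nil }
    var pixels = [UInt8](repeating: 0, count: width * height)
    let succeeded = pixels.withUnsafeMutableBytes { buffer -> Bool in
        // Drawing into a small grayscale context both downscales the image and converts it to intensities.
        guard let context = CGContext(data: buffer.baseAddress, width: width, height: height,
                                      bitsPerComponent: 8, bytesPerRow: width,
                                      space: CGColorSpaceCreateDeviceGray(),
                                      bitmapInfo: CGImageAlphaInfo.none.rawValue) else { return false }
        context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
        return true
    }
    return succeeded ? pixels : nil
}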
@ninjaproger's answer was great! Simply writing this as a Swift 4 version of the answer for future reference.
import UIKit
import AVFoundation
var customPreviewLayer: AVCaptureVideoPreviewLayer?
class ViewController: UIViewController {
@IBOutlet weak var imageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
CaptureManager.shared.startSession()
CaptureManager.shared.delegate = self
}
}
extension ViewController: CaptureManagerDelegate {
func processCapturedImage(image: UIImage) {
self.imageView.image = image
}
}
protocol CaptureManagerDelegate: class {
func processCapturedImage(image: UIImage)
}
class CaptureManager: NSObject {
internal static let shared = CaptureManager()
weak var delegate: CaptureManagerDelegate?
var session: AVCaptureSession?
override init() {
super.init()
session = AVCaptureSession()
//setup input
let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
let input = try! AVCaptureDeviceInput(device: device!)
session?.addInput(input)
//setup output
let output = AVCaptureVideoDataOutput()
output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
session?.addOutput(output)
}
func startSession() {
session?.startRunning()
}
func stopSession() {
session?.stopRunning()
}
func getImageFromSampleBuffer(sampleBuffer: CMSampleBuffer) -> UIImage? {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return nil
}
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
// Make sure the buffer is unlocked on every return path, including the early returns below.
defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else {
return nil
}
guard let cgImage = context.makeImage() else {
return nil
}
let image = UIImage(cgImage: cgImage, scale: 1, orientation: .right)
return image
}
}
extension CaptureManager: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else {
return
}
delegate?.processCapturedImage(image: outputImage)
}
}
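If, as in the original question, only specific frames should be sent on, one hedged option (my addition, not part of either answer) is to throttle inside the delegate callback. The stored property has to live on CaptureManager itself, since Swift extensions cannot hold stored properties.
// In CaptureManager:
private var lastFrameSentAt = Date.distantPast

// In the AVCaptureVideoDataOutputSampleBufferDelegate extension:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // Forward roughly one frame per second; all other frames are dropped cheaply.
    guard Date().timeIntervalSince(lastFrameSentAt) >= 1.0,
          let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else { return }
    lastFrameSentAt = Date()
    delegate?.processCapturedImage(image: outputImage)
}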
Details
Xcode 10.2.1 (10E1001), Swift 5
Features
This solution allows you:
to check camera access
to select the front or back camera
to show an alert with a link to the app settings page if there is no camera access
to take a photo
to play the standard photo capture sound
Solution
CameraService
import UIKit
import AVFoundation
import Vision
import AudioToolbox
class CameraService: NSObject {
private weak var previewView: UIView?
private(set) var cameraIsReadyToUse = false
private let session = AVCaptureSession()
private weak var previewLayer: AVCaptureVideoPreviewLayer?
private lazy var sequenceHandler = VNSequenceRequestHandler()
private lazy var capturePhotoOutput = AVCapturePhotoOutput()
private lazy var dataOutputQueue = DispatchQueue(label: "FaceDetectionService",
qos: .userInitiated, attributes: [],
autoreleaseFrequency: .workItem)
private var captureCompletionBlock: ((UIImage) -> Void)?
private var preparingCompletionHandler: ((Bool) -> Void)?
private var snapshotImageOrientation = UIImage.Orientation.upMirrored
private var cameraPosition = AVCaptureDevice.Position.front {
didSet {
switch cameraPosition {
case .front: snapshotImageOrientation = .upMirrored
case .unspecified, .back: fallthrough
@unknown default: snapshotImageOrientation = .up
}
}
}
func prepare(previewView: UIView,
cameraPosition: AVCaptureDevice.Position,
completion: ((Bool) -> Void)?) {
self.previewView = previewView
self.preparingCompletionHandler = completion
self.cameraPosition = cameraPosition
checkCameraAccess { allowed in
if allowed { self.setup() }
completion?(allowed)
self.preparingCompletionHandler = nil
}
}
private func setup() { configureCaptureSession() }
func start() { if cameraIsReadyToUse { session.startRunning() } }
func stop() { session.stopRunning() }
}
extension CameraService {
private func askUserForCameraPermission(_ completion: ((Bool) -> Void)?) {
AVCaptureDevice.requestAccess(for: AVMediaType.video) { (allowedAccess) -> Void in
DispatchQueue.main.async { completion?(allowedAccess) }
}
}
private func checkCameraAccess(completion: ((Bool) -> Void)?) {
askUserForCameraPermission { [weak self] allowed in
guard let self = self, let completion = completion else { return }
self.cameraIsReadyToUse = allowed
if allowed {
completion(true)
} else {
self.showDisabledCameraAlert(completion: completion)
}
}
}
private func configureCaptureSession() {
guard let previewView = previewView else { return }
// Define the capture device we want to use
guard let camera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: cameraPosition) else {
let error = NSError(domain: "", code: 0, userInfo: [NSLocalizedDescriptionKey : "No front camera available"])
show(error: error)
return
}
// Connect the camera to the capture session input
do {
try camera.lockForConfiguration()
defer { camera.unlockForConfiguration() }
if camera.isFocusModeSupported(.continuousAutoFocus) {
camera.focusMode = .continuousAutoFocus
}
if camera.isExposureModeSupported(.continuousAutoExposure) {
camera.exposureMode = .continuousAutoExposure
}
let cameraInput = try AVCaptureDeviceInput(device: camera)
session.addInput(cameraInput)
} catch {
show(error: error as NSError)
return
}
// Create the video data output
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
// Add the video output to the capture session
session.addOutput(videoOutput)
let videoConnection = videoOutput.connection(with: .video)
videoConnection?.videoOrientation = .portrait
// Configure the preview layer
let previewLayer = AVCaptureVideoPreviewLayer(session: session)
previewLayer.videoGravity = .resizeAspectFill
previewLayer.frame = previewView.bounds
previewView.layer.insertSublayer(previewLayer, at: 0)
self.previewLayer = previewLayer
}
}
extension CameraService: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard captureCompletionBlock != nil,
let outputImage = UIImage(sampleBuffer: sampleBuffer, orientation: snapshotImageOrientation) else { return }
DispatchQueue.main.async { [weak self] in
guard let self = self else { return }
if let captureCompletionBlock = self.captureCompletionBlock{
captureCompletionBlock(outputImage)
AudioServicesPlayAlertSound(SystemSoundID(1108))
}
self.captureCompletionBlock = nil
}
}
}
// Navigation
extension CameraService {
private func show(alert: UIAlertController) {
DispatchQueue.main.async {
UIApplication.topViewController?.present(alert, animated: true, completion: nil)
}
}
private func showDisabledCameraAlert(completion: ((Bool) -> Void)?) {
let alertVC = UIAlertController(title: "Enable Camera Access",
message: "Please provide access to your camera",
preferredStyle: .alert)
alertVC.addAction(UIAlertAction(title: "Go to Settings", style: .default, handler: { action in
guard let previewView = self.previewView,
let settingsUrl = URL(string: UIApplication.openSettingsURLString),
UIApplication.shared.canOpenURL(settingsUrl) else { return }
UIApplication.shared.open(settingsUrl) { [weak self] _ in
guard let self = self else { return }
self.prepare(previewView: previewView,
cameraPosition: self.cameraPosition,
completion: self.preparingCompletionHandler)
}
}))
alertVC.addAction(UIAlertAction(title: "Cancel", style: .cancel, handler: { _ in completion?(false) }))
show(alert: alertVC)
}
private func show(error: NSError) {
let alertVC = UIAlertController(title: "Error", message: error.localizedDescription, preferredStyle: .alert)
alertVC.addAction(UIAlertAction(title: "Ok", style: .cancel, handler: nil ))
show(alert: alertVC)
}
}
extension CameraService: AVCapturePhotoCaptureDelegate {
func capturePhoto(completion: ((UIImage) -> Void)?) { captureCompletionBlock = completion }
}
Helpers
///////////////////////////////////////////////////////////////////////////
import UIKit
import AVFoundation
extension UIImage {
convenience init?(sampleBuffer: CMSampleBuffer, orientation: UIImage.Orientation = .upMirrored) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height,
bitsPerComponent: 8, bytesPerRow: bytesPerRow,
space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else { return nil }
guard let cgImage = context.makeImage() else { return nil }
self.init(cgImage: cgImage, scale: 1, orientation: orientation)
}
}
///////////////////////////////////////////////////////////////////////////
import UIKit
extension UIApplication {
private class func topViewController(controller: UIViewController? = UIApplication.shared.keyWindow?.rootViewController) -> UIViewController? {
if let navigationController = controller as? UINavigationController {
return topViewController(controller: navigationController.visibleViewController)
}
if let tabController = controller as? UITabBarController {
if let selected = tabController.selectedViewController {
return topViewController(controller: selected)
}
}
if let presented = controller?.presentedViewController {
return topViewController(controller: presented)
}
return controller
}
class var topViewController: UIViewController? { return topViewController() }
}
Usage
private lazy var cameraService = CameraService()
//...
cameraService.prepare(previewView: previewView, cameraPosition: .front) { [weak self] success in
if success { self?.cameraService.start() }
}
//...
cameraService.capturePhoto { [weak self] image in
//...
}
Full sample
import UIKit
class ViewController: UIViewController {
private lazy var cameraService = CameraService()
private weak var button: UIButton?
private weak var imagePreviewView: UIImageView?
private var cameraInited = false
private enum ButtonState { case cancel, makeSnapshot }
private var buttonState = ButtonState.makeSnapshot {
didSet {
switch buttonState {
case .makeSnapshot: button?.setTitle("Make a photo", for: .normal)
case .cancel: button?.setTitle("Cancel", for: .normal)
}
}
}
override func viewDidLoad() {
super.viewDidLoad()
setupCameraPreviewView()
setupButton()
// Do any additional setup after loading the view.
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
cameraService.start()
}
override func viewDidDisappear(_ animated: Bool) {
super.viewDidDisappear(animated)
cameraService.stop()
}
// Ensure that the interface stays locked in Portrait.
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
return .portrait
}
// Ensure that the interface stays locked in Portrait.
override var preferredInterfaceOrientationForPresentation: UIInterfaceOrientation {
return .portrait
}
}
extension ViewController {
private func setupCameraPreviewView() {
let previewView = UIView(frame: .zero)
view.addSubview(previewView)
previewView.translatesAutoresizingMaskIntoConstraints = false
previewView.topAnchor.constraint(equalTo: view.topAnchor).isActive = true
previewView.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
previewView.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
previewView.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
previewView.layoutIfNeeded()
cameraService.prepare(previewView: previewView, cameraPosition: .front) { [weak self] success in
if success { self?.cameraService.start() }
}
}
private func setupButton() {
let button = UIButton(frame: .zero)
button.addTarget(self, action: #selector(buttonTouchedUpInside), for: .touchUpInside)
view.addSubview(button)
self.button = button
buttonState = .makeSnapshot
button.translatesAutoresizingMaskIntoConstraints = false
button.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
button.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
button.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
button.heightAnchor.constraint(equalToConstant: 44).isActive = true
button.backgroundColor = UIColor.black.withAlphaComponent(0.4)
}
private func show(image: UIImage) {
let imageView = UIImageView(frame: .zero)
view.insertSubview(imageView, at: 1)
imagePreviewView = imageView
imageView.translatesAutoresizingMaskIntoConstraints = false
imageView.topAnchor.constraint(equalTo: view.topAnchor).isActive = true
imageView.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
imageView.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
imageView.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
imageView.image = image
}
@objc func buttonTouchedUpInside() {
switch buttonState {
case .makeSnapshot:
cameraService.capturePhoto { [weak self] image in
guard let self = self else {return }
self.cameraService.stop()
self.buttonState = .cancel
self.show(image: image)
}
case .cancel:
buttonState = .makeSnapshot
cameraService.start()
imagePreviewView?.removeFromSuperview()
}
}
}
Related
I'm using the AVCaptureDevice API for scanning barcodes and it works very well on iPhone, but very similar code doesn't work on iPad and I'm not quite sure why (it doesn't detect any barcode at all). The main differences are the size, position, and orientation of the scan area. I tested using an iPhone 12 mini (iOS 15 beta) and the original iPad Pro 9.7" (iOS 14.6). Not sure if that matters.
Below is the code for the scanner. Please let me know if you notice something that should be changed.
import Foundation
import AVFoundation
import UIKit
import AudioToolbox
class ScannerViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!
var barcodeCallback: (String) -> Void
var cameraScanDismissedCallback: (Bool) -> Void
var scanned = false
var currentDevice: AVCaptureDevice!
var scanRectView: UIView!
init(barcodeCallback: @escaping (String) -> Void, cameraScanDismissedCallback: @escaping (Bool) -> Void) {
self.barcodeCallback = barcodeCallback;
self.cameraScanDismissedCallback = cameraScanDismissedCallback;
super.init(nibName: nil, bundle: nil)
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
override func viewDidLoad() {
super.viewDidLoad()
view.backgroundColor = UIColor.black
captureSession = AVCaptureSession()
guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
let videoInput: AVCaptureDeviceInput
do {
videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
} catch {
return
}
if (captureSession.canAddInput(videoInput)) {
captureSession.addInput(videoInput)
} else {
failed()
return
}
let metadataOutput = AVCaptureMetadataOutput()
if (captureSession.canAddOutput(metadataOutput)) {
captureSession.addOutput(metadataOutput)
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.metadataObjectTypes = [.qr, .ean8, .ean13, .pdf417, .code128, .aztec, .code39, .code39Mod43, .code93, .dataMatrix, .face, .interleaved2of5, .itf14, .upce]
} else {
failed()
return
}
let windowSize = UIScreen.main.bounds.size
var scanSize: CGSize!;
var scanRect: CGRect!;
if(UIDevice.current.userInterfaceIdiom == .pad){
scanSize = CGSize(width:windowSize.width*1/3, height:windowSize.width*1/7);
scanRect = CGRect(x: UIScreen.main.bounds.midX - scanSize.width/2,
y: UIScreen.main.bounds.midY - scanSize.height/2,
width:scanSize.width, height:scanSize.height);
}else{
scanSize = CGSize(width:windowSize.width*2/3, height:windowSize.width*1/3);
scanRect = CGRect(x: UIScreen.main.bounds.midX - scanSize.width/2,
y: UIScreen.main.bounds.midY - scanSize.height/2,
width:scanSize.width, height:scanSize.height);
}
scanRect = CGRect(x:scanRect.origin.y/windowSize.height,
y:scanRect.origin.x/windowSize.width,
width:scanRect.size.height/windowSize.height,
height:scanRect.size.width/windowSize.width);
metadataOutput.rectOfInterest = scanRect
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
if(UIDevice.current.userInterfaceIdiom == .pad){
let orientation: UIDeviceOrientation = UIDevice.current.orientation
previewLayer.connection?.videoOrientation = {
switch (orientation) {
case .faceUp:
return .landscapeLeft
case .portrait:
return .portrait
case .landscapeRight:
return .landscapeLeft
case .landscapeLeft:
return .landscapeRight
default:
return .portrait
}
}()
}
previewLayer.frame = view.layer.bounds
previewLayer.videoGravity = .resizeAspectFill
view.layer.addSublayer(previewLayer)
scanRectView = UIView();
view.addSubview(self.scanRectView)
scanRectView.frame = CGRect(x:0, y:0, width: scanSize.width,
height: scanSize.height);
if(UIDevice.current.userInterfaceIdiom == .pad){
scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX - scanSize.width/2,
y:UIScreen.main.bounds.midY - scanSize.height/2)
}else{
scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX,
y:UIScreen.main.bounds.midY)
}
scanRectView.layer.borderColor = UIColor.yellow.cgColor
scanRectView.layer.borderWidth = 5;
currentDevice = videoCaptureDevice
captureSession.startRunning()
toggleTorch(on: true)
}
func toggleTorch(on: Bool) {
guard let device = currentDevice else { return }
if device.hasTorch {
do {
try device.lockForConfiguration()
if(UIDevice.current.userInterfaceIdiom == .pad){
device.videoZoomFactor = 1.3
}else{
device.videoZoomFactor = 1.5
}
if on == true {
device.torchMode = .on
} else {
device.torchMode = .off
}
device.unlockForConfiguration()
} catch {
print("Torch could not be used")
}
} else {
print("Torch is not available")
}
}
func failed() {
let ac = UIAlertController(title: "Scanning not supported", message: "Your device does not support scanning a code from an item. Please use a device with a camera.", preferredStyle: .alert)
ac.addAction(UIAlertAction(title: "OK", style: .default))
present(ac, animated: true)
captureSession = nil
toggleTorch(on: false)
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
if (captureSession?.isRunning == false) {
captureSession.startRunning()
toggleTorch(on: true)
}
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
if (captureSession?.isRunning == true) {
captureSession.stopRunning()
toggleTorch(on: false)
}
cameraScanDismissedCallback(scanned)
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
captureSession.stopRunning()
if let metadataObject = metadataObjects.first {
guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
guard let stringValue = readableObject.stringValue else { return }
AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
found(code: stringValue)
scanned = true
}
dismiss(animated: true)
}
func found(code: String) {
print(code)
barcodeCallback(code)
}
override var prefersStatusBarHidden: Bool {
return true
}
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
if(UIDevice.current.userInterfaceIdiom == .pad){
return .landscape
}else{
return .portrait
}
}
}
Just in case anyone ran into a similar issue, check for the following:
Orientation of the video
The position of AVCaptureMetadataOutput.rectOfInterest
I'm still having an issue with the rectOfInterest not positioned at the center, but it works. Once I can figure out how to center it, I will post the solution here.
It isn't centered, probably because of your navigation bar.
Try setting rectOfInterest as below. Remember that the camera uses a different coordinate system than UIView; posX and posY are in UIView coordinates.
let aimRect = CGRect(x: (posY - navBar.height) / UIScreen.main.bounds.height,
y: posX / UIScreen.main.bounds.width,
width: rectHeight / UIScreen.main.bounds.height,
height: rectWidth / UIScreen.main.bounds.width)
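An alternative worth noting (my addition, not part of this answer): AVCaptureVideoPreviewLayer can do this coordinate conversion for you, taking its own videoGravity and orientation into account. The rect name below is an assumption; it should be the highlight box's frame in the preview layer's coordinate space, converted after the session is configured and the preview layer laid out.
// aimRectInLayerCoordinates: the scan box's frame in the preview layer's coordinates (hypothetical name).
metadataOutput.rectOfInterest = previewLayer.metadataOutputRectConverted(fromLayerRect: aimRectInLayerCoordinates)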
I want to save images at a high frame rate.
I/O is expensive, so not exactly 240fps, but hopefully more than 120fps.
What I do is configure the capture device in viewDidLoad,
then log the timestamp in captureOutput and check the rate.
But I notice that captureOutput always runs at 30fps.
Could you please tell me what I'm getting wrong?
Thanks for your time and answers!
Here is my code and result:
//
// ViewController.swift
// CustomCamera
//
// Created by chunibyo on 2021/3/8.
//
import UIKit
import AVFoundation
import Vision
import VideoToolbox
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var captureButton: UIButton!
let sessionQueue = DispatchQueue(label: "Session Queue")
var status = false
private var MyCaptureDevice: AVCaptureDevice?
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view.
captureButton.layer.cornerRadius = captureButton.frame.width / 2
captureButton.layer.masksToBounds = true
captureButton.layer.zPosition = 10
guard let captureDevice = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back) else {return}
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else {return}
// 1
for vFormat in captureDevice.formats {
// 2
let ranges = vFormat.videoSupportedFrameRateRanges as [AVFrameRateRange]
let frameRates = ranges[0]
// 3
if frameRates.maxFrameRate == 240 {
// 4
try? captureDevice.lockForConfiguration()
captureDevice.activeFormat = vFormat as AVCaptureDevice.Format
captureDevice.activeVideoMinFrameDuration = CMTimeMake(value: 1, timescale: Int32(240))
captureDevice.activeVideoMaxFrameDuration = CMTimeMake(value: 1, timescale: Int32(240))
captureDevice.videoZoomFactor = captureDevice.minAvailableVideoZoomFactor
captureDevice.unlockForConfiguration()
}
}
let captureSession = AVCaptureSession();
// captureSession.sessionPreset = .photo
captureSession.addInput(input)
captureSession.startRunning();
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
dataOutput.alwaysDiscardsLateVideoFrames = true;
captureSession.addOutput(dataOutput)
print(captureDevice.minAvailableVideoZoomFactor)
print(captureDevice.maxAvailableVideoZoomFactor)
MyCaptureDevice = captureDevice
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
print(CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)))
// if !status { return }
// guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
// guard let uiImage = UIImage(pixelBuffer: pixelBuffer) else { return }
//
// sessionQueue.async {
// guard let captureDevice = self.MyCaptureDevice else { return }
// if captureDevice.videoZoomFactor >= (captureDevice.maxAvailableVideoZoomFactor - 0.2) { return }
// UIImageWriteToSavedPhotosAlbum(uiImage, nil, nil, nil)
// try? captureDevice.lockForConfiguration()
// captureDevice.videoZoomFactor += 0.1
// captureDevice.unlockForConfiguration()
// }
}
@IBAction func captureControl(_ sender: UIButton) {
DispatchQueue.main.async {
if self.status {
self.captureButton.backgroundColor = .white
print("stop")
self.status = !self.status
}
else {
self.captureButton.backgroundColor = .red
print("recording...")
self.status = !self.status
}
}
}
}
extension UIImage {
public convenience init?(pixelBuffer: CVPixelBuffer) {
var cgImage: CGImage?
VTCreateCGImageFromCVPixelBuffer(pixelBuffer, options: nil, imageOut: &cgImage)
guard let _cgImage = cgImage else { return nil }
self.init(cgImage: _cgImage)
}
}
Console log (Stack Overflow doesn't allow me to post an image, sorry):
captureOutput with logging only runs at 240fps.
captureOutput while also saving photos to the album runs at about 70~100fps.
The following code gets 240fps in the logs.
//
// ViewController.swift
// CustomCamera
//
// Created by chunibyo on 2021/3/8.
//
import UIKit
import AVFoundation
import Vision
import VideoToolbox
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var captureButton: UIButton!
let sessionQueue = DispatchQueue(label: "Session Queue")
var status = false
var zoomStatus = 1
private var MyCaptureDevice: AVCaptureDevice?
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view.
captureButton.layer.cornerRadius = captureButton.frame.width / 2
captureButton.layer.masksToBounds = true
captureButton.layer.zPosition = 10
guard let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) else {return}
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else {return}
let captureSession = AVCaptureSession();
// captureSession.sessionPreset = .photo
captureSession.addInput(input)
// 1
for vFormat in captureDevice.formats {
// 2
let ranges = vFormat.videoSupportedFrameRateRanges as [AVFrameRateRange]
let frameRates = ranges[0]
// 3
if frameRates.maxFrameRate == 240 {
// 4
try? captureDevice.lockForConfiguration()
captureDevice.activeFormat = vFormat as AVCaptureDevice.Format
captureDevice.activeVideoMinFrameDuration = CMTimeMake(value: 1, timescale: Int32(240))
captureDevice.activeVideoMaxFrameDuration = CMTimeMake(value: 1, timescale: Int32(240))
captureDevice.videoZoomFactor = captureDevice.minAvailableVideoZoomFactor
captureDevice.unlockForConfiguration()
}
}
captureSession.startRunning();
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
dataOutput.alwaysDiscardsLateVideoFrames = true;
captureSession.addOutput(dataOutput)
print(captureDevice.minAvailableVideoZoomFactor)
print(captureDevice.maxAvailableVideoZoomFactor)
MyCaptureDevice = captureDevice
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
print(CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)))
// if !status { return }
// guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
// guard let uiImage = UIImage(pixelBuffer: pixelBuffer) else { return }
// UIImageWriteToSavedPhotosAlbum(uiImage, nil, nil, nil)
//
// guard let captureDevice = self.MyCaptureDevice else { return }
// if self.zoomStatus == 1 && captureDevice.videoZoomFactor >= CGFloat(Int32(captureDevice.maxAvailableVideoZoomFactor * 0.6)) { self.zoomStatus = -1
// }
// else if self.zoomStatus == -1 && captureDevice.videoZoomFactor <= (captureDevice.minAvailableVideoZoomFactor + 1.0) {
// self.zoomStatus = 1
// }
// UIImageWriteToSavedPhotosAlbum(uiImage, nil, nil, nil)
// try? captureDevice.lockForConfiguration()
// captureDevice.videoZoomFactor += (0.1 * CGFloat(self.zoomStatus))
// captureDevice.unlockForConfiguration()
}
@IBAction func captureControl(_ sender: UIButton) {
DispatchQueue.main.async {
if self.status {
self.captureButton.backgroundColor = .white
print("stop")
self.status = !self.status
}
else {
self.captureButton.backgroundColor = .red
print("recording...")
self.status = !self.status
}
}
}
}
extension UIImage {
public convenience init?(pixelBuffer: CVPixelBuffer) {
var cgImage: CGImage?
VTCreateCGImageFromCVPixelBuffer(pixelBuffer, options: nil, imageOut: &cgImage)
guard let _cgImage = cgImage else { return nil }
self.init(cgImage: _cgImage)
}
}
I have a draggable UIView and I am trying to make it snap to four corners of the screen. I tried a few things, but none of them have worked. Here's the code that I have:
import UIKit
import AVKit
import Vision
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet weak var crystalName: UILabel!
@IBOutlet weak var crystalInfoContainer: UIView!
@IBOutlet weak var accuracy: UILabel!
var model = IdenticrystClassification().model
override func viewDidLoad() {
super.viewDidLoad()
// This method starts the camera.
let captureSession = AVCaptureSession()
guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
captureSession.addInput(input)
captureSession.startRunning()
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.videoGravity = .resizeAspectFill
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.frame
// This method defines sub view and defines it's properties.
view.addSubview(crystalInfoContainer)
crystalInfoContainer.clipsToBounds = true
crystalInfoContainer.layer.cornerRadius = 10.0
//crystalInfoContainer.layer.maskedCorners = [.layerMinXMinYCorner, .layerMaxXMinYCorner]
// This method defines torch functionality.
func toggleTorch(on: Bool) {
guard let device = AVCaptureDevice.default(for: .video) else { return }
if device.hasTorch {
do {
try device.lockForConfiguration()
if on == true {
device.torchMode = .on
} else {
device.torchMode = .off
}
device.unlockForConfiguration()
} catch {
print("Torch could not be used")
}
} else {
print("Torch is not available")
}
}
// This is the code that I am trying to work out.
func relativeVelocity(forVelocity velocity: CGFloat, from currentValue: CGFloat, to targetValue: CGFloat) -> CGFloat {
guard currentValue - targetValue != 0 else { return 0 }
return velocity / (targetValue - currentValue)
}
func nearestCorner(to point: CGPoint) -> CGPoint {
var minDistance = CGFloat.greatestFiniteMagnitude
var closestPosition = CGPoint.zero
for position in crystalInfoContainer { **Error1**
let distance = point.distance(to: position)
if distance < minDistance {
closestPosition = position
minDistance = distance
}
}
return closestPosition
let decelerationRate = UIScrollView.DecelerationRate.normal.rawValue
let velocity = UIPanGestureRecognizer.velocity(in: view)**Error2**
let projectedPosition = CGPoint(
x: crystalInfoContainer.center.x + project(initialVelocity: velocity.x, decelerationRate: decelerationRate),
y: crystalInfoContainer.center.y + project(initialVelocity: velocity.y, decelerationRate: decelerationRate)
)
let nearestCornerPosition = nearestCorner(to: projectedPosition)
let relativeInitialVelocity = CGVector(
dx: relativeVelocity(forVelocity: velocity.x, from: crystalInfoContainer.center.x, to: nearestCornerPosition.x),
dy: relativeVelocity(forVelocity: velocity.y, from: crystalInfoContainer.center.y, to: nearestCornerPosition.y)
)
let params = UISpringTimingParameters(damping: 1, response: 0.4, initialVelocity: relativeInitialVelocity)
let animator = UIViewPropertyAnimator(duration: 0, timingParameters: params)
animator.addAnimations {
self.crystalInfoContainer.center = nearestCornerPosition
}
animator.startAnimation()
}
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "video"))
captureSession.addOutput(dataOutput)
toggleTorch(on: true)
}
// Handles Vision output.
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
guard let model = try? VNCoreMLModel(for: model) else { return }
let request = VNCoreMLRequest(model: model)
{ (finishedReq, err) in
guard let results = finishedReq.results as? [VNClassificationObservation] else { return }
guard let firstObservation = results.first else { return }
let name: String = firstObservation.identifier
let acc: Int = Int(firstObservation.confidence * 100)
DispatchQueue.main.async {
self.crystalName.text = name
self.accuracy.text = "Confidence: \(acc)%"
}
}
try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
override var prefersStatusBarHidden: Bool {
return true
}
}
Error1: For-in loop requires 'UIView?' to conform to 'Sequence'; did you mean to unwrap optional?
Error2: Instance member 'velocity' cannot be used on type 'UIPanGestureRecognizer'; did you mean to use a value of this type instead?
The problem is that your panView method is wrong. You need to switch on the gesture recognizer’s state — began, changed, or ended. Pan only when the gesture changes. When the gesture ends, then and only then, animate the view into the nearest corner.
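As a hedged sketch of what that handler might look like (the handler name and the reuse of the question's nearestCorner(to:) helper are assumptions, not a drop-in fix):
@objc func panView(_ recognizer: UIPanGestureRecognizer) {
    switch recognizer.state {
    case .began, .changed:
        // Follow the finger while the gesture is in progress.
        let translation = recognizer.translation(in: view)
        crystalInfoContainer.center.x += translation.x
        crystalInfoContainer.center.y += translation.y
        recognizer.setTranslation(.zero, in: view)
    case .ended:
        // Only when the gesture ends, animate into the nearest corner.
        let target = nearestCorner(to: crystalInfoContainer.center)
        UIView.animate(withDuration: 0.3) {
            self.crystalInfoContainer.center = target
        }
    default:
        break
    }
}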
It throws "Unexpectedly found nil while implicitly unwrapping an Optional value" when I start recording a video. Camera and microphone permissions are enabled and included in Info.plist.
import UIKit
import AVFoundation
import SnapKit

/// UIViewController which displays the create screen and camera preview.
class CreateViewController: UIViewController {
/// Delegate which will receive calls on create view controller camera changes.
var delegate: CreateViewControllerDelegate?
private weak var flashlightCameraButton: UIImageView!
private weak var recordingCameraButton: UIImageView!
private weak var switchCamerasCameraButton: UIImageView!
var viewModel: CreateViewModel?
// MARK: - Camera
private var captureSession: AVCaptureSession!
private weak var previewLayer: CALayer!
private weak var captureCamera: AVCaptureDevice!
private weak var audioDevice: AVCaptureDevice!
private weak var videoOutput: AVCaptureMovieFileOutput!
private var isFlashlightOn = false
private var isRecording = false
private var isFrontCamera = false
/// The URL of the challenge video
var challengeVideoURL: URL?
override func viewDidLoad() {
super.viewDidLoad()
setUp()
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
if isRecording {
videoOutput.stopRecording()
}
}
private func setUp() {
setUpMainView()
setUpCamera()
setUpCameraButtons()
}
private func setUpMainView() {
let appWindow = UIApplication.shared.keyWindow
guard let window = appWindow else { return }
view = UIView(frame: CGRect(x: 0, y: 0, width: window.frame.width, height: window.frame.height))
}
private func setUpCamera() {
let captureSession = AVCaptureSession()
self.captureSession = captureSession
captureSession.sessionPreset = .iFrame1280x720
setUpCameraSide(front: isFrontCamera)
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer = previewLayer
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.layer.frame
let videoOutput = AVCaptureMovieFileOutput()
if captureSession.canAddOutput(videoOutput) {
captureSession.addOutput(videoOutput)
}
captureSession.startRunning()
}
private func setUpCameraSide(front: Bool) {
captureSession.beginConfiguration()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
self.captureSession.removeInput(input)
}
}
guard let availableDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: front ? .front : .back) else { return }
// Prepare visual
self.captureCamera = availableDevice
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: captureCamera)
captureSession.addInput(captureDeviceInput)
} catch let error {
// - TODO: display the error
}
// Prepare audio
guard let audioDevice = AVCaptureDevice.default(for: .audio) else { return }
self.audioDevice = audioDevice
do {
let audioDeviceInput = try AVCaptureDeviceInput(device: audioDevice)
captureSession.addInput(audioDeviceInput)
} catch let error {
// - TODO: display the error
}
captureSession.commitConfiguration()
}
private func setUpCameraButtons() {
let flashlightCameraButton = UIImageView()
self.flashlightCameraButton = flashlightCameraButton
view.addSubview(flashlightCameraButton)
flashlightCameraButton.image = UIImage(named: "camera_flashlight")
let flashlightTapGestureRecognizer = UITapGestureRecognizer(target: self, action: #selector(turnOnFlashlight))
flashlightCameraButton.isUserInteractionEnabled = true
flashlightCameraButton.addGestureRecognizer(flashlightTapGestureRecognizer)
flashlightCameraButton.snp.makeConstraints { make in
make.left.equalToSuperview().inset(25)
make.bottom.equalToSuperview().inset(25)
make.height.width.equalTo(50)
}
let recordingCameraButton = UIImageView()
self.recordingCameraButton = recordingCameraButton
view.addSubview(recordingCameraButton)
recordingCameraButton.image = UIImage(named: "camera_record")
let recordingTapGestureRecognizer = UITapGestureRecognizer(target: self, action: #selector(recording))
recordingCameraButton.isUserInteractionEnabled = true
recordingCameraButton.addGestureRecognizer(recordingTapGestureRecognizer)
recordingCameraButton.snp.makeConstraints { make in
make.centerX.equalToSuperview()
make.bottom.equalToSuperview().inset(25)
make.height.width.equalTo(70)
}
let switchCamerasCameraButton = UIImageView()
self.switchCamerasCameraButton = switchCamerasCameraButton
view.addSubview(switchCamerasCameraButton)
switchCamerasCameraButton.image = UIImage(named: "camera_switch_camera")
let switchCameraTapGestureRecognizer = UITapGestureRecognizer(target: self, action: #selector(switchCamera))
switchCamerasCameraButton.isUserInteractionEnabled = true
switchCamerasCameraButton.addGestureRecognizer(switchCameraTapGestureRecognizer)
switchCamerasCameraButton.snp.makeConstraints { make in
make.right.equalToSuperview().inset(25)
make.bottom.equalToSuperview().inset(25)
make.height.width.equalTo(50)
}
}
@objc private func turnOnFlashlight() {
isFlashlightOn = !isFlashlightOn
if !isRecording {
if !isFrontCamera, captureCamera.hasTorch {
do {
try captureCamera.lockForConfiguration()
captureCamera.torchMode = captureCamera.isTorchActive ? .on : .off
captureCamera.unlockForConfiguration()
} catch let error {
// - TODO: handle error here
}
}
}
}
@objc private func recording() {
if !isRecording {
delegate?.didStartRecording(self)
NotificationCenter.default.post(name: .recordingStartedNotification, object: nil)
if !captureSession.isRunning {
return
}
let paths = FileManager.default.urls(for: .moviesDirectory, in: .userDomainMask)
let fileURL = paths[0].appendingPathComponent("challenge.mov")
try? FileManager.default.removeItem(at: fileURL)
challengeVideoURL = fileURL
videoOutput.startRecording(to: fileURL, recordingDelegate: self)
} else {
videoOutput.stopRecording()
}
isRecording = !isRecording
}
@objc private func switchCamera() {
isFrontCamera = !isFrontCamera
setUpCameraSide(front: isFrontCamera)
}
private func setUpTimer() {
}
}
// MARK: - Challenge is recorded
extension CreateViewController: AVCaptureFileOutputRecordingDelegate {
func fileOutput(_ output: AVCaptureFileOutput, didFinishRecordingTo outputFileURL: URL, from connections: [AVCaptureConnection], error: Error?) {
guard error == nil else { return }
viewModel?.challengeVideoUrl = outputFileURL
UISaveVideoAtPathToSavedPhotosAlbum(outputFileURL.path, nil, nil, nil)
delegate?.didStopRecording(self)
NotificationCenter.default.post(name: .recordingStoppedNotification, object: nil)
// - TODO: after receiving this call moves user to challenge preview
}
}
The fatal error appears at this line: videoOutput.startRecording(to: fileURL, recordingDelegate: self). What could be the cause of the issue? Could it be a storage permission problem, with access to the photo library denied?
Every time you use ! in Swift a kitten dies :-). Your code is behaving exactly as expected.
You declare:
private weak var videoOutput: AVCaptureMovieFileOutput!
Not sure why you are using weak here, because this class owns the object and should keep a strong reference.
Then you instantiate a local version of videoOutput in setUpCamera():
let videoOutput = AVCaptureMovieFileOutput()
But you never assign it to the instance property videoOutput. Hence videoOutput is nil and you crash at:
videoOutput.startRecording(to: …)
Either assign the property in setUpCamera() after adding the output to the capture session:
self.videoOutput = videoOutput
Or, better, just create the instance in the class declaration:
private var videoOutput: AVCaptureMovieFileOutput = AVCaptureMovieFileOutput()
I have created a custom camera and have implemented below code to crop the taken image, I have shown guides in the preview layer so I want to crop the image which appears in that area.
func imageByCropToRect(rect:CGRect, scale:Bool) -> UIImage {
var rect = rect
var scaleFactor: CGFloat = 1.0
if scale {
scaleFactor = self.scale
rect.origin.x *= scaleFactor
rect.origin.y *= scaleFactor
rect.size.width *= scaleFactor
rect.size.height *= scaleFactor
}
var image: UIImage? = nil;
if rect.size.width > 0 && rect.size.height > 0 {
let imageRef = self.cgImage!.cropping(to: rect)
image = UIImage(cgImage: imageRef!, scale: scaleFactor, orientation: self.imageOrientation)
}
return image!
}
This code works fine and gives the exact cropped image when the line of code below is commented out. However, I want the camera preview to be full screen, so I have to use it, and then the cropped image comes out sort of zoomed out.
(self.previewLayer as! AVCaptureVideoPreviewLayer).videoGravity = AVLayerVideoGravity.resizeAspectFill
How do I solve this issue? Is the cropping code wrong?
Here is the full Class code
import UIKit
import AVFoundation
class CameraViewController: UIViewController {
@IBOutlet weak var guideImageView: UIImageView!
@IBOutlet weak var guidesView: UIView!
@IBOutlet weak var cameraPreviewView: UIView!
@IBOutlet weak var cameraButtonView: UIView!
@IBOutlet weak var captureButton: UIButton!
var captureSession = AVCaptureSession()
var previewLayer: CALayer!
var captureDevice: AVCaptureDevice!
/// This will be true when the user taps the capture photo button.
var takePhoto = false
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
captureSession = AVCaptureSession()
previewLayer = CALayer()
takePhoto = false
requestAuthorization()
}
private func userinteractionToButton(_ interaction: Bool) {
captureButton.isEnabled = interaction
}
/// This function will request authorization, If authorized then start the camera.
private func requestAuthorization() {
switch AVCaptureDevice.authorizationStatus(for: AVMediaType.video) {
case .authorized:
prepareCamera()
case .denied, .restricted, .notDetermined:
AVCaptureDevice.requestAccess(for: AVMediaType.video, completionHandler: { (granted) in
if !Thread.isMainThread {
DispatchQueue.main.async {
if granted {
self.prepareCamera()
} else {
let alert = UIAlertController(title: "unable_to_access_the_Camera", message: "to_enable_access_go_to_setting_privacy_camera_and_turn_on_camera_access_for_this_app", preferredStyle: UIAlertControllerStyle.alert)
alert.addAction(UIAlertAction(title: "ok", style: .default, handler: {_ in
self.navigationController?.popToRootViewController(animated: true)
}))
self.present(alert, animated: true, completion: nil)
}
}
} else {
if granted {
self.prepareCamera()
} else {
let alert = UIAlertController(title: "unable_to_access_the_Camera", message: "to_enable_access_go_to_setting_privacy_camera_and_turn_on_camera_access_for_this_app", preferredStyle: UIAlertControllerStyle.alert)
alert.addAction(UIAlertAction(title: "ok", style: .default, handler: {_ in
self.navigationController?.popToRootViewController(animated: true)
}))
self.present(alert, animated: true, completion: nil)
}
}
})
}
}
/// Checks whether the primary camera is available; if found, calls the method which will assign the available device to the AVCaptureDevice.
private func prepareCamera() {
// Resets the session.
self.captureSession.sessionPreset = AVCaptureSession.Preset.photo
if #available(iOS 10.0, *) {
let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices
self.assignCamera(availableDevices)
} else {
// Fallback on earlier versions
// development, need to test this on iOS 8
if let availableDevices = AVCaptureDevice.default(for: AVMediaType.video) {
self.assignCamera([availableDevices])
} else {
self.showAlert()
}
}
}
/// Assigns the AVCaptureDevice to the respective variable and begins the session.
///
/// - Parameter availableDevices: [AVCaptureDevice]
private func assignCamera(_ availableDevices: [AVCaptureDevice]) {
if availableDevices.first != nil {
captureDevice = availableDevices.first
beginSession()
} else {
self.showAlert()
}
}
/// Configures the camera settings and begins the session; this function is responsible for showing the image in the UI.
private func beginSession() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput)
} catch {
print(error.localizedDescription)
}
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer = previewLayer
self.cameraPreviewView.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.view.layer.frame
self.previewLayer.frame.origin.y = +self.cameraPreviewView.frame.origin.y
(self.previewLayer as! AVCaptureVideoPreviewLayer).videoGravity = AVLayerVideoGravity.resizeAspectFill
self.previewLayer.masksToBounds = true
self.cameraPreviewView.clipsToBounds = true
captureSession.startRunning()
self.view.bringSubview(toFront: self.cameraPreviewView)
self.view.bringSubview(toFront: self.cameraButtonView)
self.view.bringSubview(toFront: self.guidesView)
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [((kCVPixelBufferPixelFormatTypeKey as NSString) as String):NSNumber(value:kCVPixelFormatType_32BGRA)]
dataOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(dataOutput) {
captureSession.addOutput(dataOutput)
}
captureSession.commitConfiguration()
let queue = DispatchQueue(label: "com.letsappit.camera")
dataOutput.setSampleBufferDelegate(self, queue: queue)
self.userinteractionToButton(true)
}
/// Get the UIImage from the given CMSampleBuffer.
///
/// - Parameter buffer: CMSampleBuffer
/// - Returns: UIImage?
func getImageFromSampleBuffer(buffer:CMSampleBuffer, orientation: UIImageOrientation) -> UIImage? {
if let pixelBuffer = CMSampleBufferGetImageBuffer(buffer) {
let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
let context = CIContext()
let imageRect = CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer))
if let image = context.createCGImage(ciImage, from: imageRect) {
return UIImage(cgImage: image, scale: UIScreen.main.scale, orientation: orientation)
}
}
return nil
}
/// This function will destroy the capture session.
func stopCaptureSession() {
self.captureSession.stopRunning()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
self.captureSession.removeInput(input)
}
}
}
func showAlert() {
let alert = UIAlertController(title: "Unable to access the camera", message: "It appears that either your device doesn't have camera or its broken", preferredStyle: .alert)
alert.addAction(UIAlertAction(title: "cancel", style: .cancel, handler: {_ in
self.navigationController?.dismiss(animated: true, completion: nil)
}))
self.present(alert, animated: true, completion: nil)
}
@IBAction func didTapClick(_ sender: Any) {
userinteractionToButton(false)
takePhoto = true
}
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
if segue.identifier == "showImage" {
let vc = segue.destination as! ShowImageViewController
vc.image = sender as! UIImage
}
}
}
extension CameraViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ captureOutput: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
if connection.isVideoOrientationSupported {
connection.videoOrientation = .portrait
}
if takePhoto {
takePhoto = false
// Rotation should be unlocked to work.
var orientation = UIImageOrientation.up
switch UIDevice.current.orientation {
case .landscapeLeft:
orientation = .left
case .landscapeRight:
orientation = .right
case .portraitUpsideDown:
orientation = .down
default:
orientation = .up
}
if let image = self.getImageFromSampleBuffer(buffer: sampleBuffer, orientation: orientation) {
DispatchQueue.main.async {
let newImage = image.imageByCropToRect(rect: self.guideImageView.frame, scale: true)
self.stopCaptureSession()
self.previewLayer.removeFromSuperlayer()
self.performSegue(withIdentifier: "showImage", sender: newImage)
}
}
}
}
}
Here is the view hierarchy image
It's not clear where the problem is. I would either use the debugger or some print statements to figure out whether the issue is with the image or with the view displaying the image. Print out the size of the cropped image to make sure it is correct.
Then, print out the image view size in the ShowImageViewController in viewDidAppear to make sure it is correct.
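A hedged sketch of that kind of print debugging (the imageView outlet in ShowImageViewController is an assumption; the question only shows its image property):
// In captureOutput, right after cropping:
print("cropped image size:", newImage.size)

// In ShowImageViewController:
override func viewDidAppear(_ animated: Bool) {
    super.viewDidAppear(animated)
    print("image view size:", imageView.frame.size)
}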
To correct the zoomed-out cropped image, you have to change your crop function to the following, which takes the image orientation into account.
func croppedInRect(rect: CGRect) -> UIImage? {
func rad(_ degree: Double) -> CGFloat {
return CGFloat(degree / 180.0 * .pi)
}
var rectTransform: CGAffineTransform
switch imageOrientation {
case .left:
rectTransform = CGAffineTransform(rotationAngle: rad(90)).translatedBy(x: 0, y: -self.size.height)
case .right:
rectTransform = CGAffineTransform(rotationAngle: rad(-90)).translatedBy(x: -self.size.width, y: 0)
case .down:
rectTransform = CGAffineTransform(rotationAngle: rad(-180)).translatedBy(x: -self.size.width, y: -self.size.height)
default:
rectTransform = .identity
}
rectTransform = rectTransform.scaledBy(x: self.scale, y: self.scale)
var cgImage = self.cgImage
if cgImage == nil{
let ciContext = CIContext()
if let ciImage = self.ciImage{
cgImage = ciContext.createCGImage(ciImage, from: ciImage.extent)
}
}
if let imageRef = cgImage?.cropping(to: rect.applying(rectTransform)){
let result = UIImage(cgImage: imageRef, scale: self.scale, orientation: self.imageOrientation)
return result
}
return nil
}
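Usage would then mirror the question's existing call site in captureOutput, roughly like this (my sketch; croppedInRect returns an optional, and the rect passed in is still interpreted in the image's coordinate space):
if let newImage = image.croppedInRect(rect: self.guideImageView.frame) {
    self.stopCaptureSession()
    self.previewLayer.removeFromSuperlayer()
    self.performSegue(withIdentifier: "showImage", sender: newImage)
}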