Setting up a CoreML Model in Swift - Image Classifier - iOS

I have trained a model to differentiate between malignant and benign skin lesions, to potentially detect whether a patient has skin cancer, and have converted my Keras model to Core ML. Now I am trying to apply the model in an iOS app using Swift (through Xcode), which I have no experience in at all (still learning through trial and error).
Currently I am trying to get the model working through a simple app that just takes an image from the phone's camera and outputs a predicted label, but I am quite stuck on getting the camera to actually do just that.
import UIKit
import CoreML
import Vision
import Social
@UIApplicationMain
class ViewControl: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate, UIApplicationDelegate {
@IBOutlet weak var imageView: UIImageView!
var classificationResults : [VNClassificationObservation] = []
let imagePicker = UIImagePickerController()
override func viewDidLoad() {
super.viewDidLoad()
imagePicker.delegate = self
}
func detect(image: CIImage) {
// Load the ML model through its generated class
guard let model = try? VNCoreMLModel(for: weights_skin_cancer().model) else {
fatalError("can't load ML model")
}
let request = VNCoreMLRequest(model: model) { request, error in
guard let results = request.results as? [VNClassificationObservation],
let topResult = results.first
else {
fatalError("unexpected result type from VNCoreMLRequest")
}
if topResult.identifier.contains("malignant") {
DispatchQueue.main.async {
self.navigationItem.title = "mal!"
self.navigationController?.navigationBar.barTintColor = UIColor.green
self.navigationController?.navigationBar.isTranslucent = false
}
}
else {
DispatchQueue.main.async {
self.navigationItem.title = "benign!"
self.navigationController?.navigationBar.barTintColor = UIColor.red
self.navigationController?.navigationBar.isTranslucent = false
}
}
}
let handler = VNImageRequestHandler(ciImage: image)
do { try handler.perform([request]) }
catch { print(error) }
}
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {
if let image = info[UIImagePickerController.InfoKey.originalImage] as? UIImage {
imageView.image = image
imagePicker.dismiss(animated: true, completion: nil)
guard let ciImage = CIImage(image: image) else {
fatalError("couldn't convert uiimage to CIImage")
}
detect(image: ciImage)
}
}
@IBAction func cameraTapped(_ sender: Any) {
imagePicker.sourceType = .camera
imagePicker.allowsEditing = false
present(imagePicker, animated: true, completion: nil)
}
}
Here's also the code used to convert my model to Core ML, for reference:
import coremltools
output_labels = ['benign', 'malignant']
scale = 1/255.
coreml_model = coremltools.converters.keras.convert('/Users/Grampun/Desktop/ISIC-Archive-Downloader-master/trained_models/lr_0.00006-400_DS-20_epochs/weights.best.from_scratch.6.hdf5',
input_names='image',
image_input_names='image',
output_names='output',
class_labels=output_labels,
image_scale=scale)
coreml_model.author = 'Jack Bugeja'
coreml_model.short_description = 'Model used to identify between benign and malignant skin lesions'
coreml_model.input_description['image'] = 'Dermascopic image of skin lesion to evaluate'
coreml_model.output_description['output'] = 'Malignant/Benign'
coreml_model.save(
'/Users/Grampun/Desktop/ISIC-Archive-Downloader-master/trained_models/model_for_ios/lr_0.00006-400_DS-20_epochs/weights_skin_cancer.mlmodel')
Any help in general would be highly appreciated.
Thanks!

Open the camera:
@IBAction func cameraTapped(_ sender: Any) {
let controller = UIImagePickerController()
controller.sourceType = .camera
controller.mediaTypes = ["public.image"]
controller.allowsEditing = false
controller.delegate = self
present(controller, animated: true)
}
Add the YourModel.mlmodel to your project.
In didFinishPickingMediaWithInfo add this code:
if let imageURL = info[.imageURL] as? URL {
if let image = UIImage(contentsOfFile: imageURL.path) {
self.getPrediction(image)
}
}
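Note that when the picker's sourceType is .camera (as in cameraTapped above), the info dictionary generally does not include an .imageURL entry, so you may need to fall back to the original image instead (a small sketch reusing the getPrediction helper described next):
if let image = info[.originalImage] as? UIImage {
    self.getPrediction(image)
}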
Add this to get prediction:
func getPrediction(_ image: UIImage) {
let model = YourModel()
guard let pixelBuffer = buffer(from: image) else { return }
guard let prediction = try? model.prediction(image: pixelBuffer) else { return }
print(prediction.classLabel) // Most likely image category as string value
}
Use this helper function to make a CVPixelBuffer from your UIImage, which you need in getPrediction():
func buffer(from image: UIImage) -> CVPixelBuffer? {
let attrs = [kCVPixelBufferCGImageCompatibilityKey: kCFBooleanTrue, kCVPixelBufferCGBitmapContextCompatibilityKey: kCFBooleanTrue] as CFDictionary
var pixelBuffer : CVPixelBuffer?
let status = CVPixelBufferCreate(kCFAllocatorDefault, Int(image.size.width), Int(image.size.height), kCVPixelFormatType_32ARGB, attrs, &pixelBuffer)
guard (status == kCVReturnSuccess) else {
return nil
}
CVPixelBufferLockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
let pixelData = CVPixelBufferGetBaseAddress(pixelBuffer!)
let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
let context = CGContext(data: pixelData, width: Int(image.size.width), height: Int(image.size.height), bitsPerComponent: 8, bytesPerRow: CVPixelBufferGetBytesPerRow(pixelBuffer!), space: rgbColorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)
context?.translateBy(x: 0, y: image.size.height)
context?.scaleBy(x: 1.0, y: -1.0)
UIGraphicsPushContext(context!)
image.draw(in: CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height))
UIGraphicsPopContext()
CVPixelBufferUnlockBaseAddress(pixelBuffer!, CVPixelBufferLockFlags(rawValue: 0))
return pixelBuffer
}
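Alternatively, Vision can handle the resizing and pixel-buffer conversion for you, much like the detect(image:) method in the question already does. A minimal sketch, assuming the same YourModel class that Xcode generates from the .mlmodel file:
import Vision

func classify(_ image: UIImage) {
    guard let ciImage = CIImage(image: image),
        let visionModel = try? VNCoreMLModel(for: YourModel().model) else { return }
    let request = VNCoreMLRequest(model: visionModel) { request, _ in
        guard let top = (request.results as? [VNClassificationObservation])?.first else { return }
        print(top.identifier, top.confidence) // most likely category and its confidence
    }
    let handler = VNImageRequestHandler(ciImage: ciImage)
    try? handler.perform([request])
}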

Related

Delete white background from UIImage in Swift 5

I am trying to save two copies of a photo taken from the camera: one is the photo itself, and the other is the photo plus emojis hiding the faces of the people who appear in it.
Right now I only get the original photo plus an image with a white background and the emoji faces, but not with the photo behind them.
This is the code I use to put the emojis over the faces:
private func detectFace(in image: CVPixelBuffer) {
let faceDetectionRequest = VNDetectFaceLandmarksRequest(completionHandler: { (request: VNRequest, error: Error?) in
DispatchQueue.main.async {
if let results = request.results as? [VNFaceObservation] {
self.handleFaceDetectionResults(results)
} else {
self.clearDrawings()
}
}
})
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: image, orientation: .leftMirrored, options: [:])
try? imageRequestHandler.perform([faceDetectionRequest])
}
private func handleFaceDetectionResults(_ observedFaces: [VNFaceObservation]) {
self.clearDrawings()
let facesBoundingBoxes: [CAShapeLayer] = observedFaces.flatMap({ (observedFace: VNFaceObservation) -> [CAShapeLayer] in
let faceBoundingBoxOnScreen = self.previewLayer.layerRectConverted(fromMetadataOutputRect: observedFace.boundingBox)
let image = UIImage(named: "happy_emoji.png")
let imageView = UIImageView(image: image!)
imageView.frame = faceBoundingBoxOnScreen
showCamera.addSubview(imageView)
let newDrawings = [CAShapeLayer]()
return newDrawings
})
self.drawings = facesBoundingBoxes
}
private func clearDrawings() {
showCamera.subviews.forEach({ $0.removeFromSuperview() })
}
And this is the code I use to save the images:
@IBAction func onPhotoTaken(_ sender: Any) {
let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
self.photoOutput.capturePhoto(with: settings, delegate: self)
UIGraphicsBeginImageContextWithOptions(showCamera.frame.size, false, 0.0)
if let context = UIGraphicsGetCurrentContext() {
showCamera.layer.render(in: context)
}
let outputImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
let topImage = outputImage
let bottomImage = imageReciber
let size = CGSize(width: topImage!.size.width, height: topImage!.size.height + bottomImage.size.height)
UIGraphicsBeginImageContextWithOptions(size, false, 0.0)
topImage!.draw(in: CGRect(x: 0, y: 0, width: size.width, height: topImage!.size.height))
bottomImage.draw(in: CGRect(x: 0, y: topImage!.size.height, width: size.width, height: bottomImage.size.height))
let newImage:UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
UIImageWriteToSavedPhotosAlbum(newImage, nil, nil, nil)
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let imageData = photo.fileDataRepresentation()
else { return }
let image = UIImage(data: imageData)
showCamera.image = image
imageReciber = image!
UIImageWriteToSavedPhotosAlbum(showCamera.image!, nil, nil, nil)
}
I tried different solutions to delete the white background (or black, depending on whether I pass false or true in the "render" part), but I always get the emoji image with a white background.
Please help me get the emoji image with no white/black background, overlaid on the photo that was taken.
My full code is:
import UIKit
import AVFoundation
import Vision
class cameraViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate, AVCapturePhotoCaptureDelegate {
private let captureSession = AVCaptureSession()
private lazy var previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
private let videoDataOutput = AVCaptureVideoDataOutput()
private var drawings: [CAShapeLayer] = []
private let photoOutput = AVCapturePhotoOutput()
var imageReciber = UIImage()
@IBOutlet weak var showCamera: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
self.addCameraInput()
self.showCameraFeed()
self.getCameraFrames()
self.captureSession.startRunning()
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
self.previewLayer.frame = self.showCamera.frame
}
func captureOutput(
_ output: AVCaptureOutput,
didOutput sampleBuffer: CMSampleBuffer,
from connection: AVCaptureConnection) {
guard let frame = CMSampleBufferGetImageBuffer(sampleBuffer) else {
debugPrint("unable to get image from sample buffer")
return
}
self.detectFace(in: frame)
}
private func addCameraInput() {
guard let device = AVCaptureDevice.DiscoverySession(
deviceTypes: [.builtInWideAngleCamera, .builtInDualCamera, .builtInTrueDepthCamera],
mediaType: .video,
position: .back).devices.first else {
fatalError("No back camera device found, please make sure to run SimpleLaneDetection in an iOS device and not a simulator")
}
let cameraInput = try! AVCaptureDeviceInput(device: device)
self.captureSession.addInput(cameraInput)
captureSession.addOutput(photoOutput)
}
private func showCameraFeed() {
self.previewLayer.videoGravity = .resizeAspectFill
self.showCamera.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.showCamera.frame
}
private func getCameraFrames() {
self.videoDataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
self.videoDataOutput.alwaysDiscardsLateVideoFrames = true
self.videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "camera_frame_processing_queue"))
self.captureSession.addOutput(self.videoDataOutput)
guard let connection = self.videoDataOutput.connection(with: AVMediaType.video),
connection.isVideoOrientationSupported else { return }
connection.videoOrientation = .portrait
}
private func detectFace(in image: CVPixelBuffer) {
let faceDetectionRequest = VNDetectFaceLandmarksRequest(completionHandler: { (request: VNRequest, error: Error?) in
DispatchQueue.main.async {
if let results = request.results as? [VNFaceObservation] {
self.handleFaceDetectionResults(results)
} else {
self.clearDrawings()
}
}
})
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: image, orientation: .leftMirrored, options: [:])
try? imageRequestHandler.perform([faceDetectionRequest])
}
private func handleFaceDetectionResults(_ observedFaces: [VNFaceObservation]) {
self.clearDrawings()
let facesBoundingBoxes: [CAShapeLayer] = observedFaces.flatMap({ (observedFace: VNFaceObservation) -> [CAShapeLayer] in
let faceBoundingBoxOnScreen = self.previewLayer.layerRectConverted(fromMetadataOutputRect: observedFace.boundingBox)
let image = UIImage(named: "happy_emoji.png")
let imageView = UIImageView(image: image!)
imageView.frame = faceBoundingBoxOnScreen
showCamera.addSubview(imageView)
let newDrawings = [CAShapeLayer]()
return newDrawings
})
self.drawings = facesBoundingBoxes
}
private func clearDrawings() {
showCamera.subviews.forEach({ $0.removeFromSuperview() })
}
@IBAction func onPhotoTaken(_ sender: Any) {
let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])
self.photoOutput.capturePhoto(with: settings, delegate: self)
UIGraphicsBeginImageContextWithOptions(showCamera.frame.size, false, 0.0)
if let context = UIGraphicsGetCurrentContext() {
showCamera.layer.render(in: context)
}
let outputImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
let topImage = outputImage
let bottomImage = imageReciber
let size = CGSize(width: topImage!.size.width, height: topImage!.size.height + bottomImage.size.height)
UIGraphicsBeginImageContextWithOptions(size, false, 0.0)
topImage!.draw(in: CGRect(x: 0, y: 0, width: size.width, height: topImage!.size.height))
bottomImage.draw(in: CGRect(x: 0, y: topImage!.size.height, width: size.width, height: bottomImage.size.height))
let newImage:UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
UIImageWriteToSavedPhotosAlbum(newImage, nil, nil, nil)
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let imageData = photo.fileDataRepresentation()
else { return }
let image = UIImage(data: imageData)
// Add the image captured from the buffer to the imageView and give it a slightly rounded border so it looks good.
showCamera.image = image
imageReciber = image!
UIImageWriteToSavedPhotosAlbum(showCamera.image!, nil, nil, nil)
}
}
Thank you in advance.
After looking more calmly into the problem, I've discovered how to solve it.
The problem was that I was trying to render the image to a file before getting it from the video stream. To solve this, I created a new function that executes after the image is taken, and now everything works flawlessly.
func saveEmoji() {
showCamera.backgroundColor = UIColor.clear
UIGraphicsBeginImageContextWithOptions(showCamera.frame.size, true, 0.0)
if let context = UIGraphicsGetCurrentContext() {
showCamera.layer.render(in: context)
}
let outputImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
var topImage = outputImage
UIImageWriteToSavedPhotosAlbum(topImage!, nil, nil, nil)
topImage = nil
}
The function is called after the first image is saved:
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let imageData = photo.fileDataRepresentation()
else { return }
let image = UIImage(data: imageData)
showCamera.image = image
imageReciber = image!
UIImageWriteToSavedPhotosAlbum(showCamera.image!, nil, nil, nil)
saveEmoji()
}

AVCapturePhotoOutput iOS Camera Super Dark

I have an app setup to use the camera for a photo (on a timer basis) to detect the presence of a face. The detection process works fairly well when I feed the app a photo that I have added to assets. However, when I attempt to use the output of the camera directly or even after saving the image to a file, the resulting image is so dark that the face recognition is completely unreliable.
If I display the image as seen by the camera, it looks correct. I captured the following two images - one from the camera as seen live, the other of the same view after the image was created from AVCapturePhotoOutput. The same darkness happens if I simply display the captured image in an image view.
Note the comment: "I put the breakpoint here and took a screen shot". Then I took the second screen shot when the code completed. These were taken in HIGH light.
Here's the basic code:
class CRSFaceRecognitionViewController: UIViewController, UIImagePickerControllerDelegate {
var sentBy : String?
//timers
var faceTimer : Timer?
var frvcTimer : Timer?
//capture
var captureSession = AVCaptureSession()
var settings = AVCapturePhotoSettings()
var backCamera : AVCaptureDevice?
var frontCamera : AVCaptureDevice?
var currentCamera : AVCaptureDevice?
var photoOutput : AVCapturePhotoOutput?
var cameraPreviewLayer : AVCaptureVideoPreviewLayer?
var image : UIImage?
var outputImage : UIImage?
@IBOutlet weak var imageView: UIImageView!
//MARK: - Setup
override func viewDidLoad() {
super.viewDidLoad()
}//viewDidLoad
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(true)
}//viewWillAppear
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(true)
//check for camera
if (UIImagePickerController.isSourceTypeAvailable(UIImagePickerControllerSourceType.camera)) {
setupCaptureSession()
setupDevices()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
photoOutput?.capturePhoto(with:settings, delegate: self)
} else {
print("Camera not present")
}
}//viewDidAppear
//MARK: - Video
@objc func showFaceRecognitionViewController() {
//all this does is present the image in a new ViewController imageView
performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
}//showThePhotoView
func setupCaptureSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}//setupCaptureSession
func setupDevices() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
let devices = deviceDiscoverySession.devices
for device in devices {
if device.position == AVCaptureDevice.Position.back {
backCamera = device
} else if device.position == AVCaptureDevice.Position.front {
frontCamera = device
}//if else
}//for in
currentCamera = frontCamera
}//setupDevices
func setupInputOutput() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: {(success, error) in
print("in photoOutput completion handler")
})
captureSession.addOutput(photoOutput!)
} catch {
print("Error creating AVCaptureDeviceInput:", error)
}//do catch
}//setupInputOutput
func setupPreviewLayer() {
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session : captureSession)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewLayer?.frame = view.frame
view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}//setupPreviewLayer
func startRunningCaptureSession() {
captureSession.startRunning()
}//startRunningCaptureSession
//MARK: - Segue
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
if segue.identifier == "showSavedCameraPhoto" {
let controller = segue.destination as! JustToSeeThePhotoViewController
controller.inImage = outputImage
}//if segue
}//prepare
//MARK: - Look for Faces
func findTheFaces() {
let myView : UIView = self.view
guard let outImage = outputImage else {return}
let imageView = UIImageView(image: outImage)
imageView.contentMode = .scaleAspectFit
let scaledHeight = myView.frame.width / outImage.size.width * outImage.size.height
imageView.frame = CGRect(x: 0, y: 0, width: myView.frame.width, height: myView.frame.height)
imageView.backgroundColor = UIColor.blue
myView.addSubview(imageView)
let request = VNDetectFaceRectanglesRequest { (req, err) in
if let err = err {
print("VNDetectFaceRectanglesRequest failed to run:", err)
return
}//if let err
print(req.results ?? "req.results is empty")
req.results?.forEach({ (res) in
DispatchQueue.main.async {
guard let faceObservation = res as? VNFaceObservation else {return}
let x = myView.frame.width * faceObservation.boundingBox.origin.x
let width = myView.frame.width * faceObservation.boundingBox.width
let height = scaledHeight * faceObservation.boundingBox.height
let y = scaledHeight * (1 - faceObservation.boundingBox.origin.y) - height
let redView = UIView()
redView.backgroundColor = .red
redView.alpha = 0.4
redView.frame = CGRect(x: x, y: y, width: width, height: height)
myView.addSubview(redView)
print("faceObservation bounding box:")
print(faceObservation.boundingBox)
//if you get here, then you have a face bounding box
}//main
})//forEach block
}//let request
guard let cgImage = outImage.cgImage else {return}
DispatchQueue.global(qos: .utility).async {
let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
do {
try handler.perform([request])
print("handler request was successful")
self.performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
} catch let reqErr {
print("Failed to perform request:", reqErr)
}
}//DispatchQueue
}//findTheFaces
//MARK: - Memory
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}//didReceiveMemoryWarning
}//class
extension CRSFaceRecognitionViewController : AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
if let imageData = photo.fileDataRepresentation() {
print(imageData)
outputImage = UIImage(data : imageData)
//
//I put breakpoint here and took a screen shot
//
if let outImage = outputImage?.updateImageOrientionUpSide() {
self.outputImage = outImage
}
DispatchQueue.main.async {
self.findTheFaces()
}
}//if let imageData
}//photoOutput
}//extension
extension UIImage {
//you need to do this to ensure that the image is in portrait mode
//the face recognition method will not work if the face is horizontal
func updateImageOrientionUpSide() -> UIImage? {
if self.imageOrientation == .up {
return self
}
UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
self.draw(in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
if let normalizedImage:UIImage = UIGraphicsGetImageFromCurrentImageContext() {
UIGraphicsEndImageContext()
return normalizedImage
}
UIGraphicsEndImageContext()
return nil
}//updateImageOrientionUpSide
}//image
I must be doing something wrong with the camera capture. Any help would be appreciated. Swift 4, iOS 11.2.5, Xcode 9.2
I would try adding a delay between startRunningCaptureSession() and photoOutput?.capturePhoto(with:settings, delegate: self)
For example,
DispatchQueue.main.asyncAfter(deadline: .now() + .seconds(4), execute: {
// take a photo
startRunningCaptureSession()
photoOutput?.capturePhoto(with:settings, delegate: self)
})
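A fixed delay is only a guess at how long the session needs; you could instead trigger the capture once the session reports that it is running. A rough sketch, assuming the same captureSession, photoOutput and settings properties as in the question, using the AVCaptureSessionDidStartRunning notification:
// Register before starting the session, then capture once it is actually running
NotificationCenter.default.addObserver(self,
    selector: #selector(sessionDidStartRunning),
    name: .AVCaptureSessionDidStartRunning,
    object: captureSession)
startRunningCaptureSession()

@objc func sessionDidStartRunning(_ notification: Notification) {
    photoOutput?.capturePhoto(with: settings, delegate: self)
    NotificationCenter.default.removeObserver(self, name: .AVCaptureSessionDidStartRunning, object: captureSession)
}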
It appears as though I had too many async pieces. I broke the code into separate functions for each major piece, async or not, and put them all into a DispatchGroup. That seems to have solved the issue.

Value of type 'StorageReference' has no member 'data'?

I am trying to implement a photo picker in my app. For some reason I receive the following errors in two functions: Value of type 'StorageReference' has no member 'data' and Value of type 'StorageReference' has no member 'put'. Here is my code. I am honestly not sure what the problem is. Any input would be greatly appreciated. Thanks.
import UIKit
import FirebaseStorage
import Photos
class AlbumViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {
var id: String = ""
let picker = UIImagePickerController()
@IBOutlet weak var image1: UIImageView!
let storage = Storage.storage()
override func viewDidLoad() {
super.viewDidLoad()
self.title = id
picker.delegate = self as! UIImagePickerControllerDelegate & UINavigationControllerDelegate
showPhoto()
// Do any additional setup after loading the view.
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
//confirm
func confirm() {
let myAlert = UIAlertController(title: "Firebase Storage", message: "Image Uploaded", preferredStyle: UIAlertControllerStyle.alert)
myAlert.addAction(UIAlertAction(title: "Okay", style: UIAlertActionStyle.default, handler: nil))
self.present(myAlert, animated: true, completion: nil)
}
//retrive pictures
@IBAction func accessAlbum(_ sender: UIBarButtonItem) {
picker.allowsEditing = false
picker.sourceType = .photoLibrary
picker.mediaTypes = UIImagePickerController.availableMediaTypes(for: .photoLibrary)!
picker.modalPresentationStyle = .popover
present(picker, animated: true, completion: nil)
picker.popoverPresentationController?.barButtonItem = sender
}
//for image size
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
print(info)
var selectedImageFromPicker: UIImage?
if let editedImage = info["UIImagePickerControllerEditedImage"] as? UIImage
{
selectedImageFromPicker = editedImage
}else if let originalImage = info["UIImagePickerControllerOriginalImage"] as? UIImage{
selectedImageFromPicker = originalImage
}
if let selectedImage = selectedImageFromPicker
{
image1.image = selectedImage.resizedImageWithinRect(rectSize: CGSize(width: 300, height: 300))
image1.contentMode = .scaleAspectFit //3
}
dismiss(animated: true, completion: nil)
}
@IBAction func savePic(_ sender: Any) {
var data = Data()
data = UIImagePNGRepresentation(image1.image!)! as Data
// set upload path
let imageName = "Photos/" + id + ".png"
// ---- Upload the image to firebase storage
let storageRef = storage.reference(withPath: imageName)
let metaData = StorageMetadata()
metaData.contentType = "image/png"
storageRef.put(data, metadata: metaData){(metaData,error) in //ERROR HERE
if let error = error {
print(error.localizedDescription)
return
}else{
self.confirm()
}
}
}
func showPhoto() {
let imageName = "Photos/" + id + ".png"
// Create a storage reference path with image name
let storageRef = storage.reference(withPath: imageName)
storageRef.data(withMaxSize: 3 * 1024 * 1024) { (data, error) -> Void in // ERROR HERE
// Create a UIImage, add it to the array
if let imgData = data {
let image = UIImage(data: imgData)
DispatchQueue.main.async {
self.myImageView.image = image
}
//
} else {
print ("no image data")
}
}
}
}
extension UIImage {
/// Returns a image that fills in newSize
func resizedImage(newSize: CGSize) -> UIImage {
// Guard newSize is different
guard self.size != newSize else { return self }
UIGraphicsBeginImageContextWithOptions(newSize, false, 0.0);
// self.drawInRect(CGRect(0, 0, newSize.width, newSize.height))
self.draw(in: CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height))
let newImage: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return newImage
}
/// Returns a resized image that fits in rectSize, keeping its aspect ratio
/// Note that the new image size is not rectSize, but within it.
func resizedImageWithinRect(rectSize: CGSize) -> UIImage {
let widthFactor = size.width / rectSize.width
let heightFactor = size.height / rectSize.height
var resizeFactor = widthFactor
if size.height > size.width {
resizeFactor = heightFactor
}
let newSize = CGSize(width: size.width/resizeFactor, height: size.height/resizeFactor)
// CGSize(size.width/resizeFactor, size.height/resizeFactor)
let resized = resizedImage(newSize: newSize)
return resized
}
}
StorageReference has no methods called put or data.
Perhaps you were trying to use putData and getData?
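For reference, a minimal sketch of what the two calls look like with those methods, reusing the storageRef, data and metaData from the question:
// Upload
storageRef.putData(data, metadata: metaData) { metadata, error in
    if let error = error {
        print(error.localizedDescription)
    } else {
        self.confirm()
    }
}

// Download, up to 3 MB
storageRef.getData(maxSize: 3 * 1024 * 1024) { data, error in
    if let imgData = data {
        self.image1.image = UIImage(data: imgData)
    } else {
        print("no image data")
    }
}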

Back camera doesn't really work in Swift

I'm trying to create a custom camera in my app. It's really simple: I want to take a picture and send the captured image to the next view controller. However, I don't know why, but everything is fine when I take the picture with the front camera, while with the back camera it's not.
When I press the button to capture the photo, there is a delay of maybe 7 seconds, and the app crashes about one time in two.
This is my code:
var captureSession = AVCaptureSession()
var backCamera: AVCaptureDevice?
var frontCamera: AVCaptureDevice?
var currentCamera: AVCaptureDevice?
var photoOutput: AVCapturePhotoOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
var image: UIImage?
override func viewDidLoad() {
super.viewDidLoad()
setupCaptureSession()
setupDevice()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
}
func setupCaptureSession(){
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}
func setupDevice(){
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: AVCaptureDevice.Position.unspecified)
let devices = deviceDiscoverySession.devices
for device in devices {
if device.position == AVCaptureDevice.Position.back {
backCamera = device
} else if device.position == AVCaptureDevice.Position.front {
frontCamera = device
}
}
currentCamera = backCamera
}
func setupInputOutput(){
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
if #available(iOS 11.0, *) {
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
} else {
// Fallback on earlier versions
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecJPEG])], completionHandler: nil)
}
captureSession.addOutput(photoOutput!)
} catch {
print(error)
}
}
func setupPreviewLayer(){
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewLayer?.frame = CGRect(x: 0, y: 0, width: self.cameraView.frame.size.width, height: self.cameraView.frame.size.height)
cameraView.layer.addSublayer(cameraPreviewLayer!)
//self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}
func startRunningCaptureSession(){
captureSession.startRunning()
}
@IBAction func switchCameraAction(_ sender: Any) {
swapCamera()
}
/// Swap camera and reconfigures camera session with new input
fileprivate func swapCamera() {
// Get current input
guard let input = captureSession.inputs[0] as? AVCaptureDeviceInput else { return }
// Begin new session configuration and defer commit
captureSession.beginConfiguration()
defer { captureSession.commitConfiguration() }
// Create new capture device
var newDevice: AVCaptureDevice?
if input.device.position == .back {
newDevice = captureDevice(with: .front)
isFront = true
} else {
newDevice = captureDevice(with: .back)
isFront = false
}
// Create new capture input
var deviceInput: AVCaptureDeviceInput!
do {
deviceInput = try AVCaptureDeviceInput(device: newDevice!)
} catch let error {
print(error.localizedDescription)
return
}
// Swap capture device inputs
captureSession.removeInput(input)
captureSession.addInput(deviceInput)
}
fileprivate func captureDevice(with position: AVCaptureDevice.Position) -> AVCaptureDevice? {
let devices = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: AVCaptureDevice.Position.unspecified).devices
for device in devices {
if device.position == position {
return device
}
}
return nil
}
func crop(_ image: UIImage, withWidth width: Double, andHeight height: Double) -> UIImage? {
if let cgImage = image.cgImage {
let contextImage: UIImage = UIImage(cgImage: cgImage)
let contextSize: CGSize = contextImage.size
var posX: CGFloat = 0.0
var posY: CGFloat = 0.0
var cgwidth: CGFloat = CGFloat(width)
var cgheight: CGFloat = CGFloat(height)
// See what size is longer and create the center off of that
if contextSize.width > contextSize.height {
posX = ((contextSize.width - contextSize.height) / 2)
posY = 0
cgwidth = contextSize.height
cgheight = contextSize.height
} else {
posX = 0
posY = ((contextSize.height - contextSize.width) / 2)
cgwidth = contextSize.width
cgheight = contextSize.width
}
let rect: CGRect = CGRect(x: posX, y: posY, width: cgwidth, height: cgheight)
// Create bitmap image from context using the rect
var croppedContextImage: CGImage? = nil
if let contextImage = contextImage.cgImage {
if let croppedImage = contextImage.cropping(to: rect) {
croppedContextImage = croppedImage
}
}
// Create a new image based on the imageRef and rotate back to the original orientation
if let croppedImage:CGImage = croppedContextImage {
let image: UIImage = UIImage(cgImage: croppedImage, scale: image.scale, orientation: image.imageOrientation)
return image
}
}
return nil
}
@IBAction func takePhotoAction(_ sender: Any) {
var settingsCamera = AVCapturePhotoSettings()
let previewPixelType = settingsCamera.availablePreviewPhotoPixelFormatTypes.first
settingsCamera.flashMode = .off
if isFront == false && hasFlash{
settingsCamera.flashMode = .on
}
let previewFormat = [kCVPixelBufferPixelFormatTypeKey as String: previewPixelType,
kCVPixelBufferWidthKey as String: 160,
kCVPixelBufferHeightKey as String: 160]
settingsCamera.previewPhotoFormat = previewFormat
photoOutput?.capturePhoto(with: settingsCamera, delegate: self)
}
}
extension UploadViewController: AVCapturePhotoCaptureDelegate {
func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
if let error = error {
print("error occure : \(error.localizedDescription)")
}
if let sampleBuffer = photoSampleBuffer,
let previewBuffer = previewPhotoSampleBuffer,
let dataImage = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewBuffer) {
let dataProvider = CGDataProvider(data: dataImage as CFData)
let cgImageRef: CGImage! = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
var orientation = UIImageOrientation(rawValue: 0)
if isFront {
orientation = UIImageOrientation.leftMirrored
} else {
orientation = UIImageOrientation.right
}
let image = UIImage(cgImage: cgImageRef, scale: 1.0, orientation: orientation!)
// var flippedImage = UIImage(CGImage: picture.CGImage!, scale: picture.scale, orientation: .leftMirrored)
let timage = crop(image, withWidth: 100, andHeight: 100)
let photoSecondVC = self.storyboard?.instantiateViewController(withIdentifier: "uploadSecondVC") as! UploadSecondViewController
photoSecondVC.imageData = timage!
photoSecondVC.isFront = isFront
self.navigationController?.pushViewController(photoSecondVC, animated: false)
} else {
print("some error here")
}
}
}
This is what I get when the app crashes (I don't know if it's related):
2017-08-25 16:50:36.125052+0200 Fitshare[93231:3349636] Failed to set (keyPath) user defined inspected property on (UITabBarItem): [ setValue:forUndefinedKey:]: this class is not key value coding-compliant for the key keyPath.
2017-08-25 16:50:36.125584+0200 Fitshare[93231:3349636] Failed to set (keyPath) user defined inspected property on (UITabBarItem): [ setValue:forUndefinedKey:]: this class is not key value coding-compliant for the key keyPath.
2017-08-25 16:50:36.300650+0200 Fitshare[93231:3349636] [MC] Lazy loading NSBundle MobileCoreServices.framework
2017-08-25 16:50:36.302462+0200 Fitshare[93231:3349636] [MC] Loaded MobileCoreServices.framework
2017-08-25 16:50:36.311211+0200 Fitshare[93231:3349636] [MC] System group container for systemgroup.com.apple.configurationprofiles path is /Users/kevinboirel/Library/Developer/CoreSimulator/Devices/B48E7503-47C0-4A75-AC5C-C3DEF6CC8507/data/Containers/Shared/SystemGroup/systemgroup.com.apple.configurationprofiles
2017-08-25 16:50:36.363022+0200 Fitshare[93231:3349636] [Snapshotting] Snapshotting a view (0x7fbb38f2b0c0, Fitshare.ALThreeCircleSpinner) that has not been rendered at least once requires afterScreenUpdates:YES.
2017-08-25 16:50:36.469416+0200 Fitshare[93231:3349636] +[CATransaction synchronize] called within transaction
2017-08-25 16:50:36.469800+0200 Fitshare[93231:3349636] +[CATransaction synchronize] called within transaction
And the weird fact is that I get this error message in Xcode:
You need two protocol delegates: UIImagePickerControllerDelegate and UINavigationControllerDelegate.
// MARK: - Global Declaration
@IBOutlet var imgProfile: UIImageView!
var imagePicker = UIImagePickerController()
// MARK: - Camera Methods
func PickingImageFromCamera()
{
let picker = UIImagePickerController()
picker.delegate = self
picker.allowsEditing = false
picker.sourceType = .camera
picker.cameraCaptureMode = .photo
present(picker, animated: true, completion: nil)
}
//----------------------------------
func imagePickerController(_ picker: UIImagePickerController,
didFinishPickingMediaWithInfo info: [String : Any])
{
if let pickedImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
imgProfile.contentMode = .scaleToFill
imgProfile.image = pickedImage
}
dismiss(animated: true, completion: nil)
}
//----------------------------------
func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
dismiss(animated: true, completion: nil)
}
//----------------------------------
I think this code will help you...
It was due to the UploadSecondViewController...
I just resized the picture sent to the second ViewController to a smaller size, and the app is now OK!
Thanks for your answers!
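For anyone hitting the same thing, a downscaling helper along these lines (a hypothetical sketch, not the exact code used, with maxDimension chosen arbitrarily) is enough before handing the image to the next view controller:
func downscaled(_ image: UIImage, maxDimension: CGFloat = 1024) -> UIImage {
    let largestSide = max(image.size.width, image.size.height)
    guard largestSide > maxDimension else { return image }
    let scale = maxDimension / largestSide
    let newSize = CGSize(width: image.size.width * scale, height: image.size.height * scale)
    // UIGraphicsImageRenderer redraws the image at the smaller size
    return UIGraphicsImageRenderer(size: newSize).image { _ in
        image.draw(in: CGRect(origin: .zero, size: newSize))
    }
}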

How to take UIImage of AVCaptureVideoPreviewLayer instead of AVCapturePhotoOutput capture

I want to "stream" the preview layer to my server, however, I only want specific frames to be sent. Basically, I want to take a snapshot of the AVCaptureVideoPreviewLayer, scale it down to 28*28, turn it into an intensity array, and send it to my socket layer where my python backend handles the rest.
Problem here is that AVCapturePhotoOutput's capture function is insanely slow. I can't repeatedly call the function. Not to mention it always makes a camera shutter sound haha.
The other problem is that taking a snapshot of AVCaptureVideoPreviewLayer is really difficult. Using UIGraphicsBeginImageContext almost always returns a blank/clear image.
Help a brother out, thanks!
Basically, instead of using AVCaptureVideoPreviewLayer for grabbing frames, you should use AVCaptureVideoDataOutputSampleBufferDelegate.
Here is an example:
import Foundation
import UIKit
import AVFoundation
protocol CaptureManagerDelegate: class {
func processCapturedImage(image: UIImage)
}
class CaptureManager: NSObject {
internal static let shared = CaptureManager()
weak var delegate: CaptureManagerDelegate?
var session: AVCaptureSession?
override init() {
super.init()
session = AVCaptureSession()
//setup input
let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
let input = try! AVCaptureDeviceInput(device: device)
session?.addInput(input)
//setup output
let output = AVCaptureVideoDataOutput()
output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as AnyHashable: kCVPixelFormatType_32BGRA]
output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
session?.addOutput(output)
}
func statSession() {
session?.startRunning()
}
func stopSession() {
session?.stopRunning()
}
func getImageFromSampleBuffer(sampleBuffer: CMSampleBuffer) ->UIImage? {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return nil
}
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else {
return nil
}
guard let cgImage = context.makeImage() else {
return nil
}
let image = UIImage(cgImage: cgImage, scale: 1, orientation:.right)
CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
return image
}
}
extension CaptureManager: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else {
return
}
delegate?.processCapturedImage(image: outputImage)
}
}
Update: to process images, implement the processCapturedImage method of the CaptureManagerDelegate protocol in whichever class you want, like:
import UIKit
class ViewController: UIViewController {
@IBOutlet weak var imageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
CaptureManager.shared.statSession()
CaptureManager.shared.delegate = self
}
}
extension ViewController: CaptureManagerDelegate {
func processCapturedImage(image: UIImage) {
self.imageView.image = image
}
}
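If you then need the 28*28 intensity array mentioned in the question, one rough way to get it from the UIImage delivered to processCapturedImage (a sketch, assuming a plain 8-bit grayscale mapping) is to draw the image into a small grayscale bitmap context:
func intensityArray(from image: UIImage, side: Int = 28) -> [UInt8]? {
    guard let cgImage = image.cgImage else { return nil }
    var pixels = [UInt8](repeating: 0, count: side * side)
    let drew = pixels.withUnsafeMutableBytes { buffer -> Bool in
        guard let context = CGContext(data: buffer.baseAddress,
            width: side, height: side,
            bitsPerComponent: 8, bytesPerRow: side,
            space: CGColorSpaceCreateDeviceGray(),
            bitmapInfo: CGImageAlphaInfo.none.rawValue) else { return false }
        // Drawing into the small grayscale context downscales and converts to intensities in one step
        context.draw(cgImage, in: CGRect(x: 0, y: 0, width: side, height: side))
        return true
    }
    return drew ? pixels : nil
}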
@ninjaproger's answer was great! This is simply a Swift 4 version of that answer, for future reference.
import UIKit
import AVFoundation
var customPreviewLayer: AVCaptureVideoPreviewLayer?
class ViewController: UIViewController {
@IBOutlet weak var imageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
CaptureManager.shared.statSession()
CaptureManager.shared.delegate = self
}
}
extension ViewController: CaptureManagerDelegate {
func processCapturedImage(image: UIImage) {
self.imageView.image = image
}
}
protocol CaptureManagerDelegate: class {
func processCapturedImage(image: UIImage)
}
class CaptureManager: NSObject {
internal static let shared = CaptureManager()
weak var delegate: CaptureManagerDelegate?
var session: AVCaptureSession?
override init() {
super.init()
session = AVCaptureSession()
//setup input
let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
let input = try! AVCaptureDeviceInput(device: device!)
session?.addInput(input)
//setup output
let output = AVCaptureVideoDataOutput()
output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
session?.addOutput(output)
}
func statSession() {
session?.startRunning()
}
func stopSession() {
session?.stopRunning()
}
func getImageFromSampleBuffer(sampleBuffer: CMSampleBuffer) ->UIImage? {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return nil
}
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else {
return nil
}
guard let cgImage = context.makeImage() else {
return nil
}
let image = UIImage(cgImage: cgImage, scale: 1, orientation:.right)
CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
return image
}
}
extension CaptureManager: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else {
return
}
delegate?.processCapturedImage(image: outputImage)
}
}
Details
Xcode 10.2.1 (10E1001), Swift 5
Features
This solution allows you:
to check camera access
to select the front or back camera
to show an alert with a link to the app's Settings page if there is no access to the camera
to take a photo
to play the standard photo capture sound
Solution
CameraService
import UIKit
import AVFoundation
import Vision
class CameraService: NSObject {
private weak var previewView: UIView?
private(set) var cameraIsReadyToUse = false
private let session = AVCaptureSession()
private weak var previewLayer: AVCaptureVideoPreviewLayer?
private lazy var sequenceHandler = VNSequenceRequestHandler()
private lazy var capturePhotoOutput = AVCapturePhotoOutput()
private lazy var dataOutputQueue = DispatchQueue(label: "FaceDetectionService",
qos: .userInitiated, attributes: [],
autoreleaseFrequency: .workItem)
private var captureCompletionBlock: ((UIImage) -> Void)?
private var preparingCompletionHandler: ((Bool) -> Void)?
private var snapshotImageOrientation = UIImage.Orientation.upMirrored
private var cameraPosition = AVCaptureDevice.Position.front {
didSet {
switch cameraPosition {
case .front: snapshotImageOrientation = .upMirrored
case .unspecified, .back: fallthrough
@unknown default: snapshotImageOrientation = .up
}
}
}
func prepare(previewView: UIView,
cameraPosition: AVCaptureDevice.Position,
completion: ((Bool) -> Void)?) {
self.previewView = previewView
self.preparingCompletionHandler = completion
self.cameraPosition = cameraPosition
checkCameraAccess { allowed in
if allowed { self.setup() }
completion?(allowed)
self.preparingCompletionHandler = nil
}
}
private func setup() { configureCaptureSession() }
func start() { if cameraIsReadyToUse { session.startRunning() } }
func stop() { session.stopRunning() }
}
extension CameraService {
private func askUserForCameraPermission(_ completion: ((Bool) -> Void)?) {
AVCaptureDevice.requestAccess(for: AVMediaType.video) { (allowedAccess) -> Void in
DispatchQueue.main.async { completion?(allowedAccess) }
}
}
private func checkCameraAccess(completion: ((Bool) -> Void)?) {
askUserForCameraPermission { [weak self] allowed in
guard let self = self, let completion = completion else { return }
self.cameraIsReadyToUse = allowed
if allowed {
completion(true)
} else {
self.showDisabledCameraAlert(completion: completion)
}
}
}
private func configureCaptureSession() {
guard let previewView = previewView else { return }
// Define the capture device we want to use
guard let camera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: cameraPosition) else {
let error = NSError(domain: "", code: 0, userInfo: [NSLocalizedDescriptionKey : "No front camera available"])
show(error: error)
return
}
// Connect the camera to the capture session input
do {
try camera.lockForConfiguration()
defer { camera.unlockForConfiguration() }
if camera.isFocusModeSupported(.continuousAutoFocus) {
camera.focusMode = .continuousAutoFocus
}
if camera.isExposureModeSupported(.continuousAutoExposure) {
camera.exposureMode = .continuousAutoExposure
}
let cameraInput = try AVCaptureDeviceInput(device: camera)
session.addInput(cameraInput)
} catch {
show(error: error as NSError)
return
}
// Create the video data output
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
// Add the video output to the capture session
session.addOutput(videoOutput)
let videoConnection = videoOutput.connection(with: .video)
videoConnection?.videoOrientation = .portrait
// Configure the preview layer
let previewLayer = AVCaptureVideoPreviewLayer(session: session)
previewLayer.videoGravity = .resizeAspectFill
previewLayer.frame = previewView.bounds
previewView.layer.insertSublayer(previewLayer, at: 0)
self.previewLayer = previewLayer
}
}
extension CameraService: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard captureCompletionBlock != nil,
let outputImage = UIImage(sampleBuffer: sampleBuffer, orientation: snapshotImageOrientation) else { return }
DispatchQueue.main.async { [weak self] in
guard let self = self else { return }
if let captureCompletionBlock = self.captureCompletionBlock{
captureCompletionBlock(outputImage)
AudioServicesPlayAlertSound(SystemSoundID(1108))
}
self.captureCompletionBlock = nil
}
}
}
// Navigation
extension CameraService {
private func show(alert: UIAlertController) {
DispatchQueue.main.async {
UIApplication.topViewController?.present(alert, animated: true, completion: nil)
}
}
private func showDisabledCameraAlert(completion: ((Bool) -> Void)?) {
let alertVC = UIAlertController(title: "Enable Camera Access",
message: "Please provide access to your camera",
preferredStyle: .alert)
alertVC.addAction(UIAlertAction(title: "Go to Settings", style: .default, handler: { action in
guard let previewView = self.previewView,
let settingsUrl = URL(string: UIApplication.openSettingsURLString),
UIApplication.shared.canOpenURL(settingsUrl) else { return }
UIApplication.shared.open(settingsUrl) { [weak self] _ in
guard let self = self else { return }
self.prepare(previewView: previewView,
cameraPosition: self.cameraPosition,
completion: self.preparingCompletionHandler)
}
}))
alertVC.addAction(UIAlertAction(title: "Cancel", style: .cancel, handler: { _ in completion?(false) }))
show(alert: alertVC)
}
private func show(error: NSError) {
let alertVC = UIAlertController(title: "Error", message: error.localizedDescription, preferredStyle: .alert)
alertVC.addAction(UIAlertAction(title: "Ok", style: .cancel, handler: nil ))
show(alert: alertVC)
}
}
extension CameraService: AVCapturePhotoCaptureDelegate {
func capturePhoto(completion: ((UIImage) -> Void)?) { captureCompletionBlock = completion }
}
Helpers
///////////////////////////////////////////////////////////////////////////
import UIKit
import AVFoundation
extension UIImage {
convenience init?(sampleBuffer: CMSampleBuffer, orientation: UIImage.Orientation = .upMirrored) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height,
bitsPerComponent: 8, bytesPerRow: bytesPerRow,
space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else { return nil }
guard let cgImage = context.makeImage() else { return nil }
self.init(cgImage: cgImage, scale: 1, orientation: orientation)
}
}
///////////////////////////////////////////////////////////////////////////
import UIKit
extension UIApplication {
private class func topViewController(controller: UIViewController? = UIApplication.shared.keyWindow?.rootViewController) -> UIViewController? {
if let navigationController = controller as? UINavigationController {
return topViewController(controller: navigationController.visibleViewController)
}
if let tabController = controller as? UITabBarController {
if let selected = tabController.selectedViewController {
return topViewController(controller: selected)
}
}
if let presented = controller?.presentedViewController {
return topViewController(controller: presented)
}
return controller
}
class var topViewController: UIViewController? { return topViewController() }
}
Usage
private lazy var cameraService = CameraService()
//...
cameraService.prepare(previewView: previewView, cameraPosition: .front) { [weak self] success in
if success { self?.cameraService.start() }
}
//...
cameraService.capturePhoto { [weak self] image in
//...
}
Full sample
import UIKit
class ViewController: UIViewController {
private lazy var cameraService = CameraService()
private weak var button: UIButton?
private weak var imagePreviewView: UIImageView?
private var cameraInited = false
private enum ButtonState { case cancel, makeSnapshot }
private var buttonState = ButtonState.makeSnapshot {
didSet {
switch buttonState {
case .makeSnapshot: button?.setTitle("Make a photo", for: .normal)
case .cancel: button?.setTitle("Cancel", for: .normal)
}
}
}
override func viewDidLoad() {
super.viewDidLoad()
setupCameraPreviewView()
setupButton()
// Do any additional setup after loading the view.
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
cameraService.start()
}
override func viewDidDisappear(_ animated: Bool) {
super.viewDidDisappear(animated)
cameraService.stop()
}
// Ensure that the interface stays locked in Portrait.
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
return .portrait
}
// Ensure that the interface stays locked in Portrait.
override var preferredInterfaceOrientationForPresentation: UIInterfaceOrientation {
return .portrait
}
}
extension ViewController {
private func setupCameraPreviewView() {
let previewView = UIView(frame: .zero)
view.addSubview(previewView)
previewView.translatesAutoresizingMaskIntoConstraints = false
previewView.topAnchor.constraint(equalTo: view.topAnchor).isActive = true
previewView.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
previewView.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
previewView.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
previewView.layoutIfNeeded()
cameraService.prepare(previewView: previewView, cameraPosition: .front) { [weak self] success in
if success { self?.cameraService.start() }
}
}
private func setupButton() {
let button = UIButton(frame: .zero)
button.addTarget(self, action: #selector(buttonTouchedUpInside), for: .touchUpInside)
view.addSubview(button)
self.button = button
buttonState = .makeSnapshot
button.translatesAutoresizingMaskIntoConstraints = false
button.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
button.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
button.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
button.heightAnchor.constraint(equalToConstant: 44).isActive = true
button.backgroundColor = UIColor.black.withAlphaComponent(0.4)
}
private func show(image: UIImage) {
let imageView = UIImageView(frame: .zero)
view.insertSubview(imageView, at: 1)
imagePreviewView = imageView
imageView.translatesAutoresizingMaskIntoConstraints = false
imageView.topAnchor.constraint(equalTo: view.topAnchor).isActive = true
imageView.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
imageView.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
imageView.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
imageView.image = image
}
@objc func buttonTouchedUpInside() {
switch buttonState {
case .makeSnapshot:
cameraService.capturePhoto { [weak self] image in
guard let self = self else {return }
self.cameraService.stop()
self.buttonState = .cancel
self.show(image: image)
}
case .cancel:
buttonState = .makeSnapshot
cameraService.start()
imagePreviewView?.removeFromSuperview()
}
}
}
