I have created a custom camera and implemented the code below to crop the captured image. I show guides in the preview layer, and I want to crop the part of the image that appears inside that guide area.
func imageByCropToRect(rect:CGRect, scale:Bool) -> UIImage {
var rect = rect
var scaleFactor: CGFloat = 1.0
if scale {
scaleFactor = self.scale
rect.origin.x *= scaleFactor
rect.origin.y *= scaleFactor
rect.size.width *= scaleFactor
rect.size.height *= scaleFactor
}
var image: UIImage? = nil;
if rect.size.width > 0 && rect.size.height > 0 {
let imageRef = self.cgImage!.cropping(to: rect)
image = UIImage(cgImage: imageRef!, scale: scaleFactor, orientation: self.imageOrientation)
}
return image!
}
This code works fine and gives an exact crop when the line below is commented out. However, I want the camera preview to fill the screen, so I have to use that line, and then the cropped image comes out sort of zoomed out.
(self.previewLayer as! AVCaptureVideoPreviewLayer).videoGravity = AVLayerVideoGravity.resizeAspectFill
How do I solve this issue? Is the cropping code wrong?
Here is the full Class code
import UIKit
import AVFoundation
class CameraViewController: UIViewController {
@IBOutlet weak var guideImageView: UIImageView!
@IBOutlet weak var guidesView: UIView!
@IBOutlet weak var cameraPreviewView: UIView!
@IBOutlet weak var cameraButtonView: UIView!
@IBOutlet weak var captureButton: UIButton!
var captureSession = AVCaptureSession()
var previewLayer: CALayer!
var captureDevice: AVCaptureDevice!
/// Set to true when the user taps the capture photo button.
var takePhoto = false
override func viewDidLoad() {
super.viewDidLoad()
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
captureSession = AVCaptureSession()
previewLayer = CALayer()
takePhoto = false
requestAuthorization()
}
private func userinteractionToButton(_ interaction: Bool) {
captureButton.isEnabled = interaction
}
/// Requests camera authorization; if authorized, starts the camera.
private func requestAuthorization() {
switch AVCaptureDevice.authorizationStatus(for: AVMediaType.video) {
case .authorized:
prepareCamera()
case .denied, .restricted, .notDetermined:
AVCaptureDevice.requestAccess(for: AVMediaType.video, completionHandler: { (granted) in
if !Thread.isMainThread {
DispatchQueue.main.async {
if granted {
self.prepareCamera()
} else {
let alert = UIAlertController(title: "unable_to_access_the_Camera", message: "to_enable_access_go_to_setting_privacy_camera_and_turn_on_camera_access_for_this_app", preferredStyle: UIAlertControllerStyle.alert)
alert.addAction(UIAlertAction(title: "ok", style: .default, handler: {_ in
self.navigationController?.popToRootViewController(animated: true)
}))
self.present(alert, animated: true, completion: nil)
}
}
} else {
if granted {
self.prepareCamera()
} else {
let alert = UIAlertController(title: "unable_to_access_the_Camera", message: "to_enable_access_go_to_setting_privacy_camera_and_turn_on_camera_access_for_this_app", preferredStyle: UIAlertControllerStyle.alert)
alert.addAction(UIAlertAction(title: "ok", style: .default, handler: {_ in
self.navigationController?.popToRootViewController(animated: true)
}))
self.present(alert, animated: true, completion: nil)
}
}
})
}
}
/// Checks whether the primary camera is available; if found, calls the method that assigns the available device to the AVCaptureDevice property.
private func prepareCamera() {
// Resets the session.
self.captureSession.sessionPreset = AVCaptureSession.Preset.photo
if #available(iOS 10.0, *) {
let availableDevices = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices
self.assignCamera(availableDevices)
} else {
// Fallback on earlier versions
// development, need to test this on iOS 8
if let availableDevices = AVCaptureDevice.default(for: AVMediaType.video) {
self.assignCamera([availableDevices])
} else {
self.showAlert()
}
}
}
/// Assigns the AVCaptureDevice to the corresponding property and begins the session.
///
/// - Parameter availableDevices: [AVCaptureDevice]
private func assignCamera(_ availableDevices: [AVCaptureDevice]) {
if availableDevices.first != nil {
captureDevice = availableDevices.first
beginSession()
} else {
self.showAlert()
}
}
/// Configures the camera settings and begins the session; this function is responsible for showing the preview on the UI.
private func beginSession() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput)
} catch {
print(error.localizedDescription)
}
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer = previewLayer
self.cameraPreviewView.layer.addSublayer(self.previewLayer)
self.previewLayer.frame = self.view.layer.frame
self.previewLayer.frame.origin.y = +self.cameraPreviewView.frame.origin.y
(self.previewLayer as! AVCaptureVideoPreviewLayer).videoGravity = AVLayerVideoGravity.resizeAspectFill
self.previewLayer.masksToBounds = true
self.cameraPreviewView.clipsToBounds = true
captureSession.startRunning()
self.view.bringSubview(toFront: self.cameraPreviewView)
self.view.bringSubview(toFront: self.cameraButtonView)
self.view.bringSubview(toFront: self.guidesView)
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [((kCVPixelBufferPixelFormatTypeKey as NSString) as String):NSNumber(value:kCVPixelFormatType_32BGRA)]
dataOutput.alwaysDiscardsLateVideoFrames = true
if captureSession.canAddOutput(dataOutput) {
captureSession.addOutput(dataOutput)
}
captureSession.commitConfiguration()
let queue = DispatchQueue(label: "com.letsappit.camera")
dataOutput.setSampleBufferDelegate(self, queue: queue)
self.userinteractionToButton(true)
}
/// Get the UIImage from the given CMSampleBuffer.
///
/// - Parameter buffer: CMSampleBuffer
/// - Returns: UIImage?
func getImageFromSampleBuffer(buffer:CMSampleBuffer, orientation: UIImageOrientation) -> UIImage? {
if let pixelBuffer = CMSampleBufferGetImageBuffer(buffer) {
let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
let context = CIContext()
let imageRect = CGRect(x: 0, y: 0, width: CVPixelBufferGetWidth(pixelBuffer), height: CVPixelBufferGetHeight(pixelBuffer))
if let image = context.createCGImage(ciImage, from: imageRect) {
return UIImage(cgImage: image, scale: UIScreen.main.scale, orientation: orientation)
}
}
return nil
}
/// This function will destroy the capture session.
func stopCaptureSession() {
self.captureSession.stopRunning()
if let inputs = captureSession.inputs as? [AVCaptureDeviceInput] {
for input in inputs {
self.captureSession.removeInput(input)
}
}
}
func showAlert() {
let alert = UIAlertController(title: "Unable to access the camera", message: "It appears that either your device doesn't have camera or its broken", preferredStyle: .alert)
alert.addAction(UIAlertAction(title: "cancel", style: .cancel, handler: {_ in
self.navigationController?.dismiss(animated: true, completion: nil)
}))
self.present(alert, animated: true, completion: nil)
}
@IBAction func didTapClick(_ sender: Any) {
userinteractionToButton(false)
takePhoto = true
}
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
if segue.identifier == "showImage" {
let vc = segue.destination as! ShowImageViewController
vc.image = sender as! UIImage
}
}
}
extension CameraViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ captureOutput: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
if connection.isVideoOrientationSupported {
connection.videoOrientation = .portrait
}
if takePhoto {
takePhoto = false
// Device rotation must be unlocked for this to work.
var orientation = UIImageOrientation.up
switch UIDevice.current.orientation {
case .landscapeLeft:
orientation = .left
case .landscapeRight:
orientation = .right
case .portraitUpsideDown:
orientation = .down
default:
orientation = .up
}
if let image = self.getImageFromSampleBuffer(buffer: sampleBuffer, orientation: orientation) {
DispatchQueue.main.async {
let newImage = image.imageByCropToRect(rect: self.guideImageView.frame, scale: true)
self.stopCaptureSession()
self.previewLayer.removeFromSuperlayer()
self.performSegue(withIdentifier: "showImage", sender: newImage)
}
}
}
}
}
Here is the view hierarchy image
It's not clear where the problem is. I would either use the debugger or some print statements to figure out whether the issue is with the image itself or with the view displaying it. Print out the size of the cropped image to make sure it is correct.
Then, print out the image view size in the ShowImageViewController in viewDidAppear to make sure it is correct.
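For example, a trivial sketch of those checks (assuming ShowImageViewController exposes its image view as an imageView outlet, which is not shown in the post):
// In the capture delegate, right after cropping:
let newImage = image.imageByCropToRect(rect: self.guideImageView.frame, scale: true)
print("cropped image size: \(newImage.size), scale: \(newImage.scale)")
// And in ShowImageViewController:
override func viewDidAppear(_ animated: Bool) {
    super.viewDidAppear(animated)
    print("image view size: \(imageView.frame.size)")
}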
To correct the zoomed-out crop, change your crop function to the following, which takes the image orientation into account.
func croppedInRect(rect: CGRect) -> UIImage? {
func rad(_ degree: Double) -> CGFloat {
return CGFloat(degree / 180.0 * .pi)
}
var rectTransform: CGAffineTransform
switch imageOrientation {
case .left:
rectTransform = CGAffineTransform(rotationAngle: rad(90)).translatedBy(x: 0, y: -self.size.height)
case .right:
rectTransform = CGAffineTransform(rotationAngle: rad(-90)).translatedBy(x: -self.size.width, y: 0)
case .down:
rectTransform = CGAffineTransform(rotationAngle: rad(-180)).translatedBy(x: -self.size.width, y: -self.size.height)
default:
rectTransform = .identity
}
rectTransform = rectTransform.scaledBy(x: self.scale, y: self.scale)
var cgImage = self.cgImage
if cgImage == nil{
let ciContext = CIContext()
if let ciImage = self.ciImage{
cgImage = ciContext.createCGImage(ciImage, from: ciImage.extent)
}
}
if let imageRef = cgImage?.cropping(to: rect.applying(rectTransform)){
let result = UIImage(cgImage: imageRef, scale: self.scale, orientation: self.imageOrientation)
return result
}
return nil
}
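Separately, because the preview layer uses .resizeAspectFill, the on-screen guide frame does not map 1:1 onto the captured image, so the crop rect itself usually has to be converted first. Below is a minimal sketch of that conversion, assuming previewLayer is the AVCaptureVideoPreviewLayer and guideRect is the guide view's frame expressed in the preview layer's coordinate space (extra orientation handling may still be needed on top of this):
func cropRectInImagePixels(guideRect: CGRect,
                           previewLayer: AVCaptureVideoPreviewLayer,
                           image: UIImage) -> CGRect {
    // Normalized (0...1) rect in the capture output's coordinate space;
    // this conversion already accounts for .resizeAspectFill.
    let normalized = previewLayer.metadataOutputRectConverted(fromLayerRect: guideRect)
    let pixelSize = CGSize(width: image.size.width * image.scale,
                           height: image.size.height * image.scale)
    return CGRect(x: normalized.origin.x * pixelSize.width,
                  y: normalized.origin.y * pixelSize.height,
                  width: normalized.size.width * pixelSize.width,
                  height: normalized.size.height * pixelSize.height)
}
The returned rect is already in image pixel coordinates, so it can be passed straight to cgImage.cropping(to:) without multiplying by the image scale again.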
Related
I'm using the AVCaptureDevice API to scan barcodes, and it works very well on iPhone, but very similar code doesn't work on iPad and I'm not quite sure why (it doesn't detect any barcode at all). The main differences are the size of the scan area, its position, and the orientation. I tested using an iPhone 12 mini (iOS 15 beta) and the original iPad Pro 9.7" (iOS 14.6). Not sure if that matters.
Below is the code for the scanner. Please let me know if you notice something that should be changed.
import Foundation
import AVFoundation
import UIKit
class ScannerViewController: UIViewController, AVCaptureMetadataOutputObjectsDelegate {
var captureSession: AVCaptureSession!
var previewLayer: AVCaptureVideoPreviewLayer!
var barcodeCallback: (String) -> Void
var cameraScanDismissedCallback: (Bool) -> Void
var scanned = false
var currentDevice: AVCaptureDevice!
var scanRectView: UIView!
init(barcodeCallback: @escaping (String) -> Void, cameraScanDismissedCallback: @escaping (Bool) -> Void) {
self.barcodeCallback = barcodeCallback;
self.cameraScanDismissedCallback = cameraScanDismissedCallback;
super.init(nibName: nil, bundle: nil)
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
override func viewDidLoad() {
super.viewDidLoad()
view.backgroundColor = UIColor.black
captureSession = AVCaptureSession()
guard let videoCaptureDevice = AVCaptureDevice.default(for: .video) else { return }
let videoInput: AVCaptureDeviceInput
do {
videoInput = try AVCaptureDeviceInput(device: videoCaptureDevice)
} catch {
return
}
if (captureSession.canAddInput(videoInput)) {
captureSession.addInput(videoInput)
} else {
failed()
return
}
let metadataOutput = AVCaptureMetadataOutput()
if (captureSession.canAddOutput(metadataOutput)) {
captureSession.addOutput(metadataOutput)
metadataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
metadataOutput.metadataObjectTypes = [.qr, .ean8, .ean13, .pdf417, .code128, .aztec, .code39, .code39Mod43, .code93, .dataMatrix, .face, .interleaved2of5, .itf14, .upce]
} else {
failed()
return
}
let windowSize = UIScreen.main.bounds.size
var scanSize: CGSize!;
var scanRect: CGRect!;
if(UIDevice.current.userInterfaceIdiom == .pad){
scanSize = CGSize(width:windowSize.width*1/3, height:windowSize.width*1/7);
scanRect = CGRect(x: UIScreen.main.bounds.midX - scanSize.width/2,
y: UIScreen.main.bounds.midY - scanSize.height/2,
width:scanSize.width, height:scanSize.height);
}else{
scanSize = CGSize(width:windowSize.width*2/3, height:windowSize.width*1/3);
scanRect = CGRect(x: UIScreen.main.bounds.midX - scanSize.width/2,
y: UIScreen.main.bounds.midY - scanSize.height/2,
width:scanSize.width, height:scanSize.height);
}
scanRect = CGRect(x:scanRect.origin.y/windowSize.height,
y:scanRect.origin.x/windowSize.width,
width:scanRect.size.height/windowSize.height,
height:scanRect.size.width/windowSize.width);
metadataOutput.rectOfInterest = scanRect
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
if(UIDevice.current.userInterfaceIdiom == .pad){
let orientation: UIDeviceOrientation = UIDevice.current.orientation
previewLayer.connection?.videoOrientation = {
switch (orientation) {
case .faceUp:
return .landscapeLeft
case .portrait:
return .portrait
case .landscapeRight:
return .landscapeLeft
case .landscapeLeft:
return .landscapeRight
default:
return .portrait
}
}()
}
previewLayer.frame = view.layer.bounds
previewLayer.videoGravity = .resizeAspectFill
view.layer.addSublayer(previewLayer)
scanRectView = UIView();
view.addSubview(self.scanRectView)
scanRectView.frame = CGRect(x:0, y:0, width: scanSize.width,
height: scanSize.height);
if(UIDevice.current.userInterfaceIdiom == .pad){
scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX - scanSize.width/2,
y:UIScreen.main.bounds.midY - scanSize.height/2)
}else{
scanRectView.center = CGPoint( x:UIScreen.main.bounds.midX,
y:UIScreen.main.bounds.midY)
}
scanRectView.layer.borderColor = UIColor.yellow.cgColor
scanRectView.layer.borderWidth = 5;
currentDevice = videoCaptureDevice
captureSession.startRunning()
toggleTorch(on: true)
}
func toggleTorch(on: Bool) {
guard let device = currentDevice else { return }
if device.hasTorch {
do {
try device.lockForConfiguration()
if(UIDevice.current.userInterfaceIdiom == .pad){
device.videoZoomFactor = 1.3
}else{
device.videoZoomFactor = 1.5
}
if on == true {
device.torchMode = .on
} else {
device.torchMode = .off
}
device.unlockForConfiguration()
} catch {
print("Torch could not be used")
}
} else {
print("Torch is not available")
}
}
func failed() {
let ac = UIAlertController(title: "Scanning not supported", message: "Your device does not support scanning a code from an item. Please use a device with a camera.", preferredStyle: .alert)
ac.addAction(UIAlertAction(title: "OK", style: .default))
present(ac, animated: true)
captureSession = nil
toggleTorch(on: false)
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
if (captureSession?.isRunning == false) {
captureSession.startRunning()
toggleTorch(on: true)
}
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
if (captureSession?.isRunning == true) {
captureSession.stopRunning()
toggleTorch(on: false)
}
cameraScanDismissedCallback(scanned)
}
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
captureSession.stopRunning()
if let metadataObject = metadataObjects.first {
guard let readableObject = metadataObject as? AVMetadataMachineReadableCodeObject else { return }
guard let stringValue = readableObject.stringValue else { return }
AudioServicesPlaySystemSound(SystemSoundID(kSystemSoundID_Vibrate))
found(code: stringValue)
scanned = true
}
dismiss(animated: true)
}
func found(code: String) {
print(code)
barcodeCallback(code)
}
override var prefersStatusBarHidden: Bool {
return true
}
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
if(UIDevice.current.userInterfaceIdiom == .pad){
return .landscape
}else{
return .portrait
}
}
}
Just in case anyone ran into a similar issue, check for the following:
Orientation of the video
The position of AVCaptureMetadataOutput.rectOfInterest
I'm still having an issue with the rectOfInterest not being positioned at the center, but it works. Once I figure out how to center it, I will post the solution here.
It probably isn't centered because of your navigation bar.
Try setting rectOfInterest as below. Remember that the camera uses a different coordinate system than UIView; posX and posY are in UIView coordinates:
let aimRect = CGRect(x: (posY - navBar.height) / UIScreen.main.bounds.height,
y: posX / UIScreen.main.bounds.width,
width: rectHeight / UIScreen.main.bounds.height,
height: rectWidth / UIScreen.main.bounds.width)
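An alternative sketch that avoids the manual axis swapping entirely, assuming scanRectView.frame is expressed in the preview layer's coordinate space: let the preview layer do the conversion.
captureSession.startRunning()
// Convert the on-screen scan rect into the normalized, rotated coordinate
// space that AVCaptureMetadataOutput expects; .resizeAspectFill and the video
// orientation are taken into account for you.
metadataOutput.rectOfInterest =
    previewLayer.metadataOutputRectConverted(fromLayerRect: scanRectView.frame)
Note that the conversion has to happen after the session has started (or from a session-did-start notification); before that the preview layer does not yet know the video dimensions.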
I have an app set up to use the camera for a photo (on a timer basis) to detect the presence of a face. The detection process works fairly well when I feed the app a photo that I have added to the assets. However, when I attempt to use the output of the camera directly, or even after saving the image to a file, the resulting image is so dark that the face recognition is completely unreliable.
If I display the image as seen by the camera, it looks correct. I captured the following two images - one from the camera as seen live, the other of the same view after the image was created from AVCapturePhotoOutput. The same darkness happens if I simply display the captured image in an image view.
Note the comment: "I put the breakpoint here and took a screen shot". Then I took the second screen shot when the code completed. These were taken in HIGH light.
Here's the basic code:
class CRSFaceRecognitionViewController: UIViewController, UIImagePickerControllerDelegate {
var sentBy : String?
//timers
var faceTimer : Timer?
var frvcTimer : Timer?
//capture
var captureSession = AVCaptureSession()
var settings = AVCapturePhotoSettings()
var backCamera : AVCaptureDevice?
var frontCamera : AVCaptureDevice?
var currentCamera : AVCaptureDevice?
var photoOutput : AVCapturePhotoOutput?
var cameraPreviewLayer : AVCaptureVideoPreviewLayer?
var image : UIImage?
var outputImage : UIImage?
@IBOutlet weak var imageView: UIImageView!
//MARK: - Setup
override func viewDidLoad() {
super.viewDidLoad()
}//viewDidLoad
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(true)
}//viewWillAppear
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(true)
//check for camera
if (UIImagePickerController.isSourceTypeAvailable(UIImagePickerControllerSourceType.camera)) {
setupCaptureSession()
setupDevices()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
photoOutput?.capturePhoto(with:settings, delegate: self)
} else {
print("Camera not present")
}
}//viewDidAppear
//MARK: - Video
@objc func showFaceRecognitionViewController() {
//all this does is present the image in a new ViewController imageView
performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
}//showThePhotoView
func setupCaptureSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}//setupCaptureSession
func setupDevices() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: .unspecified)
let devices = deviceDiscoverySession.devices
for device in devices {
if device.position == AVCaptureDevice.Position.back {
backCamera = device
} else if device.position == AVCaptureDevice.Position.front {
frontCamera = device
}//if else
}//for in
currentCamera = frontCamera
}//setupDevices
func setupInputOutput() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: {(success, error) in
print("in photoOutput completion handler")
})
captureSession.addOutput(photoOutput!)
} catch {
print("Error creating AVCaptureDeviceInput:", error)
}//do catch
}//setupInputOutput
func setupPreviewLayer() {
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session : captureSession)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewLayer?.frame = view.frame
view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}//setupPreviewLayer
func startRunningCaptureSession() {
captureSession.startRunning()
}//startRunningCaptureSession
//MARK: - Segue
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
if segue.identifier == "showSavedCameraPhoto" {
let controller = segue.destination as! JustToSeeThePhotoViewController
controller.inImage = outputImage
}//if segue
}//prepare
//MARK: - Look for Faces
func findTheFaces() {
let myView : UIView = self.view
guard let outImage = outputImage else {return}
let imageView = UIImageView(image: outImage)
imageView.contentMode = .scaleAspectFit
let scaledHeight = myView.frame.width / outImage.size.width * outImage.size.height
imageView.frame = CGRect(x: 0, y: 0, width: myView.frame.width, height: myView.frame.height)
imageView.backgroundColor = UIColor.blue
myView.addSubview(imageView)
let request = VNDetectFaceRectanglesRequest { (req, err) in
if let err = err {
print("VNDetectFaceRectanglesRequest failed to run:", err)
return
}//if let err
print(req.results ?? "req.results is empty")
req.results?.forEach({ (res) in
DispatchQueue.main.async {
guard let faceObservation = res as? VNFaceObservation else {return}
let x = myView.frame.width * faceObservation.boundingBox.origin.x
let width = myView.frame.width * faceObservation.boundingBox.width
let height = scaledHeight * faceObservation.boundingBox.height
let y = scaledHeight * (1 - faceObservation.boundingBox.origin.y) - height
let redView = UIView()
redView.backgroundColor = .red
redView.alpha = 0.4
redView.frame = CGRect(x: x, y: y, width: width, height: height)
myView.addSubview(redView)
print("faceObservation bounding box:")
print(faceObservation.boundingBox)
//if you get here, then you have a face bounding box
}//main
})//forEach block
}//let request
guard let cgImage = outImage.cgImage else {return}
DispatchQueue.global(qos: .utility).async {
let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
do {
try handler.perform([request])
print("handler request was successful")
self.performSegue(withIdentifier: "showSavedCameraPhoto", sender: self)
} catch let reqErr {
print("Failed to perform request:", reqErr)
}
}//DispatchQueue
}//findTheFaces
//MARK: - Memory
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}//didReceiveMemoryWarning
}//class
extension CRSFaceRecognitionViewController : AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
if let imageData = photo.fileDataRepresentation() {
print(imageData)
outputImage = UIImage(data : imageData)
//
//I put breakpoint here and took a screen shot
//
if let outImage = outputImage?.updateImageOrientionUpSide() {
self.outputImage = outImage
}
DispatchQueue.main.async {
self.findTheFaces()
}
}//if let imageData
}//photoOutput
}//extension
extension UIImage {
//you need to do this to ensure that the image is in portrait mode
//the face recognition method will not work if the face is horizontal
func updateImageOrientionUpSide() -> UIImage? {
if self.imageOrientation == .up {
return self
}
UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
self.draw(in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
if let normalizedImage:UIImage = UIGraphicsGetImageFromCurrentImageContext() {
UIGraphicsEndImageContext()
return normalizedImage
}
UIGraphicsEndImageContext()
return nil
}//updateImageOrientionUpSide
}//image
I must be doing something wrong with the camera capture. Any help would be appreciated. Swift 4, iOS 11.2.5, Xcode 9.2
I would try adding a delay between startRunningCaptureSession() and photoOutput?.capturePhoto(with: settings, delegate: self).
For example,
DispatchQueue.main.asyncAfter(deadline: .now() + .seconds(4), execute: {
// take a photo
startRunningCaptureSession()
photoOutput?.capturePhoto(with:settings, delegate: self)
})
It appears as though I had too many async pieces. I broke the code into separate functions for each major piece, async or not, and put them all into a DispatchGroup. That seems to have solved the issue.
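For reference, here is a rough sketch of that structure, reusing the setup functions from the question (the grouping itself is illustrative, not the poster's exact code): do all the setup work first, and only capture once the group reports that everything has finished.
let setupGroup = DispatchGroup()
// Synchronous configuration work.
setupGroup.enter()
setupCaptureSession()
setupDevices()
setupInputOutput()
setupGroup.leave()
// UI-related work that must happen on the main queue.
setupGroup.enter()
DispatchQueue.main.async {
    self.setupPreviewLayer()
    self.startRunningCaptureSession()
    setupGroup.leave()
}
// Capture only after every piece above has completed; an extra short delay here
// also gives auto-exposure time to settle, which helps with dark captures.
setupGroup.notify(queue: .main) {
    self.photoOutput?.capturePhoto(with: self.settings, delegate: self)
}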
I'm trying to create a custom camera in my app. It's really simple: I want to take a picture and send the captured image to the next view controller. However, I don't know why, but the camera works fine when I take the picture with the front camera, and not when I use the back camera.
When I press the button to capture the photo, there is a delay of maybe 7 seconds, and the app crashes about one time in two.
This is my code:
var captureSession = AVCaptureSession()
var backCamera: AVCaptureDevice?
var frontCamera: AVCaptureDevice?
var currentCamera: AVCaptureDevice?
var photoOutput: AVCapturePhotoOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
var image: UIImage?
override func viewDidLoad() {
super.viewDidLoad()
setupCaptureSession()
setupDevice()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
}
func setupCaptureSession(){
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}
func setupDevice(){
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: AVCaptureDevice.Position.unspecified)
let devices = deviceDiscoverySession.devices
for device in devices {
if device.position == AVCaptureDevice.Position.back {
backCamera = device
} else if device.position == AVCaptureDevice.Position.front {
frontCamera = device
}
}
currentCamera = backCamera
}
func setupInputOutput(){
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
if #available(iOS 11.0, *) {
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
} else {
// Fallback on earlier versions
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecJPEG])], completionHandler: nil)
}
captureSession.addOutput(photoOutput!)
} catch {
print(error)
}
}
func setupPreviewLayer(){
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewLayer?.frame = CGRect(x: 0, y: 0, width: self.cameraView.frame.size.width, height: self.cameraView.frame.size.height)
cameraView.layer.addSublayer(cameraPreviewLayer!)
//self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}
func startRunningCaptureSession(){
captureSession.startRunning()
}
@IBAction func switchCameraAction(_ sender: Any) {
swapCamera()
}
/// Swap camera and reconfigures camera session with new input
fileprivate func swapCamera() {
// Get current input
guard let input = captureSession.inputs[0] as? AVCaptureDeviceInput else { return }
// Begin new session configuration and defer commit
captureSession.beginConfiguration()
defer { captureSession.commitConfiguration() }
// Create new capture device
var newDevice: AVCaptureDevice?
if input.device.position == .back {
newDevice = captureDevice(with: .front)
isFront = true
} else {
newDevice = captureDevice(with: .back)
isFront = false
}
// Create new capture input
var deviceInput: AVCaptureDeviceInput!
do {
deviceInput = try AVCaptureDeviceInput(device: newDevice!)
} catch let error {
print(error.localizedDescription)
return
}
// Swap capture device inputs
captureSession.removeInput(input)
captureSession.addInput(deviceInput)
}
fileprivate func captureDevice(with position: AVCaptureDevice.Position) -> AVCaptureDevice? {
let devices = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: .video, position: AVCaptureDevice.Position.unspecified).devices
for device in devices {
if device.position == position {
return device
}
}
return nil
}
func crop(_ image: UIImage, withWidth width: Double, andHeight height: Double) -> UIImage? {
if let cgImage = image.cgImage {
let contextImage: UIImage = UIImage(cgImage: cgImage)
let contextSize: CGSize = contextImage.size
var posX: CGFloat = 0.0
var posY: CGFloat = 0.0
var cgwidth: CGFloat = CGFloat(width)
var cgheight: CGFloat = CGFloat(height)
// See what size is longer and create the center off of that
if contextSize.width > contextSize.height {
posX = ((contextSize.width - contextSize.height) / 2)
posY = 0
cgwidth = contextSize.height
cgheight = contextSize.height
} else {
posX = 0
posY = ((contextSize.height - contextSize.width) / 2)
cgwidth = contextSize.width
cgheight = contextSize.width
}
let rect: CGRect = CGRect(x: posX, y: posY, width: cgwidth, height: cgheight)
// Create bitmap image from context using the rect
var croppedContextImage: CGImage? = nil
if let contextImage = contextImage.cgImage {
if let croppedImage = contextImage.cropping(to: rect) {
croppedContextImage = croppedImage
}
}
// Create a new image based on the imageRef and rotate back to the original orientation
if let croppedImage:CGImage = croppedContextImage {
let image: UIImage = UIImage(cgImage: croppedImage, scale: image.scale, orientation: image.imageOrientation)
return image
}
}
return nil
}
@IBAction func takePhotoAction(_ sender: Any) {
var settingsCamera = AVCapturePhotoSettings()
let previewPixelType = settingsCamera.availablePreviewPhotoPixelFormatTypes.first
settingsCamera.flashMode = .off
if isFront == false && hasFlash{
settingsCamera.flashMode = .on
}
let previewFormat = [kCVPixelBufferPixelFormatTypeKey as String: previewPixelType,
kCVPixelBufferWidthKey as String: 160,
kCVPixelBufferHeightKey as String: 160]
settingsCamera.previewPhotoFormat = previewFormat
photoOutput?.capturePhoto(with: settingsCamera, delegate: self)
}
}
extension UploadViewController: AVCapturePhotoCaptureDelegate {
func photoOutput(_ captureOutput: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
if let error = error {
print("error occure : \(error.localizedDescription)")
}
if let sampleBuffer = photoSampleBuffer,
let previewBuffer = previewPhotoSampleBuffer,
let dataImage = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: sampleBuffer, previewPhotoSampleBuffer: previewBuffer) {
let dataProvider = CGDataProvider(data: dataImage as CFData)
let cgImageRef: CGImage! = CGImage(jpegDataProviderSource: dataProvider!, decode: nil, shouldInterpolate: true, intent: .defaultIntent)
var orientation = UIImageOrientation(rawValue: 0)
if isFront {
orientation = UIImageOrientation.leftMirrored
} else {
orientation = UIImageOrientation.right
}
let image = UIImage(cgImage: cgImageRef, scale: 1.0, orientation: orientation!)
// var flippedImage = UIImage(CGImage: picture.CGImage!, scale: picture.scale, orientation: .leftMirrored)
let timage = crop(image, withWidth: 100, andHeight: 100)
let photoSecondVC = self.storyboard?.instantiateViewController(withIdentifier: "uploadSecondVC") as! UploadSecondViewController
photoSecondVC.imageData = timage!
photoSecondVC.isFront = isFront
self.navigationController?.pushViewController(photoSecondVC, animated: false)
} else {
print("some error here")
}
}
}
This is what I get when the app crashes (I don't know whether it's related):
2017-08-25 16:50:36.125052+0200 Fitshare[93231:3349636] Failed to set (keyPath) user defined inspected property on (UITabBarItem): [ setValue:forUndefinedKey:]: this class is not key value coding-compliant for the key keyPath.
2017-08-25 16:50:36.125584+0200 Fitshare[93231:3349636] Failed to set (keyPath) user defined inspected property on (UITabBarItem): [ setValue:forUndefinedKey:]: this class is not key value coding-compliant for the key keyPath.
2017-08-25 16:50:36.300650+0200 Fitshare[93231:3349636] [MC] Lazy loading NSBundle MobileCoreServices.framework
2017-08-25 16:50:36.302462+0200 Fitshare[93231:3349636] [MC] Loaded MobileCoreServices.framework
2017-08-25 16:50:36.311211+0200 Fitshare[93231:3349636] [MC] System group container for systemgroup.com.apple.configurationprofiles path is /Users/kevinboirel/Library/Developer/CoreSimulator/Devices/B48E7503-47C0-4A75-AC5C-C3DEF6CC8507/data/Containers/Shared/SystemGroup/systemgroup.com.apple.configurationprofiles
2017-08-25 16:50:36.363022+0200 Fitshare[93231:3349636] [Snapshotting] Snapshotting a view (0x7fbb38f2b0c0, Fitshare.ALThreeCircleSpinner) that has not been rendered at least once requires afterScreenUpdates:YES.
2017-08-25 16:50:36.469416+0200 Fitshare[93231:3349636] +[CATransaction synchronize] called within transaction
2017-08-25 16:50:36.469800+0200 Fitshare[93231:3349636] +[CATransaction synchronize] called within transaction
And the weird thing is that I get this error message in Xcode:
You need to adopt two delegate protocols: UIImagePickerControllerDelegate and UINavigationControllerDelegate.
// MARK: - Global Declaration
@IBOutlet var imgProfile: UIImageView!
var imagePicker = UIImagePickerController()
// MARK: - Camera Methods
func PickingImageFromCamera()
{
let picker = UIImagePickerController()
picker.delegate = self
picker.allowsEditing = false
picker.sourceType = .camera
picker.cameraCaptureMode = .photo
present(picker, animated: true, completion: nil)
}
//----------------------------------
func imagePickerController(_ picker: UIImagePickerController,
didFinishPickingMediaWithInfo info: [String : Any])
{
if let pickedImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
imgProfile.contentMode = .scaleToFill
imgProfile.image = pickedImage
}
dismiss(animated: true, completion: nil)
}
//----------------------------------
func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
dismiss(animated: true, completion: nil)
}
//----------------------------------
I think this code will help you...
It was due to the UploadSecondViewController ...
I just resized the picture sent to the second view controller to a smaller size, and the app is now OK!
Thanks for your answers !
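For anyone curious, a minimal sketch of that kind of downscaling (the helper name and the maximum dimension are illustrative, not from the original post):
// Downscale an image so its longest side is at most maxDimension points,
// preserving the aspect ratio.
func resizedImage(_ image: UIImage, maxDimension: CGFloat = 1024) -> UIImage {
    let longestSide = max(image.size.width, image.size.height)
    guard longestSide > maxDimension else { return image }
    let ratio = maxDimension / longestSide
    let newSize = CGSize(width: image.size.width * ratio, height: image.size.height * ratio)
    UIGraphicsBeginImageContextWithOptions(newSize, false, image.scale)
    defer { UIGraphicsEndImageContext() }
    image.draw(in: CGRect(origin: .zero, size: newSize))
    return UIGraphicsGetImageFromCurrentImageContext() ?? image
}
// Usage before the push:
// photoSecondVC.imageData = resizedImage(timage!)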
I want to "stream" the preview layer to my server, however, I only want specific frames to be sent. Basically, I want to take a snapshot of the AVCaptureVideoPreviewLayer, scale it down to 28*28, turn it into an intensity array, and send it to my socket layer where my python backend handles the rest.
The problem here is that AVCapturePhotoOutput's capture function is insanely slow; I can't call it repeatedly. Not to mention it always makes a camera shutter sound, haha.
The other problem is that taking a snapshot of AVCaptureVideoPreviewLayer is really difficult. Using UIGraphicsBeginImageContext almost always returns a blank/clear image.
Help a brother out, thanks!
Basically, instead of using AVCaptureVideoPreviewLayer for grabbing frames, you should use AVCaptureVideoDataOutputSampleBufferDelegate.
Here is example:
import Foundation
import UIKit
import AVFoundation
protocol CaptureManagerDelegate: class {
func processCapturedImage(image: UIImage)
}
class CaptureManager: NSObject {
internal static let shared = CaptureManager()
weak var delegate: CaptureManagerDelegate?
var session: AVCaptureSession?
override init() {
super.init()
session = AVCaptureSession()
//setup input
let device = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
let input = try! AVCaptureDeviceInput(device: device)
session?.addInput(input)
//setup output
let output = AVCaptureVideoDataOutput()
output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as AnyHashable: kCVPixelFormatType_32BGRA]
output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
session?.addOutput(output)
}
func statSession() {
session?.startRunning()
}
func stopSession() {
session?.stopRunning()
}
func getImageFromSampleBuffer(sampleBuffer: CMSampleBuffer) ->UIImage? {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return nil
}
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else {
return nil
}
guard let cgImage = context.makeImage() else {
return nil
}
let image = UIImage(cgImage: cgImage, scale: 1, orientation:.right)
CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
return image
}
}
extension CaptureManager: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else {
return
}
delegate?.processCapturedImage(image: outputImage)
}
}
Update: to process images, implement the processCapturedImage method of the CaptureManagerDelegate protocol in whatever other class you want, like:
import UIKit
class ViewController: UIViewController {
@IBOutlet weak var imageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
CaptureManager.shared.statSession()
CaptureManager.shared.delegate = self
}
}
extension ViewController: CaptureManagerDelegate {
func processCapturedImage(image: UIImage) {
self.imageView.image = image
}
}
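To get to the 28×28 intensity array mentioned in the question, processCapturedImage could downscale each delivered frame into a grayscale bitmap and read its bytes. A rough sketch under that assumption (sending the result to the socket layer is left as a placeholder):
// Render the frame into a 28x28, 8-bit grayscale bitmap and return the raw
// pixel intensities in row-major order (0 = black, 255 = white).
func intensityArray(from image: UIImage, side: Int = 28) -> [UInt8]? {
    guard let cgImage = image.cgImage else { return nil }
    var pixels = [UInt8](repeating: 0, count: side * side)
    let drawn: Bool = pixels.withUnsafeMutableBytes { buffer in
        guard let context = CGContext(data: buffer.baseAddress,
                                      width: side, height: side,
                                      bitsPerComponent: 8, bytesPerRow: side,
                                      space: CGColorSpaceCreateDeviceGray(),
                                      bitmapInfo: CGImageAlphaInfo.none.rawValue) else { return false }
        context.draw(cgImage, in: CGRect(x: 0, y: 0, width: side, height: side))
        return true
    }
    return drawn ? pixels : nil
}
// e.g. inside processCapturedImage(image:):
// if let intensities = intensityArray(from: image) { /* send to the socket layer */ }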
@ninjaproger's answer was great! I'm simply writing this up as a Swift 4 version of the answer for future reference.
import UIKit
import AVFoundation
var customPreviewLayer: AVCaptureVideoPreviewLayer?
class ViewController: UIViewController {
@IBOutlet weak var imageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
CaptureManager.shared.statSession()
CaptureManager.shared.delegate = self
}
}
extension ViewController: CaptureManagerDelegate {
func processCapturedImage(image: UIImage) {
self.imageView.image = image
}
}
protocol CaptureManagerDelegate: class {
func processCapturedImage(image: UIImage)
}
class CaptureManager: NSObject {
internal static let shared = CaptureManager()
weak var delegate: CaptureManagerDelegate?
var session: AVCaptureSession?
override init() {
super.init()
session = AVCaptureSession()
//setup input
let device = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
let input = try! AVCaptureDeviceInput(device: device!)
session?.addInput(input)
//setup output
let output = AVCaptureVideoDataOutput()
output.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
session?.addOutput(output)
}
func statSession() {
session?.startRunning()
}
func stopSession() {
session?.stopRunning()
}
func getImageFromSampleBuffer(sampleBuffer: CMSampleBuffer) ->UIImage? {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return nil
}
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height, bitsPerComponent: 8, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else {
return nil
}
guard let cgImage = context.makeImage() else {
return nil
}
let image = UIImage(cgImage: cgImage, scale: 1, orientation:.right)
CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly)
return image
}
}
extension CaptureManager: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let outputImage = getImageFromSampleBuffer(sampleBuffer: sampleBuffer) else {
return
}
delegate?.processCapturedImage(image: outputImage)
}
}
Details
Xcode 10.2.1 (10E1001), Swift 5
Features
This solution allows you:
to check camera access
to select the front or back camera
to show an alert with a link to the app's Settings page if there is no camera access
to take a photo
to play the standard capture-photo sound
Solution
CameraService
import UIKit
import AVFoundation
import Vision
class CameraService: NSObject {
private weak var previewView: UIView?
private(set) var cameraIsReadyToUse = false
private let session = AVCaptureSession()
private weak var previewLayer: AVCaptureVideoPreviewLayer?
private lazy var sequenceHandler = VNSequenceRequestHandler()
private lazy var capturePhotoOutput = AVCapturePhotoOutput()
private lazy var dataOutputQueue = DispatchQueue(label: "FaceDetectionService",
qos: .userInitiated, attributes: [],
autoreleaseFrequency: .workItem)
private var captureCompletionBlock: ((UIImage) -> Void)?
private var preparingCompletionHandler: ((Bool) -> Void)?
private var snapshotImageOrientation = UIImage.Orientation.upMirrored
private var cameraPosition = AVCaptureDevice.Position.front {
didSet {
switch cameraPosition {
case .front: snapshotImageOrientation = .upMirrored
case .unspecified, .back: fallthrough
@unknown default: snapshotImageOrientation = .up
}
}
}
func prepare(previewView: UIView,
cameraPosition: AVCaptureDevice.Position,
completion: ((Bool) -> Void)?) {
self.previewView = previewView
self.preparingCompletionHandler = completion
self.cameraPosition = cameraPosition
checkCameraAccess { allowed in
if allowed { self.setup() }
completion?(allowed)
self.preparingCompletionHandler = nil
}
}
private func setup() { configureCaptureSession() }
func start() { if cameraIsReadyToUse { session.startRunning() } }
func stop() { session.stopRunning() }
}
extension CameraService {
private func askUserForCameraPermission(_ completion: ((Bool) -> Void)?) {
AVCaptureDevice.requestAccess(for: AVMediaType.video) { (allowedAccess) -> Void in
DispatchQueue.main.async { completion?(allowedAccess) }
}
}
private func checkCameraAccess(completion: ((Bool) -> Void)?) {
askUserForCameraPermission { [weak self] allowed in
guard let self = self, let completion = completion else { return }
self.cameraIsReadyToUse = allowed
if allowed {
completion(true)
} else {
self.showDisabledCameraAlert(completion: completion)
}
}
}
private func configureCaptureSession() {
guard let previewView = previewView else { return }
// Define the capture device we want to use
guard let camera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: cameraPosition) else {
let error = NSError(domain: "", code: 0, userInfo: [NSLocalizedDescriptionKey : "No front camera available"])
show(error: error)
return
}
// Connect the camera to the capture session input
do {
try camera.lockForConfiguration()
defer { camera.unlockForConfiguration() }
if camera.isFocusModeSupported(.continuousAutoFocus) {
camera.focusMode = .continuousAutoFocus
}
if camera.isExposureModeSupported(.continuousAutoExposure) {
camera.exposureMode = .continuousAutoExposure
}
let cameraInput = try AVCaptureDeviceInput(device: camera)
session.addInput(cameraInput)
} catch {
show(error: error as NSError)
return
}
// Create the video data output
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
// Add the video output to the capture session
session.addOutput(videoOutput)
let videoConnection = videoOutput.connection(with: .video)
videoConnection?.videoOrientation = .portrait
// Configure the preview layer
let previewLayer = AVCaptureVideoPreviewLayer(session: session)
previewLayer.videoGravity = .resizeAspectFill
previewLayer.frame = previewView.bounds
previewView.layer.insertSublayer(previewLayer, at: 0)
self.previewLayer = previewLayer
}
}
extension CameraService: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard captureCompletionBlock != nil,
let outputImage = UIImage(sampleBuffer: sampleBuffer, orientation: snapshotImageOrientation) else { return }
DispatchQueue.main.async { [weak self] in
guard let self = self else { return }
if let captureCompletionBlock = self.captureCompletionBlock{
captureCompletionBlock(outputImage)
AudioServicesPlayAlertSound(SystemSoundID(1108))
}
self.captureCompletionBlock = nil
}
}
}
// Navigation
extension CameraService {
private func show(alert: UIAlertController) {
DispatchQueue.main.async {
UIApplication.topViewController?.present(alert, animated: true, completion: nil)
}
}
private func showDisabledCameraAlert(completion: ((Bool) -> Void)?) {
let alertVC = UIAlertController(title: "Enable Camera Access",
message: "Please provide access to your camera",
preferredStyle: .alert)
alertVC.addAction(UIAlertAction(title: "Go to Settings", style: .default, handler: { action in
guard let previewView = self.previewView,
let settingsUrl = URL(string: UIApplication.openSettingsURLString),
UIApplication.shared.canOpenURL(settingsUrl) else { return }
UIApplication.shared.open(settingsUrl) { [weak self] _ in
guard let self = self else { return }
self.prepare(previewView: previewView,
cameraPosition: self.cameraPosition,
completion: self.preparingCompletionHandler)
}
}))
alertVC.addAction(UIAlertAction(title: "Cancel", style: .cancel, handler: { _ in completion?(false) }))
show(alert: alertVC)
}
private func show(error: NSError) {
let alertVC = UIAlertController(title: "Error", message: error.localizedDescription, preferredStyle: .alert)
alertVC.addAction(UIAlertAction(title: "Ok", style: .cancel, handler: nil ))
show(alert: alertVC)
}
}
extension CameraService: AVCapturePhotoCaptureDelegate {
func capturePhoto(completion: ((UIImage) -> Void)?) { captureCompletionBlock = completion }
}
Helpers
///////////////////////////////////////////////////////////////////////////
import UIKit
import AVFoundation
extension UIImage {
convenience init?(sampleBuffer: CMSampleBuffer, orientation: UIImage.Orientation = .upMirrored) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
CVPixelBufferLockBaseAddress(pixelBuffer, .readOnly)
defer { CVPixelBufferUnlockBaseAddress(pixelBuffer, .readOnly) }
let baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer)
let width = CVPixelBufferGetWidth(pixelBuffer)
let height = CVPixelBufferGetHeight(pixelBuffer)
let bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer)
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue | CGBitmapInfo.byteOrder32Little.rawValue)
guard let context = CGContext(data: baseAddress, width: width, height: height,
bitsPerComponent: 8, bytesPerRow: bytesPerRow,
space: colorSpace, bitmapInfo: bitmapInfo.rawValue) else { return nil }
guard let cgImage = context.makeImage() else { return nil }
self.init(cgImage: cgImage, scale: 1, orientation: orientation)
}
}
///////////////////////////////////////////////////////////////////////////
import UIKit
extension UIApplication {
private class func topViewController(controller: UIViewController? = UIApplication.shared.keyWindow?.rootViewController) -> UIViewController? {
if let navigationController = controller as? UINavigationController {
return topViewController(controller: navigationController.visibleViewController)
}
if let tabController = controller as? UITabBarController {
if let selected = tabController.selectedViewController {
return topViewController(controller: selected)
}
}
if let presented = controller?.presentedViewController {
return topViewController(controller: presented)
}
return controller
}
class var topViewController: UIViewController? { return topViewController() }
}
Usage
private lazy var cameraService = CameraService()
//...
cameraService.prepare(previewView: previewView, cameraPosition: .front) { [weak self] success in
if success { self?.cameraService.start() }
}
//...
cameraService.capturePhoto { [weak self] image in
//...
}
Full sample
import UIKit
class ViewController: UIViewController {
private lazy var cameraService = CameraService()
private weak var button: UIButton?
private weak var imagePreviewView: UIImageView?
private var cameraInited = false
private enum ButtonState { case cancel, makeSnapshot }
private var buttonState = ButtonState.makeSnapshot {
didSet {
switch buttonState {
case .makeSnapshot: button?.setTitle("Make a photo", for: .normal)
case .cancel: button?.setTitle("Cancel", for: .normal)
}
}
}
override func viewDidLoad() {
super.viewDidLoad()
setupCameraPreviewView()
setupButton()
// Do any additional setup after loading the view.
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
cameraService.start()
}
override func viewDidDisappear(_ animated: Bool) {
super.viewDidDisappear(animated)
cameraService.stop()
}
// Ensure that the interface stays locked in Portrait.
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
return .portrait
}
// Ensure that the interface stays locked in Portrait.
override var preferredInterfaceOrientationForPresentation: UIInterfaceOrientation {
return .portrait
}
}
extension ViewController {
private func setupCameraPreviewView() {
let previewView = UIView(frame: .zero)
view.addSubview(previewView)
previewView.translatesAutoresizingMaskIntoConstraints = false
previewView.topAnchor.constraint(equalTo: view.topAnchor).isActive = true
previewView.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
previewView.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
previewView.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
previewView.layoutIfNeeded()
cameraService.prepare(previewView: previewView, cameraPosition: .front) { [weak self] success in
if success { self?.cameraService.start() }
}
}
private func setupButton() {
let button = UIButton(frame: .zero)
button.addTarget(self, action: #selector(buttonTouchedUpInside), for: .touchUpInside)
view.addSubview(button)
self.button = button
buttonState = .makeSnapshot
button.translatesAutoresizingMaskIntoConstraints = false
button.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
button.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
button.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
button.heightAnchor.constraint(equalToConstant: 44).isActive = true
button.backgroundColor = UIColor.black.withAlphaComponent(0.4)
}
private func show(image: UIImage) {
let imageView = UIImageView(frame: .zero)
view.insertSubview(imageView, at: 1)
imagePreviewView = imageView
imageView.translatesAutoresizingMaskIntoConstraints = false
imageView.topAnchor.constraint(equalTo: view.topAnchor).isActive = true
imageView.leftAnchor.constraint(equalTo: view.leftAnchor).isActive = true
imageView.rightAnchor.constraint(equalTo: view.rightAnchor).isActive = true
imageView.bottomAnchor.constraint(equalTo: view.bottomAnchor).isActive = true
imageView.image = image
}
@objc func buttonTouchedUpInside() {
switch buttonState {
case .makeSnapshot:
cameraService.capturePhoto { [weak self] image in
guard let self = self else {return }
self.cameraService.stop()
self.buttonState = .cancel
self.show(image: image)
}
case .cancel:
buttonState = .makeSnapshot
cameraService.start()
imagePreviewView?.removeFromSuperview()
}
}
}
I am 100% sure this has been answered before, but I cannot seem to make the answers I'm finding fit my situation.
Starting from the beginning: in my ShareViewController I have two image views placed one on top of the other, and to combine them into a single image I take a screenshot at that image view's size. I took the screenshot code from iOS: what's the fastest, most performant way to make a screenshot programmatically?
Here is my class:
import UIKit
import Photos
class ShareViewController: UIViewController, UIDocumentInteractionControllerDelegate {
var defaultImage: UIImage? = nil
var documentController: UIDocumentInteractionController!
@IBOutlet weak var imageView: UIImageView!
@IBOutlet weak var overlayImageView: UIImageView!
override func viewDidLoad() {
super.viewDidLoad()
guard let image = defaultImage else {
return
}
self.imageView.image = image
self.imageView.contentMode = .ScaleAspectFill
let subimage1 = UIImage(named: "overlay")
self.overlayImageView.image = subimage1
self.imageView.clipsToBounds = true
}
@IBAction func shareButton(sender: UIButton) {
self.screenshot()
self.shareToInstagram()
_ = self.imageView.image!
let fetchOptions = PHFetchOptions()
fetchOptions.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)]
let fetchResult = PHAsset.fetchAssetsWithMediaType(.Image, options: fetchOptions)
if let lastAsset = fetchResult.firstObject as? PHAsset {
let localIdentifier = lastAsset.localIdentifier
let u = "instagram://library?LocalIdentifier=" + localIdentifier
_ = NSURL(string: u)!
}
}
// MARK: - Make Screenshot of ImageView
class func screenshot() -> UIImage {
var imageSize = CGSizeZero
let orientation = UIApplication.sharedApplication().statusBarOrientation
if UIInterfaceOrientationIsPortrait(orientation) {
imageSize = UIScreen.mainScreen().bounds.size
} else {
imageSize = CGSize(width: UIScreen.mainScreen().bounds.size.height, height: UIScreen.mainScreen().bounds.size.width)
}
UIGraphicsBeginImageContextWithOptions(imageSize, false, 0)
let context = UIGraphicsGetCurrentContext()
for window in UIApplication.sharedApplication().windows {
CGContextSaveGState(context)
CGContextTranslateCTM(context, window.center.x, window.center.y)
CGContextConcatCTM(context, window.transform)
CGContextTranslateCTM(context, -window.bounds.size.width * window.layer.anchorPoint.x, -window.bounds.size.height * window.layer.anchorPoint.y)
if orientation == .LandscapeLeft {
CGContextRotateCTM(context, CGFloat(M_PI_2))
CGContextTranslateCTM(context, 0, -imageSize.width)
} else if orientation == .LandscapeRight {
CGContextRotateCTM(context, -CGFloat(M_PI_2))
CGContextTranslateCTM(context, -imageSize.height, 0)
} else if orientation == .PortraitUpsideDown {
CGContextRotateCTM(context, CGFloat(M_PI))
CGContextTranslateCTM(context, -imageSize.width, -imageSize.height)
}
if window.respondsToSelector(#selector(UIView.drawViewHierarchyInRect(_:afterScreenUpdates:))) {
window.drawViewHierarchyInRect(window.bounds, afterScreenUpdates: true)
} else if let context = context {
window.layer.renderInContext(context)
}
CGContextRestoreGState(context)
}
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return image
}
// MARK: - Share photo to Instagram
func displayAlert(title: String, message: String) {
let alertController = UIAlertController(title: title, message: message, preferredStyle: .Alert)
alertController.addAction(UIAlertAction(title: "OK", style: .Default, handler: nil))
presentViewController(alertController, animated: true, completion: nil)
return
}
func displayShareSheet(shareContent:String) {
let activityViewController = UIActivityViewController(activityItems: [shareContent as NSString], applicationActivities: nil)
presentViewController(activityViewController, animated: true, completion: {})
}
func shareToInstagram() {
let instagramURL = NSURL(string: "instagram://app")
if (UIApplication.sharedApplication().canOpenURL(instagramURL!)) {
let imageData = UIImageJPEGRepresentation(self.imageView.image!, 100)
let captionString = "caption"
let writePath = (NSTemporaryDirectory() as NSString).stringByAppendingPathComponent("instagram.igo")
if imageData?.writeToFile(writePath, atomically: true) == false {
return
} else {
let fileURL = NSURL(fileURLWithPath: writePath)
self.documentController = UIDocumentInteractionController(URL: fileURL)
self.documentController.delegate = self
self.documentController.UTI = "com.instagram.exlusivegram"
self.documentController.annotation = NSDictionary(object: captionString, forKey: "InstagramCaption")
self.documentController.presentOpenInMenuFromRect(self.view.frame, inView: self.view, animated: true)
}
} else {
print(" Instagram isn't installed ")
}
}
}
Now when I try to call the screenshot() method, it says:
Static member 'screenshot' cannot be used on instance of type
'ShareViewController'
How can I fix this and successfully take a screenshot of my image views?
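For what it's worth, the error means screenshot() is declared as a class method (class func) but is being called on an instance. A minimal sketch of the two usual fixes (pick whichever fits):
// Option 1: call the class method on the type and keep the result
// (the original self.screenshot() call also discarded the returned image).
let snapshot = ShareViewController.screenshot()
// ...use `snapshot`, e.g. hand it to the Instagram sharing code or save it...

// Option 2: drop `class` from the declaration so it becomes an instance method
// and self.screenshot() compiles:
// func screenshot() -> UIImage { ... }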