Updating published variables using DispatchQueue.main.async in SwiftUI

I am working on a SwiftUI app that displays an AVCaptureVideoPreviewLayer and also implements the AVCaptureVideoDataOutputSampleBufferDelegate protocol to perform some custom logic in captureOutput(_:didOutput:from:). The custom logic was working and updating my view as expected until I implemented the video preview layer.
Now, only the video preview layer is updated within the view. Both the preview layer and the update to the published variable occur within a call to DispatchQueue.main.async. Is this appropriate?
I also suspect that I may need to implement some logic within the updateUIViewController(_:context:) function of the UIViewControllerRepresentable struct I am using to display the video preview layer in my view. The docs for this function are not very helpful; can anyone provide tips on how it should be used?
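Editor's note: updateUIViewController(_:context:) is SwiftUI's hook for pushing state changes down into the wrapped UIKit controller; SwiftUI calls it whenever state the representable reads has changed. A minimal sketch of the usual pattern, using a hypothetical text property (not from the code below):
import SwiftUI
import UIKit
struct HostedLabel: UIViewControllerRepresentable {
    var text: String // SwiftUI state flows in through this property

    func makeUIViewController(context: Context) -> UIViewController {
        // Build the controller once.
        let vc = UIViewController()
        let label = UILabel(frame: vc.view.bounds)
        label.tag = 1
        vc.view.addSubview(label)
        return vc
    }

    func updateUIViewController(_ uiViewController: UIViewController, context: Context) {
        // Called again on every relevant state change; push the new value into UIKit here.
        (uiViewController.view.viewWithTag(1) as? UILabel)?.text = text
    }
}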
class VideoStream: UIViewController, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
@Published var luminosityReading: Double = 0.0
...
// AVCaptureSession configuration entered, input added, establish preview layer:
// Currently working on DispatchQueue(label: "VideoStreamSetupQueue")
layer = AVCaptureVideoPreviewLayer(session: session)
...
DispatchQueue.main.async {
self.view.layer.addSublayer(self.layer)
}
// Establish output for luminosity calculation
let videoOutput = AVCaptureVideoDataOutput()
guard
session.canAddOutput(videoOutput)
else {
print("Error creating video output")
return
}
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "VideoStreamForCaptureOutputQueue"))
session.addOutput(videoOutput)
session.sessionPreset = .medium
session.commitConfiguration()
session.startRunning()
...
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// custom logic to calculate luminosity
DispatchQueue.main.async {
print(luminosity) // value changes as expected
self.luminosityReading = luminosity // view not updated with printed value
}
}
Establishing a UIViewControllerRepresentable to display video preview layer in a SwiftUI view:
struct HostedVideoPreviewLayer: UIViewControllerRepresentable {
func makeUIViewController(context: Context) -> some UIViewController {
return VideoStream()
}
func updateUIViewController(_ uiViewController: UIViewControllerType, context: Context) {
// video preview layer works as expected
// text unrelated to this struct (see below) is not updating
}
}
Creating the view:
struct ContentView: View {
@StateObject var videoStream = VideoStream()
var body: some View {
VStack {
HostedVideoPreviewLayer()
Text(String(format: "%.2f Lux", videoStream.luminosityReading))
.font(.largeTitle)
.padding()
}
}
}
Minimal Reproducible Example:
import Foundation
import UIKit
import AVKit
import AVFoundation
import SwiftUI
struct ContentView: View {
@StateObject var videoStream = VideoStream()
var body: some View {
VStack {
HostedVideoPreviewLayer()
Text(String(format: "%.2f Lux", videoStream.luminosityReading))
.font(.largeTitle)
.padding()
}
}
}
class VideoStream: UIViewController, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
@Published var luminosityReading: Double = 0.0
private let session = AVCaptureSession()
private let queue = DispatchQueue(label: "VideoStreamSetupQueue")
private var layer = AVCaptureVideoPreviewLayer()
var screenRect: CGRect!
override func viewDidLoad() {
super.viewDidLoad()
queue.async {
self.authorizeCapture()
}
}
func authorizeCapture() {
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized: // The user has previously granted access to the camera.
beginCapture()
case .notDetermined: // The user has not yet been asked for camera access.
queue.suspend()
AVCaptureDevice.requestAccess(for: .video) { granted in
if granted {
self.beginCapture()
self.queue.resume()
}
}
default:
return
}
}
func beginCapture() {
session.beginConfiguration()
let videoDevice = AVCaptureDevice.default(for: .video)
// Add device as input
guard
let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice!),
session.canAddInput(videoDeviceInput)
else {
print("Camera selection failed")
return
}
session.addInput(videoDeviceInput)
// Establish preview layer
screenRect = UIScreen.main.bounds
layer = AVCaptureVideoPreviewLayer(session: session)
layer.frame = CGRect(x: 0, y: 0, width: screenRect.size.width, height: 300)
layer.videoGravity = AVLayerVideoGravity.resizeAspectFill
layer.connection?.videoOrientation = .portrait
DispatchQueue.main.async {
self.view.layer.addSublayer(self.layer)
}
// Establish output for luminosity calculation
let videoOutput = AVCaptureVideoDataOutput()
guard
session.canAddOutput(videoOutput)
else {
print("Error creating video output")
return
}
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "VideoStreamForCaptureOutputQueue"))
session.addOutput(videoOutput)
session.sessionPreset = .medium
session.commitConfiguration()
session.startRunning()
}
// From: https://stackoverflow.com/questions/41921326/how-to-get-light-value-from-avfoundation/46842115#46842115
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// Retrieving EXIF data of camera frame buffer
let rawMetadata = CMCopyDictionaryOfAttachments(allocator: nil, target: sampleBuffer, attachmentMode: CMAttachmentMode(kCMAttachmentMode_ShouldPropagate))
let metadata = CFDictionaryCreateMutableCopy(nil, 0, rawMetadata) as NSMutableDictionary
let exifData = metadata.value(forKey: "{Exif}") as? NSMutableDictionary
let FNumber : Double = exifData?["FNumber"] as! Double
let ExposureTime : Double = exifData?["ExposureTime"] as! Double
let ISOSpeedRatingsArray = exifData!["ISOSpeedRatings"] as? NSArray
let ISOSpeedRatings : Double = ISOSpeedRatingsArray![0] as! Double
let CalibrationConstant : Double = 50
//Calculating the luminosity
let luminosity : Double = (CalibrationConstant * FNumber * FNumber ) / ( ExposureTime * ISOSpeedRatings )
DispatchQueue.main.async {
print(luminosity) // value changes as expected
self.luminosityReading = luminosity // view not updated with recent value
}
}
override func willTransition(to newCollection: UITraitCollection, with coordinator: UIViewControllerTransitionCoordinator) {
screenRect = UIScreen.main.bounds
layer.frame = CGRect(x: 0, y: 0, width: screenRect.size.width, height: screenRect.size.height)
switch UIDevice.current.orientation {
// Home button on top
case UIDeviceOrientation.portraitUpsideDown:
layer.connection?.videoOrientation = .portraitUpsideDown
// Home button on right
case UIDeviceOrientation.landscapeLeft:
layer.connection?.videoOrientation = .landscapeRight
// Home button on left
case UIDeviceOrientation.landscapeRight:
layer.connection?.videoOrientation = .landscapeLeft
// Home button at bottom
case UIDeviceOrientation.portrait:
layer.connection?.videoOrientation = .portrait
default:
break
}
}
}
struct HostedVideoPreviewLayer: UIViewControllerRepresentable {
func makeUIViewController(context: Context) -> some UIViewController {
return VideoStream()
}
func updateUIViewController(_ uiViewController: UIViewControllerType, context: Context) {
// video preview layer works as expected
// text unrelated to this struct is not updating
}
}
Docs: https://developer.apple.com/documentation/swiftui/uiviewcontrollerrepresentable/updateuiviewcontroller(_:context:)
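Editor's note: the likely culprit in the code above is that ContentView's @StateObject and the controller returned by makeUIViewController are two different VideoStream instances, so the Text never observes the instance that is actually publishing. A minimal sketch of passing the observed instance in instead (an illustration, not the poster's code):
struct HostedVideoPreviewLayer: UIViewControllerRepresentable {
    var videoStream: VideoStream // the same instance ContentView observes

    func makeUIViewController(context: Context) -> some UIViewController {
        videoStream // reuse it rather than constructing a second VideoStream()
    }

    func updateUIViewController(_ uiViewController: UIViewControllerType, context: Context) {}
}
// In ContentView's body: HostedVideoPreviewLayer(videoStream: videoStream)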

My working solution instead passes the AVCaptureSession created in VideoStream as a parameter to a custom view, VideoPreviewHolder. This sidesteps the original problem: the @StateObject that ContentView observed and the VideoStream() constructed inside makeUIViewController were two separate instances. I use the state object to check that the session is available (if not, a progress indicator is displayed) and then display the preview layer. I hope this may be useful to others:
class VideoPreview: UIView {
private var session: AVCaptureSession!
init(runningSession session: AVCaptureSession) {
super.init(frame: .zero)
self.session = session
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
override class var layerClass: AnyClass {
AVCaptureVideoPreviewLayer.self
}
var videoPreviewLayer: AVCaptureVideoPreviewLayer {
return layer as! AVCaptureVideoPreviewLayer
}
override func didMoveToSuperview() {
super.didMoveToSuperview()
if self.superview != nil {
self.videoPreviewLayer.session = self.session
self.videoPreviewLayer.videoGravity = .resizeAspect
}
}
}
struct VideoPreviewHolder: UIViewRepresentable {
public var runningSession: AVCaptureSession
typealias UIViewType = VideoPreview
func makeUIView(context: Context) -> VideoPreview {
VideoPreview(runningSession: runningSession)
}
func updateUIView(_ uiView: VideoPreview, context: Context) {
}
}
struct ContentView: View {
@StateObject var videoStream = VideoStream() // this class definition is in original question body
var body: some View {
if (!videoStream.cameraAccess) {
// request access
} else {
NavigationView {
VStack {
if (videoStream.session != nil) {
VideoPreviewHolder(runningSession: videoStream.session)
.frame(minWidth: 0, idealWidth: .infinity, maxWidth: .infinity, minHeight: 0, idealHeight: .infinity, maxHeight: .infinity, alignment: .center)
} else {
ProgressView()
}
...
}
}
}
}
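For completeness, a sketch of what VideoStream needs for the ContentView above to compile; cameraAccess and the exposed session are assumptions, since the answer references them without showing them:
class VideoStream: UIViewController, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    @Published var luminosityReading: Double = 0.0
    @Published var cameraAccess = false       // hypothetical: set to true once authorization succeeds
    @Published var session: AVCaptureSession! // assigned once configuration finishes
    // ... remainder as in the question body
}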

Related

How to modify FPS, Bit rate and resolution for the video programmatically in iOS?

I am implementing functionality to record video in my iOS application.
I am also using ReplayKit to record the full screen, instead of the camera's default capture.
There is a requirement to customize:
(1) Resolution
(2) FPS (frames)
(3) Bit rate
Of the above, I am currently working on (1) resolution and (2) FPS.
For this I have set the resolution and FPS as coded below.
class PreviewView: UIView {
private var captureSession: AVCaptureSession?
private var shakeCountDown: Timer?
let videoFileOutput = AVCaptureMovieFileOutput()
var recordingDelegate:AVCaptureFileOutputRecordingDelegate!
var recorded = 0
var secondsToReachGoal = 30
var videoDevice: AVCaptureDevice?
var onRecord: ((Int, Int)->())?
var onReset: (() -> ())?
var onComplete: (() -> ())?
//MARK:- Screen Recording Variables
let recorder = RPScreenRecorder.shared()
var isRecording = false
init() {
super.init(frame: .zero)
var allowedAccess = false
let blocker = DispatchGroup()
blocker.enter()
AVCaptureDevice.requestAccess(for: .video) { flag in
allowedAccess = flag
blocker.leave()
}
blocker.wait()
recorder.isMicrophoneEnabled = true
if !allowedAccess {
print("!!! NO ACCESS TO CAMERA")
return
}
// setup session
let session = AVCaptureSession()
session.beginConfiguration()
videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera,
for: .video, position: .back)
guard videoDevice != nil, let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice!), session.canAddInput(videoDeviceInput) else {
print("!!! NO CAMERA DETECTED")
return
}
session.addInput(videoDeviceInput)
session.commitConfiguration()
self.captureSession = session
//MARK: Test Cases
//Setup the resolution
captureSession?.sessionPreset = AVCaptureSession.Preset.inputPriority
// Setup the frame
videoDevice?.set(frameRate: 20) // 1 to 30 FPS
}
override class var layerClass: AnyClass {
AVCaptureVideoPreviewLayer.self
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
var videoPreviewLayer: AVCaptureVideoPreviewLayer {
return layer as! AVCaptureVideoPreviewLayer
}
override func didMoveToSuperview() {
super.didMoveToSuperview()
recordingDelegate = self
} }
And, to set the frame rate (FPS), I have created an extension as below:
extension AVCaptureDevice {
func set(frameRate: Double) {
var isFPSSupported = false
do {
let supportedFrameRange = activeFormat.videoSupportedFrameRateRanges
for range in supportedFrameRange {
if (range.maxFrameRate >= Double(frameRate) && range.minFrameRate <= Double(frameRate)) {
isFPSSupported = true
break
}
}
if isFPSSupported {
try lockForConfiguration()
activeVideoMaxFrameDuration = CMTimeMake(value: 1, timescale: Int32(frameRate))
activeVideoMinFrameDuration = CMTimeMake(value: 1, timescale: Int32(frameRate))
unlockForConfiguration()
}
} catch {
print("lockForConfiguration error: \(error.localizedDescription)")
}
}
}
I have checked preset scenarios for the session, like:
(1) AVCaptureSession.Preset.inputPriority
(2) AVCaptureSession.Preset.hd1280x720
etc...
But I got results on the saved video like the screenshots below:
As the screenshot shows, the FPS is 59.88, which is not the same as what we set through the code; in code I set the FPS to 20.
The second question is: how can we set the resolution?
In all the session preset scenarios it always takes a resolution like
828 x 1792
How can we achieve this?
Any help would be appreciated.
Thanks in advance
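Editor's note: a likely explanation for the 59.88 FPS result is that RPScreenRecorder captures the screen itself, so AVCaptureDevice frame-rate settings never apply to it; they only affect AVCaptureSession capture. If capture goes through AVCaptureSession, resolution and FPS can be fixed together by selecting a matching device format before setting the frame duration. A sketch under that assumption (the method name setFormat is made up):
import AVFoundation
extension AVCaptureDevice {
    /// Activates the first format matching the requested dimensions that supports the frame rate.
    func setFormat(width: Int32, height: Int32, frameRate: Double) {
        for format in formats {
            let dims = CMVideoFormatDescriptionGetDimensions(format.formatDescription)
            let supportsRate = format.videoSupportedFrameRateRanges.contains {
                $0.minFrameRate <= frameRate && frameRate <= $0.maxFrameRate
            }
            guard dims.width == width, dims.height == height, supportsRate else { continue }
            do {
                try lockForConfiguration()
                activeFormat = format
                activeVideoMinFrameDuration = CMTimeMake(value: 1, timescale: Int32(frameRate))
                activeVideoMaxFrameDuration = CMTimeMake(value: 1, timescale: Int32(frameRate))
                unlockForConfiguration()
            } catch {
                print("lockForConfiguration error: \(error.localizedDescription)")
            }
            return
        }
    }
}
// Usage with .inputPriority so the session honors the device's active format:
// captureSession?.sessionPreset = .inputPriority
// videoDevice?.setFormat(width: 1280, height: 720, frameRate: 20)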

How to handle a device rotation for AVCaptureVideoPreviewLayer?

I have a simple camera preview implementation:
import SwiftUI
import AVFoundation
struct CameraView: View {
@StateObject var model = CameraModel()
var body: some View {
CameraPreview(camera: model)
.safeAreaInset(edge: .bottom, alignment: .center, spacing: 0) {
Color.clear
.frame(height: 0)
.background(Material.bar)
}
.ignoresSafeArea(.all, edges: .top)
.onAppear() {
model.check()
}
}
}
struct CameraPreview: UIViewRepresentable {
@ObservedObject var camera: CameraModel
func makeUIView(context: Context) -> some UIView {
let view = UIView(frame: UIScreen.main.bounds)
camera.preview = AVCaptureVideoPreviewLayer(session: camera.session)
camera.preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
camera.preview.frame = view.frame
view.layer.addSublayer(camera.preview)
camera.start()
return view
}
func updateUIView(_ uiView: UIViewType, context: Context) {
}
}
struct CameraView_Previews: PreviewProvider {
static var previews: some View {
CameraView()
}
}
class CameraModel: ObservableObject {
@Published var session = AVCaptureSession()
@Published var alert = false
@Published var preview: AVCaptureVideoPreviewLayer!
func check() {
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized:
setUp()
break
case .notDetermined:
AVCaptureDevice.requestAccess(for: .video) { (status) in
if status {
self.setUp()
}
}
break
case .denied:
self.alert.toggle()
break
default:
break
}
}
func setUp() {
do {
self.session.beginConfiguration()
let device = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back)
let input = try AVCaptureDeviceInput(device: device!)
if self.session.canAddInput(input) {
self.session.addInput(input)
}
self.session.commitConfiguration()
}
catch {
print(error.localizedDescription)
}
}
func start() {
self.session.startRunning()
}
}
The problem is that it doesn't handle screen rotations:
I found similar topics, for example this one, but I am new to iOS development and can't work out where to put that solution. I've checked, and neither View nor UIViewRepresentable has such methods to override.
How to handle screen rotation in AVCaptureVideoPreviewLayer?
If you want the layer frame to update on rotation, you need to create a custom UIView and override layoutSubviews(). Inside layoutSubviews(), update the frame of each sublayer.
The code will be as below.
struct CameraPreview: UIViewRepresentable {
@ObservedObject var camera: CameraModel
class LayerView: UIView {
override func layoutSubviews() {
super.layoutSubviews()
// To disable default animation of layer. You can comment out those lines with `CATransaction` if you want to include
CATransaction.begin()
CATransaction.setDisableActions(true)
layer.sublayers?.forEach({ layer in
layer.frame = frame
})
CATransaction.commit()
}
}
func makeUIView(context: Context) -> some UIView {
let view = LayerView()
camera.preview = AVCaptureVideoPreviewLayer(session: camera.session)
camera.preview.frame = view.frame
camera.preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
view.layer.addSublayer(camera.preview)
camera.session.startRunning()
return view
}
func updateUIView(_ uiView: UIViewType, context: Context) {
}
}
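One caveat worth adding (editor's note): inside layoutSubviews(), sublayer frames are normally set from the view's bounds rather than its frame, since frame is expressed in the superview's coordinate space; sublayer.frame = bounds keeps the preview layer aligned with the view itself.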
This is a working variant with video rotation, based on Dscyre Scotti's answer:
struct CameraView: View {
@StateObject var model = CameraModel()
var body: some View {
CameraPreview(camera: model)
.safeAreaInset(edge: .bottom, alignment: .center, spacing: 0) {
Color.clear
.frame(height: 0)
.background(Material.bar)
}
.ignoresSafeArea(.all, edges: [.top, .horizontal])
.onAppear() {
model.check()
}
}
}
struct CameraPreview: UIViewRepresentable {
@ObservedObject var camera: CameraModel
class LayerView: UIView {
var parent: CameraPreview! = nil
override func layoutSubviews() {
super.layoutSubviews()
// To disable default animation of layer. You can comment out those lines with `CATransaction` if you want to include
CATransaction.begin()
CATransaction.setDisableActions(true)
layer.sublayers?.forEach({ layer in
layer.frame = UIScreen.main.bounds
})
self.parent.camera.rotate(orientation: UIDevice.current.orientation)
CATransaction.commit()
}
}
func makeUIView(context: Context) -> some UIView {
let view = LayerView()
view.parent = self
camera.preview = AVCaptureVideoPreviewLayer(session: camera.session)
camera.preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
camera.preview.frame = view.frame
view.layer.addSublayer(camera.preview)
camera.start()
return view
}
func updateUIView(_ uiView: UIViewType, context: Context) {
}
}
struct CameraView_Previews: PreviewProvider {
static var previews: some View {
CameraView()
}
}
class CameraModel: ObservableObject {
@Published var session = AVCaptureSession()
@Published var alert = false
@Published var preview: AVCaptureVideoPreviewLayer!
func check() {
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized:
setUp()
break
case .notDetermined:
AVCaptureDevice.requestAccess(for: .video) { (status) in
if status {
self.setUp()
}
}
break
case .denied:
self.alert.toggle()
break
default:
break
}
}
func setUp() {
do {
self.session.beginConfiguration()
let device = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back)
let input = try AVCaptureDeviceInput(device: device!)
if self.session.canAddInput(input) {
self.session.addInput(input)
}
self.session.commitConfiguration()
}
catch {
print(error.localizedDescription)
}
}
func start() {
self.session.startRunning()
}
func rotate(orientation: UIDeviceOrientation) {
let videoConnection = self.preview.connection
switch orientation {
case .portraitUpsideDown:
videoConnection?.videoOrientation = .portraitUpsideDown
case .landscapeLeft:
videoConnection?.videoOrientation = .landscapeRight
case .landscapeRight:
videoConnection?.videoOrientation = .landscapeLeft
case .faceDown:
videoConnection?.videoOrientation = .portraitUpsideDown
default:
videoConnection?.videoOrientation = .portrait
}
}
}
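Editor's note: on iOS 17 and later, videoOrientation is deprecated in favor of videoRotationAngle. A minimal sketch of the equivalent on CameraModel, assuming the preview connection exists:
func rotate(toAngle angle: CGFloat) {
    guard let connection = preview.connection,
          connection.isVideoRotationAngleSupported(angle) else { return }
    connection.videoRotationAngle = angle // 0, 90, 180, or 270 degrees
}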

Setting Apple SSO button dimensions in SwiftUI

I can't figure out how to make this Apple sign-in button wider and taller. No matter where I try to add .frame(width: ...), it just seems to move the button around the screen but does not alter the dimensions of the button itself.
This is in ContentView.swift:
struct ContentView : View {
@State var credentials: CredentialsOrError?
var body: some View {
VStack {
if $credentials.wrappedValue == nil {
SignInWithAppleButton(credentials: $credentials)
}
else if $credentials.wrappedValue!.isSuccess
{
Text("User: \($credentials.wrappedValue!.values!.user)")
Text("Given name: \($credentials.wrappedValue!.values?.givenName ?? "")")
Text("Family name: \($credentials.wrappedValue!.values?.familyName ?? "")")
Text("Email: \($credentials.wrappedValue!.values?.email ?? "")")
}
else {
Text($credentials.wrappedValue!.error!.localizedDescription).foregroundColor(.red)
}
}.fullScreenCover(isPresented: .constant($credentials.wrappedValue != nil && $credentials.wrappedValue!.isSuccess) , content: {
HomeView()
})
}
}
This is from SignInWithAppleButton.swift:
struct SignInWithAppleButton: View {
@Binding var credentials: CredentialsOrError?
var body: some View {
let button = ButtonController(credentials: $credentials)
return button
.frame(width: button.button.frame.width, height: button.button.frame.height, alignment: .center)
}
struct ButtonController: UIViewControllerRepresentable {
let button: ASAuthorizationAppleIDButton = ASAuthorizationAppleIDButton()
let vc: UIViewController = UIViewController()
@Binding var credentials: CredentialsOrError?
func makeCoordinator() -> Coordinator {
return Coordinator(self)
}
func makeUIViewController(context: Context) -> UIViewController {
vc.view.addSubview(button)
return vc
}
func updateUIViewController(_ uiViewController: UIViewController, context: Context) { }
class Coordinator: NSObject, ASAuthorizationControllerDelegate, ASAuthorizationControllerPresentationContextProviding {
let parent: ButtonController
init(_ parent: ButtonController) {
self.parent = parent
super.init()
parent.button.addTarget(self, action: #selector(didTapButton), for: .touchUpInside)
}
@objc func didTapButton() {
let appleIDProvider = ASAuthorizationAppleIDProvider()
let request = appleIDProvider.createRequest()
request.requestedScopes = [.fullName, .email]
let authorizationController = ASAuthorizationController(authorizationRequests: [request])
authorizationController.presentationContextProvider = self
authorizationController.delegate = self
authorizationController.performRequests()
}
func presentationAnchor(for controller: ASAuthorizationController) -> ASPresentationAnchor {
return parent.vc.view.window!
}
func authorizationController(controller: ASAuthorizationController, didCompleteWithAuthorization authorization: ASAuthorization) {
guard let credentials = authorization.credential as? ASAuthorizationAppleIDCredential else {
parent.credentials = .error("Credentials are not of type ASAuthorizationAppleIDCredential")
return
}
parent.credentials = .credentials(user: credentials.user, givenName: credentials.fullName?.givenName, familyName: credentials.fullName?.familyName, email: credentials.email)
}
func authorizationController(controller: ASAuthorizationController, didCompleteWithError error: Error) {
parent.credentials = .error(error)
}
}
}
}
Attached is a picture of what it currently looks like on my dark mode iPhone, along with a picture from the light mode simulator.
Related question: how can I hard-code the background of the screen to be white?
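Editor's note: one common fix (a sketch, not from the original post) is to pin the ASAuthorizationAppleIDButton to the hosting controller's view with Auto Layout, so the button fills whatever frame SwiftUI assigns to the representable:
func makeUIViewController(context: Context) -> UIViewController {
    vc.view.addSubview(button)
    button.translatesAutoresizingMaskIntoConstraints = false
    NSLayoutConstraint.activate([
        button.topAnchor.constraint(equalTo: vc.view.topAnchor),
        button.bottomAnchor.constraint(equalTo: vc.view.bottomAnchor),
        button.leadingAnchor.constraint(equalTo: vc.view.leadingAnchor),
        button.trailingAnchor.constraint(equalTo: vc.view.trailingAnchor),
    ])
    return vc
}
// Then size it from SwiftUI, for example:
// SignInWithAppleButton(credentials: $credentials).frame(width: 280, height: 60)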

Live Face Detection- Capture only rectangle part as image

I detect and track faces from the selfie cam feed in real time. I got that working based on this source: https://developer.apple.com/documentation/vision/tracking_the_user_s_face_in_real_time. The following image shows the rectangle placed on the face.
As you can see from the red rectangular part of the screen, I need to capture and save only the part inside the rectangle as an image, not the full screen. How could I do that?
I have tried some other sources, which give me only the full screen as an image, not the rectangular part.
The source code for live face detection is:
import UIKit
import AVKit
import Vision
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
// Main view for showing camera content.
@IBOutlet weak var previewView: UIView?
// AVCapture variables to hold sequence data
var session: AVCaptureSession?
var previewLayer: AVCaptureVideoPreviewLayer?
var videoDataOutput: AVCaptureVideoDataOutput?
var videoDataOutputQueue: DispatchQueue?
var captureDevice: AVCaptureDevice?
var captureDeviceResolution: CGSize = CGSize()
// Layer UI for drawing Vision results
var rootLayer: CALayer?
var detectionOverlayLayer: CALayer?
var detectedFaceRectangleShapeLayer: CAShapeLayer?
var detectedFaceLandmarksShapeLayer: CAShapeLayer?
// Vision requests
private var detectionRequests: [VNDetectFaceRectanglesRequest]?
private var trackingRequests: [VNTrackObjectRequest]?
lazy var sequenceRequestHandler = VNSequenceRequestHandler()
// MARK: UIViewController overrides
override func viewDidLoad() {
super.viewDidLoad()
self.session = self.setupAVCaptureSession()
self.prepareVisionRequest()
self.session?.startRunning()
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
// Ensure that the interface stays locked in Portrait.
override var supportedInterfaceOrientations: UIInterfaceOrientationMask {
return .portrait
}
// Ensure that the interface stays locked in Portrait.
override var preferredInterfaceOrientationForPresentation: UIInterfaceOrientation {
return .portrait
}
// MARK: AVCapture Setup
/// - Tag: CreateCaptureSession
fileprivate func setupAVCaptureSession() -> AVCaptureSession? {
let captureSession = AVCaptureSession()
do {
let inputDevice = try self.configureFrontCamera(for: captureSession)
self.configureVideoDataOutput(for: inputDevice.device, resolution: inputDevice.resolution, captureSession: captureSession)
self.designatePreviewLayer(for: captureSession)
return captureSession
} catch let executionError as NSError {
self.presentError(executionError)
} catch {
self.presentErrorAlert(message: "An unexpected failure has occured")
}
self.teardownAVCapture()
return nil
}
/// - Tag: ConfigureDeviceResolution
fileprivate func highestResolution420Format(for device: AVCaptureDevice) -> (format: AVCaptureDevice.Format, resolution: CGSize)? {
var highestResolutionFormat: AVCaptureDevice.Format? = nil
var highestResolutionDimensions = CMVideoDimensions(width: 0, height: 0)
for format in device.formats {
let deviceFormat = format as AVCaptureDevice.Format
let deviceFormatDescription = deviceFormat.formatDescription
if CMFormatDescriptionGetMediaSubType(deviceFormatDescription) == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange {
let candidateDimensions = CMVideoFormatDescriptionGetDimensions(deviceFormatDescription)
if (highestResolutionFormat == nil) || (candidateDimensions.width > highestResolutionDimensions.width) {
highestResolutionFormat = deviceFormat
highestResolutionDimensions = candidateDimensions
}
}
}
if highestResolutionFormat != nil {
let resolution = CGSize(width: CGFloat(highestResolutionDimensions.width), height: CGFloat(highestResolutionDimensions.height))
return (highestResolutionFormat!, resolution)
}
return nil
}
fileprivate func configureFrontCamera(for captureSession: AVCaptureSession) throws -> (device: AVCaptureDevice, resolution: CGSize) {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .front)
if let device = deviceDiscoverySession.devices.first {
if let deviceInput = try? AVCaptureDeviceInput(device: device) {
if captureSession.canAddInput(deviceInput) {
captureSession.addInput(deviceInput)
}
if let highestResolution = self.highestResolution420Format(for: device) {
try device.lockForConfiguration()
device.activeFormat = highestResolution.format
device.unlockForConfiguration()
return (device, highestResolution.resolution)
}
}
}
throw NSError(domain: "ViewController", code: 1, userInfo: nil)
}
/// - Tag: CreateSerialDispatchQueue
fileprivate func configureVideoDataOutput(for inputDevice: AVCaptureDevice, resolution: CGSize, captureSession: AVCaptureSession) {
let videoDataOutput = AVCaptureVideoDataOutput()
videoDataOutput.alwaysDiscardsLateVideoFrames = true
// Create a serial dispatch queue used for the sample buffer delegate as well as when a still image is captured.
// A serial dispatch queue must be used to guarantee that video frames will be delivered in order.
let videoDataOutputQueue = DispatchQueue(label: "com.example.apple-samplecode.VisionFaceTrack")
videoDataOutput.setSampleBufferDelegate(self, queue: videoDataOutputQueue)
if captureSession.canAddOutput(videoDataOutput) {
captureSession.addOutput(videoDataOutput)
}
videoDataOutput.connection(with: .video)?.isEnabled = true
if let captureConnection = videoDataOutput.connection(with: AVMediaType.video) {
if captureConnection.isCameraIntrinsicMatrixDeliverySupported {
captureConnection.isCameraIntrinsicMatrixDeliveryEnabled = true
}
}
self.videoDataOutput = videoDataOutput
self.videoDataOutputQueue = videoDataOutputQueue
self.captureDevice = inputDevice
self.captureDeviceResolution = resolution
}
/// - Tag: DesignatePreviewLayer
fileprivate func designatePreviewLayer(for captureSession: AVCaptureSession) {
let videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.previewLayer = videoPreviewLayer
videoPreviewLayer.name = "CameraPreview"
videoPreviewLayer.backgroundColor = UIColor.black.cgColor
videoPreviewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
if let previewRootLayer = self.previewView?.layer {
self.rootLayer = previewRootLayer
previewRootLayer.masksToBounds = true
videoPreviewLayer.frame = previewRootLayer.bounds
previewRootLayer.addSublayer(videoPreviewLayer)
}
}
// Removes infrastructure for AVCapture as part of cleanup.
fileprivate func teardownAVCapture() {
self.videoDataOutput = nil
self.videoDataOutputQueue = nil
if let previewLayer = self.previewLayer {
previewLayer.removeFromSuperlayer()
self.previewLayer = nil
}
}
// MARK: Helper Methods for Error Presentation
fileprivate func presentErrorAlert(withTitle title: String = "Unexpected Failure", message: String) {
let alertController = UIAlertController(title: title, message: message, preferredStyle: .alert)
self.present(alertController, animated: true)
}
fileprivate func presentError(_ error: NSError) {
self.presentErrorAlert(withTitle: "Failed with error \(error.code)", message: error.localizedDescription)
}
// MARK: Helper Methods for Handling Device Orientation & EXIF
fileprivate func radiansForDegrees(_ degrees: CGFloat) -> CGFloat {
return CGFloat(Double(degrees) * Double.pi / 180.0)
}
func exifOrientationForDeviceOrientation(_ deviceOrientation: UIDeviceOrientation) -> CGImagePropertyOrientation {
switch deviceOrientation {
case .portraitUpsideDown:
return .rightMirrored
case .landscapeLeft:
return .downMirrored
case .landscapeRight:
return .upMirrored
default:
return .leftMirrored
}
}
func exifOrientationForCurrentDeviceOrientation() -> CGImagePropertyOrientation {
return exifOrientationForDeviceOrientation(UIDevice.current.orientation)
}
// MARK: Performing Vision Requests
/// - Tag: WriteCompletionHandler
fileprivate func prepareVisionRequest() {
//self.trackingRequests = []
var requests = [VNTrackObjectRequest]()
let faceDetectionRequest = VNDetectFaceRectanglesRequest(completionHandler: { (request, error) in
if error != nil {
print("FaceDetection error: \(String(describing: error)).")
}
guard let faceDetectionRequest = request as? VNDetectFaceRectanglesRequest,
let results = faceDetectionRequest.results as? [VNFaceObservation] else {
return
}
DispatchQueue.main.async {
// Add the observations to the tracking list
for observation in results {
let faceTrackingRequest = VNTrackObjectRequest(detectedObjectObservation: observation)
requests.append(faceTrackingRequest)
}
self.trackingRequests = requests
}
})
// Start with detection. Find face, then track it.
self.detectionRequests = [faceDetectionRequest]
self.sequenceRequestHandler = VNSequenceRequestHandler()
self.setupVisionDrawingLayers()
}
// MARK: Drawing Vision Observations
fileprivate func setupVisionDrawingLayers() {
let captureDeviceResolution = self.captureDeviceResolution
let captureDeviceBounds = CGRect(x: 0,
y: 0,
width: captureDeviceResolution.width,
height: captureDeviceResolution.height)
let captureDeviceBoundsCenterPoint = CGPoint(x: captureDeviceBounds.midX,
y: captureDeviceBounds.midY)
let normalizedCenterPoint = CGPoint(x: 0.5, y: 0.5)
guard let rootLayer = self.rootLayer else {
self.presentErrorAlert(message: "view was not property initialized")
return
}
let overlayLayer = CALayer()
overlayLayer.name = "DetectionOverlay"
overlayLayer.masksToBounds = true
overlayLayer.anchorPoint = normalizedCenterPoint
overlayLayer.bounds = captureDeviceBounds
overlayLayer.position = CGPoint(x: rootLayer.bounds.midX, y: rootLayer.bounds.midY)
let faceRectangleShapeLayer = CAShapeLayer()
faceRectangleShapeLayer.name = "RectangleOutlineLayer"
faceRectangleShapeLayer.bounds = captureDeviceBounds
faceRectangleShapeLayer.anchorPoint = normalizedCenterPoint
faceRectangleShapeLayer.position = captureDeviceBoundsCenterPoint
faceRectangleShapeLayer.fillColor = nil
faceRectangleShapeLayer.strokeColor = UIColor.green.withAlphaComponent(0.7).cgColor
faceRectangleShapeLayer.lineWidth = 5
faceRectangleShapeLayer.shadowOpacity = 0.7
faceRectangleShapeLayer.shadowRadius = 5
let faceLandmarksShapeLayer = CAShapeLayer()
faceLandmarksShapeLayer.name = "FaceLandmarksLayer"
faceLandmarksShapeLayer.bounds = captureDeviceBounds
faceLandmarksShapeLayer.anchorPoint = normalizedCenterPoint
faceLandmarksShapeLayer.position = captureDeviceBoundsCenterPoint
faceLandmarksShapeLayer.fillColor = nil
faceLandmarksShapeLayer.strokeColor = UIColor.yellow.withAlphaComponent(0.7).cgColor
faceLandmarksShapeLayer.lineWidth = 3
faceLandmarksShapeLayer.shadowOpacity = 0.7
faceLandmarksShapeLayer.shadowRadius = 5
overlayLayer.addSublayer(faceRectangleShapeLayer)
faceRectangleShapeLayer.addSublayer(faceLandmarksShapeLayer)
rootLayer.addSublayer(overlayLayer)
self.detectionOverlayLayer = overlayLayer
self.detectedFaceRectangleShapeLayer = faceRectangleShapeLayer
self.detectedFaceLandmarksShapeLayer = faceLandmarksShapeLayer
self.updateLayerGeometry()
}
fileprivate func updateLayerGeometry() {
guard let overlayLayer = self.detectionOverlayLayer,
let rootLayer = self.rootLayer,
let previewLayer = self.previewLayer
else {
return
}
CATransaction.setValue(NSNumber(value: true), forKey: kCATransactionDisableActions)
let videoPreviewRect = previewLayer.layerRectConverted(fromMetadataOutputRect: CGRect(x: 0, y: 0, width: 1, height: 1))
var rotation: CGFloat
var scaleX: CGFloat
var scaleY: CGFloat
// Rotate the layer into screen orientation.
switch UIDevice.current.orientation {
case .portraitUpsideDown:
rotation = 180
scaleX = videoPreviewRect.width / captureDeviceResolution.width
scaleY = videoPreviewRect.height / captureDeviceResolution.height
case .landscapeLeft:
rotation = 90
scaleX = videoPreviewRect.height / captureDeviceResolution.width
scaleY = scaleX
case .landscapeRight:
rotation = -90
scaleX = videoPreviewRect.height / captureDeviceResolution.width
scaleY = scaleX
default:
rotation = 0
scaleX = videoPreviewRect.width / captureDeviceResolution.width
scaleY = videoPreviewRect.height / captureDeviceResolution.height
}
// Scale and mirror the image to ensure upright presentation.
let affineTransform = CGAffineTransform(rotationAngle: radiansForDegrees(rotation))
.scaledBy(x: scaleX, y: -scaleY)
overlayLayer.setAffineTransform(affineTransform)
// Cover entire screen UI.
let rootLayerBounds = rootLayer.bounds
overlayLayer.position = CGPoint(x: rootLayerBounds.midX, y: rootLayerBounds.midY)
}
fileprivate func addPoints(in landmarkRegion: VNFaceLandmarkRegion2D, to path: CGMutablePath, applying affineTransform: CGAffineTransform, closingWhenComplete closePath: Bool) {
let pointCount = landmarkRegion.pointCount
if pointCount > 1 {
let points: [CGPoint] = landmarkRegion.normalizedPoints
path.move(to: points[0], transform: affineTransform)
path.addLines(between: points, transform: affineTransform)
if closePath {
path.addLine(to: points[0], transform: affineTransform)
path.closeSubpath()
}
}
}
fileprivate func addIndicators(to faceRectanglePath: CGMutablePath, faceLandmarksPath: CGMutablePath, for faceObservation: VNFaceObservation) {
let displaySize = self.captureDeviceResolution
let faceBounds = VNImageRectForNormalizedRect(faceObservation.boundingBox, Int(displaySize.width), Int(displaySize.height))
faceRectanglePath.addRect(faceBounds)
if let landmarks = faceObservation.landmarks {
// Landmarks are relative to -- and normalized within --- face bounds
let affineTransform = CGAffineTransform(translationX: faceBounds.origin.x, y: faceBounds.origin.y)
.scaledBy(x: faceBounds.size.width, y: faceBounds.size.height)
// Treat eyebrows and lines as open-ended regions when drawing paths.
let openLandmarkRegions: [VNFaceLandmarkRegion2D?] = [
landmarks.leftEyebrow,
landmarks.rightEyebrow,
landmarks.faceContour,
landmarks.noseCrest,
landmarks.medianLine
]
for openLandmarkRegion in openLandmarkRegions where openLandmarkRegion != nil {
self.addPoints(in: openLandmarkRegion!, to: faceLandmarksPath, applying: affineTransform, closingWhenComplete: false)
}
// Draw eyes, lips, and nose as closed regions.
let closedLandmarkRegions: [VNFaceLandmarkRegion2D?] = [
landmarks.leftEye,
landmarks.rightEye,
landmarks.outerLips,
landmarks.innerLips,
landmarks.nose
]
for closedLandmarkRegion in closedLandmarkRegions where closedLandmarkRegion != nil {
self.addPoints(in: closedLandmarkRegion!, to: faceLandmarksPath, applying: affineTransform, closingWhenComplete: true)
}
}
}
/// - Tag: DrawPaths
fileprivate func drawFaceObservations(_ faceObservations: [VNFaceObservation]) {
guard let faceRectangleShapeLayer = self.detectedFaceRectangleShapeLayer,
let faceLandmarksShapeLayer = self.detectedFaceLandmarksShapeLayer
else {
return
}
CATransaction.begin()
CATransaction.setValue(NSNumber(value: true), forKey: kCATransactionDisableActions)
let faceRectanglePath = CGMutablePath()
let faceLandmarksPath = CGMutablePath()
for faceObservation in faceObservations {
self.addIndicators(to: faceRectanglePath,
faceLandmarksPath: faceLandmarksPath,
for: faceObservation)
}
faceRectangleShapeLayer.path = faceRectanglePath
faceLandmarksShapeLayer.path = faceLandmarksPath
self.updateLayerGeometry()
CATransaction.commit()
}
// MARK: AVCaptureVideoDataOutputSampleBufferDelegate
/// - Tag: PerformRequests
// Handle delegate method callback on receiving a sample buffer.
public func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
var requestHandlerOptions: [VNImageOption: AnyObject] = [:]
let cameraIntrinsicData = CMGetAttachment(sampleBuffer, key: kCMSampleBufferAttachmentKey_CameraIntrinsicMatrix, attachmentModeOut: nil)
if cameraIntrinsicData != nil {
requestHandlerOptions[VNImageOption.cameraIntrinsics] = cameraIntrinsicData
}
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
print("Failed to obtain a CVPixelBuffer for the current output frame.")
return
}
let exifOrientation = self.exifOrientationForCurrentDeviceOrientation()
guard let requests = self.trackingRequests, !requests.isEmpty else {
// No tracking object detected, so perform initial detection
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer,
orientation: exifOrientation,
options: requestHandlerOptions)
do {
guard let detectRequests = self.detectionRequests else {
return
}
try imageRequestHandler.perform(detectRequests)
} catch let error as NSError {
NSLog("Failed to perform FaceRectangleRequest: %#", error)
}
return
}
do {
try self.sequenceRequestHandler.perform(requests,
on: pixelBuffer,
orientation: exifOrientation)
} catch let error as NSError {
NSLog("Failed to perform SequenceRequest: %#", error)
}
// Setup the next round of tracking.
var newTrackingRequests = [VNTrackObjectRequest]()
for trackingRequest in requests {
guard let results = trackingRequest.results else {
return
}
guard let observation = results[0] as? VNDetectedObjectObservation else {
return
}
if !trackingRequest.isLastFrame {
if observation.confidence > 0.3 {
trackingRequest.inputObservation = observation
} else {
trackingRequest.isLastFrame = true
}
newTrackingRequests.append(trackingRequest)
}
}
self.trackingRequests = newTrackingRequests
if newTrackingRequests.isEmpty {
// Nothing to track, so abort.
return
}
// Perform face landmark tracking on detected faces.
var faceLandmarkRequests = [VNDetectFaceLandmarksRequest]()
// Perform landmark detection on tracked faces.
for trackingRequest in newTrackingRequests {
let faceLandmarksRequest = VNDetectFaceLandmarksRequest(completionHandler: { (request, error) in
if error != nil {
print("FaceLandmarks error: \(String(describing: error)).")
}
guard let landmarksRequest = request as? VNDetectFaceLandmarksRequest,
let results = landmarksRequest.results as? [VNFaceObservation] else {
return
}
// Perform all UI updates (drawing) on the main queue, not the background queue on which this handler is being called.
DispatchQueue.main.async {
self.drawFaceObservations(results)
}
})
guard let trackingResults = trackingRequest.results else {
return
}
guard let observation = trackingResults[0] as? VNDetectedObjectObservation else {
return
}
let faceObservation = VNFaceObservation(boundingBox: observation.boundingBox)
faceLandmarksRequest.inputFaceObservations = [faceObservation]
// Continue to track detected facial landmarks.
faceLandmarkRequests.append(faceLandmarksRequest)
let imageRequestHandler = VNImageRequestHandler(cvPixelBuffer: pixelBuffer,
orientation: exifOrientation,
options: requestHandlerOptions)
do {
try imageRequestHandler.perform(faceLandmarkRequests)
} catch let error as NSError {
NSLog("Failed to perform FaceLandmarkRequest: %#", error)
}
}
}
}
If you want to save only the rectangular part of the image and you know the rectangle's frame (CGRect) value, you can crop the original image to that frame:
let rectFrame: CGRect = ... // the face rectangle, in the image's pixel coordinate space
let image: UIImage = ...    // the captured frame
if let croppedCGImage = image.cgImage?.cropping(to: rectFrame) {
    let croppedImage = UIImage(cgImage: croppedCGImage)
    // save croppedImage as needed
}
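Editor's note: one subtlety, sketched under stated assumptions rather than taken from the answer: Vision bounding boxes are normalized with a bottom-left origin, while CGImage.cropping(to:) expects pixel coordinates with a top-left origin, so the rect must be scaled and flipped first:
import Vision

/// Converts a normalized Vision bounding box into a CGImage crop rect.
func cropRect(for boundingBox: CGRect, imageSize: CGSize) -> CGRect {
    // Scale the normalized rect up to pixel coordinates (bottom-left origin).
    let rect = VNImageRectForNormalizedRect(boundingBox,
                                            Int(imageSize.width),
                                            Int(imageSize.height))
    // Flip Y to convert to CGImage's top-left origin.
    return CGRect(x: rect.origin.x,
                  y: imageSize.height - rect.maxY,
                  width: rect.width,
                  height: rect.height)
}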

SwiftUI Custom Camera View?

I'm playing with SwiftUI and trying to build a custom camera with it. I found tutorials on how to use the system's built-in camera with SwiftUI (using an image picker controller) and how to build a custom camera with a storyboard.
I've already built a struct CameraViewController: UIViewControllerRepresentable that initializes the camera and sets up the capture session (using AVFoundation).
First, I'm not sure how to set up func makeUIViewController for the CameraViewController struct, since I don't know which controller class to use.
Also, I don't know how to integrate my CameraViewController into the app with SwiftUI. Can someone help?
Thanks!
SwiftUI - Custom Camera Implementation Example
1. CustomCameraPhotoView / Main Screen - Photo Preview
2. CustomCameraView / Camera Screen - Combines SwiftUI View (Record Button) with UIKit ViewController
3. CustomCameraRepresentable / Custom Camera ViewController SwiftUI Wrapper
4. CustomCameraController / Custom Camera View Controller
5. CaptureButtonView / SwiftUI View - Capture Button
Note: avoid the app crashing by adding the Privacy - Camera Usage Description key (NSCameraUsageDescription) to the Info.plist file.
import SwiftUI
import AVFoundation
struct CustomCameraPhotoView: View {
@State private var image: Image?
@State private var showingCustomCamera = false
@State private var inputImage: UIImage?
var body: some View {
NavigationView {
VStack {
ZStack {
Rectangle().fill(Color.secondary)
if image != nil
{
image?
.resizable()
.aspectRatio(contentMode: .fill)
}
else
{
Text("Take Photo").foregroundColor(.white).font(.headline)
}
}
.onTapGesture {
self.showingCustomCamera = true
}
}
.sheet(isPresented: $showingCustomCamera, onDismiss: loadImage) {
CustomCameraView(image: self.$inputImage)
}
.edgesIgnoringSafeArea(.all)
}
}
func loadImage() {
guard let inputImage = inputImage else { return }
image = Image(uiImage: inputImage)
}
}
struct CustomCameraView: View {
@Binding var image: UIImage?
@State var didTapCapture: Bool = false
var body: some View {
ZStack(alignment: .bottom) {
CustomCameraRepresentable(image: self.$image, didTapCapture: $didTapCapture)
CaptureButtonView().onTapGesture {
self.didTapCapture = true
}
}
}
}
struct CustomCameraRepresentable: UIViewControllerRepresentable {
@Environment(\.presentationMode) var presentationMode
@Binding var image: UIImage?
@Binding var didTapCapture: Bool
func makeUIViewController(context: Context) -> CustomCameraController {
let controller = CustomCameraController()
controller.delegate = context.coordinator
return controller
}
func updateUIViewController(_ cameraViewController: CustomCameraController, context: Context) {
if(self.didTapCapture) {
cameraViewController.didTapRecord()
}
}
func makeCoordinator() -> Coordinator {
Coordinator(self)
}
class Coordinator: NSObject, UINavigationControllerDelegate, AVCapturePhotoCaptureDelegate {
let parent: CustomCameraRepresentable
init(_ parent: CustomCameraRepresentable) {
self.parent = parent
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
parent.didTapCapture = false
if let imageData = photo.fileDataRepresentation() {
parent.image = UIImage(data: imageData)
}
parent.presentationMode.wrappedValue.dismiss()
}
}
}
class CustomCameraController: UIViewController {
var image: UIImage?
var captureSession = AVCaptureSession()
var backCamera: AVCaptureDevice?
var frontCamera: AVCaptureDevice?
var currentCamera: AVCaptureDevice?
var photoOutput: AVCapturePhotoOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
//DELEGATE
var delegate: AVCapturePhotoCaptureDelegate?
func didTapRecord() {
let settings = AVCapturePhotoSettings()
photoOutput?.capturePhoto(with: settings, delegate: delegate!)
}
override func viewDidLoad() {
super.viewDidLoad()
setup()
}
func setup() {
setupCaptureSession()
setupDevice()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
}
func setupCaptureSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}
func setupDevice() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera],
mediaType: AVMediaType.video,
position: AVCaptureDevice.Position.unspecified)
for device in deviceDiscoverySession.devices {
switch device.position {
case AVCaptureDevice.Position.front:
self.frontCamera = device
case AVCaptureDevice.Position.back:
self.backCamera = device
default:
break
}
}
self.currentCamera = self.backCamera
}
func setupInputOutput() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
captureSession.addOutput(photoOutput!)
} catch {
print(error)
}
}
func setupPreviewLayer()
{
self.cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
self.cameraPreviewLayer?.frame = self.view.frame
self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}
func startRunningCaptureSession(){
captureSession.startRunning()
}
}
struct CaptureButtonView: View {
@State private var animationAmount: CGFloat = 1
var body: some View {
Image(systemName: "video").font(.largeTitle)
.padding(30)
.background(Color.red)
.foregroundColor(.white)
.clipShape(Circle())
.overlay(
Circle()
.stroke(Color.red)
.scaleEffect(animationAmount)
.opacity(Double(2 - animationAmount))
.animation(Animation.easeOut(duration: 1)
.repeatForever(autoreverses: false))
)
.onAppear
{
self.animationAmount = 2
}
}
}
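A note on the capture flow in this answer: tapping CaptureButtonView flips the didTapCapture binding, SwiftUI then calls updateUIViewController(_:context:), which invokes didTapRecord(); once the photo is processed, the Coordinator resets the flag and dismisses the sheet. (As the refactored version below points out, a closure is arguably a cleaner trigger than a Bool.)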
Here's a version where you can pass any frame size for the camera preview layer.
If you have a back button, ozmpai's answer does not work out of the box. I have edited ozmpai's answer, so all kudos still go to him.
I don't like the shared singleton, but for now I haven't figured out a better approach for adapting to the SwiftUI view lifecycle, as SwiftUI is probably using black magic behind it.
Also, passing a Bool to take a photo is probably not the greatest approach, so I have refactored it to use a closure.
import SwiftUI
struct MyCameraView: View {
@State private var image: UIImage?
var customCameraRepresentable = CustomCameraRepresentable(
cameraFrame: .zero,
imageCompletion: { _ in }
)
var body: some View {
CustomCameraView(
customCameraRepresentable: customCameraRepresentable,
imageCompletion: { newImage in
self.image = newImage
}
)
.onAppear {
customCameraRepresentable.startRunningCaptureSession()
}
.onDisappear {
customCameraRepresentable.stopRunningCaptureSession()
}
if let image = image {
Image(uiImage: image)
.resizable()
.aspectRatio(contentMode: .fit)
}
}
}
import SwiftUI
struct CustomCameraView: View {
var customCameraRepresentable: CustomCameraRepresentable
var imageCompletion: ((UIImage) -> Void)
var body: some View {
GeometryReader { geometry in
VStack {
let frame = CGRect(x: 0, y: 0, width: geometry.size.width, height: geometry.size.height - 100)
cameraView(frame: frame)
HStack {
CameraControlsView(captureButtonAction: { [weak customCameraRepresentable] in
customCameraRepresentable?.takePhoto()
})
}
}
}
}
private func cameraView(frame: CGRect) -> CustomCameraRepresentable {
customCameraRepresentable.cameraFrame = frame
customCameraRepresentable.imageCompletion = imageCompletion
return customCameraRepresentable
}
}
import SwiftUI
struct CameraControlsView: View {
var captureButtonAction: (() -> Void)
var body: some View {
CaptureButtonView()
.onTapGesture {
captureButtonAction()
}
}
}
import SwiftUI
struct CaptureButtonView: View {
@Environment(\.colorScheme) var colorScheme
@State private var animationAmount: CGFloat = 1
var body: some View {
Image(systemName: "camera")
.font(.largeTitle)
.padding(20)
.background(colorScheme == .dark ? Color.white : Color.black)
.foregroundColor(colorScheme == .dark ? Color.black : Color.white)
.clipShape(Circle())
.overlay(
Circle()
.stroke(colorScheme == .dark ? Color.white : Color.black)
.scaleEffect(animationAmount)
.opacity(Double(2 - animationAmount))
.animation(
Animation.easeOut(duration: 1)
.repeatForever(autoreverses: false)
)
)
.onAppear {
animationAmount = 2
}
}
}
import SwiftUI
import AVFoundation
final class CustomCameraController: UIViewController {
static let shared = CustomCameraController()
private var captureSession = AVCaptureSession()
private var backCamera: AVCaptureDevice?
private var frontCamera: AVCaptureDevice?
private var currentCamera: AVCaptureDevice?
private var photoOutput: AVCapturePhotoOutput?
private var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
weak var captureDelegate: AVCapturePhotoCaptureDelegate?
override func viewDidLoad() {
super.viewDidLoad()
setup()
}
func configurePreviewLayer(with frame: CGRect) {
let cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
cameraPreviewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewLayer.frame = frame
view.layer.insertSublayer(cameraPreviewLayer, at: 0)
}
func startRunningCaptureSession() {
captureSession.startRunning()
}
func stopRunningCaptureSession() {
captureSession.stopRunning()
}
func takePhoto() {
let settings = AVCapturePhotoSettings()
guard let delegate = captureDelegate else {
print("delegate nil")
return
}
photoOutput?.capturePhoto(with: settings, delegate: delegate)
}
// MARK: Private
private func setup() {
setupCaptureSession()
setupDevice()
setupInputOutput()
}
private func setupCaptureSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}
private func setupDevice() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(
deviceTypes: [.builtInWideAngleCamera],
mediaType: .video,
position: .unspecified
)
for device in deviceDiscoverySession.devices {
switch device.position {
case AVCaptureDevice.Position.front:
frontCamera = device
case AVCaptureDevice.Position.back:
backCamera = device
default:
break
}
}
self.currentCamera = self.backCamera
}
private func setupInputOutput() {
do {
guard let currentCamera = currentCamera else { return }
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
photoOutput?.setPreparedPhotoSettingsArray(
[AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])],
completionHandler: nil
)
guard let photoOutput = photoOutput else { return }
captureSession.addOutput(photoOutput)
} catch {
print(error)
}
}
}
struct CustomCameraRepresentable: UIViewControllerRepresentable {
// @Environment(\.presentationMode) var presentationMode
init(cameraFrame: CGRect, imageCompletion: @escaping ((UIImage) -> Void)) {
self.cameraFrame = cameraFrame
self.imageCompletion = imageCompletion
}
@State var cameraFrame: CGRect
@State var imageCompletion: ((UIImage) -> Void)
func makeCoordinator() -> Coordinator {
Coordinator(self)
}
func makeUIViewController(context: Context) -> CustomCameraController {
CustomCameraController.shared.configurePreviewLayer(with: cameraFrame)
CustomCameraController.shared.captureDelegate = context.coordinator
return CustomCameraController.shared
}
func updateUIViewController(_ cameraViewController: CustomCameraController, context: Context) {}
func takePhoto() {
CustomCameraController.shared.takePhoto()
}
func startRunningCaptureSession() {
CustomCameraController.shared.startRunningCaptureSession()
}
func stopRunningCaptureSession() {
CustomCameraController.shared.stopRunningCaptureSession()
}
}
extension CustomCameraRepresentable {
final class Coordinator: NSObject, AVCapturePhotoCaptureDelegate {
private let parent: CustomCameraRepresentable
init(_ parent: CustomCameraRepresentable) {
self.parent = parent
}
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishProcessingPhoto photo: AVCapturePhoto,
error: Error?) {
if let imageData = photo.fileDataRepresentation() {
guard let newImage = UIImage(data: imageData) else { return }
parent.imageCompletion(newImage)
}
// parent.presentationMode.wrappedValue.dismiss()
}
}
}
