I'm playing with SwiftUI and trying to build a custom camera with it. I found tutorials on how to use the system's built-in camera with SwiftUI (using UIImagePickerController) and how to build a custom camera with a storyboard.
I've already built a struct CameraViewController: UIViewControllerRepresentable that initializes the camera and sets up the capture session (using AVFoundation).
First, I'm not sure how to set up func makeUIViewController for the CameraViewController struct, since I don't know which view controller class to return.
Also, I don't know how to integrate my CameraViewController class into the app with SwiftUI. Can someone help?
Thanks!
SwiftUI - Custom Camera Implementation Example
1. CustomCameraPhotoView / Main Screen - Photo Preview
2. CustomCameraView / Camera Screen - Combines SwiftUI View (Record Button) with UIKit ViewController
3. CustomCameraRepresentable / Custom Camera ViewController SwiftUI Wrapper
4. CustomCameraController / Custom Camera View Controller
5. CaptureButtonView / SwiftUI View - Capture Button
Note: To avoid a crash, add the Privacy - Camera Usage Description key (raw key NSCameraUsageDescription) to the Info.plist file.
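As a companion sketch (not part of the original answer), you can also check camera authorization in code before presenting the camera; AVCaptureDevice.requestAccess is what triggers the system prompt that shows this description:
import AVFoundation

func ensureCameraAccess(_ completion: @escaping (Bool) -> Void) {
    switch AVCaptureDevice.authorizationStatus(for: .video) {
    case .authorized:
        completion(true)
    case .notDetermined:
        // First request: presents the system alert with the usage description from Info.plist.
        AVCaptureDevice.requestAccess(for: .video) { granted in
            DispatchQueue.main.async { completion(granted) }
        }
    default:
        // .denied or .restricted: camera access must be changed in Settings.
        completion(false)
    }
}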
import SwiftUI
import AVFoundation
struct CustomCameraPhotoView: View {
@State private var image: Image?
@State private var showingCustomCamera = false
@State private var inputImage: UIImage?
var body: some View {
NavigationView {
VStack {
ZStack {
Rectangle().fill(Color.secondary)
if image != nil
{
image?
.resizable()
.aspectRatio(contentMode: .fill)
}
else
{
Text("Take Photo").foregroundColor(.white).font(.headline)
}
}
.onTapGesture {
self.showingCustomCamera = true
}
}
.sheet(isPresented: $showingCustomCamera, onDismiss: loadImage) {
CustomCameraView(image: self.$inputImage)
}
.edgesIgnoringSafeArea(.all)
}
}
func loadImage() {
guard let inputImage = inputImage else { return }
image = Image(uiImage: inputImage)
}
}
struct CustomCameraView: View {
@Binding var image: UIImage?
@State var didTapCapture: Bool = false
var body: some View {
ZStack(alignment: .bottom) {
CustomCameraRepresentable(image: self.$image, didTapCapture: $didTapCapture)
CaptureButtonView().onTapGesture {
self.didTapCapture = true
}
}
}
}
struct CustomCameraRepresentable: UIViewControllerRepresentable {
@Environment(\.presentationMode) var presentationMode
@Binding var image: UIImage?
@Binding var didTapCapture: Bool
func makeUIViewController(context: Context) -> CustomCameraController {
let controller = CustomCameraController()
controller.delegate = context.coordinator
return controller
}
func updateUIViewController(_ cameraViewController: CustomCameraController, context: Context) {
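// SwiftUI calls updateUIViewController whenever a bound value it reads changes,
// so flipping didTapCapture from the capture button re-runs this and triggers the photo capture.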
if self.didTapCapture {
cameraViewController.didTapRecord()
}
}
func makeCoordinator() -> Coordinator {
Coordinator(self)
}
class Coordinator: NSObject, UINavigationControllerDelegate, AVCapturePhotoCaptureDelegate {
let parent: CustomCameraRepresentable
init(_ parent: CustomCameraRepresentable) {
self.parent = parent
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
parent.didTapCapture = false
if let imageData = photo.fileDataRepresentation() {
parent.image = UIImage(data: imageData)
}
parent.presentationMode.wrappedValue.dismiss()
}
}
}
class CustomCameraController: UIViewController {
var image: UIImage?
var captureSession = AVCaptureSession()
var backCamera: AVCaptureDevice?
var frontCamera: AVCaptureDevice?
var currentCamera: AVCaptureDevice?
var photoOutput: AVCapturePhotoOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
//DELEGATE
var delegate: AVCapturePhotoCaptureDelegate?
func didTapRecord() {
let settings = AVCapturePhotoSettings()
photoOutput?.capturePhoto(with: settings, delegate: delegate!)
}
override func viewDidLoad() {
super.viewDidLoad()
setup()
}
func setup() {
setupCaptureSession()
setupDevice()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
}
func setupCaptureSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}
func setupDevice() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera],
mediaType: AVMediaType.video,
position: AVCaptureDevice.Position.unspecified)
for device in deviceDiscoverySession.devices {
switch device.position {
case AVCaptureDevice.Position.front:
self.frontCamera = device
case AVCaptureDevice.Position.back:
self.backCamera = device
default:
break
}
}
self.currentCamera = self.backCamera
}
func setupInputOutput() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
captureSession.addOutput(photoOutput!)
} catch {
print(error)
}
}
func setupPreviewLayer()
{
self.cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
self.cameraPreviewLayer?.frame = self.view.frame
self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}
func startRunningCaptureSession(){
captureSession.startRunning()
}
}
struct CaptureButtonView: View {
@State private var animationAmount: CGFloat = 1
var body: some View {
Image(systemName: "video").font(.largeTitle)
.padding(30)
.background(Color.red)
.foregroundColor(.white)
.clipShape(Circle())
.overlay(
Circle()
.stroke(Color.red)
.scaleEffect(animationAmount)
.opacity(Double(2 - animationAmount))
.animation(Animation.easeOut(duration: 1)
.repeatForever(autoreverses: false))
)
.onAppear
{
self.animationAmount = 2
}
}
}
Here's a version where you can pass any frame size for the camera preview layer.
If you have a back button, ozmpai's answer does not work out of the box. I have edited ozmpai's answer, so all kudos still go to him.
I don't like the shared singleton, but for now I haven't figured out a better approach for adapting to the SwiftUI view lifecycle, as SwiftUI is probably doing black magic behind the scenes.
Also, passing a Bool to take a photo is probably not the greatest approach, so I have refactored it with a closure.
import SwiftUI
struct MyCameraView: View {
@State private var image: UIImage?
var customCameraRepresentable = CustomCameraRepresentable(
cameraFrame: .zero,
imageCompletion: { _ in }
)
var body: some View {
CustomCameraView(
customCameraRepresentable: customCameraRepresentable,
imageCompletion: { newImage in
self.image = newImage
}
)
.onAppear {
customCameraRepresentable.startRunningCaptureSession()
}
.onDisappear {
customCameraRepresentable.stopRunningCaptureSession()
}
if let image = image {
Image(uiImage: image)
.resizable()
.aspectRatio(contentMode: .fit)
}
}
}
import SwiftUI
struct CustomCameraView: View {
var customCameraRepresentable: CustomCameraRepresentable
var imageCompletion: ((UIImage) -> Void)
var body: some View {
GeometryReader { geometry in
VStack {
let frame = CGRect(x: 0, y: 0, width: geometry.size.width, height: geometry.size.height - 100)
cameraView(frame: frame)
HStack {
CameraControlsView(captureButtonAction: {
customCameraRepresentable.takePhoto()
})
}
}
}
}
private func cameraView(frame: CGRect) -> CustomCameraRepresentable {
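// cameraFrame and imageCompletion are @State on the representable, so they can be reassigned here even though this view is a struct.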
customCameraRepresentable.cameraFrame = frame
customCameraRepresentable.imageCompletion = imageCompletion
return customCameraRepresentable
}
}
import SwiftUI
struct CameraControlsView: View {
var captureButtonAction: (() -> Void)
var body: some View {
CaptureButtonView()
.onTapGesture {
captureButtonAction()
}
}
}
import SwiftUI
struct CaptureButtonView: View {
@Environment(\.colorScheme) var colorScheme
@State private var animationAmount: CGFloat = 1
var body: some View {
Image(systemName: "camera")
.font(.largeTitle)
.padding(20)
.background(colorScheme == .dark ? Color.white : Color.black)
.foregroundColor(colorScheme == .dark ? Color.black : Color.white)
.clipShape(Circle())
.overlay(
Circle()
.stroke(colorScheme == .dark ? Color.white : Color.black)
.scaleEffect(animationAmount)
.opacity(Double(2 - animationAmount))
.animation(
Animation.easeOut(duration: 1)
.repeatForever(autoreverses: false)
)
)
.onAppear {
animationAmount = 2
}
}
}
import SwiftUI
import AVFoundation
final class CustomCameraController: UIViewController {
static let shared = CustomCameraController()
private var captureSession = AVCaptureSession()
private var backCamera: AVCaptureDevice?
private var frontCamera: AVCaptureDevice?
private var currentCamera: AVCaptureDevice?
private var photoOutput: AVCapturePhotoOutput?
private var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
weak var captureDelegate: AVCapturePhotoCaptureDelegate?
override func viewDidLoad() {
super.viewDidLoad()
setup()
}
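// Called from makeUIViewController. Note that each call inserts a fresh preview layer into the shared controller's view, so it is expected to run once per presentation.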
func configurePreviewLayer(with frame: CGRect) {
let cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
cameraPreviewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewLayer.frame = frame
view.layer.insertSublayer(cameraPreviewLayer, at: 0)
}
func startRunningCaptureSession() {
captureSession.startRunning()
}
func stopRunningCaptureSession() {
captureSession.stopRunning()
}
func takePhoto() {
let settings = AVCapturePhotoSettings()
guard let delegate = captureDelegate else {
print("delegate nil")
return
}
photoOutput?.capturePhoto(with: settings, delegate: delegate)
}
// MARK: Private
private func setup() {
setupCaptureSession()
setupDevice()
setupInputOutput()
}
private func setupCaptureSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}
private func setupDevice() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(
deviceTypes: [.builtInWideAngleCamera],
mediaType: .video,
position: .unspecified
)
for device in deviceDiscoverySession.devices {
switch device.position {
case AVCaptureDevice.Position.front:
frontCamera = device
case AVCaptureDevice.Position.back:
backCamera = device
default:
break
}
}
self.currentCamera = self.backCamera
}
private func setupInputOutput() {
do {
guard let currentCamera = currentCamera else { return }
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
photoOutput?.setPreparedPhotoSettingsArray(
[AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])],
completionHandler: nil
)
guard let photoOutput = photoOutput else { return }
captureSession.addOutput(photoOutput)
} catch {
print(error)
}
}
}
struct CustomCameraRepresentable: UIViewControllerRepresentable {
// @Environment(\.presentationMode) var presentationMode
init(cameraFrame: CGRect, imageCompletion: @escaping ((UIImage) -> Void)) {
self.cameraFrame = cameraFrame
self.imageCompletion = imageCompletion
}
@State var cameraFrame: CGRect
@State var imageCompletion: ((UIImage) -> Void)
func makeCoordinator() -> Coordinator {
Coordinator(self)
}
func makeUIViewController(context: Context) -> CustomCameraController {
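// Reuse the shared controller so the capture session survives SwiftUI re-creating this representable.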
CustomCameraController.shared.configurePreviewLayer(with: cameraFrame)
CustomCameraController.shared.captureDelegate = context.coordinator
return CustomCameraController.shared
}
func updateUIViewController(_ cameraViewController: CustomCameraController, context: Context) {}
func takePhoto() {
CustomCameraController.shared.takePhoto()
}
func startRunningCaptureSession() {
CustomCameraController.shared.startRunningCaptureSession()
}
func stopRunningCaptureSession() {
CustomCameraController.shared.stopRunningCaptureSession()
}
}
extension CustomCameraRepresentable {
final class Coordinator: NSObject, AVCapturePhotoCaptureDelegate {
private let parent: CustomCameraRepresentable
init(_ parent: CustomCameraRepresentable) {
self.parent = parent
}
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishProcessingPhoto photo: AVCapturePhoto,
error: Error?) {
if let imageData = photo.fileDataRepresentation() {
guard let newImage = UIImage(data: imageData) else { return }
parent.imageCompletion(newImage)
}
// parent.presentationMode.wrappedValue.dismiss()
}
}
}
Related
I am working on a SwiftUI app that displays an AVCaptureVideoPreviewLayer and also implements the AVCaptureVideoDataOutputSampleBufferDelegate protocol to perform some custom logic in captureOutput(_:didOutput:from:). The custom logic was working and updating my view as expected until I implemented the video preview layer.
Now, only the video preview layer is updated within the view. Both the video preview layer and the update to the published variable occur within a call to DispatchQueue.main.async. Is this appropriate?
I also have a suspicion that I may need to implement some logic within the updateUIViewController(_:context:) function of the UIViewControllerRepresentable struct I am using to display the video preview layer in my view. The docs for this function (https://developer.apple.com/documentation/swiftui/uiviewcontrollerrepresentable/updateuiviewcontroller(_:context:)) are not very helpful; can anyone provide tips on how it should be used?
class VideoStream: UIViewController, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
@Published var luminosityReading: Double = 0.0
...
// AVCaptureSession configuration entered, input added, establish preview layer:
// Currently working on DispatchQueue(label: "VideoStreamSetupQueue")
layer = AVCaptureVideoPreviewLayer(session: session)
...
DispatchQueue.main.async {
self.view.layer.addSublayer(self.layer)
}
// Establish output for luminosity calculation
let videoOutput = AVCaptureVideoDataOutput()
guard
session.canAddOutput(videoOutput)
else {
print("Error creating video output")
return
}
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "VideoStreamForCaptureOutputQueue"))
session.addOutput(videoOutput)
session.sessionPreset = .medium
session.commitConfiguration()
session.startRunning()
...
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// custom logic to calculate luminosity
DispatchQueue.main.async {
print(luminosity) // value changes as expected
self.luminosityReading = luminosity // view not updated with printed value
}
}
Establishing a UIViewControllerRepresentable to display video preview layer in a SwiftUI view:
struct HostedVideoPreviewLayer: UIViewControllerRepresentable {
func makeUIViewController(context: Context) -> some UIViewController {
return VideoStream()
}
func updateUIViewController(_ uiViewController: UIViewControllerType, context: Context) {
// video preview layer works as expected
// text unrelated to this struct (see below) is not updating
}
}
Creating the view:
struct ContentView: View {
@StateObject var videoStream = VideoStream()
var body: some View {
VStack {
HostedVideoPreviewLayer()
Text(String(format: "%.2f Lux", videoStream.luminosityReading))
.font(.largeTitle)
.padding()
}
}
}
Minimal Reproducible Example:
import Foundation
import UIKit
import AVKit
import AVFoundation
import SwiftUI
struct ContentView: View {
@StateObject var videoStream = VideoStream()
var body: some View {
VStack {
HostedVideoPreviewLayer()
Text(String(format: "%.2f Lux", videoStream.luminosityReading))
.font(.largeTitle)
.padding()
}
}
}
class VideoStream: UIViewController, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
@Published var luminosityReading: Double = 0.0
private let session = AVCaptureSession()
private let queue = DispatchQueue(label: "VideoStreamSetupQueue")
private var layer = AVCaptureVideoPreviewLayer()
var screenRect: CGRect!
override func viewDidLoad() {
super.viewDidLoad()
queue.async {
self.authorizeCapture()
}
}
func authorizeCapture() {
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized: // The user has previously granted access to the camera.
beginCapture()
case .notDetermined: // The user has not yet been asked for camera access.
queue.suspend()
AVCaptureDevice.requestAccess(for: .video) { granted in
if granted {
self.beginCapture()
self.queue.resume()
}
}
default:
return
}
}
func beginCapture() {
session.beginConfiguration()
let videoDevice = AVCaptureDevice.default(for: .video)
// Add device as input
guard
let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice!),
session.canAddInput(videoDeviceInput)
else {
print("Camera selection failed")
return
}
session.addInput(videoDeviceInput)
// Establish preview layer
screenRect = UIScreen.main.bounds
layer = AVCaptureVideoPreviewLayer(session: session)
layer.frame = CGRect(x: 0, y: 0, width: screenRect.size.width, height: 300)
layer.videoGravity = AVLayerVideoGravity.resizeAspectFill
layer.connection?.videoOrientation = .portrait
DispatchQueue.main.async {
self.view.layer.addSublayer(self.layer)
}
// Establish output for luminosity calculation
let videoOutput = AVCaptureVideoDataOutput()
guard
session.canAddOutput(videoOutput)
else {
print("Error creating video output")
return
}
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "VideoStreamForCaptureOutputQueue"))
session.addOutput(videoOutput)
session.sessionPreset = .medium
session.commitConfiguration()
session.startRunning()
}
// From: https://stackoverflow.com/questions/41921326/how-to-get-light-value-from-avfoundation/46842115#46842115
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// Retrieving EXIF data of camara frame buffer
let rawMetadata = CMCopyDictionaryOfAttachments(allocator: nil, target: sampleBuffer, attachmentMode: CMAttachmentMode(kCMAttachmentMode_ShouldPropagate))
let metadata = CFDictionaryCreateMutableCopy(nil, 0, rawMetadata) as NSMutableDictionary
let exifData = metadata.value(forKey: "{Exif}") as? NSMutableDictionary
let FNumber : Double = exifData?["FNumber"] as! Double
let ExposureTime : Double = exifData?["ExposureTime"] as! Double
let ISOSpeedRatingsArray = exifData!["ISOSpeedRatings"] as? NSArray
let ISOSpeedRatings : Double = ISOSpeedRatingsArray![0] as! Double
let CalibrationConstant : Double = 50
//Calculating the luminosity
let luminosity : Double = (CalibrationConstant * FNumber * FNumber ) / ( ExposureTime * ISOSpeedRatings )
DispatchQueue.main.async {
print(luminosity) // value changes as expected
self.luminosityReading = luminosity // view not updated with recent value
}
}
override func willTransition(to newCollection: UITraitCollection, with coordinator: UIViewControllerTransitionCoordinator) {
screenRect = UIScreen.main.bounds
layer.frame = CGRect(x: 0, y: 0, width: screenRect.size.width, height: screenRect.size.height)
switch UIDevice.current.orientation {
// Home button on top
case UIDeviceOrientation.portraitUpsideDown:
layer.connection?.videoOrientation = .portraitUpsideDown
// Home button on right
case UIDeviceOrientation.landscapeLeft:
layer.connection?.videoOrientation = .landscapeRight
// Home button on left
case UIDeviceOrientation.landscapeRight:
layer.connection?.videoOrientation = .landscapeLeft
// Home button at bottom
case UIDeviceOrientation.portrait:
layer.connection?.videoOrientation = .portrait
default:
break
}
}
}
struct HostedVideoPreviewLayer: UIViewControllerRepresentable {
func makeUIViewController(context: Context) -> some UIViewController {
return VideoStream()
}
func updateUIViewController(_ uiViewController: UIViewControllerType, context: Context) {
// video preview layer works as expected
// text unrelated to this struct is not updating
}
}
My working solution instead passes the AVCaptureSession created in VideoStream as a parameter to a custom view VideoPreviewHolder. I use a state object to ensure the session is available (if not, a progress indicator is displayed) and then display the preview layer. I hope this may be useful to others:
class VideoPreview: UIView {
private var session: AVCaptureSession!
init(runningSession session: AVCaptureSession) {
super.init(frame: .zero)
self.session = session
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
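// Backing the view with AVCaptureVideoPreviewLayer means the preview automatically tracks the view's size.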
override class var layerClass: AnyClass {
AVCaptureVideoPreviewLayer.self
}
var videoPreviewLayer: AVCaptureVideoPreviewLayer {
return layer as! AVCaptureVideoPreviewLayer
}
override func didMoveToSuperview() {
super.didMoveToSuperview()
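// Attach the running session only once the view actually joins a superview.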
if self.superview != nil {
self.videoPreviewLayer.session = self.session
self.videoPreviewLayer.videoGravity = .resizeAspect
}
}
}
struct VideoPreviewHolder: UIViewRepresentable {
public var runningSession: AVCaptureSession
typealias UIViewType = VideoPreview
func makeUIView(context: Context) -> VideoPreview {
VideoPreview(runningSession: runningSession)
}
func updateUIView(_ uiView: VideoPreview, context: Context) {
}
}
struct ContentView: View {
@StateObject var videoStream = VideoStream() // this class definition is in the original question body
var body: some View {
if (!videoStream.cameraAccess) {
// request access
} else {
NavigationView {
VStack {
if (videoStream.session != nil) {
VideoPreviewHolder(runningSession: videoStream.session)
.frame(minWidth: 0, idealWidth: .infinity, maxWidth: .infinity, minHeight: 0, idealHeight: .infinity, maxHeight: .infinity, alignment: .center)
} else {
ProgressView()
}
...
}
}
}
}
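For this ContentView to compile, the VideoStream class from the question is assumed to gain the two members the view reads; a minimal sketch of those assumed additions (the names cameraAccess and session come from the usage above):
class VideoStream: UIViewController, ObservableObject, AVCaptureVideoDataOutputSampleBufferDelegate {
    @Published var luminosityReading: Double = 0.0
    @Published var cameraAccess = false        // set to true in authorizeCapture() once access is granted
    @Published var session: AVCaptureSession!  // no longer private; published so the view re-renders once it exists
    // ... capture setup and captureOutput(_:didOutput:from:) remain as in the question
}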
I have a simple camera preview implementation:
import SwiftUI
import AVFoundation
struct CameraView: View {
@StateObject var model = CameraModel()
var body: some View {
CameraPreview(camera: model)
.safeAreaInset(edge: .bottom, alignment: .center, spacing: 0) {
Color.clear
.frame(height: 0)
.background(Material.bar)
}
.ignoresSafeArea(.all, edges: .top)
.onAppear() {
model.check()
}
}
}
struct CameraPreview: UIViewRepresentable {
@ObservedObject var camera: CameraModel
func makeUIView(context: Context) -> some UIView {
let view = UIView(frame: UIScreen.main.bounds)
camera.preview = AVCaptureVideoPreviewLayer(session: camera.session)
camera.preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
camera.preview.frame = view.frame
view.layer.addSublayer(camera.preview)
camera.start()
return view
}
func updateUIView(_ uiView: UIViewType, context: Context) {
}
}
struct CameraView_Previews: PreviewProvider {
static var previews: some View {
CameraView()
}
}
class CameraModel: ObservableObject {
@Published var session = AVCaptureSession()
@Published var alert = false
@Published var preview: AVCaptureVideoPreviewLayer!
func check() {
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized:
setUp()
break
case .notDetermined:
AVCaptureDevice.requestAccess(for: .video) { (status) in
if status {
self.setUp()
}
}
break
case .denied:
self.alert.toggle()
break
default:
break
}
}
func setUp() {
do {
self.session.beginConfiguration()
let device = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back)
let input = try AVCaptureDeviceInput(device: device!)
if self.session.canAddInput(input) {
self.session.addInput(input)
}
self.session.commitConfiguration()
}
catch {
print(error.localizedDescription)
}
}
func start() {
self.session.startRunning()
}
}
The problem is that it doesn't handle screen rotations:
I found similar topics, for example this one, but I am a newbie in iOS development and can't even understand where to put this solution. I've checked: neither View nor UIViewRepresentable has such methods to override.
How to handle screen rotation in AVCaptureVideoPreviewLayer?
If you want to update the layer frame on rotation, you need to create a custom UIView and override layoutSubviews(). Inside layoutSubviews(), update the frame of the sublayers.
The code will be as below.
struct CameraPreview: UIViewRepresentable {
@ObservedObject var camera: CameraModel
class LayerView: UIView {
override func layoutSubviews() {
super.layoutSubviews()
// Disable the layer's default implicit animation. Remove the `CATransaction` lines if you want to keep the animation.
CATransaction.begin()
CATransaction.setDisableActions(true)
layer.sublayers?.forEach({ layer in
layer.frame = bounds // sublayer frames are in the view's own (bounds) coordinate space
})
CATransaction.commit()
}
}
func makeUIView(context: Context) -> some UIView {
let view = LayerView()
camera.preview = AVCaptureVideoPreviewLayer(session: camera.session)
camera.preview.frame = view.frame
camera.preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
view.layer.addSublayer(camera.preview)
camera.session.startRunning()
return view
}
func updateUIView(_ uiView: UIViewType, context: Context) {
}
}
This is a working variant with video rotation, based on Dscyre Scotti's answer:
struct CameraView: View {
@StateObject var model = CameraModel()
var body: some View {
CameraPreview(camera: model)
.safeAreaInset(edge: .bottom, alignment: .center, spacing: 0) {
Color.clear
.frame(height: 0)
.background(Material.bar)
}
.ignoresSafeArea(.all, edges: [.top, .horizontal])
.onAppear() {
model.check()
}
}
}
struct CameraPreview: UIViewRepresentable {
@ObservedObject var camera: CameraModel
class LayerView: UIView {
var parent: CameraPreview! = nil
override func layoutSubviews() {
super.layoutSubviews()
// Disable the layer's default implicit animation. Remove the `CATransaction` lines if you want to keep the animation.
CATransaction.begin()
CATransaction.setDisableActions(true)
layer.sublayers?.forEach({ layer in
layer.frame = UIScreen.main.bounds
})
self.parent.camera.rotate(orientation: UIDevice.current.orientation)
CATransaction.commit()
}
}
func makeUIView(context: Context) -> some UIView {
let view = LayerView()
view.parent = self
camera.preview = AVCaptureVideoPreviewLayer(session: camera.session)
camera.preview.videoGravity = AVLayerVideoGravity.resizeAspectFill
camera.preview.frame = view.frame
view.layer.addSublayer(camera.preview)
camera.start()
return view
}
func updateUIView(_ uiView: UIViewType, context: Context) {
}
}
struct CameraView_Previews: PreviewProvider {
static var previews: some View {
CameraView()
}
}
class CameraModel: ObservableObject {
@Published var session = AVCaptureSession()
@Published var alert = false
@Published var preview: AVCaptureVideoPreviewLayer!
func check() {
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized:
setUp()
break
case .notDetermined:
AVCaptureDevice.requestAccess(for: .video) { (status) in
if status {
self.setUp()
}
}
break
case .denied:
self.alert.toggle()
break
default:
break
}
}
func setUp() {
do {
self.session.beginConfiguration()
// builtInDualCamera is nil on single-camera models; fall back to the default video device instead of force-unwrapping
guard let device = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back) ?? AVCaptureDevice.default(for: .video) else { return }
let input = try AVCaptureDeviceInput(device: device)
if self.session.canAddInput(input) {
self.session.addInput(input)
}
self.session.commitConfiguration()
}
catch {
print(error.localizedDescription)
}
}
func start() {
self.session.startRunning()
}
func rotate(orientation: UIDeviceOrientation) {
let videoConnection = self.preview.connection
switch orientation {
case .portraitUpsideDown:
videoConnection?.videoOrientation = .portraitUpsideDown
case .landscapeLeft:
videoConnection?.videoOrientation = .landscapeRight
case .landscapeRight:
videoConnection?.videoOrientation = .landscapeLeft
case .faceDown:
videoConnection?.videoOrientation = .portraitUpsideDown
default:
videoConnection?.videoOrientation = .portrait
}
}
}
As of iOS 15.2, SwiftUI's ScrollView doesn't support refreshable {}, so I am using UIRefreshControl to solve this issue. Everything went smoothly, and success was just one step away ;-(
Full code, ContentView.swift:
import SwiftUI
import UIKit
extension UIView {
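// Recursively collects every subview of the inferred type; returns nil when none are found.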
func viewsInHierarchy<ViewType: UIView>() -> [ViewType]? {
var views: [ViewType] = []
viewsInHierarchy(views: &views)
return views.count > 0 ? views : nil
}
fileprivate func viewsInHierarchy<ViewType: UIView>(views: inout [ViewType]) {
subviews.forEach { eachSubView in
if let matchingView = eachSubView as? ViewType {
views.append(matchingView)
}
eachSubView.viewsInHierarchy(views: &views)
}
}
func searchViewAnchestorsFor<ViewType: UIView>(
_ onViewFound: (ViewType) -> Void
) {
if let matchingView = self.superview as? ViewType {
onViewFound(matchingView)
} else {
superview?.searchViewAnchestorsFor(onViewFound)
}
}
func searchViewAnchestorsFor<ViewType: UIView>() -> ViewType? {
if let matchingView = self.superview as? ViewType {
return matchingView
} else {
return superview?.searchViewAnchestorsFor()
}
}
func printViewHierarchyInformation(_ level: Int = 0) {
printViewInformation(level)
self.subviews.forEach { $0.printViewHierarchyInformation(level + 1) }
}
func printViewInformation(_ level: Int) {
let leadingWhitespace = String(repeating: " ", count: level)
let className = "\(Self.self)"
let superclassName = "\(self.superclass!)"
let frame = "\(self.frame)"
let id = (self.accessibilityIdentifier == nil) ? "" : " `\(self.accessibilityIdentifier!)`"
print("\(leadingWhitespace)\(className): \(superclassName)\(id) \(frame)")
}
}
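// ViewControllerResolver is a zero-sized representable whose only job is to surface the hosting UIViewController via didMove(toParent:).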
final class ViewControllerResolver: UIViewControllerRepresentable {
let onResolve: (UIViewController) -> Void
init(onResolve: @escaping (UIViewController) -> Void) {
self.onResolve = onResolve
}
func makeUIViewController(context: Context) -> ParentResolverViewController {
ParentResolverViewController(onResolve: onResolve)
}
func updateUIViewController(_ uiViewController: ParentResolverViewController, context: Context) { }
}
class ParentResolverViewController: UIViewController {
let onResolve: (UIViewController) -> Void
init(onResolve: @escaping (UIViewController) -> Void) {
self.onResolve = onResolve
super.init(nibName: nil, bundle: nil)
}
required init?(coder: NSCoder) {
fatalError("Use init(onResolve:) to instantiate ParentResolverViewController.")
}
override func didMove(toParent parent: UIViewController?) {
super.didMove(toParent: parent)
if let parent = parent {
//parent.navigationController?.navigationBar.prefersLargeTitles = true
//parent.navigationItem.largeTitleDisplayMode = .never
self.onResolve(parent)
// print("didMove(toParent: \(parent)")
}
}
}
struct ContentView: View {
var body: some View {
NavigationView {
ScrollView {
VStack {
HStack {
Spacer()
Text("Content")
Spacer()
}
Spacer()
}
.background(Color.red)
}
.navigationTitle("title")
.navigationBarTitleDisplayMode(.inline)
.overlay(
ViewControllerResolver { parent in
var scrollView: UIScrollView?
if let scrollViewsInHierarchy: [UIScrollView] = parent.view.viewsInHierarchy() {
if scrollViewsInHierarchy.count == 1, let firstScrollViewInHierarchy = scrollViewsInHierarchy.first {
scrollView = firstScrollViewInHierarchy
}
}
if let scrollView = scrollView {
guard scrollView.refreshControl == nil else { return }
let refreshControl = UIRefreshControl()
refreshControl.transform = CGAffineTransform(scaleX: 0.75, y: 0.75)
refreshControl.layoutIfNeeded()
let uiAction = UIAction(handler: { uiAction in
DispatchQueue.main.asyncAfter(deadline: .now() + 3) {
refreshControl.endRefreshing()
}
})
refreshControl.addAction(uiAction, for: .primaryActionTriggered)
scrollView.refreshControl = refreshControl
}
}
.frame(width: 0, height: 0)
)
}
}
}
import SwiftUI
import UIKit
import AVFoundation

struct AnimationView: UIViewRepresentable {
@Binding var cnt: Int
var imageView: UIImageView = UIImageView(image: effctPicker(cnt: 0))
func makeUIView(context: Self.Context) -> UIImageView {
return imageView
}
func updateUIView(_ uiView: UIImageView, context: UIViewRepresentableContext<AnimationView>) {
if self.cnt == 0{
self.imageView.image = effctPicker(cnt: 0)
}
else {
self.imageView.image = effctPicker(cnt: self.cnt)
}
}
}
struct ContentView: View {
@State var count: Int = 0
@State var audioPlayer: AVAudioPlayer!
@State var bg: String = "bg"
var body: some View {
Button(action:{
count += 1
switch count
{
case 1:
self.bg = "bg_2"
default:
count = 0
self.bg = "bg"
}
}, label: {
ZStack {
Image(bg)
.resizable()
AnimationView(cnt: $count)
}.onAppear {
let sound = Bundle.main.path(forResource: "freelove", ofType: "mp3")
self.audioPlayer = try! AVAudioPlayer(contentsOf: URL(fileURLWithPath: sound!))
self.audioPlayer.play()
}
}
)
}
}
var springImages = [UIImage(named: "eft1")!, UIImage(named: "eft2")!,UIImage(named: "eft3")!, UIImage(named: "eft4")!]
var rainImages = [UIImage(named: "raineft_1")!, UIImage(named: "raineft_2")!,UIImage(named: "raineft_3")!]
func effctPicker(cnt : Int) ->UIImage{
if cnt == 0{
return UIImage.animatedImage(with: springImages, duration: 0.4)!
}
else {
return UIImage.animatedImage(with: rainImages, duration: 0.4)!
}
}
First of all, I'm sorry that my English is not good, and this is my first question on Stack Overflow.
This is my animation view.
I want that, when I press the button, the bound cnt changes and updateUIView is called.
But it didn't work. Should I add a coordinator? If so, what should I do?
I can't figure out how to make this Apple sign-in button wider and taller. No matter where I try to add .frame(width: ...), it just seems to move the button around the screen but does not alter the dimensions of the button itself.
This is in ContentView.swift:
import SwiftUI

struct ContentView: View {
@State var credentials: CredentialsOrError?
var body: some View {
VStack {
if $credentials.wrappedValue == nil {
SignInWithAppleButton(credentials: $credentials)
}
else if $credentials.wrappedValue!.isSuccess
{
Text("User: \($credentials.wrappedValue!.values!.user)")
Text("Given name: \($credentials.wrappedValue!.values?.givenName ?? "")")
Text("Family name: \($credentials.wrappedValue!.values?.familyName ?? "")")
Text("Email: \($credentials.wrappedValue!.values?.email ?? "")")
}
else {
Text($credentials.wrappedValue!.error!.localizedDescription).foregroundColor(.red)
}
}.fullScreenCover(isPresented: .constant($credentials.wrappedValue != nil && $credentials.wrappedValue!.isSuccess) , content: {
HomeView()
})
}
}
This is from SignInWithAppleButton.swift:
import SwiftUI
import UIKit
import AuthenticationServices

struct SignInWithAppleButton: View {
@Binding var credentials: CredentialsOrError?
var body: some View {
let button = ButtonController(credentials: $credentials)
return button
.frame(width: button.button.frame.width, height: button.button.frame.height, alignment: .center)
}
struct ButtonController: UIViewControllerRepresentable {
let button: ASAuthorizationAppleIDButton = ASAuthorizationAppleIDButton()
let vc: UIViewController = UIViewController()
@Binding var credentials: CredentialsOrError?
func makeCoordinator() -> Coordinator {
return Coordinator(self)
}
func makeUIViewController(context: Context) -> UIViewController {
vc.view.addSubview(button)
return vc
}
func updateUIViewController(_ uiViewController: UIViewController, context: Context) { }
class Coordinator: NSObject, ASAuthorizationControllerDelegate, ASAuthorizationControllerPresentationContextProviding {
let parent: ButtonController
init(_ parent: ButtonController) {
self.parent = parent
super.init()
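// Route the UIKit button's tap into this coordinator so it can start the Sign in with Apple request.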
parent.button.addTarget(self, action: #selector(didTapButton), for: .touchUpInside)
}
@objc func didTapButton() {
let appleIDProvider = ASAuthorizationAppleIDProvider()
let request = appleIDProvider.createRequest()
request.requestedScopes = [.fullName, .email]
let authorizationController = ASAuthorizationController(authorizationRequests: [request])
authorizationController.presentationContextProvider = self
authorizationController.delegate = self
authorizationController.performRequests()
}
func presentationAnchor(for controller: ASAuthorizationController) -> ASPresentationAnchor {
return parent.vc.view.window!
}
func authorizationController(controller: ASAuthorizationController, didCompleteWithAuthorization authorization: ASAuthorization) {
guard let credentials = authorization.credential as? ASAuthorizationAppleIDCredential else {
parent.credentials = .error("Credentials are not of type ASAuthorizationAppleIDCredential")
return
}
parent.credentials = .credentials(user: credentials.user, givenName: credentials.fullName?.givenName, familyName: credentials.fullName?.familyName, email: credentials.email)
}
func authorizationController(controller: ASAuthorizationController, didCompleteWithError error: Error) {
parent.credentials = .error(error)
}
}
}
}
Attached is a picture of what it looks like currently on my dark-mode iPhone, and I've also attached a picture from the light-mode emulator.
Related question: How can I hard-code the background of the screen to be white?