Video AVAsset — only saves one object to arrays - iOS

I am trying to implement photo and video functionality in my iOS app with Swift.
Images work fine. The images and uiimages arrays are correctly populated.
With video — it only seems to save one video to the array (i.e. it overwrites the existing video even though the imageIndex has changed; this happens in both the videos and media arrays).
What could be causing this? It seems I have the exact same logic for videos as for images, and saving images in the media ([AnyObject]) array works fine, but not for videos.
@State var images = [Image?](repeating: nil, count: 6)
@State var uiimages = [UIImage?](repeating: nil, count: 6)
@State var videos = [AVAsset?](repeating: nil, count: 6)
@State var media = [AnyObject?](repeating: nil, count: 6)
...
CaptureImageView(isShown: $showCaptureImageView, uiimage: $uiimages[imageIndex], image: $images[imageIndex], video: $videos[imageIndex], amedia: $media[imageIndex], camera: camera)
.onAppear {
self.disableButtons = true
}
.onDisappear {
var nonNil:Int = 0
for media in media {
if media != nil {
nonNil += 1
}
}
self.nonNilImages = nonNil
self.imageIndex = nonNil
self.disableButtons = false
print("-------")
print("imageIndex: \(imageIndex)")
print(videos)
print(media)
}
import SwiftUI
import AVFoundation
struct CaptureImageView {
@Binding var isShown: Bool
@Binding var uiimage: UIImage?
@Binding var image: Image?
@Binding var video: AVAsset?
@Binding var amedia: AnyObject?
var camera: Bool
func makeCoordinator() -> Coordinator {
return Coordinator(isShown: $isShown, image: $image, uiimage: $uiimage, video: $video, amedia: $amedia)
}
}
extension CaptureImageView: UIViewControllerRepresentable {
func makeUIViewController(context: UIViewControllerRepresentableContext<CaptureImageView>) -> UIImagePickerController {
let picker = UIImagePickerController()
picker.delegate = context.coordinator
if camera {
if let mediaTypes = UIImagePickerController.availableMediaTypes(for: .camera) {
picker.mediaTypes = mediaTypes
}
picker.sourceType = .camera
}
return picker
}
func updateUIViewController(_ uiViewController: UIImagePickerController,
context: UIViewControllerRepresentableContext<CaptureImageView>) {
}
}
import SwiftUI
import AVFoundation
class Coordinator: NSObject, UINavigationControllerDelegate, UIImagePickerControllerDelegate {
@Binding var isCoordinatorShown: Bool
@Binding var uiimageInCoordinator: UIImage?
@Binding var imageInCoordinator: Image?
@Binding var videoInCoordinator: AVAsset?
@Binding var mediaInCoordinator: AnyObject?
init(isShown: Binding<Bool>, image: Binding<Image?>, uiimage: Binding<UIImage?>, video: Binding<AVAsset?>, amedia: Binding<AnyObject?>) {
_isCoordinatorShown = isShown
_uiimageInCoordinator = uiimage
_imageInCoordinator = image
_videoInCoordinator = video
_mediaInCoordinator = amedia
}
func imagePickerController(_ picker: UIImagePickerController,
didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {
if let videoURL = info[UIImagePickerController.InfoKey.mediaURL] as? URL {
let asset = AVAsset(url: videoURL)
print(asset)
videoInCoordinator = AVAsset(url: videoURL)
mediaInCoordinator = AVAsset(url: videoURL)
} else if let unwrapImage = info[UIImagePickerController.InfoKey.originalImage] as? UIImage {
// handle image pick
uiimageInCoordinator = unwrapImage
imageInCoordinator = Image(uiImage: unwrapImage)
mediaInCoordinator = unwrapImage
}
isCoordinatorShown = false
}
func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
isCoordinatorShown = false
}
}

Related

How to change View while app is running in SwiftUI

struct AnimationView: UIViewRepresentable {
@Binding var cnt: Int
var imageView: UIImageView = UIImageView(image: effctPicker(cnt: 0))
func makeUIView(context: Self.Context) -> UIImageView {
return imageView
}
func updateUIView(_ uiView: UIImageView, context: UIViewRepresentableContext<AnimationView>) {
if self.cnt == 0{
self.imageView.image = effctPicker(cnt: 0)
}
else {
self.imageView.image = effctPicker(cnt: self.cnt)
}
}
}
struct ContentView: View {
@State var count: Int = 0
@State var audioPlayer: AVAudioPlayer!
@State var bg: String = "bg"
var body: some View {
Button(action:{
count += 1
switch count
{
case 1:
self.bg = "bg_2"
default:
count = 0
self.bg = "bg"
}
}, label: {
ZStack {
Image(bg)
.resizable()
AnimationView(cnt: $count)
}.onAppear {
let sound = Bundle.main.path(forResource: "freelove", ofType: "mp3")
self.audioPlayer = try! AVAudioPlayer(contentsOf: URL(fileURLWithPath: sound!))
self.audioPlayer.play()
}
}
)
}
}
var springImages = [UIImage(named: "eft1")!, UIImage(named: "eft2")!,UIImage(named: "eft3")!, UIImage(named: "eft4")!]
var rainImages = [UIImage(named: "raineft_1")!, UIImage(named: "raineft_2")!,UIImage(named: "raineft_3")!]
func effctPicker(cnt : Int) ->UIImage{
if cnt == 0{
return UIImage.animatedImage(with: springImages, duration: 0.4)!
}
else {
return UIImage.animatedImage(with: rainImages, duration: 0.4)!
}
}
First of all, I'm sorry that my English isn't good, and this is my first question on Stack Overflow.
This is my animation view.
I want the bound "cnt" to change when I press the button, so that updateUIView is called.
But it didn't work. Should I add a coordinator? If so, what should I do?
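No answer is included in this excerpt, but one common UIViewRepresentable pattern (a sketch, not a confirmed fix for this question) is to create the image view in makeUIView and mutate the uiView parameter in updateUIView, rather than storing the UIImageView as a struct property. A minimal version, assuming the effctPicker(cnt:) helper above:
import SwiftUI

// Sketch: build the view in makeUIView; in updateUIView mutate the
// `uiView` instance SwiftUI hands back instead of a stored property.
struct AnimationView: UIViewRepresentable {
    @Binding var cnt: Int

    func makeUIView(context: Context) -> UIImageView {
        UIImageView(image: effctPicker(cnt: cnt))
    }

    func updateUIView(_ uiView: UIImageView, context: Context) {
        // Called whenever the bound `cnt` changes.
        uiView.image = effctPicker(cnt: cnt)
    }
}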

Improving the accuracy of text recognition when using iOS Vision Framework to scan a document

I am trying to build a document scanner that is able to read text off of any document/card. However, it sometimes has trouble identifying text correctly off of a credit card. The accuracy is decent, but there is definitely room for improvement. I used the Vision framework's text recognition (VNRecognizeTextRequest) with all the standard settings, which are the right ones for setting up text recognition.
This is what I have to set up the text recognition request:
textRecognitionRequest = VNRecognizeTextRequest(completionHandler: { (request, error) in
if let results = request.results, !results.isEmpty {
if let requestResults = request.results as? [VNRecognizedTextObservation] {
var foundText = ""
for observation in requestResults {
guard let candidate = observation.topCandidates(1).first else { continue }
foundText.append(candidate.string + "\n")
}
}
}
})
textRecognitionRequest.recognitionLevel = .accurate
textRecognitionRequest.usesLanguageCorrection = true
Does anyone have any suggestions for improving the identification programmatically by either pre-processing or post-processing the scan at some point?
UPDATE: I've made a fully open source project that may help you do exactly what you need. Check it out: https://github.com/ethanwa/credit-card-scanner-and-validator
You can't do much to improve accuracy beyond adding some preset values to specifically look for, which doesn't make sense with CC numbers, so I won't even bother showing that code. You'll need to rely on Apple to improve its text recognition model as iOS iterates for it to truly improve.
What I suggest in the meantime are these two things you can do:
Do validation on the credit card numbers you think you're receiving. For example, Visa starts with 4, MasterCard starts with 5, Discover with 6, Amex with 3, etc. They have specific lengths and so on (a minimal Luhn checksum sketch follows this list). See here: https://www.freeformatter.com/credit-card-number-generator-validator.html
Keep iterating over and over on a camera feed until you get a number that validates. I'm not sure if you are currently just taking a picture of the card, and processing that image (which it sounds like you are doing), but you should be processing many images per second until you get a valid CC. This is most likely how Apple does it when adding a card via Apple Pay on your phone, or when depositing checks digitally using banking apps (finding valid routing and account numbers).
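For the first suggestion, here is a minimal sketch of the standard Luhn checksum that card numbers must satisfy. The helper and its name are an illustration, not the poster's code:
// Hypothetical helper: checks the Luhn digit of a candidate card number.
func passesLuhn(_ number: String) -> Bool {
    let digits = number.compactMap { $0.wholeNumberValue }
    // Reject strings with non-digits or too few digits to be a card number.
    guard digits.count == number.count, digits.count >= 12 else { return false }
    // Walking from the right, double every second digit; subtract 9 if it exceeds 9.
    let sum = digits.reversed().enumerated().reduce(0) { total, pair in
        let (index, digit) = pair
        let doubled = digit * 2
        return total + (index % 2 == 1 ? (doubled > 9 ? doubled - 9 : doubled) : digit)
    }
    return sum % 10 == 0
}
// Example: passesLuhn("4111111111111111") returns true (a well-known Visa test number).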
Here's an example of what I mean...
I wrote this code that can pick out and validate ISBN numbers (basically 10 and 13 digit numbers that catalog books, which have a check digit for validation) in any given text and will keep looking until it finds all the numbers and then validates. It works extremely well and is very fast. Check out this Swift 5.3 code:
import UIKit
import Vision
import Photos
import AVFoundation
class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate {
var recognizedText = ""
var finalText = ""
var image: UIImage?
var processing = false
@IBOutlet weak var nameLabel: UILabel!
@IBOutlet weak var setLabel: UILabel!
@IBOutlet weak var numberLabel: UILabel!
lazy var textDetectionRequest: VNRecognizeTextRequest = {
let request = VNRecognizeTextRequest(completionHandler: self.handleDetectedText)
request.recognitionLevel = .accurate
request.usesLanguageCorrection = false
return request
}()
private let videoOutput = AVCaptureVideoDataOutput()
private let captureSession = AVCaptureSession()
private lazy var previewLayer: AVCaptureVideoPreviewLayer = {
let preview = AVCaptureVideoPreviewLayer(session: self.captureSession)
preview.videoGravity = .resizeAspect
return preview
}()
// MARK: AV
override func viewDidLoad() {
super.viewDidLoad()
self.addCameraInput()
self.addVideoOutput()
}
private func addCameraInput() {
let device = AVCaptureDevice.default(for: .video)!
let cameraInput = try! AVCaptureDeviceInput(device: device)
self.captureSession.addInput(cameraInput)
}
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
self.previewLayer.frame = self.view.bounds
}
private func addVideoOutput() {
self.videoOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
self.videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "my.image.handling.queue"))
self.captureSession.addOutput(self.videoOutput)
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection)
{
if !processing
{
guard let frame = CMSampleBufferGetImageBuffer(sampleBuffer) else {
debugPrint("unable to get image from sample buffer")
return
}
print("did receive image frame")
// process image here
self.processing = true
let ciimage : CIImage = CIImage(cvPixelBuffer: frame)
let theimage : UIImage = self.convert(cmage: ciimage)
self.image = theimage
processImage()
}
}
// Convert CIImage to UIImage
func convert(cmage:CIImage) -> UIImage
{
let context:CIContext = CIContext.init(options: nil)
let cgImage:CGImage = context.createCGImage(cmage, from: cmage.extent)!
let image:UIImage = UIImage.init(cgImage: cgImage)
return image
}
// AV
func processImage()
{
DispatchQueue.main.async {
self.nameLabel.text = ""
self.setLabel.text = ""
self.numberLabel.text = ""
}
guard let image = image, let cgImage = image.cgImage else { return }
let requests = [textDetectionRequest]
let imageRequestHandler = VNImageRequestHandler(cgImage: cgImage, orientation: .right, options: [:])
DispatchQueue.global(qos: .userInitiated).async {
do {
try imageRequestHandler.perform(requests)
} catch let error {
print("Error: \(error)")
}
}
}
fileprivate func handleDetectedText(request: VNRequest?, error: Error?)
{
self.finalText = ""
if let error = error {
print(error.localizedDescription)
self.processing = false
return
}
guard let results = request?.results, results.count > 0 else {
print("No text was found.")
self.processing = false
return
}
if let requestResults = request?.results as? [VNRecognizedTextObservation] {
self.recognizedText = ""
for observation in requestResults {
guard let candidate = observation.topCandidates(1).first else { return }
self.recognizedText += candidate.string
self.recognizedText += " "
}
var replaced = self.recognizedText.replacingOccurrences(of: "-", with: "")
replaced = String(replaced.filter { !"\n\t\r".contains($0) })
let replacedArr = replaced.components(separatedBy: " ")
for here in replacedArr
{
let final = here.trimmingCharacters(in: CharacterSet.whitespacesAndNewlines)
if (final.count == 10 || final.count == 13) && final.containsISBNnums && Validate.isbn(final) // validate barcode
{
self.finalText += final
print(final)
self.captureSession.stopRunning()
DispatchQueue.main.async {
self.previewLayer.removeFromSuperlayer()
}
break
}
}
DispatchQueue.main.async {
self.numberLabel.text = self.finalText
}
}
self.processing = false
}
// MARK: Buttons
// This is a live camera view that will start a capture session
@IBAction func takePhoto(_ sender: Any) {
self.view.layer.addSublayer(self.previewLayer)
self.captureSession.startRunning()
}
@IBAction func choosePhoto(_ sender: Any) {
presentPhotoPicker(type: .photoLibrary)
}
fileprivate func presentPhotoPicker(type: UIImagePickerController.SourceType) {
let controller = UIImagePickerController()
controller.sourceType = type
controller.delegate = self
present(controller, animated: true, completion: nil)
}
}
extension ViewController: UIImagePickerControllerDelegate, UINavigationControllerDelegate {
func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
dismiss(animated: true, completion: nil)
}
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {
dismiss(animated: true, completion: nil)
image = info[.originalImage] as? UIImage
processImage()
}
}
extension String {
var containsISBNnums: Bool {
guard self.count > 0 else { return false }
let nums: Set<Character> = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "X"]
return Set(self).isSubset(of: nums)
}
}
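The Validate.isbn helper referenced above isn't included in the answer. Here is a minimal sketch of the standard ISBN-10/ISBN-13 check-digit math it presumably performs; the type and signature are assumptions:
// Hypothetical stand-in for the Validate.isbn helper used above.
enum Validate {
    static func isbn(_ s: String) -> Bool {
        let chars = Array(s.uppercased())
        if chars.count == 10 {
            // ISBN-10: weighted sum (weights 10 down to 1) must be divisible
            // by 11; the final character may be "X", which stands for 10.
            var sum = 0
            for (i, c) in chars.enumerated() {
                let value: Int
                if c == "X" && i == 9 { value = 10 }
                else if let d = c.wholeNumberValue { value = d }
                else { return false }
                sum += value * (10 - i)
            }
            return sum % 11 == 0
        } else if chars.count == 13 {
            // ISBN-13: alternating weights 1 and 3; sum must be divisible by 10.
            var sum = 0
            for (i, c) in chars.enumerated() {
                guard let d = c.wholeNumberValue else { return false }
                sum += d * (i % 2 == 0 ? 1 : 3)
            }
            return sum % 10 == 0
        }
        return false
    }
}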

SwiftUI Custom Camera View?

I'm playing with SwiftUI and trying to build a custom camera with it. I found tutorials on how to use the system built-in camera with SwiftUI (using UIImagePickerController) and how to build a custom camera with storyboards.
I've already built a struct CameraViewController: UIViewControllerRepresentable that initializes the camera and sets up the capture session (using AVFoundation).
First, I'm not sure how to set up func makeUIViewController for the CameraViewController struct, since I don't know which controller class to conform to.
Also, I don't know how to integrate my CameraViewController into the app with SwiftUI. Can someone help?
Thanks!
SwiftUI - Custom Camera Implementation Example
1. CustomCameraPhotoView / Main Screen - Photo Preview
2. CustomCameraView / Camera Screen - Combines SwiftUI View (Record Button) with UIKit ViewController
3. CustomCameraRepresentable / Custom Camera ViewController SwiftUI Wrapper
4. CustomCameraController / Custom Camera View Controller
5. CaptureButtonView / SwiftUI View - Capture Button
Note: avoid crashing the app by adding a Privacy - Camera Usage Description entry to the Info.plist file.
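As a companion to that Info.plist entry (not part of the original answer), you may also want to request camera access at runtime before starting the capture session; a minimal sketch:
import AVFoundation

// Sketch: ask for camera permission up front so the capture session
// doesn't silently produce a black preview when access is denied.
func requestCameraAccess(_ completion: @escaping (Bool) -> Void) {
    switch AVCaptureDevice.authorizationStatus(for: .video) {
    case .authorized:
        completion(true)
    case .notDetermined:
        AVCaptureDevice.requestAccess(for: .video) { granted in
            DispatchQueue.main.async { completion(granted) }
        }
    default:
        completion(false)
    }
}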
import SwiftUI
import AVFoundation
struct CustomCameraPhotoView: View {
@State private var image: Image?
@State private var showingCustomCamera = false
@State private var inputImage: UIImage?
var body: some View {
NavigationView {
VStack {
ZStack {
Rectangle().fill(Color.secondary)
if image != nil
{
image?
.resizable()
.aspectRatio(contentMode: .fill)
}
else
{
Text("Take Photo").foregroundColor(.white).font(.headline)
}
}
.onTapGesture {
self.showingCustomCamera = true
}
}
.sheet(isPresented: $showingCustomCamera, onDismiss: loadImage) {
CustomCameraView(image: self.$inputImage)
}
.edgesIgnoringSafeArea(.all)
}
}
func loadImage() {
guard let inputImage = inputImage else { return }
image = Image(uiImage: inputImage)
}
}
struct CustomCameraView: View {
@Binding var image: UIImage?
@State var didTapCapture: Bool = false
var body: some View {
ZStack(alignment: .bottom) {
CustomCameraRepresentable(image: self.$image, didTapCapture: $didTapCapture)
CaptureButtonView().onTapGesture {
self.didTapCapture = true
}
}
}
}
struct CustomCameraRepresentable: UIViewControllerRepresentable {
@Environment(\.presentationMode) var presentationMode
@Binding var image: UIImage?
@Binding var didTapCapture: Bool
func makeUIViewController(context: Context) -> CustomCameraController {
let controller = CustomCameraController()
controller.delegate = context.coordinator
return controller
}
func updateUIViewController(_ cameraViewController: CustomCameraController, context: Context) {
if(self.didTapCapture) {
cameraViewController.didTapRecord()
}
}
func makeCoordinator() -> Coordinator {
Coordinator(self)
}
class Coordinator: NSObject, UINavigationControllerDelegate, AVCapturePhotoCaptureDelegate {
let parent: CustomCameraRepresentable
init(_ parent: CustomCameraRepresentable) {
self.parent = parent
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
parent.didTapCapture = false
if let imageData = photo.fileDataRepresentation() {
parent.image = UIImage(data: imageData)
}
parent.presentationMode.wrappedValue.dismiss()
}
}
}
class CustomCameraController: UIViewController {
var image: UIImage?
var captureSession = AVCaptureSession()
var backCamera: AVCaptureDevice?
var frontCamera: AVCaptureDevice?
var currentCamera: AVCaptureDevice?
var photoOutput: AVCapturePhotoOutput?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
//DELEGATE
var delegate: AVCapturePhotoCaptureDelegate?
func didTapRecord() {
let settings = AVCapturePhotoSettings()
photoOutput?.capturePhoto(with: settings, delegate: delegate!)
}
override func viewDidLoad() {
super.viewDidLoad()
setup()
}
func setup() {
setupCaptureSession()
setupDevice()
setupInputOutput()
setupPreviewLayer()
startRunningCaptureSession()
}
func setupCaptureSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}
func setupDevice() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera],
mediaType: AVMediaType.video,
position: AVCaptureDevice.Position.unspecified)
for device in deviceDiscoverySession.devices {
switch device.position {
case AVCaptureDevice.Position.front:
self.frontCamera = device
case AVCaptureDevice.Position.back:
self.backCamera = device
default:
break
}
}
self.currentCamera = self.backCamera
}
func setupInputOutput() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera!)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
photoOutput?.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
captureSession.addOutput(photoOutput!)
} catch {
print(error)
}
}
func setupPreviewLayer()
{
self.cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
self.cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
self.cameraPreviewLayer?.frame = self.view.frame
self.view.layer.insertSublayer(cameraPreviewLayer!, at: 0)
}
func startRunningCaptureSession(){
captureSession.startRunning()
}
}
struct CaptureButtonView: View {
@State private var animationAmount: CGFloat = 1
var body: some View {
Image(systemName: "video").font(.largeTitle)
.padding(30)
.background(Color.red)
.foregroundColor(.white)
.clipShape(Circle())
.overlay(
Circle()
.stroke(Color.red)
.scaleEffect(animationAmount)
.opacity(Double(2 - animationAmount))
.animation(Animation.easeOut(duration: 1)
.repeatForever(autoreverses: false))
)
.onAppear
{
self.animationAmount = 2
}
}
}
Here's a version where you can pass any frame size for the camera preview layer.
If you have a back button, ozmpai's answer does not work out of the box. I have edited ozmpai's answer, so all kudos still goes to him.
I don't like the shared singleton, but for now I haven't figured out a better approach for adapting to the SwiftUI view lifecycle, as SwiftUI is probably using black magic behind it.
Also, passing a Bool to take a photo is probably not the greatest approach, so I have refactored it with a closure.
import SwiftUI
struct MyCameraView: View {
@State private var image: UIImage?
var customCameraRepresentable = CustomCameraRepresentable(
cameraFrame: .zero,
imageCompletion: { _ in }
)
var body: some View {
CustomCameraView(
customCameraRepresentable: customCameraRepresentable,
imageCompletion: { newImage in
self.image = newImage
}
)
.onAppear {
customCameraRepresentable.startRunningCaptureSession()
}
.onDisappear {
customCameraRepresentable.stopRunningCaptureSession()
}
if let image = image {
Image(uiImage: image)
.resizable()
.aspectRatio(contentMode: .fit)
}
}
}
import SwiftUI
struct CustomCameraView: View {
var customCameraRepresentable: CustomCameraRepresentable
var imageCompletion: ((UIImage) -> Void)
var body: some View {
GeometryReader { geometry in
VStack {
let frame = CGRect(x: 0, y: 0, width: geometry.size.width, height: geometry.size.height - 100)
cameraView(frame: frame)
HStack {
CameraControlsView(captureButtonAction: {
customCameraRepresentable.takePhoto()
})
}
}
}
}
private func cameraView(frame: CGRect) -> CustomCameraRepresentable {
customCameraRepresentable.cameraFrame = frame
customCameraRepresentable.imageCompletion = imageCompletion
return customCameraRepresentable
}
}
import SwiftUI
struct CameraControlsView: View {
var captureButtonAction: (() -> Void)
var body: some View {
CaptureButtonView()
.onTapGesture {
captureButtonAction()
}
}
}
import SwiftUI
struct CaptureButtonView: View {
@Environment(\.colorScheme) var colorScheme
@State private var animationAmount: CGFloat = 1
var body: some View {
Image(systemName: "camera")
.font(.largeTitle)
.padding(20)
.background(colorScheme == .dark ? Color.white : Color.black)
.foregroundColor(colorScheme == .dark ? Color.black : Color.white)
.clipShape(Circle())
.overlay(
Circle()
.stroke(colorScheme == .dark ? Color.white : Color.black)
.scaleEffect(animationAmount)
.opacity(Double(2 - animationAmount))
.animation(
Animation.easeOut(duration: 1)
.repeatForever(autoreverses: false)
)
)
.onAppear {
animationAmount = 2
}
}
}
import SwiftUI
import AVFoundation
final class CustomCameraController: UIViewController {
static let shared = CustomCameraController()
private var captureSession = AVCaptureSession()
private var backCamera: AVCaptureDevice?
private var frontCamera: AVCaptureDevice?
private var currentCamera: AVCaptureDevice?
private var photoOutput: AVCapturePhotoOutput?
private var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
weak var captureDelegate: AVCapturePhotoCaptureDelegate?
override func viewDidLoad() {
super.viewDidLoad()
setup()
}
func configurePreviewLayer(with frame: CGRect) {
let cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
cameraPreviewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewLayer.frame = frame
view.layer.insertSublayer(cameraPreviewLayer, at: 0)
}
func startRunningCaptureSession() {
captureSession.startRunning()
}
func stopRunningCaptureSession() {
captureSession.stopRunning()
}
func takePhoto() {
let settings = AVCapturePhotoSettings()
guard let delegate = captureDelegate else {
print("delegate nil")
return
}
photoOutput?.capturePhoto(with: settings, delegate: delegate)
}
// MARK: Private
private func setup() {
setupCaptureSession()
setupDevice()
setupInputOutput()
}
private func setupCaptureSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
}
private func setupDevice() {
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(
deviceTypes: [.builtInWideAngleCamera],
mediaType: .video,
position: .unspecified
)
for device in deviceDiscoverySession.devices {
switch device.position {
case AVCaptureDevice.Position.front:
frontCamera = device
case AVCaptureDevice.Position.back:
backCamera = device
default:
break
}
}
self.currentCamera = self.backCamera
}
private func setupInputOutput() {
do {
guard let currentCamera = currentCamera else { return }
let captureDeviceInput = try AVCaptureDeviceInput(device: currentCamera)
captureSession.addInput(captureDeviceInput)
photoOutput = AVCapturePhotoOutput()
photoOutput?.setPreparedPhotoSettingsArray(
[AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.hevc])],
completionHandler: nil
)
guard let photoOutput = photoOutput else { return }
captureSession.addOutput(photoOutput)
} catch {
print(error)
}
}
}
struct CustomCameraRepresentable: UIViewControllerRepresentable {
// @Environment(\.presentationMode) var presentationMode
init(cameraFrame: CGRect, imageCompletion: @escaping ((UIImage) -> Void)) {
self.cameraFrame = cameraFrame
self.imageCompletion = imageCompletion
}
@State var cameraFrame: CGRect
@State var imageCompletion: ((UIImage) -> Void)
func makeCoordinator() -> Coordinator {
Coordinator(self)
}
func makeUIViewController(context: Context) -> CustomCameraController {
CustomCameraController.shared.configurePreviewLayer(with: cameraFrame)
CustomCameraController.shared.captureDelegate = context.coordinator
return CustomCameraController.shared
}
func updateUIViewController(_ cameraViewController: CustomCameraController, context: Context) {}
func takePhoto() {
CustomCameraController.shared.takePhoto()
}
func startRunningCaptureSession() {
CustomCameraController.shared.startRunningCaptureSession()
}
func stopRunningCaptureSession() {
CustomCameraController.shared.stopRunningCaptureSession()
}
}
extension CustomCameraRepresentable {
final class Coordinator: NSObject, AVCapturePhotoCaptureDelegate {
private let parent: CustomCameraRepresentable
init(_ parent: CustomCameraRepresentable) {
self.parent = parent
}
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishProcessingPhoto photo: AVCapturePhoto,
error: Error?) {
if let imageData = photo.fileDataRepresentation() {
guard let newImage = UIImage(data: imageData) else { return }
parent.imageCompletion(newImage)
}
// parent.presentationMode.wrappedValue.dismiss()
}
}
}

Is it possible to save a reference to where a photo is stored from the photo library instead of saving the actual UIImage itself as data?

I want to save images in my iOS app in Core Data, but all of the images I want to save are being imported from the photo library. So it would make much more sense to save the already existing reference to that photo from the photo library rather than convert the image to NSData and store it on the phone, especially because images take up a lot of memory and ideally shouldn't be saved in two places. Is what I'm trying to do possible, or will Apple not let me read references to where photos from the photo library are stored?
Also, if it's possible to grab the reference to where an image is stored in the photo library, how can I access it in Swift?
The localIdentifier property of the PHObject class is a unique string that persistently identifies the object. You can pass it to PHAsset's fetchAssets(withLocalIdentifiers:options:) method to fetch the image data later.
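A minimal sketch of that round trip (the helper names here are illustrative, not from the answer):
import Photos

// Persist asset.localIdentifier (e.g. in Core Data) instead of image bytes...
func savedReference(for asset: PHAsset) -> String {
    return asset.localIdentifier
}

// ...and re-fetch the asset later from the stored identifier.
func fetchAsset(withIdentifier id: String) -> PHAsset? {
    return PHAsset.fetchAssets(withLocalIdentifiers: [id], options: nil).firstObject
}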
This is a late answer, but hopefully it will help someone else.
Yes, it is possible. You can select photos using a PHPickerViewController to get a PHPickerResult, which has an assetIdentifier attribute. The assetIdentifier persistently and uniquely identifies the photo. When you want to load the image, read the saved assetIdentifier and use PHAsset.fetchAssets() to retrieve the asset, then use PHImageManager.requestImage() to get the UIImage. This way, all you need to store in your app is the assetIdentifier, not the image data.
Here is some example code (for iOS 15) for selecting one photo and getting the assetIdentifier:
import PhotosUI
import SwiftUI
struct PhotoPicker: UIViewControllerRepresentable {
@Environment(\.dismiss) var dismiss
@Binding var assetIdentifier: String?
typealias UIViewControllerType = PHPickerViewController
func makeUIViewController(context: Context) -> PHPickerViewController {
var configuration = PHPickerConfiguration(photoLibrary: PHPhotoLibrary.shared())
configuration.selectionLimit = 1
configuration.filter = .images
let picker = PHPickerViewController(configuration: configuration)
picker.delegate = context.coordinator
return picker
}
func updateUIViewController(_ uiViewController: PHPickerViewController, context: Context) {
// Do nothing.
}
func makeCoordinator() -> Coordinator {
Coordinator(self)
}
class Coordinator: NSObject, UINavigationControllerDelegate, PHPickerViewControllerDelegate {
let parent: PhotoPicker
init(_ parent: PhotoPicker) {
self.parent = parent
}
func picker(_ picker: PHPickerViewController, didFinishPicking results: [PHPickerResult]) {
parent.dismiss()
// Only 1 image can be selected, so ignore any additional elements.
guard let result = results.first else {
return
}
parent.assetIdentifier = result.assetIdentifier
}
}
}
To use it: PhotoPicker(assetIdentifier: $assetIdentifier)
And then to retrieve the image from the assetIdentifier use:
import Photos
import SwiftUI
struct LoadedImageView: View {
@State var assetIdentifier: String
@State private var image: Image? = nil
var body: some View {
VStack {
if let image = image {
image
.resizable()
.aspectRatio(contentMode: .fit)
}
}
.onAppear(perform: loadImage)
}
func loadImage() {
let fetchResults: PHFetchResult<PHAsset> =
PHAsset.fetchAssets(withLocalIdentifiers: [assetIdentifier], options: nil)
guard let asset: PHAsset = fetchResults.firstObject else {
return
}
let manager = PHImageManager()
manager.requestImage(for: asset, targetSize: PHImageManagerMaximumSize,
contentMode: .aspectFit, options: nil) { (uiImage, _) in
if let uiImage = uiImage {
self.image = Image(uiImage: uiImage)
}
}
}
}

How to capture video using custom camera

I have set up my custom camera and have already coded the video preview. I have a button on the screen that I want to use to capture video when it is pressed. I don't know how to go about it. Everything so far is set up and working fine.
In the start recording button function, I just need the code necessary to capture the video and save it. Thank you.
class CameraViewController: UIViewController, AVAudioRecorderDelegate {
@IBOutlet var recordOutlet: UIButton!
@IBOutlet var recordLabel: UILabel!
@IBOutlet var cameraView: UIView!
var tempImage: UIImageView?
var captureSession: AVCaptureSession?
var stillImageOutput: AVCapturePhotoOutput?
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var currentCaptureDevice: AVCaptureDevice?
var usingFrontCamera = false
/* This is the function I want to use to start
recording a video */
@IBAction func recordingButton(_ sender: Any) {
}
It seems as though Apple prefers developers to use the default camera for capturing video. If you are OK with that, I found a tutorial online with code to help: https://www.raywenderlich.com/94404/play-record-merge-videos-ios-swift
You can scroll down to the "recording video" section and it will walk you through it with code.
Here's some of what it says:
import MobileCoreServices
You’ll also need to adopt the same protocols as PlayVideoViewController, by adding the following to the end of the file:
extension RecordVideoViewController: UIImagePickerControllerDelegate {
}
extension RecordVideoViewController: UINavigationControllerDelegate {
}
Add the following code to RecordVideoViewController:
func startCameraFromViewController(viewController: UIViewController, withDelegate delegate: protocol<UIImagePickerControllerDelegate, UINavigationControllerDelegate>) -> Bool {
if UIImagePickerController.isSourceTypeAvailable(.Camera) == false {
return false
}
var cameraController = UIImagePickerController()
cameraController.sourceType = .Camera
cameraController.mediaTypes = [kUTTypeMovie as NSString as String]
cameraController.allowsEditing = false
cameraController.delegate = delegate
presentViewController(cameraController, animated: true, completion: nil)
return true
}
This method follows the same logic as in PlayVideoViewController, but it accesses the .Camera source type instead to record video.
Now add the following to record(_:):
startCameraFromViewController(self, withDelegate: self)
You are again in familiar territory. The code simply calls startCameraFromViewController(_:withDelegate:) when you tap the "Record Video" button.
Build and run to see what you’ve got so far.
Go to the Record screen and press the "Record Video" button. Instead of the Photo Gallery, the camera UI opens. Start recording a video by tapping the red record button at the bottom of the screen, and tap it again when you're done recording.
Cheers,
Theo
Here is working code. You will need to deal correctly with optional values and error handling in a real project, but you can use the following code as an example:
//
// ViewController.swift
// CustomCamera
//
// Created by Taras Chernyshenko on 6/27/17.
// Copyright © 2017 Taras Chernyshenko. All rights reserved.
//
import UIKit
import AVFoundation
import AssetsLibrary
class CameraViewController: UIViewController,
AVCaptureAudioDataOutputSampleBufferDelegate,
AVCaptureVideoDataOutputSampleBufferDelegate {
@IBOutlet var recordOutlet: UIButton!
@IBOutlet var recordLabel: UILabel!
@IBOutlet var cameraView: UIView!
var tempImage: UIImageView?
private var session: AVCaptureSession = AVCaptureSession()
private var deviceInput: AVCaptureDeviceInput?
private var previewLayer: AVCaptureVideoPreviewLayer?
private var videoOutput: AVCaptureVideoDataOutput = AVCaptureVideoDataOutput()
private var audioOutput: AVCaptureAudioDataOutput = AVCaptureAudioDataOutput()
private var videoDevice: AVCaptureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
private var audioConnection: AVCaptureConnection?
private var videoConnection: AVCaptureConnection?
private var assetWriter: AVAssetWriter?
private var audioInput: AVAssetWriterInput?
private var videoInput: AVAssetWriterInput?
private var fileManager: FileManager = FileManager()
private var recordingURL: URL?
private var isCameraRecording: Bool = false
private var isRecordingSessionStarted: Bool = false
private var recordingQueue = DispatchQueue(label: "recording.queue")
var captureSession: AVCaptureSession?
var stillImageOutput: AVCapturePhotoOutput?
var videoPreviewLayer: AVCaptureVideoPreviewLayer?
var currentCaptureDevice: AVCaptureDevice?
var usingFrontCamera = false
/* This is the function I want to use to start
recording a video */
@IBAction func recordingButton(_ sender: Any) {
if self.isCameraRecording {
self.stopRecording()
} else {
self.startRecording()
}
self.isCameraRecording = !self.isCameraRecording
}
override func viewDidLoad() {
super.viewDidLoad()
self.setup()
}
private func setup() {
self.session.sessionPreset = AVCaptureSessionPresetHigh
self.recordingURL = URL(fileURLWithPath: "\(NSTemporaryDirectory() as String)/file.mov")
if self.fileManager.isDeletableFile(atPath: self.recordingURL!.path) {
_ = try? self.fileManager.removeItem(atPath: self.recordingURL!.path)
}
self.assetWriter = try? AVAssetWriter(outputURL: self.recordingURL!,
fileType: AVFileTypeQuickTimeMovie)
let audioSettings = [
AVFormatIDKey : kAudioFormatAppleIMA4,
AVNumberOfChannelsKey : 1,
AVSampleRateKey : 16000.0
] as [String : Any]
let videoSettings = [
AVVideoCodecKey : AVVideoCodecH264,
AVVideoWidthKey : UIScreen.main.bounds.size.width,
AVVideoHeightKey : UIScreen.main.bounds.size.height
] as [String : Any]
self.videoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo,
outputSettings: videoSettings)
self.audioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio,
outputSettings: audioSettings)
self.videoInput?.expectsMediaDataInRealTime = true
self.audioInput?.expectsMediaDataInRealTime = true
if self.assetWriter!.canAdd(self.videoInput!) {
self.assetWriter?.add(self.videoInput!)
}
if self.assetWriter!.canAdd(self.audioInput!) {
self.assetWriter?.add(self.audioInput!)
}
self.deviceInput = try? AVCaptureDeviceInput(device: self.videoDevice)
if self.session.canAddInput(self.deviceInput) {
self.session.addInput(self.deviceInput)
}
self.previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
self.previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
let rootLayer = self.view.layer
rootLayer.masksToBounds = true
self.previewLayer?.frame = CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)
rootLayer.insertSublayer(self.previewLayer!, at: 0)
self.session.startRunning()
DispatchQueue.main.async {
self.session.beginConfiguration()
if self.session.canAddOutput(self.videoOutput) {
self.session.addOutput(self.videoOutput)
}
self.videoConnection = self.videoOutput.connection(withMediaType: AVMediaTypeVideo)
if self.videoConnection?.isVideoStabilizationSupported == true {
self.videoConnection?.preferredVideoStabilizationMode = .auto
}
self.session.commitConfiguration()
let audioDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeAudio)
let audioIn = try? AVCaptureDeviceInput(device: audioDevice)
if self.session.canAddInput(audioIn) {
self.session.addInput(audioIn)
}
if self.session.canAddOutput(self.audioOutput) {
self.session.addOutput(self.audioOutput)
}
self.audioConnection = self.audioOutput.connection(withMediaType: AVMediaTypeAudio)
}
}
private func startRecording() {
if self.assetWriter?.startWriting() != true {
print("error: \(self.assetWriter?.error.debugDescription ?? "")")
}
self.videoOutput.setSampleBufferDelegate(self, queue: self.recordingQueue)
self.audioOutput.setSampleBufferDelegate(self, queue: self.recordingQueue)
}
private func stopRecording() {
self.videoOutput.setSampleBufferDelegate(nil, queue: nil)
self.audioOutput.setSampleBufferDelegate(nil, queue: nil)
self.assetWriter?.finishWriting {
print("saved")
}
}
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer
sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
if !self.isRecordingSessionStarted {
let presentationTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
self.assetWriter?.startSession(atSourceTime: presentationTime)
self.isRecordingSessionStarted = true
}
let description = CMSampleBufferGetFormatDescription(sampleBuffer)!
if CMFormatDescriptionGetMediaType(description) == kCMMediaType_Audio {
if self.audioInput!.isReadyForMoreMediaData {
print("appendSampleBuffer audio");
self.audioInput?.append(sampleBuffer)
}
} else {
if self.videoInput!.isReadyForMoreMediaData {
print("appendSampleBuffer video");
if !self.videoInput!.append(sampleBuffer) {
print("Error writing video buffer");
}
}
}
}
}
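One follow-up note (not part of the original answer): the recording above lands in NSTemporaryDirectory()/file.mov, so after finishWriting completes you might export it to the photo library. A minimal sketch:
import Photos

// Sketch: copy the finished temp recording into the user's photo library.
// Requires a photo library usage description entry in Info.plist.
func saveToPhotoLibrary(_ url: URL) {
    PHPhotoLibrary.shared().performChanges({
        PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: url)
    }) { success, error in
        print(success ? "saved to photo library" : "save failed: \(String(describing: error))")
    }
}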
