I'm trying to first just get my app to show the camera preview of the wide angle camera.
But what I do it just doesn't show up. (the app has video and audio permission and is being tested on an iPhone 7 Plus)
Here's my session configuration code:
func configureSession() throws {
session.beginConfiguration()
// configure inputs
let videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
let audioDevice = AVCaptureDevice.default(.builtInMicrophone, for: .audio, position: .unspecified)
guard let videoInput = try? AVCaptureDeviceInput(device: videoDevice!), let audioInput = try? AVCaptureDeviceInput(device: audioDevice!), session.canAddInput(videoInput), session.canAddInput(audioInput) else {
throw CameraError.configurationFailed
}
// configure outputs
let output = AVCaptureMovieFileOutput()
let previewOutput = AVCaptureVideoDataOutput()
previewOutput.alwaysDiscardsLateVideoFrames = true
previewOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String : NSNumber(value: kCVPixelFormatType_32BGRA)]
guard session.canAddOutput(output) && session.canAddOutput(previewOutput) else {
throw CameraError.configurationFailed
}
session.sessionPreset = .high
session.addOutput(output)
session.addOutput(previewOutput)
session.commitConfiguration()
}
and this is the code for the previewLayer:
// adds given view as previewView to the session
func configureAsPreview(_ view: UIView) {
let previewLayer = AVCaptureVideoPreviewLayer(session: session)
view.layer.addSublayer(previewLayer)
previewLayer.frame = view.layer.frame
}
and it is being called in viewDidLoad:
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
if cameraManager.hasPermissionForVideo {
try! cameraManager.configureSession()
cameraManager.configureAsPreview(self.previewView)
cameraManager.startSession()
} else {
performSegue(withIdentifier: "presentPermissions", sender: nil)
}
}
What am I doing wrong?
I really can't figure it out and it's probably just something stupidly small.
Thanks.
Try update layer frame when needed, and use bounds instead of frame. Layers dont support autoresizing, you need to manage it manually.
private var __previewLayer = AVCaptureVideoPreviewLayer? = nil
func configureAsPreview(_ view: UIView) {
let previewLayer = AVCaptureVideoPreviewLayer(session: session)
self.view.layer.addSublayer(previewLayer)
previewLayer.frame = self.view.bounds
self.__previewLayer = previewLayer
}
override viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
self.__previewLayer?.frame = self.view.bounds
}
Related
I have iPhone 13 pro with iOS version 15.5. I am using XCode version 13.3.1.
I am trying to achieve camera zoom with the AVFoundation.
Below is the code I have tried out. The camera device selected is the .builtInTripleCamera. I would like to take macro images. I am trying out different zoom factors like 1.0, 3.0 and 15.0. But the camera preview is not changing. I am not sure whether this is the current method to obtain zoomed-in camera preview as well as the output. Did anybody manage to correctly set the camera zoom factor with AVFoundation?
private let captureSession = AVCaptureSession()
private lazy var previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
private let videoDataOutput = AVCaptureVideoDataOutput()
private var cameraDevice: AVCaptureDevice?
override func viewDidLoad() {
super.viewDidLoad()
self.setCameraInput()
self.setCameraOutput()
self.showCameraFeed()
self.captureSession.beginConfiguration()
self.captureSession.sessionPreset = AVCaptureSession.Preset.high;
self.captureSession.commitConfiguration()
self.captureSession.startRunning()
}
private func showCameraFeed() {
self.previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.view.layer.insertSublayer(self.previewLayer, at: 1)
self.previewLayer.frame = self.view.frame
// self.view.addGestureRecognizer(pgr)
}
private func setCameraInput() {
let deviceTypes: [AVCaptureDevice.DeviceType]
if #available(iOS 13, *) {
deviceTypes = [.builtInTripleCamera, .builtInDualWideCamera, .builtInDualCamera, .builtInWideAngleCamera]
} else {
deviceTypes = [.builtInDualCamera, .builtInWideAngleCamera]
}
let session = AVCaptureDevice.DiscoverySession(
deviceTypes: deviceTypes,
mediaType: .video,
position: .back
)
guard let device = session.devices.first else {
fatalError("No back camera device found.")
}
cameraDevice = device
addInputToSession(device: cameraDevice!, session: self.captureSession)
}
private func addInputToSession(device:AVCaptureDevice,session:AVCaptureSession){
let cameraInput = try! AVCaptureDeviceInput(device: device)
if(session.canAddInput(cameraInput)){
do{
try device.lockForConfiguration()
cameraInput.device.videoZoomFactor = 15
session.addInput(cameraInput)
device.unlockForConfiguration()
}catch{
print(error)
}
}
}
private func setCameraOutput() {
self.videoDataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString) : NSNumber(value: kCVPixelFormatType_32BGRA)] as [String : Any]
self.videoDataOutput.alwaysDiscardsLateVideoFrames = true
self.videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "camera_frame_processing_queue"))
self.captureSession.addOutput(self.videoDataOutput)
guard let connection = self.videoDataOutput.connection(with: AVMediaType.video),
connection.isVideoOrientationSupported else { return }
connection.videoOrientation = .portrait
}
Handle camera zoom with pinch recognizer.
cameraDevice.videoZoomFactor
and set it to the recognizer on .began state like this
#objc private func viewPinched(recognizer: UIPinchGestureRecognizer) {
switch recognizer.state {
case .began:
recognizer.scale = cameraDevice.videoZoomFactor
case .changed:
let scale = recognizer.scale
do {
try cameraDevice.lockForConfiguration()
cameraDevice.videoZoomFactor = max(cameraDevice.minAvailableVideoZoomFactor, min(scale, cameraDevice.maxAvailableVideoZoomFactor))
cameraDevice.unlockForConfiguration()
}
catch {
print(error)
}
default:
break
}
}
I am trying to capture photos with depth data using the back camera I followed the example on apple website here but the depth data is always nil. I tried this solution on multiple ios devices but I keep getting nil
my code:
func setupCamare(){
// Select a depth-capable capture device.
guard let videoDevice = AVCaptureDevice.default(.builtInDualCamera,
for: .video, position: .back)
else { fatalError("No dual camera.") }
guard let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
self.captureSession.canAddInput(videoDeviceInput)
else { fatalError("Can't add video input.") }
guard self.captureSession.canAddOutput(photoOutput)
else { fatalError("Can't add photo output.") }
self.captureSession.beginConfiguration()
self.captureSession.addInput(videoDeviceInput)
// Set up photo output for depth data capture.
photoOutput.isHighResolutionCaptureEnabled = true
print(photoOutput.isDepthDataDeliverySupported)
photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
self.captureSession.addOutput(photoOutput)
self.captureSession.sessionPreset = .photo
self.captureSession.commitConfiguration()
videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer?.frame = view.layer.bounds
view.layer.addSublayer(videoPreviewLayer!)
captureSession.startRunning()
self.view.bringSubviewToFront(self.button)
}
#IBAction func takePhoto(_ sender:UIButton){
let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey:AVVideoCodecType.jpeg])
photoSettings.isDepthDataFiltered = photoOutput.isDepthDataDeliveryEnabled;
photoOutput.capturePhoto(with: photoSettings, delegate: self)
}
I'm currently working on a camera app. Everything worked fine, but when I tried to change the constraints of the Vision View the log suddenly printed this error.
[warning]the specified colorspace format is not supported. falling back on libyuv.
I have no Idea where it comes from and what I should change. Below I'll past the relevant code where I set up the camera.
func initializeCameraSession() {
// Set up Values
//1: Create a new AV Session
// , xf , AVCaptureVideoDataOutputSampleBufferDelegate // Get camera devices
let devices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .front).devices
//2: Select a capture device
avSession.sessionPreset = .low
do {
if let captureDevice = devices.first {
let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
}
avSession.beginConfiguration()
if avSession.canAddInput(captureDeviceInput) {
avSession.addInput(captureDeviceInput)
self.videoDeviceInput = captureDeviceInput
} else {
print("Couldn't add video device input to the session.")
avSession.commitConfiguration()
return
}
avSession.commitConfiguration()
}
} catch {
print(error.localizedDescription)
}
//3: Show output on a preview layer
let captureOutput = AVCaptureVideoDataOutput()
captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
captureOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as String) : NSNumber(value: kCVPixelFormatType_32BGRA )]
avSession.addOutput(captureOutput)
let previewLayer = AVCaptureVideoPreviewLayer(session: avSession)
previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
previewLayer.connection?.videoOrientation = .portrait
previewLayer.frame = visionView.bounds
visionView.layer.addSublayer(previewLayer)
view.bringSubviewToFront(visionView)
visionView.isHidden = true
visionView.alpha = 0.0
avSession.startRunning()
}
}
I am trying to add a button on top of my AVCapturesession preview layer. this button is to capture images (fire the capture function)..however i can not seem to set one up.
func configureCaptureSession() {
captureSession.sessionPreset = AVCaptureSession.Preset.photo
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .unspecified)
for device in deviceDiscoverySession.devices {
if device.position == .back {
rearCam = device
} else if device.position == .front {
frontCam = device
}
}
currentDevice = rearCam
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice) else {
return
}
// wheelOverlay.image = UIImage(named: "wheel")
// wheelOverlayLayer.contents = wheelOverlay
stillPhotoOutput = AVCapturePhotoOutput()
captureSession.addInput(captureDeviceInput)
captureSession.addOutput(stillPhotoOutput)
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.frame = view.layer.frame
print("Capture session setup")
// view.layer.addSublayer(wheelOverlayLayer)
// view.layer.insertSublayer(wheelOverlayLayer, above: cameraPreviewLayer)
captureSession.startRunning()
}
where abouts do i need to add the subview?
You need to add it to the view after you add the cameraLayer like this
view.layer.addSublayer(cameraPreviewLayer)
let bt = UIButton(type:.system)
bt.setTitle("Record",for:.normal)
bt.frame = /////
view.addSubview(bt)
I am working on recording video using AVCaptureSession in Swift and it is working fine but my client requirement is to remove the Camera with transform animation is it possible to achive this.
Please check the below code I have tried :
var captureDevice : AVCaptureDevice!
var captureSession: AVCaptureSession?
var stillImageOutput: AVCaptureStillImageOutput?
var previewLayer: AVCaptureVideoPreviewLayer?
captureSession = AVCaptureSession()
captureSession?.sessionPreset = AVCaptureSessionPresetPhoto
if let availbleDevices = AVCaptureDeviceDiscoverySession.init(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .front).devices {
captureDevice = availbleDevices.first
captureSession?.beginConfiguration()
configureVideoPreset()
addVideoInput()
configureVideoOutput()
configurePhotoOutput()
captureSession?.commitConfiguration()
beginSession()
...
}
In the begin session I have add preview layer in view.layer
func beginSession(){
do {
let input = try AVCaptureDeviceInput(device: captureDevice)
captureSession!.addInput(input)
} catch{
print(error.localizedDescription)
}
if let previewLayer = AVCaptureVideoPreviewLayer(session:captureSession){
self.previewLayer = previewLayer
self.previewLayer?.frame = view.bounds
self.previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
let dataOutput = AVCaptureVideoDataOutput()
dataOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as NSString:kCVPixelFormatType_32BGRA]
dataOutput.alwaysDiscardsLateVideoFrames = true
if captureDevice.hasFlash == true {
do {
try captureDevice.lockForConfiguration()
captureDevice.flashMode = AVCaptureFlashMode.on
captureDevice.unlockForConfiguration()
} catch {
print("[SwiftyCam]: \(error)")
}
}
self.view.layer.addSublayer(self.previewLayer!)
captureSession?.commitConfiguration()
captureSession!.startRunning()
captureSession?.beginConfiguration()
if (captureSession?.canAddOutput(dataOutput))!{
captureSession?.addOutput(dataOutput)
}
captureSession?.commitConfiguration()
let output = AVCaptureMetadataOutput()
output.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
captureSession?.addOutput(output)
output.metadataObjectTypes = [AVMetadataObjectTypeFace]
print("array \(output.metadataObjectTypes)")
let queue = DispatchQueue.init(label: "com.UOO.captureQueue")
dataOutput.setSampleBufferDelegate(self , queue: queue)
}
}
It's all is working fine and I am able to record video also but I am not able to hide camera with animation.
func setView(view: UIView, hidden: Bool) {
UIView.transition(with: view, duration: 0.5, options: .transitionCrossDissolve, animations: {() -> Void in
view.isHidden = hidden
}, completion: nil)
}
use this method like
self.setView(view: previewLayer as! UIView, hidden:true)
I assume from your question, preview layer is the one remove from view without animation