I am trying to add a button on top of my AVCaptureSession preview layer. The button is meant to capture images (fire the capture function); however, I cannot seem to set one up.
func configureCaptureSession() {
    captureSession.sessionPreset = AVCaptureSession.Preset.photo

    // Find the front and rear cameras.
    let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInDualCamera], mediaType: AVMediaType.video, position: .unspecified)
    for device in deviceDiscoverySession.devices {
        if device.position == .back {
            rearCam = device
        } else if device.position == .front {
            frontCam = device
        }
    }
    currentDevice = rearCam

    guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice) else {
        return
    }

    // wheelOverlay.image = UIImage(named: "wheel")
    // wheelOverlayLayer.contents = wheelOverlay

    stillPhotoOutput = AVCapturePhotoOutput()
    captureSession.addInput(captureDeviceInput)
    captureSession.addOutput(stillPhotoOutput)

    // Preview layer for the live camera feed.
    cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    cameraPreviewLayer?.frame = view.layer.frame
    print("Capture session setup")

    // view.layer.addSublayer(wheelOverlayLayer)
    // view.layer.insertSublayer(wheelOverlayLayer, above: cameraPreviewLayer)

    captureSession.startRunning()
}
Whereabouts do I need to add the subview?
You need to add it to the view after you add the camera layer, like this:
view.layer.addSublayer(cameraPreviewLayer!)
let bt = UIButton(type: .system)
bt.setTitle("Record", for: .normal)
bt.frame = CGRect(x: 0, y: 0, width: 100, height: 44) // example frame; position it however you need
view.addSubview(bt)
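To make the button actually fire the capture, wire it to an action as well. A minimal sketch; capturePhoto() is a hypothetical name standing in for your own capture function, and the view controller is assumed to conform to AVCapturePhotoCaptureDelegate:

bt.addTarget(self, action: #selector(capturePhoto), for: .touchUpInside)

// Hypothetical capture action elsewhere in the view controller:
@objc func capturePhoto() {
    let settings = AVCapturePhotoSettings()
    stillPhotoOutput.capturePhoto(with: settings, delegate: self)
}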
Thanks for reading.
I am creating a custom camera using AVFoundation.
When I maximize the wide angle with the ultra wide angle camera (when videoZoomFactor is minimized), the field of view is narrower than in the Apple default camera.
Looking at the metadata in the Photos album, the focal length is 13 mm for the Apple default camera, while it is 16 mm for mine. Below is an excerpt of the code.
Camera Settings
if let captureDevice = AVCaptureDevice.default(.builtInTripleCamera, for: .video, position: .back) {
    self.captureDevice = captureDevice
} else if let captureDevice = AVCaptureDevice.default(.builtInDualWideCamera, for: .video, position: .back) {
    self.captureDevice = captureDevice
} else if let captureDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back) {
    self.captureDevice = captureDevice
}

do {
    // self.captureDevice is optional, so unwrap it before building the input.
    guard let captureDevice = self.captureDevice else { return }
    let input = try AVCaptureDeviceInput(device: captureDevice)
    let videoDataOutput = AVCaptureVideoDataOutput()
    // Omitted
    photoOutput = AVCapturePhotoOutput()
    guard let photoOutput = photoOutput else { return }
    photoOutput.isHighResolutionCaptureEnabled = true
    session.sessionPreset = .photo
    // Omitted
} catch {
    // Error handling omitted
}

// Extended cinematic stabilization is requested on every connection.
for connection in session.connections {
    connection.preferredVideoStabilizationMode = .cinematicExtended
}
Zoom function
func zoom(zoomFactor: CGFloat, ramping: Bool = false) {
    do {
        try captureDevice?.lockForConfiguration()
        self.zoomFactor = zoomFactor
        if ramping {
            captureDevice?.ramp(toVideoZoomFactor: zoomFactor, withRate: 10.0)
        } else {
            captureDevice?.videoZoomFactor = zoomFactor
        }
        captureDevice?.unlockForConfiguration()
    } catch {
        errorReportingService.reportError(error: error)
    }
}
Test devices: iPhone 11, iPhone 12 mini
Thanks for reading this far. I want to make it as wide-angle as the Apple default camera!
The default camera app allows for a wider angle than the one I created, so I believe there is a way.
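One likely factor (an assumption on my part, not confirmed by the question): .cinematicExtended video stabilization crops the frame to make room for stabilization, which narrows the effective field of view, so comparing against .standard or .off is worth a try. It also helps to log the zoom range the virtual device actually exposes. A minimal diagnostic sketch, assuming the selected back camera is passed in:

func logZoomRange(for device: AVCaptureDevice) {
    // On a virtual device, zoom factor 1.0 maps to the widest constituent
    // camera (the ultra wide on a triple or dual-wide camera).
    print("min zoom:", device.minAvailableVideoZoomFactor)
    print("max zoom:", device.maxAvailableVideoZoomFactor)
    // Factors at which the virtual device switches constituent cameras,
    // e.g. the factor where the wide camera takes over from the ultra wide.
    print("switch-over factors:", device.virtualDeviceSwitchOverVideoZoomFactors)
}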
I have an iPhone 13 Pro with iOS version 15.5. I am using Xcode version 13.3.1.
I am trying to achieve camera zoom with AVFoundation.
Below is the code I have tried. The camera device selected is the .builtInTripleCamera. I would like to take macro images. I have tried different zoom factors such as 1.0, 3.0, and 15.0, but the camera preview does not change. I am not sure whether this is the correct method to obtain a zoomed-in camera preview as well as output. Did anybody manage to correctly set the camera zoom factor with AVFoundation?
private let captureSession = AVCaptureSession()
private lazy var previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
private let videoDataOutput = AVCaptureVideoDataOutput()
private var cameraDevice: AVCaptureDevice?
override func viewDidLoad() {
    super.viewDidLoad()
    self.setCameraInput()
    self.setCameraOutput()
    self.showCameraFeed()

    self.captureSession.beginConfiguration()
    self.captureSession.sessionPreset = AVCaptureSession.Preset.high
    self.captureSession.commitConfiguration()
    self.captureSession.startRunning()
}
private func showCameraFeed() {
    self.previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
    self.view.layer.insertSublayer(self.previewLayer, at: 1)
    self.previewLayer.frame = self.view.frame
    // self.view.addGestureRecognizer(pgr)
}
private func setCameraInput() {
    let deviceTypes: [AVCaptureDevice.DeviceType]
    if #available(iOS 13, *) {
        deviceTypes = [.builtInTripleCamera, .builtInDualWideCamera, .builtInDualCamera, .builtInWideAngleCamera]
    } else {
        deviceTypes = [.builtInDualCamera, .builtInWideAngleCamera]
    }
    let session = AVCaptureDevice.DiscoverySession(
        deviceTypes: deviceTypes,
        mediaType: .video,
        position: .back
    )
    guard let device = session.devices.first else {
        fatalError("No back camera device found.")
    }
    cameraDevice = device
    addInputToSession(device: cameraDevice!, session: self.captureSession)
}
private func addInputToSession(device: AVCaptureDevice, session: AVCaptureSession) {
    do {
        let cameraInput = try AVCaptureDeviceInput(device: device)
        if session.canAddInput(cameraInput) {
            try device.lockForConfiguration()
            cameraInput.device.videoZoomFactor = 15
            session.addInput(cameraInput)
            device.unlockForConfiguration()
        }
    } catch {
        print(error)
    }
}
private func setCameraOutput() {
    self.videoDataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)] as [String: Any]
    self.videoDataOutput.alwaysDiscardsLateVideoFrames = true
    self.videoDataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "camera_frame_processing_queue"))
    self.captureSession.addOutput(self.videoDataOutput)
    guard let connection = self.videoDataOutput.connection(with: AVMediaType.video),
          connection.isVideoOrientationSupported else { return }
    connection.videoOrientation = .portrait
}
Handle camera zoom with a pinch recognizer. Read cameraDevice.videoZoomFactor and set it as the recognizer's scale in the .began state, like this:
@objc private func viewPinched(recognizer: UIPinchGestureRecognizer) {
    switch recognizer.state {
    case .began:
        recognizer.scale = cameraDevice.videoZoomFactor
    case .changed:
        let scale = recognizer.scale
        do {
            try cameraDevice.lockForConfiguration()
            cameraDevice.videoZoomFactor = max(cameraDevice.minAvailableVideoZoomFactor, min(scale, cameraDevice.maxAvailableVideoZoomFactor))
            cameraDevice.unlockForConfiguration()
        } catch {
            print(error)
        }
    default:
        break
    }
}
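For the pinch to be delivered at all, the recognizer still has to be attached to the view; the question's showCameraFeed() has that line commented out. A minimal sketch:

let pgr = UIPinchGestureRecognizer(target: self, action: #selector(viewPinched(recognizer:)))
self.view.addGestureRecognizer(pgr)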
I am trying to capture photos with depth data using the back camera. I followed the example on the Apple website here, but the depth data is always nil. I tried this solution on multiple iOS devices, but I keep getting nil.
My code:
func setupCamera() {
    // Select a depth-capable capture device.
    guard let videoDevice = AVCaptureDevice.default(.builtInDualCamera, for: .video, position: .back)
    else { fatalError("No dual camera.") }
    guard let videoDeviceInput = try? AVCaptureDeviceInput(device: videoDevice),
          self.captureSession.canAddInput(videoDeviceInput)
    else { fatalError("Can't add video input.") }
    guard self.captureSession.canAddOutput(photoOutput)
    else { fatalError("Can't add photo output.") }

    self.captureSession.beginConfiguration()
    self.captureSession.addInput(videoDeviceInput)

    // Set up photo output for depth data capture.
    photoOutput.isHighResolutionCaptureEnabled = true
    print(photoOutput.isDepthDataDeliverySupported)
    photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
    self.captureSession.addOutput(photoOutput)
    self.captureSession.sessionPreset = .photo
    self.captureSession.commitConfiguration()

    videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    videoPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
    videoPreviewLayer?.frame = view.layer.bounds
    view.layer.addSublayer(videoPreviewLayer!)
    captureSession.startRunning()
    self.view.bringSubviewToFront(self.button)
}
@IBAction func takePhoto(_ sender: UIButton) {
    let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
    photoSettings.isDepthDataFiltered = photoOutput.isDepthDataDeliveryEnabled
    photoOutput.capturePhoto(with: photoSettings, delegate: self)
}
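Two things differ from Apple's sample and are worth checking (I can't confirm they are the only causes here): isDepthDataDeliverySupported on the photo output only becomes true once the output is connected to a suitably configured session, so enabling delivery before addOutput(photoOutput) and before setting the .photo preset leaves it disabled; and each capture's settings must also opt in with isDepthDataDeliveryEnabled, since isDepthDataFiltered alone does not request depth. A minimal sketch of the adjusted ordering:

// Connect the output and set the preset first, then enable depth delivery.
captureSession.beginConfiguration()
captureSession.addInput(videoDeviceInput)
captureSession.addOutput(photoOutput)
captureSession.sessionPreset = .photo
photoOutput.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliverySupported
captureSession.commitConfiguration()

// Each capture must request depth in its settings as well.
let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
photoSettings.isDepthDataDeliveryEnabled = photoOutput.isDepthDataDeliveryEnabled
photoOutput.capturePhoto(with: photoSettings, delegate: self)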
I'm currently working on a camera app. Everything worked fine, but when I tried to change the constraints of the vision view, the log suddenly printed this error:
[warning]the specified colorspace format is not supported. falling back on libyuv.
I have no idea where it comes from or what I should change. Below I'll paste the relevant code where I set up the camera.
func initializeCameraSession() {
    //1: Create a new AV session and get the camera devices
    let devices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .front).devices

    //2: Select a capture device and add it as input
    avSession.sessionPreset = .low
    do {
        guard let captureDevice = devices.first else { return }
        let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
        avSession.beginConfiguration()
        if avSession.canAddInput(captureDeviceInput) {
            avSession.addInput(captureDeviceInput)
            self.videoDeviceInput = captureDeviceInput
        } else {
            print("Couldn't add video device input to the session.")
            avSession.commitConfiguration()
            return
        }
        avSession.commitConfiguration()
    } catch {
        print(error.localizedDescription)
    }

    //3: Show output on a preview layer
    let captureOutput = AVCaptureVideoDataOutput()
    captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
    captureOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as String): NSNumber(value: kCVPixelFormatType_32BGRA)]
    avSession.addOutput(captureOutput)

    let previewLayer = AVCaptureVideoPreviewLayer(session: avSession)
    previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
    previewLayer.connection?.videoOrientation = .portrait
    previewLayer.frame = visionView.bounds
    visionView.layer.addSublayer(previewLayer)
    view.bringSubviewToFront(visionView)
    visionView.isHidden = true
    visionView.alpha = 0.0

    avSession.startRunning()
}
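A plausible cause of the libyuv warning (an assumption on my part; the log alone does not prove it): requesting kCVPixelFormatType_32BGRA forces a color conversion from the camera's native biplanar YUV output, and the converter falls back to libyuv for colorspace combinations it doesn't support. Asking the output for a format it supports natively avoids the conversion. A minimal sketch:

// Prefer the output's first natively supported pixel format (typically a
// biplanar YUV format) over forcing BGRA.
if let nativeFormat = captureOutput.availableVideoPixelFormatTypes.first {
    captureOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: nativeFormat]
}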
I'm trying to first just get my app to show the camera preview of the wide-angle camera.
But whatever I do, it just doesn't show up. (The app has video and audio permission and is being tested on an iPhone 7 Plus.)
Here's my session configuration code:
func configureSession() throws {
    session.beginConfiguration()

    // configure inputs
    let videoDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
    let audioDevice = AVCaptureDevice.default(.builtInMicrophone, for: .audio, position: .unspecified)
    guard let videoInput = try? AVCaptureDeviceInput(device: videoDevice!),
          let audioInput = try? AVCaptureDeviceInput(device: audioDevice!),
          session.canAddInput(videoInput), session.canAddInput(audioInput) else {
        throw CameraError.configurationFailed
    }

    // configure outputs
    let output = AVCaptureMovieFileOutput()
    let previewOutput = AVCaptureVideoDataOutput()
    previewOutput.alwaysDiscardsLateVideoFrames = true
    previewOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32BGRA)]
    guard session.canAddOutput(output) && session.canAddOutput(previewOutput) else {
        throw CameraError.configurationFailed
    }

    session.sessionPreset = .high
    session.addOutput(output)
    session.addOutput(previewOutput)
    session.commitConfiguration()
}
and this is the code for the previewLayer:
// adds given view as previewView to the session
func configureAsPreview(_ view: UIView) {
    let previewLayer = AVCaptureVideoPreviewLayer(session: session)
    view.layer.addSublayer(previewLayer)
    previewLayer.frame = view.layer.frame
}
and it is being called in viewDidLoad:
override func viewDidLoad() {
    super.viewDidLoad()
    // Do any additional setup after loading the view, typically from a nib.
    if cameraManager.hasPermissionForVideo {
        try! cameraManager.configureSession()
        cameraManager.configureAsPreview(self.previewView)
        cameraManager.startSession()
    } else {
        performSegue(withIdentifier: "presentPermissions", sender: nil)
    }
}
What am I doing wrong?
I really can't figure it out and it's probably just something stupidly small.
Thanks.
Try updating the layer frame when needed, and use bounds instead of frame. Layers don't support autoresizing; you need to manage it manually.
private var __previewLayer: AVCaptureVideoPreviewLayer? = nil

func configureAsPreview(_ view: UIView) {
    let previewLayer = AVCaptureVideoPreviewLayer(session: session)
    view.layer.addSublayer(previewLayer)
    previewLayer.frame = view.bounds
    self.__previewLayer = previewLayer
}

override func viewDidLayoutSubviews() {
    super.viewDidLayoutSubviews()
    self.__previewLayer?.frame = self.view.bounds
}