I want to make a custom camera view with an overlay. The overlay is an image used as a template, but the rect of the clear space will change depending on the phone.
Template:
I tried to create a view as a container behind the image, but the captured photo includes the parts that I don't want.
self.session = AVCaptureSession()
self.session!.sessionPreset = AVCaptureSession.Preset.photo
let backCamera = AVCaptureDevice.default(for: AVMediaType.video)
var error: NSError?
var input: AVCaptureDeviceInput!
do {
    input = try AVCaptureDeviceInput(device: backCamera!)
} catch let error1 as NSError {
    error = error1
    input = nil
    print(error!.localizedDescription)
}
if error == nil && session!.canAddInput(input) {
    session!.addInput(input)
    stillImageOutput = AVCaptureStillImageOutput()
    stillImageOutput?.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
    if session!.canAddOutput(stillImageOutput!) {
        session!.addOutput(stillImageOutput!)
        videoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.session!)
        videoPreviewLayer!.videoGravity = AVLayerVideoGravity.resizeAspect
        videoPreviewLayer!.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        previewView.layer.addSublayer(videoPreviewLayer!)
        session!.startRunning()
    }
}
// in viewDidLoad
videoPreviewLayer!.frame = previewView.bounds // previewView is the UIView behind the image
The expected result is that only the white (clear) space would act as the custom camera view. Also, it seems that AVCaptureStillImageOutput was deprecated in iOS 10.0.
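One approach (not from the original post) is to capture with AVCapturePhotoOutput, which replaces the deprecated AVCaptureStillImageOutput, and then crop the captured photo to the overlay's clear rect. The names photoOutput and clearRect below are assumptions for illustration; this is a minimal sketch assuming iOS 11+ and that clearRect is expressed in the preview layer's coordinate space:

// Assumed properties: photoOutput (AVCapturePhotoOutput), videoPreviewLayer, clearRect (CGRect in layer coordinates)
func capturePhoto() {
    let settings = AVCapturePhotoSettings()
    photoOutput.capturePhoto(with: settings, delegate: self)
}

func photoOutput(_ output: AVCapturePhotoOutput,
                 didFinishProcessingPhoto photo: AVCapturePhoto,
                 error: Error?) {
    guard error == nil,
          let data = photo.fileDataRepresentation(),
          let fullImage = UIImage(data: data),
          let cgImage = fullImage.cgImage else { return }

    // Convert the overlay's clear rect (layer coordinates) into a normalized rect
    // in the capture output's coordinate space, then scale it to pixel coordinates.
    let normalized = videoPreviewLayer.metadataOutputRectConverted(fromLayerRect: clearRect)
    let cropRect = CGRect(x: normalized.origin.x * CGFloat(cgImage.width),
                          y: normalized.origin.y * CGFloat(cgImage.height),
                          width: normalized.size.width * CGFloat(cgImage.width),
                          height: normalized.size.height * CGFloat(cgImage.height))

    if let croppedCGImage = cgImage.cropping(to: cropRect) {
        // Keep the original orientation so the crop displays upright.
        let cropped = UIImage(cgImage: croppedCGImage,
                              scale: fullImage.scale,
                              orientation: fullImage.imageOrientation)
        // Use `cropped` as the captured result.
    }
}

Because clearRect is measured against the preview layer, this keeps working when the overlay's clear space changes size on different phones.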
I'm currently working on a camera app. Everything worked fine, but when I tried to change the constraints of the Vision View, the log suddenly printed this warning:
[warning]the specified colorspace format is not supported. falling back on libyuv.
I have no idea where it comes from or what I should change. Below I'll paste the relevant code where I set up the camera.
func initializeCameraSession() {
    // Set up values
    // 1: Create a new AV session and get the camera devices
    let devices = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                   mediaType: AVMediaType.video,
                                                   position: .front).devices

    // 2: Select a capture device
    avSession.sessionPreset = .low
    do {
        if let captureDevice = devices.first {
            let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
            avSession.beginConfiguration()
            if avSession.canAddInput(captureDeviceInput) {
                avSession.addInput(captureDeviceInput)
                self.videoDeviceInput = captureDeviceInput
            } else {
                print("Couldn't add video device input to the session.")
                avSession.commitConfiguration()
                return
            }
            avSession.commitConfiguration()
        }
    } catch {
        print(error.localizedDescription)
    }

    // 3: Show output on a preview layer
    let captureOutput = AVCaptureVideoDataOutput()
    captureOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
    captureOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as String): NSNumber(value: kCVPixelFormatType_32BGRA)]
    avSession.addOutput(captureOutput)

    let previewLayer = AVCaptureVideoPreviewLayer(session: avSession)
    previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
    previewLayer.connection?.videoOrientation = .portrait
    previewLayer.frame = visionView.bounds
    visionView.layer.addSublayer(previewLayer)
    view.bringSubviewToFront(visionView)
    visionView.isHidden = true
    visionView.alpha = 0.0

    avSession.startRunning()
}
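Not part of the original post, but one hedged guess about the libyuv warning: the 32BGRA pixel format requested on the data output may not be what this session/preset combination natively supports, so the framework falls back to a software conversion. A minimal diagnostic sketch, assuming a recent SDK where availableVideoPixelFormatTypes is exposed as [OSType]:

// Hypothetical check: only request kCVPixelFormatType_32BGRA if the output actually offers it.
let supportedFormats = captureOutput.availableVideoPixelFormatTypes
print("Supported pixel formats: \(supportedFormats)")

if supportedFormats.contains(kCVPixelFormatType_32BGRA) {
    captureOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as String): NSNumber(value: kCVPixelFormatType_32BGRA)]
} else if let fallback = supportedFormats.first {
    // Fall back to the output's preferred (first) format, typically a YpCbCr format.
    captureOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as String): NSNumber(value: fallback)]
}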
When I try to create an instance of type AVCaptureDeviceInput I get the following error.
What have I done so far:
1) I have gained permission to use the camera and microphone.
2) I have tested the code on an iPhone 7 and an iPhone 5s.
3) When the error is displayed, I printed the value of session.isRunning and it returned true.
4) All the properties are retained strongly.
Error Domain=AVFoundationErrorDomain Code=-11814 "Cannot Record" UserInfo={NSLocalizedDescription=Cannot Record, NSLocalizedRecoverySuggestion=Try recording again.
This is the code:
let session = AVCaptureSession()
self.session = session
session.sessionPreset = AVCaptureSessionPresetPhoto
do {
    let input = try AVCaptureDeviceInput(device: device)
    session.addInput(input)
    if session.canAddInput(input) {
        let stillImageOutput = AVCapturePhotoOutput()
        self.stillImageOutput = stillImageOutput
        let settings = AVCapturePhotoSettings()
        stillImageOutput.capturePhoto(with: settings, delegate: self)
        if session.canAddOutput(stillImageOutput) {
            session.addOutput(stillImageOutput)
            if let previewLayer = AVCaptureVideoPreviewLayer(session: session) {
                self.previewLayer = previewLayer
                previewLayer.videoGravity = AVLayerVideoGravityResizeAspect
                previewLayer.connection!.videoOrientation = AVCaptureVideoOrientation.portrait
                viewController.centerView.layer.insertSublayer(previewLayer, at: 0)
                session.startRunning()
            }
        }
    }
} catch {
    print(error.localizedDescription)
}
There are several issues I see in your code; an updated version with comments would look like this:
let session = AVCaptureSession()
self.session = session
// `This method is used to start the flow of data from the inputs to the outputs connected to the AVCaptureSession instance that is the receiver.`
// session.startRunning() // Don't startRunning until everything is configured
session.sessionPreset = AVCaptureSessionPresetPhoto
do {
    let input = try AVCaptureDeviceInput(device: device)
    // session.addInput(input) // This one has to be after you check if you `canAddInput`
    if session.canAddInput(input) {
        session.addInput(input)
        let stillImageOutput = AVCapturePhotoOutput()
        self.stillImageOutput = stillImageOutput
        let settings = AVCapturePhotoSettings()
        // stillImageOutput.capturePhoto(with: settings, delegate: self) // This one might want to be called after you add it as an output to the `session`
        if session.canAddOutput(stillImageOutput) {
            session.addOutput(stillImageOutput)
            if let previewLayer = AVCaptureVideoPreviewLayer(session: session) {
                self.previewLayer = previewLayer
                previewLayer.videoGravity = AVLayerVideoGravityResizeAspect
                previewLayer.connection!.videoOrientation = AVCaptureVideoOrientation.portrait
                viewController.centerView.layer.insertSublayer(previewLayer, at: 0)
                session.startRunning()
            }
            stillImageOutput.capturePhoto(with: settings, delegate: self)
        }
    }
} catch {
    print(error.localizedDescription)
}
I have an AVCaptureVideoPreviewLayer in my app that works well and shows the same preview video as the camera app. I would like to implement the 2x zoom functionality of the camera app. How do I do this?
Basically, I want my preview layer to change the video feed to the same scale you see in the camera app when you tap the 1x icon to change it to 2x.
Setting up the preview layer:
func startSession() {
    captureSession = AVCaptureSession()
    captureSession?.sessionPreset = AVCaptureSessionPresetPhoto
    let backCamera = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
    // Catch errors using the do-catch block
    do {
        let input = try AVCaptureDeviceInput(device: backCamera)
        if (captureSession?.canAddInput(input) != nil) {
            captureSession?.addInput(input)
            // Set up the preview layer
            previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
            previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
            previewLayer?.connection.videoOrientation = AVCaptureVideoOrientation.portrait
            tempImageView.layer.addSublayer(previewLayer!)
            captureSession?.startRunning()
            // Set up AVCaptureVideoDataOutput
            let dataOutput = AVCaptureVideoDataOutput()
            dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA as UInt32)]
            dataOutput.alwaysDiscardsLateVideoFrames = true
            if (captureSession?.canAddOutput(dataOutput) == true) {
                captureSession?.addOutput(dataOutput)
            }
            let queue = DispatchQueue(label: "com.bigbob.videoQueue")
            dataOutput.setSampleBufferDelegate(self, queue: queue)
        }
    } catch _ {
        print("Error setting up camera!")
    }
}
Set the videoZoomFactor of your AVCaptureDevice (the one you got from defaultDevice) and the preview layer's zoom will follow suit. Note that in Swift 4 the device is now obtained with AVCaptureDevice.default.
do {
    try backCamera?.lockForConfiguration()
    let zoomFactor: CGFloat = 2
    backCamera?.videoZoomFactor = zoomFactor
    backCamera?.unlockForConfiguration()
} catch {
    // Handle the error thrown by lockForConfiguration
}
Here's a bit of an updated answer that first checks whether the zoom factor is available before you even attempt to set it. This prevents possibly unneeded exception catches, and you can adjust both the check and the zoom with a single variable.
if let captureDevice = AVCaptureDevice.default(for: AVMediaType.video) {
    let zoomFactor: CGFloat = 2
    if captureDevice.maxAvailableVideoZoomFactor >= zoomFactor {
        try? captureDevice.lockForConfiguration()
        captureDevice.videoZoomFactor = zoomFactor
        captureDevice.unlockForConfiguration()
    }
}
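Not part of the original answers, but if you want the zoom change to animate the way the camera app does rather than jump, AVCaptureDevice also offers a ramped zoom. A minimal sketch, assuming captureDevice is the same unwrapped device as above:

do {
    try captureDevice.lockForConfiguration()
    // Ramp from the current zoom factor toward 2x instead of switching instantly;
    // the rate is expressed in doublings of the zoom factor per second.
    captureDevice.ramp(toVideoZoomFactor: 2.0, withRate: 4.0)
    captureDevice.unlockForConfiguration()
} catch {
    print("Could not lock the device for configuration: \(error)")
}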
I'm using AVFoundation to recognize text and perform OCR. How do I add autofocus? I don't want the yellow square that appears when the user taps the screen; I just want the camera to automatically focus on the object, a credit card for example.
Here is my session code.
func setupSession() {
    session = AVCaptureSession()
    session.sessionPreset = AVCaptureSessionPresetHigh
    let camera = AVCaptureDevice
        .defaultDeviceWithMediaType(AVMediaTypeVideo)
    do { input = try AVCaptureDeviceInput(device: camera) } catch { return }
    output = AVCaptureStillImageOutput()
    output.outputSettings = [AVVideoCodecKey: AVVideoCodecJPEG]
    guard session.canAddInput(input)
        && session.canAddOutput(output) else { return }
    session.addInput(input)
    session.addOutput(output)
    previewLayer = AVCaptureVideoPreviewLayer(session: session)
    previewLayer!.videoGravity = AVLayerVideoGravityResizeAspect
    previewLayer!.connection?.videoOrientation = .Portrait
    view.layer.addSublayer(previewLayer!)
    session.startRunning()
}
On my 6S the default camera focus mode is .ContinuousAutoFocus, which continuously focuses on whatever is taking up most of the camera's field of vision. Sounds like that's what you want.
You can check if your camera supports auto focus like so:
camera.isFocusModeSupported(.ContinuousAutoFocus)
and if it's not already set, set it like so:
try! camera.lockForConfiguration()
camera.focusMode = .ContinuousAutoFocus
camera.unlockForConfiguration()
Here is what I did:
// Get an instance of the phone's camera
let captureDevice = AVCaptureDevice.defaultDevice(withMediaType: AVMediaTypeVideo)
// Try to enable continuous autofocus
if captureDevice!.isFocusModeSupported(.continuousAutoFocus) {
    try! captureDevice!.lockForConfiguration()
    captureDevice!.focusMode = .continuousAutoFocus
    captureDevice!.unlockForConfiguration()
}
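Not mentioned in the answers above, but since the subject here is a close-up object like a credit card, AVCaptureDevice also lets you restrict the autofocus range so the lens doesn't hunt toward the background. A minimal sketch, assuming captureDevice is the same device as above:

if let device = captureDevice, device.isAutoFocusRangeRestrictionSupported {
    do {
        try device.lockForConfiguration()
        // Bias the autofocus system toward nearby subjects (e.g. a card held close to the lens).
        device.autoFocusRangeRestriction = .near
        device.unlockForConfiguration()
    } catch {
        print("Could not lock the device for configuration: \(error)")
    }
}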
I am attempting to make a camera view appear. As you can see in my code below, it is set up to print any errors without breaking the program, but when I run it no error is printed and no camera view is displayed. I am running it on an actual phone, and the phone did ask for permission to use the camera. Below is the code:
override func viewWillAppear(animated: Bool) {
    super.viewWillAppear(animated)
    captureSession = AVCaptureSession()
    captureSession?.sessionPreset = AVCaptureSessionPreset1920x1080
    let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
    var input: AVCaptureDeviceInput?
    let error: NSError?
    do {
        input = try AVCaptureDeviceInput(device: backCamera)
    } catch let error as NSError? {
        print(error)
        if error == nil && (captureSession?.canAddInput(input))! {
            captureSession?.addInput(input)
            videoOutput = AVCaptureVideoDataOutput()
            //videoOutput?.outputSettings = [AVVideoCodecKey : AVVideoCodecKey]
            if ((captureSession?.canAddOutput(videoOutput)) != nil) {
                captureSession?.addOutput(videoOutput)
                previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
                previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
                previewLayer?.connection.videoOrientation = AVCaptureVideoOrientation.Portrait
                cameraView.layer.addSublayer(previewLayer!)
                captureSession?.startRunning()
            }
        }
    }
}
The body of a catch is executed only if an error has occurred, so since your code is inside the catch block, it won't execute when no error is thrown.
So remove the code from the catch block and put it outside, as suggested by #penatheboss.
Don't put code in the catch block; that only runs if something goes wrong.
do {
    input = try AVCaptureDeviceInput(device: backCamera)
} catch let error as NSError? {
    print(error)
    return // Stop the rest of the code
}
if (captureSession?.canAddInput(input))! {
    captureSession?.addInput(input)
    videoOutput = AVCaptureVideoDataOutput()
    //videoOutput?.outputSettings = [AVVideoCodecKey : AVVideoCodecKey]
    if ((captureSession?.canAddOutput(videoOutput)) != nil) {
        captureSession?.addOutput(videoOutput)
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
        previewLayer?.connection.videoOrientation = AVCaptureVideoOrientation.Portrait
        cameraView.layer.addSublayer(previewLayer!)
        captureSession?.startRunning()
    }
}
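One more thing worth checking (an assumption on my part, not from the original answer): the preview layer is added as a sublayer but its frame is never set, so even with the catch-block issue fixed it can end up with a zero-sized frame and nothing visible. Setting it explicitly, for example:

previewLayer?.frame = cameraView.bounds // Size the preview layer to fill the camera view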