Set up camera as the background of a UIView - iOS

I'm trying to set the camera as the background of a UIView in a UIViewController, so that I can draw on top of it.
How can I do that?

UPDATED TO SWIFT 5
You could try something like this:
I add two UIViews to my UIViewController's main view: one called previewView (for the camera) and another called boxView (which sits above the camera view).
import UIKit
import AVFoundation

class ViewController: UIViewController {
    var previewView: UIView!
    var boxView: UIView!
    // Camera capture required properties
    var videoDataOutput: AVCaptureVideoDataOutput!
    var videoDataOutputQueue: DispatchQueue!
    var previewLayer: AVCaptureVideoPreviewLayer!
    var captureDevice: AVCaptureDevice!
    let session = AVCaptureSession()
    var currentFrame: CIImage!
    var done = false

    override func viewDidLoad() {
        super.viewDidLoad()
        previewView = UIView(frame: CGRect(x: 0, y: 0, width: UIScreen.main.bounds.size.width, height: UIScreen.main.bounds.size.height))
        previewView.contentMode = .scaleAspectFit
        view.addSubview(previewView)
        // Add a box view
        boxView = UIView(frame: CGRect(x: 0, y: 0, width: 100, height: 200))
        boxView.backgroundColor = UIColor.green
        boxView.alpha = 0.3
        view.addSubview(boxView)
        self.setupAVCapture()
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        if !done {
            session.startRunning()
        }
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }

    override var shouldAutorotate: Bool {
        if (UIDevice.current.orientation == UIDeviceOrientation.landscapeLeft ||
            UIDevice.current.orientation == UIDeviceOrientation.landscapeRight ||
            UIDevice.current.orientation == UIDeviceOrientation.unknown) {
            return false
        } else {
            return true
        }
    }
}
// AVCaptureVideoDataOutputSampleBufferDelegate protocol and related methods
// AVCaptureVideoDataOutputSampleBufferDelegate protocol and related methods
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func setupAVCapture() {
        session.sessionPreset = AVCaptureSession.Preset.vga640x480
        guard let device = AVCaptureDevice
            .default(AVCaptureDevice.DeviceType.builtInWideAngleCamera,
                     for: .video,
                     position: AVCaptureDevice.Position.front) else {
            return
        }
        captureDevice = device
        beginSession()
        done = true
    }

    func beginSession() {
        var deviceInput: AVCaptureDeviceInput!
        do {
            deviceInput = try AVCaptureDeviceInput(device: captureDevice)
            guard deviceInput != nil else {
                print("error: cant get deviceInput")
                return
            }
            if self.session.canAddInput(deviceInput) {
                self.session.addInput(deviceInput)
            }
            videoDataOutput = AVCaptureVideoDataOutput()
            videoDataOutput.alwaysDiscardsLateVideoFrames = true
            videoDataOutputQueue = DispatchQueue(label: "VideoDataOutputQueue")
            videoDataOutput.setSampleBufferDelegate(self, queue: self.videoDataOutputQueue)
            if session.canAddOutput(self.videoDataOutput) {
                session.addOutput(self.videoDataOutput)
            }
            videoDataOutput.connection(with: AVMediaType.video)?.isEnabled = true
            self.previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
            self.previewLayer.videoGravity = AVLayerVideoGravity.resizeAspect
            let rootLayer: CALayer = self.previewView.layer
            rootLayer.masksToBounds = true
            self.previewLayer.frame = rootLayer.bounds
            rootLayer.addSublayer(self.previewLayer)
            session.startRunning()
        } catch let error as NSError {
            deviceInput = nil
            print("error: \(error.localizedDescription)")
        }
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        currentFrame = self.convertImageFromCMSampleBufferRef(sampleBuffer)
    }

    // clean up AVCapture
    func stopCamera() {
        session.stopRunning()
        done = false
    }

    func convertImageFromCMSampleBufferRef(_ sampleBuffer: CMSampleBuffer) -> CIImage {
        let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
        let ciImage = CIImage(cvImageBuffer: pixelBuffer)
        return ciImage
    }
}
You can replace the boxView's frame with the main view's frame and leave its background color unset. That way you can use this view to add more subviews on top of the camera preview.
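For example, a minimal sketch of that variant (a transparent, full-screen overlay; the names follow the code above):
// Full-screen, transparent overlay for drawing on top of the camera preview
boxView = UIView(frame: view.bounds)
boxView.backgroundColor = .clear
view.addSubview(boxView)
// Add your drawing subviews to boxView here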
IMPORTANT
Remember that in iOS 10 and later you need to ask the user for permission before you can access the camera. You do this by adding the NSCameraUsageDescription key to your app's Info.plist together with a purpose string, because if you fail to declare the usage, your app will crash when it first attempts access.
Here's a screenshot to show the Camera access request
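As a sketch of how you might gate the session on that permission at runtime (this uses the standard AVCaptureDevice authorization API and assumes the setupAVCapture() method from the answer above):
switch AVCaptureDevice.authorizationStatus(for: .video) {
case .authorized:
    setupAVCapture()
case .notDetermined:
    // Ask once; the purpose string from Info.plist is shown in the system alert
    AVCaptureDevice.requestAccess(for: .video) { granted in
        DispatchQueue.main.async {
            if granted { self.setupAVCapture() }
        }
    }
default:
    print("Camera access denied or restricted")
}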
I hope this can help!

Another way: an SCNView is useful for augmented-reality applications.
Create a preview layer with AVFoundation, then add the preview layer as a sublayer of the view.
Create and customize a scene view, then add it as a subview of the view.
Create and customize a scene, and finally assign it to the scene view's scene property.
// 1. Create a preview layer with AVFoundation, then add the preview layer as a sublayer of the view.
self.previewLayer!.frame = view.layer.bounds
view.clipsToBounds = true
view.layer.addSublayer(self.previewLayer!)
// 2. Create and customize a scene view, then add it as a subview of the view.
let sceneView = SCNView()
sceneView.frame = view.bounds
sceneView.backgroundColor = UIColor.clear
view.addSubview(sceneView)
// 3. Create and customize a scene, then assign it to the scene view.
let scene = SCNScene()
sceneView.autoenablesDefaultLighting = true
sceneView.allowsCameraControl = true
let boxGeometry = SCNBox(width: 800, height: 400, length: 1.0, chamferRadius: 1.0)
let yellow = UIColor.yellow
let semi = yellow.withAlphaComponent(0.3)
boxGeometry.firstMaterial?.diffuse.contents = semi
let boxNode = SCNNode(geometry: boxGeometry)
scene.rootNode.addChildNode(boxNode)
sceneView.scene = scene

One easy way of doing this is to add an overlay view to a UIImagePickerController and hide its default controls.
The other way is to use AVFoundation, which gives you far more options and freedom.
The choice depends on your needs.
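For the first option, here's a minimal sketch using UIImagePickerController's cameraOverlayView (the overlay contents are placeholders):
// Hide the default controls and draw your own UI on top of the camera feed
let picker = UIImagePickerController()
picker.sourceType = .camera
picker.showsCameraControls = false
let overlay = UIView(frame: picker.view.bounds)
overlay.backgroundColor = .clear
// Add buttons, labels or drawing views to `overlay` here
picker.cameraOverlayView = overlay
present(picker, animated: true)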

Related

Swift - how to resize a UIView dynamically based on the size of a video player frame

I have a video player with a UIView overlaying it, which I use for gesture recognition in the video area. I'm doing this so you can tap different areas of the video to play, rewind, and trigger other functions.
At the moment this works fine in landscape, but if I rotate the device to portrait, the UIView doesn't resize while the video player does. I tried to add constraints programmatically, but I can't figure out how to access the layout anchors of the video player.
I have a function that retrieves the size of the video frame, which I currently use to set the UIView to the size of the video player.
The code below can be copied straight into a project to play a video and show the UIView I want to adjust. You just need to add a UIView to the UIViewController in the storyboard. The code does not contain the gesture-recognizer part.
import UIKit
import AVFoundation

class ViewController: UIViewController {
    let controlContainerView: UIView = {
        let view = UIView()
        view.backgroundColor = UIColor(white: 0, alpha: 1)
        return view
    }()

    let activityIndicatorView: UIActivityIndicatorView = {
        let aiv = UIActivityIndicatorView(style: .whiteLarge)
        aiv.translatesAutoresizingMaskIntoConstraints = false
        aiv.startAnimating()
        return aiv
    }()

    @IBOutlet weak var videoContainer: UIView!
    var player: AVPlayer?
    var playerLayer: AVPlayerLayer?
    var tapRegionBounds: UIView!
    var tapRegionAreaCreated: Bool = false

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do any additional setup after loading the view.
        let urlString = "https://www.radiantmediaplayer.com/media/bbb-360p.mp4"
        videoPlayer(filename: urlString)
        player?.addObserver(self, forKeyPath: "currentItem.loadedTimeRanges", options: .new, context: nil)
    }

    override func viewDidAppear(_ animated: Bool) {
        self.playerLayer?.frame = CGRect(x: 0, y: 0, width: self.videoContainer.frame.width, height: self.videoContainer.frame.height)
        updateTapRegions()
    }

    func videoPlayer(filename: String) {
        let item = AVPlayerItem(url: URL(string: filename)!)
        player = AVPlayer(playerItem: item)
        playerLayer = AVPlayerLayer(player: player)
        playerLayer?.videoGravity = .resizeAspect
        playerLayer?.frame = CGRect(x: 0, y: 0, width: self.videoContainer.frame.width, height: self.videoContainer.frame.height)
        videoContainer.layer.addSublayer(playerLayer!)
        player?.play()
    }

    override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey : Any]?, context: UnsafeMutableRawPointer?) {
        if keyPath == "currentItem.loadedTimeRanges" {
            activityIndicatorView.stopAnimating()
            controlContainerView.backgroundColor = .clear
        }
    }

    func videoFrame() -> CGRect? {
        guard let layer = playerLayer else { return nil }
        let frame = layer.videoRect
        if frame.height > 0.0 {
            let frameInWindow = layer.convert(frame, to: nil)
            return view.convert(frameInWindow, from: nil)
        } else {
            return nil
        }
    }

    // THE CODE BELOW KIND OF WORKS TO SHOW HOW I WANT IT TO WORK, BUT WITHOUT HAVING TO DELETE THE VIEW EACH TIME
    func updateTapRegions() {
        guard let video = videoFrame() else { return }
        let container = videoContainer.bounds
        if tapRegionAreaCreated == true {
            tapRegionBounds.removeFromSuperview()
            tapRegionAreaCreated = false
        }
        tapRegionBounds = UIView()
        tapRegionBounds.frame = CGRect(x: video.minX, y: video.minY, width: container.width, height: video.height)
        tapRegionBounds?.alpha = 0.5
        tapRegionBounds.backgroundColor = UIColor.red
        if tapRegionAreaCreated == false {
            view.addSubview(tapRegionBounds)
            tapRegionAreaCreated = true
        }
    }

    override func viewWillTransition(to size: CGSize, with coordinator: UIViewControllerTransitionCoordinator) {
        super.viewWillTransition(to: size, with: coordinator)
        coordinator.animate(alongsideTransition: { (context) in
        }) { (context) in
            self.playerLayer?.frame = CGRect(x: 0, y: 0, width: self.videoContainer.frame.width, height: self.videoContainer.frame.height)
            self.updateTapRegions()
        }
    }
}
I tried to update the frame in viewWillLayoutSubviews() but nothing changed either. I know my updateTapRegions function isn't the right way to do this, so if anyone could point me in the right direction that would be great, thanks.
Unfortunately you didn't say in your question how you are resizing the video container view, which all of this depends on. But here's a possibility. I've used Auto Layout to get these results in portrait and landscape.
Now suppose we have a sublayer (the AVPlayerLayer) and a subview (the tap gesture view). The subview can be resized to fit automatically using Auto Layout. That leaves only the player layer; it doesn't resize automatically with Auto Layout, but you can resize it manually in viewDidLayoutSubviews.
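As an illustrative sketch (the constraints are an assumption about the intended layout; the names follow the question's code), pinning the tap view to the video container lets Auto Layout resize it on rotation:
// Let Auto Layout keep the tap region in sync with the video container
tapRegionBounds.translatesAutoresizingMaskIntoConstraints = false
view.addSubview(tapRegionBounds)
NSLayoutConstraint.activate([
    tapRegionBounds.leadingAnchor.constraint(equalTo: videoContainer.leadingAnchor),
    tapRegionBounds.trailingAnchor.constraint(equalTo: videoContainer.trailingAnchor),
    tapRegionBounds.topAnchor.constraint(equalTo: videoContainer.topAnchor),
    tapRegionBounds.bottomAnchor.constraint(equalTo: videoContainer.bottomAnchor)
])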
Finally, I worked it out by moving the code from the transition function (even though the video player resized properly there) to viewDidLayoutSubviews, which then worked fine. I also no longer need to create and remove the view each time the device changes orientation.
override func viewDidLayoutSubviews() {
    super.viewDidLayoutSubviews()
    self.playerLayer?.frame = CGRect(x: 0, y: 0, width: self.videoContainer.frame.width, height: self.videoContainer.frame.height)
    guard let video = videoFrame() else { return }
    if tapRegionBounds != nil {
        self.tapRegionBounds.frame = CGRect(x: video.minX, y: video.minY, width: video.width, height: video.height)
    }
}

How to change UIImageView content mode at runtime?

I have a UIViewController with a UIImageView; when I tap the screen, I want the UIImageView to change its contentMode. But I've found that this doesn't work with some images, mainly those from an AVCaptureSession.
Screenshots:
Aspect fill
Aspect fit
I also found that it works fine when I change the device orientation to landscape and back, but when I tap the screen it stops working again.
Aspect fit after changing orientation to landscape and back (this is how I want it to look every time in aspect fit):
My code:
CameraController:
class CameraController: UIViewController {
    private var captureSession = AVCaptureSession()
    private var captureDevice: AVCaptureDevice!
    private var capturePhotoOutput = AVCapturePhotoOutput()
    private var previewLayer: AVCaptureVideoPreviewLayer!

    override func viewDidLoad() {
        super.viewDidLoad()
        setupCaptureSession()
        setupCaptureDevice()
        setupInputAndOutput()
        setupPreviewLayer()
        startCaptureSession()
        setupLayout()
    }

    private func setupCaptureSession() {
        captureSession.sessionPreset = .photo
    }

    private func setupCaptureDevice() {
        guard let device = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices.first else { return }
        captureDevice = device
    }

    private func setupInputAndOutput() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
            captureSession.addInput(captureDeviceInput)
            let captureSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
            capturePhotoOutput.setPreparedPhotoSettingsArray([captureSettings], completionHandler: nil)
            captureSession.addOutput(capturePhotoOutput)
        } catch {
            print(error.localizedDescription)
        }
    }

    private func setupPreviewLayer() {
        previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer.videoGravity = .resizeAspectFill
        previewLayer.connection?.videoOrientation = .portrait
        previewLayer.frame = view.frame
        view.layer.insertSublayer(previewLayer, at: 0)
    }

    private func startCaptureSession() {
        captureSession.startRunning()
    }

    private func setupLayout() {
        let captureButton = UIButton(frame: CGRect(x: 0, y: 0, width: 44, height: 44))
        captureButton.backgroundColor = .white
        captureButton.layer.cornerRadius = 22
        captureButton.addTarget(self, action: #selector(didPressCaptureButton), for: .touchUpInside)
        captureButton.center.x = view.center.x
        captureButton.center.y = view.frame.height - 50
        view.addSubview(captureButton)
    }

    @objc private func didPressCaptureButton() {
        let settings = AVCapturePhotoSettings()
        capturePhotoOutput.capturePhoto(with: settings, delegate: self)
    }
}

extension CameraController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        guard let imageData = photo.fileDataRepresentation() else { return }
        guard let image = UIImage(data: imageData) else { return }
        print("Image size: ", image.size)
        let previewController = PreviewController()
        previewController.image = image
        present(previewController, animated: true, completion: {
            self.captureSession.stopRunning()
        })
    }
}
PreviewController:
class PreviewController: UIViewController {
    var imageView: UIImageView!
    var image: UIImage!

    override func viewDidLoad() {
        super.viewDidLoad()
        setupImageView()
    }

    func setupImageView() {
        imageView = UIImageView(image: image)
        imageView.contentMode = .scaleAspectFill
        view.addSubview(imageView)
        imageView.addConstraintsToFillSuperview()
    }

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
        let contentMode: UIView.ContentMode = imageView.contentMode == .scaleAspectFill ? .scaleAspectFit : .scaleAspectFill
        imageView.contentMode = contentMode
    }
}
What am I doing wrong here?
Thank You!
You can change the image view's contentMode at runtime with:
self.imageView.contentMode = newContentMode
self.imageView.setNeedsDisplay()
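Applied to the PreviewController from the question, the tap handler would look something like this (a sketch based on the code above):
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    // Toggle between aspect fill and aspect fit, then force a redraw
    imageView.contentMode = imageView.contentMode == .scaleAspectFill ? .scaleAspectFit : .scaleAspectFill
    imageView.setNeedsDisplay()
}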

How to rotate an AVCaptureVideoPreviewLayer by 90 degrees to correct it in iOS 11?

I have an AVCaptureVideoPreviewLayer which is set to capture the live preview of a session using the front camera feed. However, the output is rotated by 90 degrees, and I'm trying to align the layer with the rest of my app, which is in landscape orientation. Is there a way to rotate it by 90 degrees to correct it?
Here is my code so far:
override func viewDidLoad() {
    super.viewDidLoad()
    let captureSession = AVCaptureSession()
    guard let captureDevice = getDevice(position: .front) else { return }
    guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
    captureSession.addInput(input)
    captureSession.startRunning()
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    view.layer.addSublayer(previewLayer)
    previewLayer.frame = view.frame
    previewLayer.videoGravity = .resizeAspectFill
    previewLayer.connection?.automaticallyAdjustsVideoMirroring = false
    previewLayer.connection?.isVideoMirrored = false
}

func getDevice(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    let devices = AVCaptureDevice.devices() as NSArray
    for de in devices {
        let deviceConverted = de as! AVCaptureDevice
        if deviceConverted.position == position {
            return deviceConverted
        }
    }
    return nil
}
You can add previewLayer to another UIView and rotate that view based on the device orientation, like so:
private var captureView: UIView!

override func viewDidLoad() {
    super.viewDidLoad()
    self.captureView = UIView(frame: self.view.bounds)
    self.captureView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
    self.view.addSubview(captureView)
    // ...
    captureView.layer.addSublayer(previewLayer)
    // ...
    self.rotateView(animated: false)
}

// Rotating captureView based on device orientation
func rotateView(animated: Bool) {
    var transform = CGAffineTransform.identity
    switch UIDevice.current.orientation {
    case .portraitUpsideDown:
        transform = CGAffineTransform(rotationAngle: CGFloat.pi)
    case .landscapeLeft:
        transform = CGAffineTransform(rotationAngle: CGFloat.pi / 2)
    case .landscapeRight:
        transform = CGAffineTransform(rotationAngle: -CGFloat.pi / 2)
    default:
        break
    }
    guard animated else {
        self.captureView.transform = transform
        return
    }
    UIView.animate(withDuration: 0.5, animations: { [weak self] in
        self?.captureView.transform = transform
    })
}
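Alternatively (this is a standard AVFoundation property, not part of the answer above), you can often correct the rotation on the preview layer's connection itself:
// Match the preview orientation to the app's landscape UI
if let connection = previewLayer.connection,
   connection.isVideoOrientationSupported {
    connection.videoOrientation = .landscapeRight
}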

Taking a snapshot of AVCaptureVideoPreviewLayer's view

I'm using WebRTC to build a video chat between two users. I want to take a snapshot of the localView view, which shows one of the participants.
This is my class with the configureLocalPreview method, which connects the video streams to the UIViews:
@IBOutlet var remoteView: RTCEAGLVideoView!
@IBOutlet var localView: UIView!
var captureSession: AVCaptureSession?
var videoSource: RTCAVFoundationVideoSource?
var videoTrack: RTCVideoTrack?
var previewLayer: AVCaptureVideoPreviewLayer!

func configureLocalPreview() {
    self.videoTrack = self.signaling.localMediaStream.self.videoTracks.first as! RTCVideoTrack?
    self.videoSource = (self.videoTrack?.source as? RTCAVFoundationVideoSource)
    self.captureSession = self.videoSource?.self.captureSession
    self.previewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession!)
    self.previewLayer.frame = self.localView.bounds
    self.localView.layer.addSublayer(self.previewLayer)
    self.localView.isUserInteractionEnabled = true
    //self.localView.layer.position = CGPointMake(100, 100);
}
At the place I want to access the snapshot, I call:
self.localView.pb_takeSnapshot()
pb_takeSnapshot comes from a UIView extension which I found in another post. It's defined like this:
extension UIView {
    func pb_takeSnapshot() -> UIImage {
        UIGraphicsBeginImageContextWithOptions(bounds.size, false, UIScreen.main.scale)
        drawHierarchy(in: self.bounds, afterScreenUpdates: true)
        let image = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return image
    }
}
When I look at the image in the Xcode debugger, it's completely green, and the person I can see on the iPhone screen (inside that view) isn't there:
What could be the reason the person isn't visible? Is it somehow just not possible to take a snapshot of a stream? Thank you for taking a look!
You should create the localView using RTCEAGLVideoView instead of UIView. I'm using the same for my localView and am able to take the snapshot using the same code snippet as in your post.
Below is sample code that will start your camera and show the local preview:
class ViewController: UIViewController, RTCEAGLVideoViewDelegate {
    var captureSession: AVCaptureSession?
    var previewLayer: AVCaptureVideoPreviewLayer?
    var peerConnectionFactory: RTCPeerConnectionFactory!
    var videoSource: RTCAVFoundationVideoSource!
    var localTrack: RTCVideoTrack!
    @IBOutlet var myView: UIView!

    override func viewDidLoad() {
        super.viewDidLoad()
        /*myView = UIView(frame: CGRect(x: 0,
                                        y: 0,
                                        width: UIScreen.main.bounds.size.width,
                                        height: UIScreen.main.bounds.size.height))*/
        startCamera()
        // Do any additional setup after loading the view, typically from a nib.
    }

    fileprivate func startCamera() {
        peerConnectionFactory = RTCPeerConnectionFactory()
        RTCInitializeSSL()
        RTCSetupInternalTracer()
        RTCSetMinDebugLogLevel(RTCLoggingSeverity.info)
        videoSource = peerConnectionFactory.avFoundationVideoSource(with: nil)
        localTrack = peerConnectionFactory.videoTrack(with: videoSource, trackId: "ARDAMSv0")
        let localScaleX = CGFloat(1.0)
        let localView: RTCEAGLVideoView = RTCEAGLVideoView(frame: self.view.bounds)
        self.view.insertSubview(localView, at: 1)
        localView.frame = self.view.bounds
        localView.transform = CGAffineTransform(scaleX: localScaleX, y: 1)
        localTrack.add(localView)
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    override func viewDidAppear(_ animated: Bool) {
        //previewLayer?.frame.size = myView.frame.size
    }

    func videoView(_ videoView: RTCEAGLVideoView, didChangeVideoSize size: CGSize) {
        print("Inside didChangeVideoSize")
    }
}
Because AVCaptureVideoPreviewLayer is implemented as an OpenGL layer, you can't use a regular Core Graphics context to snapshot it. I suggest trying to access the raw frame data instead.
Add an AVCaptureVideoDataOutput with a delegate:
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
let captureVideoOutput = AVCaptureVideoDataOutput()
captureVideoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
captureSession?.addOutput(captureVideoOutput)
previewLayer.frame = localView.bounds
Conform your controller (or whatever object you choose) to AVCaptureVideoDataOutputSampleBufferDelegate.
Declare a shouldCaptureFrame variable and set it whenever you need to take a picture:
var shouldCaptureFrame: Bool = false
...
func takeSnapshot() {
    shouldCaptureFrame = true
}
And implement the sample buffer delegate method:
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    if !shouldCaptureFrame {
        return
    }
    let image = UIImage.from(sampleBuffer: sampleBuffer)
    // use `image` here, e.g. save or display it
    shouldCaptureFrame = false
}
Finally, the extension with from(sampleBuffer:) function:
extension UIImage {
    static func from(sampleBuffer: CMSampleBuffer) -> UIImage? {
        guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
            return nil
        }
        CVPixelBufferLockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
        let baseAddress = CVPixelBufferGetBaseAddress(imageBuffer)
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        // 32-bit BGRA buffers need alpha info alongside the byte order,
        // otherwise CGContext creation fails and this returns nil
        let context = CGContext(
            data: baseAddress,
            width: CVPixelBufferGetWidth(imageBuffer),
            height: CVPixelBufferGetHeight(imageBuffer),
            bitsPerComponent: 8,
            bytesPerRow: CVPixelBufferGetBytesPerRow(imageBuffer),
            space: colorSpace,
            bitmapInfo: CGBitmapInfo.byteOrder32Little.rawValue | CGImageAlphaInfo.premultipliedFirst.rawValue
        )
        let quartzImage = context?.makeImage()
        CVPixelBufferUnlockBaseAddress(imageBuffer, CVPixelBufferLockFlags(rawValue: 0))
        if let quartzImage = quartzImage {
            let image = UIImage(cgImage: quartzImage)
            return image
        }
        return nil
    }
}
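Note that the conversion above assumes the pixel buffer is 32-bit BGRA. If you go this route, it's worth requesting that format explicitly on the data output created earlier (a sketch):
// Ask the data output for BGRA frames so the CGContext matches the buffer layout
captureVideoOutput.videoSettings = [
    kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA
]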
For the WebRTC video layer you should use RTCEAGLVideoView for the views. For more details, take a look at the WebRTC sample application: AppRTC App.

How to Add Live Camera Preview to UIView

I've run into a problem I'm trying to solve within a UIView boundary: is there any way to add a camera preview to a UIView, and to add other content (buttons, labels, etc.) on top of that UIView?
I tried using the AVFoundation framework, but there isn't enough documentation for Swift.
UPDATED TO SWIFT 5
You can try something like this:
import UIKit
import AVFoundation

class ViewController: UIViewController {
    var previewView: UIView!
    var boxView: UIView!
    let myButton: UIButton = UIButton()
    // Camera capture required properties
    var videoDataOutput: AVCaptureVideoDataOutput!
    var videoDataOutputQueue: DispatchQueue!
    var previewLayer: AVCaptureVideoPreviewLayer!
    var captureDevice: AVCaptureDevice!
    let session = AVCaptureSession()

    override func viewDidLoad() {
        super.viewDidLoad()
        previewView = UIView(frame: CGRect(x: 0,
                                           y: 0,
                                           width: UIScreen.main.bounds.size.width,
                                           height: UIScreen.main.bounds.size.height))
        previewView.contentMode = UIView.ContentMode.scaleAspectFit
        view.addSubview(previewView)
        // Add a view on top of the camera's view
        boxView = UIView(frame: self.view.frame)
        myButton.frame = CGRect(x: 0, y: 0, width: 200, height: 40)
        myButton.backgroundColor = UIColor.red
        myButton.layer.masksToBounds = true
        myButton.setTitle("press me", for: .normal)
        myButton.setTitleColor(UIColor.white, for: .normal)
        myButton.layer.cornerRadius = 20.0
        myButton.layer.position = CGPoint(x: self.view.frame.width/2, y: 200)
        myButton.addTarget(self, action: #selector(self.onClickMyButton(sender:)), for: .touchUpInside)
        view.addSubview(boxView)
        view.addSubview(myButton)
        self.setupAVCapture()
    }

    override var shouldAutorotate: Bool {
        if (UIDevice.current.orientation == UIDeviceOrientation.landscapeLeft ||
            UIDevice.current.orientation == UIDeviceOrientation.landscapeRight ||
            UIDevice.current.orientation == UIDeviceOrientation.unknown) {
            return false
        } else {
            return true
        }
    }

    @objc func onClickMyButton(sender: UIButton) {
        print("button pressed")
    }
}
// AVCaptureVideoDataOutputSampleBufferDelegate protocol and related methods
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
    func setupAVCapture() {
        session.sessionPreset = AVCaptureSession.Preset.vga640x480
        guard let device = AVCaptureDevice
            .default(AVCaptureDevice.DeviceType.builtInWideAngleCamera,
                     for: .video,
                     position: AVCaptureDevice.Position.back) else {
            return
        }
        captureDevice = device
        beginSession()
    }

    func beginSession() {
        var deviceInput: AVCaptureDeviceInput!
        do {
            deviceInput = try AVCaptureDeviceInput(device: captureDevice)
            guard deviceInput != nil else {
                print("error: cant get deviceInput")
                return
            }
            if self.session.canAddInput(deviceInput) {
                self.session.addInput(deviceInput)
            }
            videoDataOutput = AVCaptureVideoDataOutput()
            videoDataOutput.alwaysDiscardsLateVideoFrames = true
            videoDataOutputQueue = DispatchQueue(label: "VideoDataOutputQueue")
            videoDataOutput.setSampleBufferDelegate(self, queue: self.videoDataOutputQueue)
            if session.canAddOutput(self.videoDataOutput) {
                session.addOutput(self.videoDataOutput)
            }
            videoDataOutput.connection(with: .video)?.isEnabled = true
            previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
            previewLayer.videoGravity = AVLayerVideoGravity.resizeAspect
            let rootLayer: CALayer = self.previewView.layer
            rootLayer.masksToBounds = true
            previewLayer.frame = rootLayer.bounds
            rootLayer.addSublayer(self.previewLayer)
            session.startRunning()
        } catch let error as NSError {
            deviceInput = nil
            print("error: \(error.localizedDescription)")
        }
    }

    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        // do stuff here
    }

    // clean up AVCapture
    func stopCamera() {
        session.stopRunning()
    }
}
Here I use a UIView called previewView to start the camera, then add a new UIView called boxView above previewView, along with a UIButton.
IMPORTANT
Remember that in iOS 10 and later you need to ask the user for permission before you can access the camera. You do this by adding the NSCameraUsageDescription key to your app's Info.plist together with a purpose string, because if you fail to declare the usage, your app will crash when it first attempts access.
Here's a screenshot to show the Camera access request
Swift 4
Condensed version of mauricioconde's solution.
You can use this as a drop-in component:
//
//  CameraView.swift

import Foundation
import AVFoundation
import UIKit

final class CameraView: UIView {
    private lazy var videoDataOutput: AVCaptureVideoDataOutput = {
        let v = AVCaptureVideoDataOutput()
        v.alwaysDiscardsLateVideoFrames = true
        v.setSampleBufferDelegate(self, queue: videoDataOutputQueue)
        v.connection(with: .video)?.isEnabled = true
        return v
    }()

    private let videoDataOutputQueue = DispatchQueue(label: "JKVideoDataOutputQueue")

    private lazy var previewLayer: AVCaptureVideoPreviewLayer = {
        let l = AVCaptureVideoPreviewLayer(session: session)
        l.videoGravity = .resizeAspect
        return l
    }()

    private let captureDevice: AVCaptureDevice? = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)

    private lazy var session: AVCaptureSession = {
        let s = AVCaptureSession()
        s.sessionPreset = .vga640x480
        return s
    }()

    override init(frame: CGRect) {
        super.init(frame: frame)
        commonInit()
    }

    required init?(coder aDecoder: NSCoder) {
        super.init(coder: aDecoder)
        commonInit()
    }

    private func commonInit() {
        contentMode = .scaleAspectFit
        beginSession()
    }

    private func beginSession() {
        do {
            guard let captureDevice = captureDevice else {
                fatalError("Camera doesn't work on the simulator! You have to test this on an actual device!")
            }
            let deviceInput = try AVCaptureDeviceInput(device: captureDevice)
            if session.canAddInput(deviceInput) {
                session.addInput(deviceInput)
            }
            if session.canAddOutput(videoDataOutput) {
                session.addOutput(videoDataOutput)
            }
            layer.masksToBounds = true
            layer.addSublayer(previewLayer)
            previewLayer.frame = bounds
            session.startRunning()
        } catch let error {
            debugPrint("\(self.self): \(#function) line: \(#line). \(error.localizedDescription)")
        }
    }

    override func layoutSubviews() {
        super.layoutSubviews()
        previewLayer.frame = bounds
    }
}

extension CameraView: AVCaptureVideoDataOutputSampleBufferDelegate {}
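A quick usage sketch for the component above (the frame is just an example):
// Drop the camera view into any view controller
let cameraView = CameraView(frame: view.bounds)
cameraView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
view.addSubview(cameraView)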
iOS 13/14 and Swift 5.3:
private var imageVC: UIImagePickerController?
and then call showCameraVC() when you want to show the camera view
func showCameraVC() {
    self.imageVC = UIImagePickerController()
    if UIImagePickerController.isCameraDeviceAvailable(.front) {
        self.imageVC?.sourceType = .camera
        self.imageVC?.cameraDevice = .front
        self.imageVC?.showsCameraControls = false

        let screenSize = UIScreen.main.bounds.size
        let cameraAspectRatio = CGFloat(4.0 / 3.0)
        let cameraImageHeight = screenSize.width * cameraAspectRatio
        let scale = screenSize.height / cameraImageHeight
        self.imageVC?.cameraViewTransform = CGAffineTransform(translationX: 0, y: (screenSize.height - cameraImageHeight) / 2)
        self.imageVC?.cameraViewTransform = self.imageVC!.cameraViewTransform.scaledBy(x: scale, y: scale)

        self.imageVC?.view.frame = CGRect(x: 0, y: 0, width: screenSize.width, height: screenSize.height)
        self.view.addSubview(self.imageVC!.view)
        self.view.sendSubviewToBack(self.imageVC!.view)
    }
}
The camera view will also be fullscreen (the other answers wouldn't fix a letterboxed view).
Swift 3:
@IBOutlet weak var cameraContainerView: UIView!
var imagePickers: UIImagePickerController?
In viewDidLoad:
override func viewDidLoad() {
    super.viewDidLoad()
    addImagePickerToContainerView()
}
Add Camera Preview to the container view:
func addImagePickerToContainerView() {
    imagePickers = UIImagePickerController()
    if UIImagePickerController.isCameraDeviceAvailable(UIImagePickerControllerCameraDevice.front) {
        imagePickers?.delegate = self
        imagePickers?.sourceType = UIImagePickerControllerSourceType.camera
        // add as a child view controller
        addChildViewController(imagePickers!)
        // add the child's view as a subview
        self.cameraContainerView.addSubview((imagePickers?.view)!)
        imagePickers?.view.frame = cameraContainerView.bounds
        imagePickers?.allowsEditing = false
        imagePickers?.showsCameraControls = false
        imagePickers?.view.autoresizingMask = [.flexibleWidth, .flexibleHeight]
    }
}
On custom button action:
@IBAction func cameraButtonPressed(_ sender: Any) {
    if UIImagePickerController.isSourceTypeAvailable(.camera) {
        imagePickers?.takePicture()
    } else {
        // Camera not available.
    }
}
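To actually receive the photo from takePicture(), implement the standard UIImagePickerControllerDelegate callback; a sketch in current Swift (the Swift 3 spelling of the info keys differs slightly):
func imagePickerController(_ picker: UIImagePickerController,
                           didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
    // takePicture() delivers the captured photo here
    if let image = info[.originalImage] as? UIImage {
        print("Captured image size: \(image.size)")
    }
}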
Swift 5, the easy way:
import UIKit
import AVFoundation

class ViewController: UIViewController, UINavigationControllerDelegate, UIImagePickerControllerDelegate {
    // Camera capture required properties
    var imagePickers: UIImagePickerController?
    @IBOutlet weak var customCameraView: UIView!

    override func viewDidLoad() {
        super.viewDidLoad()
        addCameraInView()
    }

    func addCameraInView() {
        imagePickers = UIImagePickerController()
        if UIImagePickerController.isCameraDeviceAvailable(UIImagePickerController.CameraDevice.rear) {
            imagePickers?.delegate = self
            imagePickers?.sourceType = UIImagePickerController.SourceType.camera
            // add as a child view controller
            addChild(imagePickers!)
            // add the child's view as a subview
            self.customCameraView.addSubview((imagePickers?.view)!)
            imagePickers?.view.frame = customCameraView.bounds
            imagePickers?.allowsEditing = false
            imagePickers?.showsCameraControls = false
            imagePickers?.view.autoresizingMask = [.flexibleWidth, .flexibleHeight]
        }
    }

    @IBAction func cameraButtonPressed(_ sender: Any) {
        if UIImagePickerController.isSourceTypeAvailable(.camera) {
            imagePickers?.takePicture()
        } else {
            // Camera not available.
        }
    }
}
