I want to save a sublayer, but I can't capture it.
More precisely, I want to save the layer displayed on screen that contains the camera preview. When I try to get it back, I get a white image. I concluded that I am targeting the wrong layer when I render it in the last function.
Here is my code:
@IBOutlet weak var blutEffect: UIVisualEffectView!
@IBOutlet weak var background: UIView! // display view

var previewLayer = AVCaptureVideoPreviewLayer()
var captureSession = AVCaptureSession()

override func viewDidLoad() {
    super.viewDidLoad()
    camera()
}

@IBAction func takePhoto(sender: AnyObject) {
    captureSession.stopRunning()
}
func camera() {
    captureSession = AVCaptureSession()
    previewLayer = AVCaptureVideoPreviewLayer()
    var captureDevice: AVCaptureDevice?
    let devices = AVCaptureDevice.devices()
    captureSession.sessionPreset = AVCaptureSessionPresetHigh
    background.layer.sublayers?.removeAll()
    for device in devices {
        if device.hasMediaType(AVMediaTypeVideo) {
            if device.position == AVCaptureDevicePosition.Back {
                captureDevice = device as? AVCaptureDevice
                if captureDevice != nil {
                    do {
                        try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
                    } catch _ as NSError {
                        print("ERROR")
                    }
                    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
                    self.background.layer.addSublayer(previewLayer)
                    previewLayer.frame = self.background.layer.frame
                    captureSession.startRunning()
                }
            }
        }
    }
}
func screenShotMethod() {
    UIGraphicsBeginImageContext(background.layer.frame.size)
    background.layer.renderInContext(UIGraphicsGetCurrentContext()!)
    let image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    // Save it to the camera roll
    UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil)
}
Thanks for your help!
I have made a custom camera and want to overlay another image over it. I am now using AVKit for the custom camera; I was able to overlay the image when I was using the built-in camera. This is my code for the custom camera. "newImage" is the image that I would like to overlay over the camera.
import UIKit
import AVKit

class liveView: UIViewController, AVCapturePhotoCaptureDelegate {

    @IBOutlet weak var previewView: UIView!
    @IBOutlet weak var captureImageView: UIImageView!

    var captureSession: AVCaptureSession!
    var stillImageOutput: AVCapturePhotoOutput!
    var videoPreviewLayer: AVCaptureVideoPreviewLayer!
    var newImage: UIImage!

    override func viewDidLoad() {
        super.viewDidLoad()
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        captureSession = AVCaptureSession()
        captureSession.sessionPreset = .medium

        guard let backCamera = AVCaptureDevice.default(for: AVMediaType.video) else {
            print("Unable to access back camera!")
            return
        }

        do {
            let input = try AVCaptureDeviceInput(device: backCamera)
            stillImageOutput = AVCapturePhotoOutput()
            if captureSession.canAddInput(input) && captureSession.canAddOutput(stillImageOutput) {
                captureSession.addInput(input)
                captureSession.addOutput(stillImageOutput)
                // videoPreviewLayer?.frame = self.newImage.accessibilityFrame
                setupLivePreview()
            }
        } catch let error {
            print("Error: unable to initialize back camera: \(error.localizedDescription)")
        }
    }

    func setupLivePreview() {
        videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        videoPreviewLayer.videoGravity = .resizeAspect
        videoPreviewLayer.connection?.videoOrientation = .portrait
        previewView.layer.addSublayer(videoPreviewLayer)

        DispatchQueue.global(qos: .userInitiated).async {
            self.captureSession.startRunning()
            DispatchQueue.main.async {
                self.videoPreviewLayer.frame = self.previewView.bounds
            }
        }
    }

    @IBAction func didTakePhoto(_ sender: UIBarButtonItem) {
        let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
        stillImageOutput.capturePhoto(with: settings, delegate: self)
    }

    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        guard let imageData = photo.fileDataRepresentation() else { return }
        let image = UIImage(data: imageData)
        captureImageView.image = image
    }

    override func viewWillDisappear(_ animated: Bool) {
        super.viewWillDisappear(animated)
        self.captureSession.stopRunning()
    }
}
I am working on a project that contains a custom camera view, and I am unable to capture the subview over the main view. My main view hosts an AVCaptureSession, and I want to capture both the superview and the subview in a single image.
What I am trying in code:
class ViewController: UIViewController {

    @IBOutlet weak var cameraButton: UIButton!

    var captureSession = AVCaptureSession()
    var backCamera: AVCaptureDevice?
    var frontCamera: AVCaptureDevice?
    var currentDevice: AVCaptureDevice?
    var photoOutput: AVCapturePhotoOutput?
    var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
    var image: UIImage?

    var toggleCameraGestureRecognizer = UISwipeGestureRecognizer()
    var zoomInGestureRecognizer = UISwipeGestureRecognizer()
    var zoomOutGestureRecognizer = UISwipeGestureRecognizer()

    override func viewDidLoad() {
        super.viewDidLoad()
        setupCaptureSession()
        setupDevice()
        setupInputOutput()
        setupPreviewLayer()
        captureSession.startRunning()

        toggleCameraGestureRecognizer.direction = .up
        toggleCameraGestureRecognizer.addTarget(self, action: #selector(self.switchCamera))
        view.addGestureRecognizer(toggleCameraGestureRecognizer)

        // Zoom In recognizer
        zoomInGestureRecognizer.direction = .right
        zoomInGestureRecognizer.addTarget(self, action: #selector(zoomIn))
        view.addGestureRecognizer(zoomInGestureRecognizer)

        // Zoom Out recognizer
        zoomOutGestureRecognizer.direction = .left
        zoomOutGestureRecognizer.addTarget(self, action: #selector(zoomOut))
        view.addGestureRecognizer(zoomOutGestureRecognizer)

        styleCaptureButton()
    }
    func styleCaptureButton() {
        cameraButton.layer.borderColor = UIColor.white.cgColor
        cameraButton.layer.borderWidth = 5
        cameraButton.clipsToBounds = true
        cameraButton.layer.cornerRadius = min(cameraButton.frame.width, cameraButton.frame.height) / 2
    }
    func setupCaptureSession() {
        captureSession.sessionPreset = AVCaptureSession.Preset.photo
    }

    func setupDevice() {
        let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [AVCaptureDevice.DeviceType.builtInWideAngleCamera], mediaType: AVMediaType.video, position: AVCaptureDevice.Position.unspecified)
        let devices = deviceDiscoverySession.devices
        for device in devices {
            if device.position == AVCaptureDevice.Position.back {
                backCamera = device
            } else if device.position == AVCaptureDevice.Position.front {
                frontCamera = device
            }
        }
        currentDevice = backCamera
    }

    func setupInputOutput() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: currentDevice!)
            captureSession.addInput(captureDeviceInput)
            photoOutput = AVCapturePhotoOutput()
            photoOutput!.setPreparedPhotoSettingsArray([AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])], completionHandler: nil)
            captureSession.addOutput(photoOutput!)
        } catch {
            print(error)
        }
    }

    func setupPreviewLayer() {
        self.cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
        self.cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
        self.cameraPreviewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
        self.cameraPreviewLayer?.frame = view.frame
        self.view.layer.insertSublayer(self.cameraPreviewLayer!, at: 0)
    }

    @IBAction func cameraButton_TouchUpInside(_ sender: Any) {
        let settings = AVCapturePhotoSettings()
        self.photoOutput?.capturePhoto(with: settings, delegate: self)
    }
}
extension ViewController: AVCapturePhotoCaptureDelegate {
    func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
        if let imageData = photo.fileDataRepresentation() {
            self.image = UIImage(data: imageData)
            performSegue(withIdentifier: "Preview_Segue", sender: nil)
        }
    }
}
Please help me.
If I understood correctly, you are trying to get the content (as an image) of what the camera is grabbing, together with some overlay views.
As far as I remember, it is not possible to grab what is inside the AVPreviewLayer; maybe they changed something in the latest version. When I tried (on iOS 6) it wasn't possible: the area covered by the AVPreviewLayer was always empty.
What you can do instead is take the current camera buffer and draw inside it. By setting a class as the video data output's sample buffer delegate you can receive this optional callback:
    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection)
Here you will receive the image from the camera; the buffer can be converted into an image using the Accelerate framework or Core Image.
It is not easy, but also not impossible.
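As a minimal sketch of that approach (the class name FrameGrabber and the latestImage property are illustrative choices of mine, not an established API), converting each CMSampleBuffer to a UIImage with Core Image:

import AVFoundation
import CoreImage
import UIKit

// Hypothetical helper: receives camera frames and keeps the latest one as a UIImage.
class FrameGrabber: NSObject, AVCaptureVideoDataOutputSampleBufferDelegate {

    private let ciContext = CIContext()
    var latestImage: UIImage?

    func captureOutput(_ output: AVCaptureOutput,
                       didOutput sampleBuffer: CMSampleBuffer,
                       from connection: AVCaptureConnection) {
        // Extract the pixel buffer from the sample buffer and render it through Core Image.
        guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
        let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
        guard let cgImage = ciContext.createCGImage(ciImage, from: ciImage.extent) else { return }
        latestImage = UIImage(cgImage: cgImage)
    }
}

You would attach it with dataOutput.setSampleBufferDelegate(grabber, queue: someSerialQueue) on an AVCaptureVideoDataOutput, and then composite your overlay views on top of latestImage (for example with UIGraphicsImageRenderer) to produce the final combined image.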
I'm making an iPhone app with an AVFoundation camera, but the camera preview is not scaling properly.
I think I have done a lot to make it the same size: I changed the video gravity to ResizeAspectFill and I set previewLayer.frame.size to self.view.layer.frame.size.
Why isn't my preview layer stretching over the entire view? Is it something I have typed wrong or just forgotten to type? Thanks!
Image: http://imgur.com/O713SoE
code:
import AVFoundation
import UIKit
import QuartzCore

class View1: UIViewController {

    let captureSession = AVCaptureSession()
    var previewLayer: CALayer!
    var captureDevice: AVCaptureDevice!

    @IBOutlet weak var photoButton: UIButton!
    @IBOutlet weak var cameraView: UIView!

    override func viewDidLoad() {
        super.viewDidLoad()
        photoButton.layer.zPosition = 1
    }

    @IBAction func photoButtonpressed(_ sender: UIButton) {
        let button = sender as UIButton
        if button.tag == 1 {
            print("Photobutton clicked")
        }
    }

    func prepareCamera() {
        captureSession.sessionPreset = AVCaptureSessionPreset1920x1080
        if let availableDevices = AVCaptureDeviceDiscoverySession(deviceTypes: [.builtInWideAngleCamera],
                                                                  mediaType: AVMediaTypeVideo,
                                                                  position: .back).devices {
            captureDevice = availableDevices.first
            beginSession()
        }
    }

    func beginSession() {
        do {
            let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
            captureSession.addInput(captureDeviceInput)
        } catch {
            print(error.localizedDescription)
        }

        if let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) {
            self.previewLayer = previewLayer
            self.view.layer.addSublayer(self.previewLayer)
            self.previewLayer.frame = self.view.layer.frame
            self.previewLayer.bounds = self.view.bounds
            self.previewLayer.contentsGravity = AVLayerVideoGravityResizeAspectFill
            captureSession.startRunning()

            let dataOutput = AVCaptureVideoDataOutput()
            dataOutput.videoSettings = [(kCVPixelBufferPixelFormatTypeKey as NSString): NSNumber(value: kCVPixelFormatType_32BGRA)]
            dataOutput.alwaysDiscardsLateVideoFrames = true
            if captureSession.canAddOutput(dataOutput) {
                captureSession.addOutput(dataOutput)
                captureSession.commitConfiguration()
            }
        }
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        prepareCamera()
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
    }
}
I copied your code and ran it on iOS 10.1.1 (iPhone 6, Xcode 8.2.1) and it works.
How do you load View1? Programmatically? Instantiated from a storyboard? The view of View1 might have a different size than your device's screen.
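One thing worth trying (my suggestion, not part of the original answer) is to re-apply the preview layer's frame once layout has settled, so a storyboard/device size mismatch cannot leave the layer too small. Inside View1:

    // Re-apply the preview layer's frame after Auto Layout / storyboard sizing has run.
    override func viewDidLayoutSubviews() {
        super.viewDidLayoutSubviews()
        previewLayer?.frame = view.bounds
    }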
I am making a custom camera app in which I can capture photos with a takePhoto button, but there is an error in takePhoto saying that AVCapturePhotoOutput has no member 'captureStillImageAsynchronously'.
ViewController
import UIKit
import AVFoundation
import CoreImage

@available(iOS 10.0, *)
class ViewController: UIViewController {

    var captureSession = AVCaptureSession()
    var sessionOutput = AVCapturePhotoOutput()
    var previewLayer = AVCaptureVideoPreviewLayer()

    @IBOutlet weak var imageViewReal: UIImageView!
    @IBOutlet weak var cameraView: UIView!

    override func viewDidLoad() {
        super.viewDidLoad()
    }

    override func viewWillAppear(_ animated: Bool) {
        super.viewWillAppear(animated)
        let deviceSession = AVCaptureDeviceDiscoverySession.init(deviceTypes: [.builtInDuoCamera, .builtInTelephotoCamera, .builtInWideAngleCamera], mediaType: AVMediaTypeVideo, position: .unspecified)
        for device in (deviceSession?.devices)! {
            if (device as AnyObject).position == AVCaptureDevicePosition.front {
                do {
                    let input = try AVCaptureDeviceInput(device: device)
                    if captureSession.canAddInput(input) {
                        captureSession.addInput(input)
                        if captureSession.canAddOutput(sessionOutput) {
                            captureSession.addOutput(sessionOutput)
                            previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
                            previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill
                            previewLayer.connection.videoOrientation = .portrait
                            cameraView.layer.addSublayer(previewLayer)
                            cameraView.addSubview(takePhoto)
                            previewLayer.position = CGPoint(x: self.imageViewReal.frame.width / 2, y: self.imageViewReal.frame.height / 2)
                            previewLayer.bounds = imageViewReal.frame
                            captureSession.startRunning()
                        }
                    }
                } catch {
                    print("Error")
                }
            }
        }
    }

    @IBAction func takePhoto(_ sender: Any) {
        if let videoConnection = sessionOutput.connection(withMediaType: AVMediaTypeVideo) {
            sessionOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { buffer, Error in
                let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(buffer)
                UIImageWriteToSavedPhotosAlbum(UIImage(data: imageData!)!, nil, nil, nil)
            })
        }
    }
}
If your app is compatible with iOS 9, use AVCaptureStillImageOutput's method:

    func captureStillImageAsynchronously(from connection: AVCaptureConnection!,
                                         completionHandler handler: ((CMSampleBuffer?, Error?) -> Void)!)

If your app starts from iOS 10, you should use AVCapturePhotoOutput's method:

    open func capturePhoto(with settings: AVCapturePhotoSettings, delegate: AVCapturePhotoCaptureDelegate)

So var sessionOutput = AVCapturePhotoOutput() should be var sessionOutput = AVCaptureStillImageOutput().
Good luck.
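If you instead stay on the iOS 10+ path and keep AVCapturePhotoOutput, a minimal sketch of the delegate-based flow looks like this. It keeps the question's sessionOutput property; the method name takePhotoWithPhotoOutput is illustrative, and the photoOutput callback shown assumes iOS 11+, where AVCapturePhoto.fileDataRepresentation() is available:

@available(iOS 11.0, *)
extension ViewController: AVCapturePhotoCaptureDelegate {

    // Trigger a capture; the result arrives in the delegate callback below.
    func takePhotoWithPhotoOutput() {
        let settings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
        sessionOutput.capturePhoto(with: settings, delegate: self)
    }

    func photoOutput(_ output: AVCapturePhotoOutput,
                     didFinishProcessingPhoto photo: AVCapturePhoto,
                     error: Error?) {
        guard error == nil,
              let data = photo.fileDataRepresentation(),
              let image = UIImage(data: data) else { return }
        UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil) // save to the camera roll
    }
}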
I am trying to capture an image and set it to the UIImageView; hence, to create the camera I have the following code:
class HomeController: BaseController, UIImagePickerControllerDelegate {

    var detector: AFDXDetector?
    var captureSession: AVCaptureSession?
    var stillImageOutput: AVCapturePhotoOutput?
    var previewLayer: AVCaptureVideoPreviewLayer?
    var camera: AVCaptureDevice!

    @IBOutlet weak var cameraBtn: UIButton!
    @IBOutlet weak var cameraView: UIView!
    @IBOutlet weak var cameraImageView: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        startCamera()
    }

    func startCamera() {
        do {
            captureSession = AVCaptureSession()
            camera = AVCaptureDevice.defaultDevice(withDeviceType: .builtInWideAngleCamera, mediaType: AVMediaTypeVideo, position: .front)
            captureSession?.sessionPreset = AVCaptureSessionPreset1280x720
            let input = try AVCaptureDeviceInput(device: camera)
            if (captureSession?.canAddInput(input))! {
                captureSession?.addInput(input)
                stillImageOutput = AVCapturePhotoOutput()
                if (captureSession?.canAddOutput(stillImageOutput))! {
                    print("output added")
                    captureSession?.canAddOutput(stillImageOutput)
                    previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
                    previewLayer?.videoGravity = AVLayerVideoGravityResizeAspect
                    previewLayer?.connection.videoOrientation = AVCaptureVideoOrientation.portrait
                    cameraView.layer.addSublayer(previewLayer!)
                    captureSession?.startRunning()
                }
            }
        } catch {
        }
    }

    @IBAction func cameraBtnPressed(_ sender: Any) {
        if (stillImageOutput?.connection(withMediaType: AVMediaTypeVideo)) != nil {
            print("video connection detected")
        }
    }
}
For some reason, the print statement "video connection detected" never gets called, although the camera is working.
Does anyone know why?
Within the if statement that checks captureSession?.canAddOutput(stillImageOutput), change the inner captureSession?.canAddOutput(stillImageOutput) call to captureSession?.addOutput(stillImageOutput).
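In other words, the output is only checked and never actually attached to the session, so no video connection is ever created. The relevant fragment of startCamera() with the fix applied:

    stillImageOutput = AVCapturePhotoOutput()
    if (captureSession?.canAddOutput(stillImageOutput))! {
        print("output added")
        captureSession?.addOutput(stillImageOutput) // was canAddOutput, which never attached the output
    }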