Add pinch zoom gesture to camera preview layer, iOS, Swift

I am trying to add pinch-to-zoom to a camera preview layer that is added programmatically. The function below is all I have, pieced together from tips; I don't have any other code relating to it. I cannot find any more information on this — everything I find seems to focus on still images.
override func viewDidLoad() {
    super.viewDidLoad()

    let pinchRecognizer = UIPinchGestureRecognizer(target: self, action: #selector(pinch(_:)))
    pinchRecognizer.delegate = self
    self.cameraPreviewlayer.addGestureRecognizer(pinchRecognizer)
}
I get an error on this line:

    self.cameraPreviewlayer.addGestureRecognizer(pinchRecognizer)

It says that cameraPreviewlayer does not have addGestureRecognizer.
Here is the function.
@objc func pinch(_ pinch: UIPinchGestureRecognizer) {
    let device = videoDeviceInput.device

    // Return zoom value between the minimum and maximum zoom values
    func minMaxZoom(_ factor: CGFloat) -> CGFloat {
        return min(min(max(factor, minimumZoom), maximumZoom), device.activeFormat.videoMaxZoomFactor)
    }

    func update(scale factor: CGFloat) {
        do {
            try device.lockForConfiguration()
            defer { device.unlockForConfiguration() }
            device.videoZoomFactor = factor
        } catch {
            print("\(error.localizedDescription)")
        }
    }

    let newScaleFactor = minMaxZoom(pinch.scale * lastZoomFactor)

    switch pinch.state {
    case .began: fallthrough
    case .changed: update(scale: newScaleFactor)
    case .ended:
        lastZoomFactor = minMaxZoom(newScaleFactor)
        update(scale: lastZoomFactor)
    default: break
    }
}
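The handler above references minimumZoom, maximumZoom, lastZoomFactor and videoDeviceInput, none of which are declared in the snippet. Presumably something like the following backs them (the values here are illustrative; the related Snapchat-zoom question further down declares the same names):

    // Assumed supporting state for the pinch handler (values illustrative)
    let minimumZoom: CGFloat = 1.0
    let maximumZoom: CGFloat = 15.0
    var lastZoomFactor: CGFloat = 1.0
    var videoDeviceInput: AVCaptureDeviceInput!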

I was able to fix this. All I had to do was move the

    let pinchRecognizer = UIPinchGestureRecognizer(target: self, action: #selector(pinch(_:)))
    pinchRecognizer.delegate = self
    self.cameraPreviewlayer.addGestureRecognizer(pinchRecognizer)

from viewDidLoad to where I set up the camera preview layer, and attach the recognizer to the view instead of the layer (gesture recognizers can only be added to a UIView, not to a CALayer such as AVCaptureVideoPreviewLayer):
cameraPreviewlayer = AVCaptureVideoPreviewLayer(session: captureSession)
cameraPreviewlayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewlayer?.connection?.videoOrientation = AVCaptureVideoOrientation.portrait
cameraPreviewlayer?.frame = self.view.frame
// scanArea.setRegionOfInterestWithProposedRegionOfInterest(regionOfInterest)
self.view.layer.insertSublayer(cameraPreviewlayer!, at: 0)

let pinchRecognizer = UIPinchGestureRecognizer(target: self, action: #selector(pinch(_:)))
pinchRecognizer.delegate = self
self.view.addGestureRecognizer(pinchRecognizer)
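For reference, an equivalent variant of the handler that avoids tracking lastZoomFactor: resetting pinch.scale after every change makes the reported scale relative to the current zoom, so no bookkeeping is needed on .ended. This is a sketch, not the asker's exact code; videoDeviceInput is assumed as in the question:

    @objc func pinch(_ pinch: UIPinchGestureRecognizer) {
        guard let device = videoDeviceInput?.device else { return }
        let newFactor = min(max(device.videoZoomFactor * pinch.scale, 1.0),
                            device.activeFormat.videoMaxZoomFactor)
        pinch.scale = 1.0 // reset so the next event's scale is relative to the zoom we just set
        do {
            try device.lockForConfiguration()
            device.videoZoomFactor = newFactor
            device.unlockForConfiguration()
        } catch {
            print(error.localizedDescription)
        }
    }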

Related

Pan gesture (hold/drag) zoom on camera like Snapchat

I am trying to replicate the Snapchat camera's zoom feature, where once you have started recording you can drag your finger up or down and it will zoom in or out accordingly. I have been successful with zooming on pinch, but have been stuck on zooming with the UIPanGestureRecognizer.
Here is the code I've tried. The problem is that I do not know what to replace sender.scale with — that is what I use for pinch-gesture zooming. I'm using AVFoundation. Basically, I'm asking how I can properly do the hold zoom (one-finger drag) like in TikTok or Snapchat.
let minimumZoom: CGFloat = 1.0
let maximumZoom: CGFloat = 15.0
var lastZoomFactor: CGFloat = 1.0
var latestDirection: Int = 0

@objc func panGesture(_ sender: UIPanGestureRecognizer) {
    let velocity = sender.velocity(in: doubleTapSwitchCamButton)
    var currentDirection: Int = 0

    if velocity.y != 0 {
        let videoDeviceDiscoverySession = AVCaptureDevice.DiscoverySession(
            deviceTypes: [.builtInWideAngleCamera, .builtInDuoCamera],
            mediaType: AVMediaType.video,
            position: .unspecified)
        guard let device = videoDeviceDiscoverySession.devices.first else { return }

        // Return zoom value between the minimum and maximum zoom values
        func minMaxZoom(_ factor: CGFloat) -> CGFloat {
            return min(min(max(factor, minimumZoom), maximumZoom), device.activeFormat.videoMaxZoomFactor)
        }

        func update(scale factor: CGFloat) {
            do {
                try device.lockForConfiguration()
                defer { device.unlockForConfiguration() }
                device.videoZoomFactor = factor
            } catch {
                print("\(error.localizedDescription)")
            }
        }

        // The two lines below are the problematic ones: pinch zoom uses the first one,
        // but a pan recognizer has no 'scale'; the commented attempt under it did not work either.
        let newScaleFactor = minMaxZoom(sender.scale * lastZoomFactor)
        //let newScaleFactor = CGFloat(exactly: number + lastZoomFactor)

        switch sender.state {
        case .began: fallthrough
        case .changed: update(scale: newScaleFactor)
        case .ended:
            lastZoomFactor = minMaxZoom(newScaleFactor)
            update(scale: lastZoomFactor)
        default: break
        }
    }

    latestDirection = currentDirection
}
You can use the gesture recogniser's translation property to calculate the displacement in points, and normalise this displacement into a zoom factor.
Fitting this into your code, you could try:
// ... somewhere in your view setup code, i.e. viewDidLoad ...
let panGestureRecognizer = UIPanGestureRecognizer(target: self, action: #selector(panGesture))
button.addGestureRecognizer(panGestureRecognizer)

private var initialZoom: CGFloat = 1.0

@objc func panGesture(_ sender: UIPanGestureRecognizer) {
    // note that 'view' here is the overall video preview
    let velocity = sender.velocity(in: view)

    if velocity.y != 0 {
        let videoDeviceDiscoverySession = AVCaptureDevice.DiscoverySession(
            deviceTypes: [.builtInWideAngleCamera, .builtInDuoCamera],
            mediaType: AVMediaType.video,
            position: .unspecified)
        guard let device = videoDeviceDiscoverySession.devices.first else { return }

        let minimumZoomFactor: CGFloat = 1.0
        let maximumZoomFactor: CGFloat = min(device.activeFormat.videoMaxZoomFactor, 10.0) // artificially set a max usable zoom of 10x

        // clamp a zoom factor between minimumZoomFactor and maximumZoomFactor
        func clampZoomFactor(_ factor: CGFloat) -> CGFloat {
            return min(max(factor, minimumZoomFactor), maximumZoomFactor)
        }

        func update(scale factor: CGFloat) {
            do {
                try device.lockForConfiguration()
                defer { device.unlockForConfiguration() }
                device.videoZoomFactor = factor
            } catch {
                print("\(error.localizedDescription)")
            }
        }

        switch sender.state {
        case .began:
            initialZoom = device.videoZoomFactor
            startRecording() /// call to start recording your video
        case .changed:
            // distance in points for the full zoom range (e.g. min to max), could be view.frame.height
            let fullRangeDistancePoints: CGFloat = 300.0
            // extract current distance travelled, from gesture start
            let currentYTranslation: CGFloat = sender.translation(in: view).y
            // calculate a normalized zoom factor between [-1,1], where up is positive (i.e. zooming in)
            let normalizedZoomFactor = -1 * max(-1, min(1, currentYTranslation / fullRangeDistancePoints))
            // calculate the effective zoom scale to use
            let newZoomFactor = clampZoomFactor(initialZoom + normalizedZoomFactor * (maximumZoomFactor - minimumZoomFactor))
            // update the device's zoom factor
            update(scale: newZoomFactor)
        case .ended, .cancelled:
            stopRecording() /// call to stop recording your video
        default:
            break
        }
    }
}
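A smoother alternative worth knowing about: AVCaptureDevice can animate zoom changes itself via ramp(toVideoZoomFactor:withRate:), which maps naturally onto the pan's velocity for the press-and-drag effect. A minimal sketch, assuming activeVideoDevice is however you track the current camera (that name is not from the answer above):

    @objc func panZoom(_ sender: UIPanGestureRecognizer) {
        guard let device = activeVideoDevice else { return } // assumed property
        switch sender.state {
        case .changed:
            let velocity = sender.velocity(in: view)
            // finger moving up (negative y velocity) ramps toward max zoom, down toward 1x
            let target: CGFloat = velocity.y < 0 ? min(device.activeFormat.videoMaxZoomFactor, 10.0) : 1.0
            let rate = Float(min(abs(velocity.y) / 300.0, 4.0)) // faster drag, faster ramp
            do {
                try device.lockForConfiguration()
                device.ramp(toVideoZoomFactor: target, withRate: rate)
                device.unlockForConfiguration()
            } catch { print(error.localizedDescription) }
        case .ended, .cancelled:
            do {
                try device.lockForConfiguration()
                device.cancelVideoZoomRamp() // freeze at the current zoom
                device.unlockForConfiguration()
            } catch { print(error.localizedDescription) }
        default:
            break
        }
    }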

How to rotate an AVCaptureVideoPreviewLayer by 90 degrees to correct it in iOS 11?

I have an AVCaptureVideoPreviewLayer which is set to capture the live preview of a session using the front camera feed. However, the output is rotated by 90 degrees, and I am trying to get the layer to be aligned to the rest of my app, which is in Landscape orientation. Is there a way to rotate it by 90 degrees in order to correct it?
Here is my code so far:
override func viewDidLoad() {
    super.viewDidLoad()
    let captureSession = AVCaptureSession()
    guard let captureDevice = getDevice(position: .front) else { return }
    guard let input = try? AVCaptureDeviceInput(device: captureDevice) else { return }
    captureSession.addInput(input)
    captureSession.startRunning()
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    view.layer.addSublayer(previewLayer)
    previewLayer.frame = view.frame
    previewLayer.videoGravity = .resizeAspectFill
    previewLayer.connection?.automaticallyAdjustsVideoMirroring = false
    previewLayer.connection?.isVideoMirrored = false
}

func getDevice(position: AVCaptureDevice.Position) -> AVCaptureDevice? {
    let devices = AVCaptureDevice.devices() as NSArray
    for de in devices {
        let deviceConverted = de as! AVCaptureDevice
        if deviceConverted.position == position {
            return deviceConverted
        }
    }
    return nil
}
You can add previewLayer to another UIView and try rotating it like so:
private var captureView: UIView!

override func viewDidLoad() {
    super.viewDidLoad()
    self.captureView = UIView(frame: self.view.bounds)
    self.captureView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
    self.view.addSubview(captureView)
    // ...
    captureView.layer.addSublayer(previewLayer)
    // ...
    self.rotateView(animated: false)
}

// Rotating captureView based on device orientation
func rotateView(animated: Bool) {
    var transform: CGAffineTransform = .identity
    switch UIDevice.current.orientation {
    case .portraitUpsideDown:
        transform = CGAffineTransform(rotationAngle: CGFloat.pi)
    case .landscapeLeft:
        transform = CGAffineTransform(rotationAngle: CGFloat.pi / 2)
    case .landscapeRight:
        transform = CGAffineTransform(rotationAngle: -CGFloat.pi / 2)
    default:
        break
    }
    guard animated else {
        self.captureView.transform = transform
        return
    }
    UIView.animate(withDuration: 0.5, animations: { [weak self] in
        self?.captureView.transform = transform
    })
}
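Depending on what causes the rotation, it may also be worth setting the preview connection's orientation directly instead of transforming the hosting view; AVCaptureConnection exposes videoOrientation for exactly this. A small sketch using the previewLayer from the question:

    if let connection = previewLayer.connection, connection.isVideoOrientationSupported {
        // match the app's landscape interface rather than rotating the view
        connection.videoOrientation = .landscapeRight
    }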

iOS 11 ARKit : Drag Object in 3D View

I have a node object in a 3D view and I need to drag that object.
So far I have tried the approach from Placing, Dragging and Removing SCNNodes in ARKit, converted to Swift:
@objc func handleDragGesture(_ gestureRecognizer: UIGestureRecognizer) {
    let tapPoint = gestureRecognizer.location(in: self.sceneView)
    switch gestureRecognizer.state {
    case .began:
        print("Object began to move")
        let hitResults = self.sceneView.hitTest(tapPoint, options: nil)
        if hitResults.isEmpty { return }
        let hitResult = hitResults.first
        if let node = hitResult?.node.parent?.parent?.parent {
            self.photoNode = node
        }
    case .changed:
        print("Moving object position changed")
        if let _ = self.photoNode {
            let hitResults = self.sceneView.hitTest(tapPoint, types: .featurePoint)
            let hitResult = hitResults.last
            if let transform = hitResult?.worldTransform {
                let matrix = SCNMatrix4FromMat4(transform)
                let vector = SCNVector3Make(matrix.m41, matrix.m42, matrix.m43)
                self.photoNode?.position = vector
            }
        }
    case .ended:
        print("Done moving object")
    default:
        break
    }
}
but it is not working properly. What is the correct way to do this?
You can do this using a UIPanGestureRecognizer... see the basic Swift Playground code below for handling an SCNNode.
import UIKit
import ARKit
import SceneKit
import PlaygroundSupport

// Main ARKit view controller
class ViewController: UIViewController, ARSCNViewDelegate, ARSessionDelegate {

    var textNode: SCNNode!
    var counter = 0

    @IBOutlet var sceneView: ARSCNView!

    override func viewDidLoad() {
        super.viewDidLoad()
        // set the view's delegate
        sceneView.delegate = self
        // show statistics such as fps and timing information
        sceneView.showsStatistics = true
        // Add lighting
        sceneView.autoenablesDefaultLighting = true

        let text = SCNText(string: "Drag Me with Pan Gesture!", extrusionDepth: 1)
        // create material
        let material = SCNMaterial()
        material.diffuse.contents = UIColor.green
        text.materials = [material]

        // Create node object
        textNode = SCNNode()
        textNode.name = "textNode"
        textNode.scale = SCNVector3(x: 0.004, y: 0.004, z: 0.004)
        textNode.geometry = text
        textNode.position = SCNVector3(x: 0, y: 0.02, z: -1)

        // add the new node to the root node
        self.sceneView.scene.rootNode.addChildNode(textNode)

        // Add pan gesture for dragging the textNode about
        sceneView.addGestureRecognizer(UIPanGestureRecognizer(target: self, action: #selector(panGesture(_:))))
    }

    override func loadView() {
        sceneView = ARSCNView(frame: CGRect(x: 0.0, y: 0.0, width: 500.0, height: 600.0))
        // Set the view's delegate
        sceneView.delegate = self
        let config = ARWorldTrackingConfiguration()
        config.planeDetection = .horizontal
        // Now we'll get messages when planes are detected...
        sceneView.session.delegate = self
        self.view = sceneView
        sceneView.session.run(config)
    }

    @objc func panGesture(_ gesture: UIPanGestureRecognizer) {
        gesture.minimumNumberOfTouches = 1
        let results = self.sceneView.hitTest(gesture.location(in: gesture.view), types: ARHitTestResult.ResultType.featurePoint)
        guard let result: ARHitTestResult = results.first else {
            return
        }
        let position = SCNVector3Make(result.worldTransform.columns.3.x, result.worldTransform.columns.3.y, result.worldTransform.columns.3.z)
        textNode.position = position
    }
}

PlaygroundPage.current.liveView = ViewController()
PlaygroundPage.current.needsIndefiniteExecution = true
EDIT:
The above drag function only works if you have one object in the view, so it is not really necessary to hit the node to start dragging; it will just drag from wherever you tap on the screen. If you have multiple objects in the view and you want to drag nodes independently, you can change the panGesture function to the following, which detects the tapped node first:
// drags nodes independently
@objc func panGesture(_ gesture: UIPanGestureRecognizer) {
    gesture.minimumNumberOfTouches = 1
    let results = self.sceneView.hitTest(gesture.location(in: gesture.view), types: ARHitTestResult.ResultType.featurePoint)
    guard let result: ARHitTestResult = results.first else {
        return
    }
    let hits = self.sceneView.hitTest(gesture.location(in: gesture.view), options: nil)
    if let tappedNode = hits.first?.node {
        let position = SCNVector3Make(result.worldTransform.columns.3.x, result.worldTransform.columns.3.y, result.worldTransform.columns.3.z)
        tappedNode.position = position
    }
}
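One refinement to consider: because this version re-runs the node hit test on every event, the drag can jump to a different node mid-gesture. Resolving the node once on .began and reusing it until the gesture ends avoids that; a sketch (draggedNode is a new property introduced here, not part of the answer):

    var draggedNode: SCNNode? // holds the node picked up at .began

    @objc func panGestureHoldingNode(_ gesture: UIPanGestureRecognizer) {
        let point = gesture.location(in: gesture.view)
        switch gesture.state {
        case .began:
            // pick the node under the finger once, at the start of the drag
            draggedNode = sceneView.hitTest(point, options: nil).first?.node
        case .changed:
            guard let node = draggedNode,
                  let result = sceneView.hitTest(point, types: .featurePoint).first else { return }
            node.position = SCNVector3Make(result.worldTransform.columns.3.x,
                                           result.worldTransform.columns.3.y,
                                           result.worldTransform.columns.3.z)
        default:
            draggedNode = nil
        }
    }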
REF: https://stackoverflow.com/a/48220751/5589073
This code works for me:
private func drag(sender: UIPanGestureRecognizer) {
    switch sender.state {
    case .began:
        let location = sender.location(in: self.sceneView)
        guard let hitNodeResult = self.sceneView.hitTest(location, options: nil).first else { return }
        self.PCoordx = hitNodeResult.worldCoordinates.x
        self.PCoordy = hitNodeResult.worldCoordinates.y
        self.PCoordz = hitNodeResult.worldCoordinates.z
    case .changed:
        // when you pan the screen with your finger,
        // the hit test gives the new coordinates of the touched location in the sceneView;
        // coord - PCoord gives the distance moved (panned) in the sceneView
        let hitNode = sceneView.hitTest(sender.location(in: sceneView), options: nil)
        if let coordx = hitNode.first?.worldCoordinates.x,
           let coordy = hitNode.first?.worldCoordinates.y,
           let coordz = hitNode.first?.worldCoordinates.z {
            let action = SCNAction.moveBy(x: CGFloat(coordx - self.PCoordx),
                                          y: CGFloat(coordy - self.PCoordy),
                                          z: CGFloat(coordz - self.PCoordz),
                                          duration: 0.0)
            self.photoNode.runAction(action)
            self.PCoordx = coordx
            self.PCoordy = coordy
            self.PCoordz = coordz
        }
        sender.setTranslation(CGPoint.zero, in: self.sceneView)
    case .ended:
        self.PCoordx = 0.0
        self.PCoordy = 0.0
        self.PCoordz = 0.0
    default:
        break
    }
}
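For completeness, the handler above relies on state the answer doesn't show; presumably something like this in the containing class (the names are taken from the code, the declarations are assumed):

    // Assumed declarations backing the drag handler above
    var PCoordx: Float = 0.0
    var PCoordy: Float = 0.0
    var PCoordz: Float = 0.0
    var photoNode: SCNNode!

Note that if drag(sender:) is wired up with #selector, it also needs to be marked @objc.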

How to be able to zoom with front facing camera

In my custom camera, I implemented a pinch gesture recognizer and added a function so the video can zoom. The camera zooms when I am using the back camera, but when I switch to the front-facing camera, I am not able to zoom. This is my function:
var zoomFactor: CGFloat = 1.0

@objc func pinch(_ pinch: UIPinchGestureRecognizer) {
    print("123")
    guard let device = videoCaptureDevice else { return }

    func minMaxZoom(_ factor: CGFloat) -> CGFloat {
        return min(max(factor, 1.0), device.activeFormat.videoMaxZoomFactor)
    }

    func update(scale factor: CGFloat) {
        do {
            try device.lockForConfiguration()
            defer { device.unlockForConfiguration() }
            device.videoZoomFactor = factor
        } catch {
            debugPrint(error)
        }
    }

    let newScaleFactor = minMaxZoom(pinch.scale * zoomFactor)

    switch pinch.state {
    case .began: fallthrough
    case .changed: update(scale: newScaleFactor)
    case .ended:
        zoomFactor = minMaxZoom(newScaleFactor)
        update(scale: zoomFactor)
    default: break
    }
}
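There is no answer recorded here, but one thing worth checking (an assumption, since the camera-switching code isn't shown): if videoCaptureDevice is cached at setup time and still points at the back camera after switching inputs, the pinch handler will lock and zoom the wrong device. Deriving the device from the session's current input avoids that; a sketch, assuming a captureSession property:

    // Resolve the device from whatever input the session is currently using,
    // instead of caching it in videoCaptureDevice at setup time.
    var currentCamera: AVCaptureDevice? {
        return (captureSession.inputs.first as? AVCaptureDeviceInput)?.device
    }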

Program not working anymore if I rotate device due to gesture recognizer

If I press a button and then rotate my device, my program stops working. I know that the gesture recognizer is the problem, but how can I fix it?
I add the gesture recognizer to my view, and once the device rotates into landscape the gesture recognizer no longer works. What can be done to solve this?
Here's some code:
// take a photo
@IBAction func takePhoto(sender: AnyObject) {
    self.fullScreenView.hidden = false
    self.recordButton.enabled = false
    self.takephoto.enabled = false
    self.recordButton.hidden = true
    self.takephoto.hidden = true

    session.startRunning()
    currentGesture = "Photo"

    // customize the quality level or bitrate of the output photo
    session.sessionPreset = AVCaptureSessionPresetPhoto

    // add the AVCaptureVideoPreviewLayer to the view and set the view to fullscreen
    fullScreenView.frame = view.bounds
    videoPreviewLayer.frame = fullScreenView.bounds
    fullScreenView.layer.addSublayer(videoPreviewLayer)

    // add action to fullScreenView
    gestureFullScreenView = UITapGestureRecognizer(target: self, action: #selector(ViewController.takePhoto(_:)))
    self.fullScreenView.addGestureRecognizer(gestureFullScreenView)

    // add action to myView
    gestureView = UITapGestureRecognizer(target: self, action: #selector(ViewController.setFrontpage(_:)))
    self.view.addGestureRecognizer(gestureView)

    if (preview == true) {
        if let videoConnection = stillImageOutput!.connectionWithMediaType(AVMediaTypeVideo) {
            // code for photo capture goes here...
            stillImageOutput?.captureStillImageAsynchronouslyFromConnection(videoConnection, completionHandler: { (sampleBuffer, error) -> Void in
                // process the image data (sampleBuffer) here to get an image file we can put in our view
                if (sampleBuffer != nil) {
                    let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)
                    let image = UIImage(data: imageData, scale: 1.0)
                    var orientation: UIImageOrientation! // note: never assigned before being used below
                    // image with the current orientation
                    let newImage = UIImage(CGImage: (image?.CGImage)!, scale: 1, orientation: orientation)

                    // remove the gesture recognizer and stop the current session
                    self.fullScreenView.hidden = true
                    self.fullScreenView.gestureRecognizers?.forEach(self.fullScreenView.removeGestureRecognizer)
                    self.session.stopRunning()

                    // save image to the library
                    UIImageWriteToSavedPhotosAlbum(newImage, nil, nil, nil)

                    self.imageViewBackground = UIImageView(frame: self.view.bounds)
                    self.imageViewBackground.image = newImage
                    self.imageViewBackground.tag = self.key
                    self.view.addSubview(self.imageViewBackground)
                }
            })
        }
    } else {
        preview = true
    }
}
and here I set the new frame:
override func viewWillLayoutSubviews() {
    // if the device rotates, set these components to fullscreen
    fullScreenView.frame = view.bounds
    videoPreviewLayer?.frame = view.bounds
    playerLayer?.frame = view.bounds
    imageViewBackground?.frame = view.bounds

    let orientation: UIDeviceOrientation = UIDevice.currentDevice().orientation
    switch (orientation) {
    case .Portrait:
        videoPreviewLayer?.connection.videoOrientation = .Portrait
        countLabel.frame = CGRectMake(width * 0.4, -(width * 0.24), height/2, (width/2) * 1.25)
    case .LandscapeRight:
        videoPreviewLayer?.connection.videoOrientation = .LandscapeLeft
        countLabel.frame = CGRectMake((width * 0.72), -(width * 0.26), height/2, (width/2) * 1.25)
    case .LandscapeLeft:
        videoPreviewLayer?.connection.videoOrientation = .LandscapeRight
        countLabel.frame = CGRectMake((width * 0.72), -(width * 0.26), height/2, (width/2) * 1.25)
    case .PortraitUpsideDown:
        videoPreviewLayer?.connection.videoOrientation = .PortraitUpsideDown
        countLabel.frame = CGRectMake(width * 0.4, -(width * 0.24), height/2, (width/2) * 1.25)
    default:
        videoPreviewLayer?.connection.videoOrientation = .Portrait
        countLabel.frame = CGRectMake(width * 0.4, -(width * 0.24), height/2, (width/2) * 1.25)
    }
}
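No answer is recorded for this one, but one likely culprit (a guess, based only on the code shown): takePhoto adds a fresh tap recognizer to fullScreenView and to the view every time it runs, so repeated presses and rotations can leave several stacked recognizers fighting each other. Installing each recognizer once and toggling enabled sidesteps that; a sketch in the same Swift 2 style as the question:

    override func viewDidLoad() {
        super.viewDidLoad()
        // install each recognizer exactly once; rotation can no longer stack duplicates
        gestureFullScreenView = UITapGestureRecognizer(target: self, action: #selector(ViewController.takePhoto(_:)))
        self.fullScreenView.addGestureRecognizer(gestureFullScreenView)
        gestureFullScreenView.enabled = false // enable only while the preview is on screen

        gestureView = UITapGestureRecognizer(target: self, action: #selector(ViewController.setFrontpage(_:)))
        self.view.addGestureRecognizer(gestureView)
    }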
