Combining Images in CameraView with Overlay (Swift 3) - ios

I just about have this solved, thanks to some brilliant help getting me on the right track.
This is the code I have now.
Basically, I can now make an image out of the drawn overlay, and another out of the camera preview, but I can't yet combine them. There seems to be very little useful code around that does this simply.
So the important parts are the extension block right at the top, and the additions to
func saveToCamera() near the bottom of the code.
In short, I think I now have the two images I need. The snapshot of myImage appears on a white background, so I'm not sure if that's expected. That's how it appears in the Simulator, so it may just be normal.
Image 1. A screen capture.
Image 2. The saved image of myImage as per the explanation.
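A likely culprit for the white background: views laid out in Interface Builder default to an opaque white backgroundColor, so the snapshot faithfully records white behind the circles. A minimal sketch of a transparent snapshot, assuming the shapeLayer outlet from the code below:
shapeLayer.backgroundColor = .clear
shapeLayer.isOpaque = false
// false = preserve the alpha channel, 0 = use the device's screen scale
UIGraphicsBeginImageContextWithOptions(shapeLayer.bounds.size, false, 0)
shapeLayer.layer.render(in: UIGraphicsGetCurrentContext()!)
let snapshot = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()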
import UIKit
import AVFoundation
import Foundation
// extension must be outside class
extension UIImage {
convenience init(view: UIView) {
UIGraphicsBeginImageContext(view.frame.size)
view.layer.render(in: UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
self.init(cgImage: (image?.cgImage)!)
}
}
class ViewController: UIViewController {
@IBOutlet weak var navigationBar: UINavigationBar!
@IBOutlet weak var imgOverlay: UIImageView!
@IBOutlet weak var btnCapture: UIButton!
@IBOutlet weak var shapeLayer: UIView!
let captureSession = AVCaptureSession()
let stillImageOutput = AVCaptureStillImageOutput()
var previewLayer : AVCaptureVideoPreviewLayer?
//var shapeLayer : CALayer?
// If we find a device we'll store it here for later use
var captureDevice : AVCaptureDevice?
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
//=======================
let midX = self.view.bounds.midX
let midY = self.view.bounds.midY
for index in 1...10 {
let circlePath = UIBezierPath(arcCenter: CGPoint(x: midX,y: midY), radius: CGFloat((index * 10)), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true)
let shapeLayerPath = CAShapeLayer()
shapeLayerPath.path = circlePath.cgPath
//change the fill color
shapeLayerPath.fillColor = UIColor.clear.cgColor
//you can change the stroke color
shapeLayerPath.strokeColor = UIColor.blue.cgColor
//you can change the line width
shapeLayerPath.lineWidth = 0.5
// add the blue-circle layer to the shapeLayer ImageView
shapeLayer.layer.addSublayer(shapeLayerPath)
}
print("Shape layer drawn")
//=====================
captureSession.sessionPreset = AVCaptureSessionPresetHigh
if let devices = AVCaptureDevice.devices() as? [AVCaptureDevice] {
// Loop through all the capture devices on this phone
for device in devices {
// Make sure this particular device supports video
if (device.hasMediaType(AVMediaTypeVideo)) {
// Finally check the position and confirm we've got the back camera
if(device.position == AVCaptureDevicePosition.back) {
captureDevice = device
if captureDevice != nil {
print("Capture device found")
beginSession()
}
}
}
}
}
}
@IBAction func actionCameraCapture(_ sender: AnyObject) {
print("Camera button pressed")
saveToCamera()
}
func beginSession() {
do {
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
stillImageOutput.outputSettings = [AVVideoCodecKey:AVVideoCodecJPEG]
if captureSession.canAddOutput(stillImageOutput) {
captureSession.addOutput(stillImageOutput)
}
}
catch {
print("error: \(error.localizedDescription)")
}
guard let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) else {
print("no preview layer")
return
}
// this is what displays the camera view. But - it's on TOP of the drawn view, and under the overlay. ??
self.view.layer.addSublayer(previewLayer)
previewLayer.frame = self.view.layer.frame
captureSession.startRunning()
print("Capture session running")
self.view.addSubview(navigationBar)
//self.view.addSubview(imgOverlay)
self.view.addSubview(btnCapture)
// shapeLayer ImageView is already a subview created in IB
// but this will bring it to the front
self.view.addSubview(shapeLayer)
}
func saveToCamera() {
if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (CMSampleBuffer, Error) in
if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(CMSampleBuffer) {
if let cameraImage = UIImage(data: imageData) {
// cameraImage is the camera preview image.
// I need to combine/merge it with the myImage that is actually the blue circles.
// This converts the UIView of the blue circles to an image. Uses the 'extension' at the top of the code.
let myImage = UIImage(view: self.shapeLayer)
print("converting myImage to an image")
UIImageWriteToSavedPhotosAlbum(cameraImage, nil, nil, nil)
}
}
})
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}

Give this a try... instead of combining your overlay view, it draws the circles and combines the output:
import UIKit
import AVFoundation
import Foundation
class CameraWithTargetViewController: UIViewController {
@IBOutlet weak var navigationBar: UINavigationBar!
@IBOutlet weak var imgOverlay: UIImageView!
@IBOutlet weak var btnCapture: UIButton!
@IBOutlet weak var shapeLayer: UIView!
let captureSession = AVCaptureSession()
let stillImageOutput = AVCaptureStillImageOutput()
var previewLayer : AVCaptureVideoPreviewLayer?
//var shapeLayer : CALayer?
// If we find a device we'll store it here for later use
var captureDevice : AVCaptureDevice?
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
//=======================
captureSession.sessionPreset = AVCaptureSessionPresetHigh
if let devices = AVCaptureDevice.devices() as? [AVCaptureDevice] {
// Loop through all the capture devices on this phone
for device in devices {
// Make sure this particular device supports video
if (device.hasMediaType(AVMediaTypeVideo)) {
// Finally check the position and confirm we've got the back camera
if(device.position == AVCaptureDevicePosition.back) {
captureDevice = device
if captureDevice != nil {
print("Capture device found")
beginSession()
}
}
}
}
}
}
@IBAction func actionCameraCapture(_ sender: AnyObject) {
print("Camera button pressed")
saveToCamera()
}
func beginSession() {
do {
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
stillImageOutput.outputSettings = [AVVideoCodecKey:AVVideoCodecJPEG]
if captureSession.canAddOutput(stillImageOutput) {
captureSession.addOutput(stillImageOutput)
}
}
catch {
print("error: \(error.localizedDescription)")
}
guard let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) else {
print("no preview layer")
return
}
// this is what displays the camera view. But - it's on TOP of the drawn view, and under the overlay. ??
self.view.layer.addSublayer(previewLayer)
previewLayer.frame = self.view.layer.frame
imgOverlay.frame = self.view.frame
imgOverlay.image = self.drawCirclesOnImage(fromImage: nil, targetSize: imgOverlay.bounds.size)
self.view.bringSubview(toFront: navigationBar)
self.view.bringSubview(toFront: imgOverlay)
self.view.bringSubview(toFront: btnCapture)
// don't use shapeLayer anymore...
// self.view.bringSubview(toFront: shapeLayer)
captureSession.startRunning()
print("Capture session running")
}
func getImageWithColor(color: UIColor, size: CGSize) -> UIImage {
let rect = CGRect(origin: CGPoint(x: 0, y: 0), size: CGSize(width: size.width, height: size.height))
UIGraphicsBeginImageContextWithOptions(size, false, 0)
color.setFill()
UIRectFill(rect)
let image: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return image
}
func drawCirclesOnImage(fromImage: UIImage? = nil, targetSize: CGSize? = CGSize.zero) -> UIImage? {
if fromImage == nil && targetSize == CGSize.zero {
return nil
}
var tmpimg: UIImage?
if targetSize == CGSize.zero {
tmpimg = fromImage
} else {
tmpimg = getImageWithColor(color: UIColor.clear, size: targetSize!)
}
guard let img = tmpimg else {
return nil
}
let imageSize = img.size
let scale: CGFloat = 0
UIGraphicsBeginImageContextWithOptions(imageSize, false, scale)
img.draw(at: CGPoint.zero)
let w = imageSize.width
let midX = imageSize.width / 2
let midY = imageSize.height / 2
// red circles - radius in %
let circleRads = [ 0.07, 0.13, 0.17, 0.22, 0.29, 0.36, 0.40, 0.48, 0.60, 0.75 ]
// center "dot" - radius is 1.5%
var circlePath = UIBezierPath(arcCenter: CGPoint(x: midX,y: midY), radius: CGFloat(w * 0.015), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true)
UIColor.red.setFill()
circlePath.stroke()
circlePath.fill()
// blue circle is between first and second red circles
circlePath = UIBezierPath(arcCenter: CGPoint(x: midX,y: midY), radius: w * CGFloat((circleRads[0] + circleRads[1]) / 2.0), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true)
UIColor.blue.setStroke()
circlePath.lineWidth = 2.5
circlePath.stroke()
UIColor.red.setStroke()
for pct in circleRads {
let rad = w * CGFloat(pct)
circlePath = UIBezierPath(arcCenter: CGPoint(x: midX, y: midY), radius: CGFloat(rad), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true)
circlePath.lineWidth = 2.5
circlePath.stroke()
}
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage
}
func saveToCamera() {
if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (CMSampleBuffer, Error) in
if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(CMSampleBuffer) {
if let cameraImage = UIImage(data: imageData) {
if let nImage = self.drawCirclesOnImage(fromImage: cameraImage, targetSize: CGSize.zero) {
UIImageWriteToSavedPhotosAlbum(nImage, nil, nil, nil)
}
}
}
})
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}

Ok, solved it pretty much. The important code is here. The resulting image is slightly out of skew, but I'll work away and fix that, unless someone can see a good fix for it.
func saveToCamera() {
if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (CMSampleBuffer, Error) in
if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(CMSampleBuffer) {
if let cameraImage = UIImage(data: imageData) {
// cameraImage is the camera preview image.
// I need to combine/merge it with the myImage that is actually the blue circles.
// This converts the UIView of the blue circles to an image. Uses the 'extension' at the top of the code.
let myImage = UIImage(view: self.shapeLayer)
print("converting myImage to an image")
let newImage = self.composite(image:cameraImage, overlay:(myImage), scaleOverlay:true)
UIImageWriteToSavedPhotosAlbum(newImage!, nil, nil, nil)
}
}
})
}
}
func composite(image: UIImage, overlay: UIImage, scaleOverlay: Bool = false) -> UIImage? {
UIGraphicsBeginImageContext(image.size)
var rect = CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height)
image.draw(in: rect)
if scaleOverlay == false {
rect = CGRect(x: 0, y: 0, width: overlay.size.width, height: overlay.size.height)
}
overlay.draw(in: rect)
// grab the result, then end the context so it doesn't leak
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage
}
The resulting saved image.
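The slight skew is probably an aspect-ratio mismatch: the overlay was rendered at screen size, while the captured photo has the sensor's aspect ratio, so stretching the overlay to image.size distorts it. A possible fix, sketched under the assumption that the preview layer fills the screen (.resizeAspectFill) and the overlay was rendered full-screen - draw the overlay only into the part of the photo that was actually visible on screen:

func compositeKeepingAspect(image: UIImage, overlay: UIImage) -> UIImage? {
    UIGraphicsBeginImageContextWithOptions(image.size, true, image.scale)
    image.draw(at: .zero)
    // scale factor used when the photo was filled into the screen
    let screen = UIScreen.main.bounds.size
    let f = max(screen.width / image.size.width, screen.height / image.size.height)
    // the centred sub-rect of the photo that was visible on screen
    let visible = CGRect(x: (image.size.width - screen.width / f) / 2,
                         y: (image.size.height - screen.height / f) / 2,
                         width: screen.width / f,
                         height: screen.height / f)
    overlay.draw(in: visible)
    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return newImage
}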

Related

Swipeable CIFilter over video

I am currently trying to implement something similar to Instagram's story feature, where you take a picture or a video and, when swiping left or right, you change the current filter over the content (here is an example of what I managed to do in my app for images: https://imgur.com/a/pYKrPkA).
As you can see in the example, I got it done for images, but now my problem is that I am trying to make it work for videos as well, and I am a bit lost as to where to start.
final class Filter: NSObject {
var isEnabled: Bool = true
var overlayImage: CIImage?
var ciFilter: CIFilter?
init(ciFilter: CIFilter?) {
self.ciFilter = ciFilter
super.init()
}
/// Empty filter for the original photo
static func emptyFilter() -> Filter {
return Filter(ciFilter: nil)
}
func imageByProcessingImage(_ image: CIImage, at time: CFTimeInterval) -> CIImage? {
guard isEnabled else { return image }
var image = image
if let overlayImage = overlayImage {
image = overlayImage.composited(over: image)
}
guard let ciFilter = ciFilter else {
return image
}
ciFilter.setValue(image, forKey: kCIInputImageKey)
return ciFilter.value(forKey: kCIOutputImageKey) as? CIImage
}
}
class StoriesImageView: UIView {
private var metalView: MTKView?
private var ciImage: CIImage?
private var preferredCIImageTransform: CGAffineTransform?
private let device = MTLCreateSystemDefaultDevice()
private var commandQueue: MTLCommandQueue?
private var context: CIContext?
override func layoutSubviews() {
super.layoutSubviews()
metalView?.frame = bounds
}
override func setNeedsDisplay() {
super.setNeedsDisplay()
metalView?.setNeedsDisplay()
}
func setImage(with image: UIImage) {
preferredCIImageTransform = preferredCIImageTransform(from: image)
if let cgImage = image.cgImage {
ciImage = CIImage(cgImage: cgImage)
loadContextIfNeeded()
}
setNeedsDisplay()
}
/// Returns the image fitted to 1080x1920.
func renderedUIImage() -> UIImage? {
return renderedUIImage(in: CGRect(origin: .zero, size: CGSize(width: 1080, height: 1920)))
}
/// Returns a CIImage fitted to the main screen bounds.
func renderedCIIImage() -> CIImage? {
return renderedCIImage(in: CGRect(rect: bounds, contentScale: UIScreen.main.scale))
}
func renderedUIImage(in rect: CGRect) -> UIImage? {
if let image = renderedCIImage(in: rect), let context = context {
if let imageRef = context.createCGImage(image, from: image.extent) {
return UIImage(cgImage: imageRef)
}
}
return nil
}
func renderedCIImage(in rect: CGRect) -> CIImage? {
if var image = ciImage, let transform = preferredCIImageTransform {
image = image.transformed(by: transform)
return scaleAndResize(image, for: rect)
}
return nil
}
private func cleanupContext() {
metalView?.removeFromSuperview()
metalView?.releaseDrawables()
metalView = nil
}
private func loadContextIfNeeded() {
setContext()
}
private func setContext() {
let mView = MTKView(frame: bounds, device: device)
mView.clearColor = MTLClearColor(red: 0, green: 0, blue: 0, alpha: 0)
mView.framebufferOnly = false
mView.enableSetNeedsDisplay = true
mView.contentScaleFactor = contentScaleFactor
mView.delegate = self
metalView = mView
commandQueue = device?.makeCommandQueue()
context = CIContext(mtlDevice: device!)
insertSubview(metalView!, at: 0)
}
private func scaleAndResize(_ image: CIImage, for rect: CGRect) -> CIImage {
let imageSize = image.extent.size
let horizontalScale = rect.size.width / imageSize.width
let verticalScale = rect.size.height / imageSize.height
let scale = min(horizontalScale, verticalScale)
return image.transformed(by: CGAffineTransform(scaleX: scale, y: scale))
}
private func preferredCIImageTransform(from image: UIImage) -> CGAffineTransform {
if image.imageOrientation == .up {
return .identity
}
var transform: CGAffineTransform = .identity
switch image.imageOrientation {
case .down, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: image.size.height)
transform = transform.rotated(by: .pi)
case .left, .leftMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.rotated(by: .pi / 2)
case .right, .rightMirrored:
transform = transform.translatedBy(x: 0, y: image.size.height)
transform = transform.rotated(by: .pi / -2)
case .up, .upMirrored: break
@unknown default: fatalError("Unknown image orientation")
}
switch image.imageOrientation {
case .upMirrored, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .leftMirrored, .rightMirrored:
transform = transform.translatedBy(x: image.size.height, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .up, .down, .left, .right: break
@unknown default: fatalError("Unknown image orientation")
}
return transform
}
}
extension StoriesImageView: MTKViewDelegate {
func draw(in view: MTKView) {
autoreleasepool {
let rect = CGRect(rect: view.bounds, contentScale: UIScreen.main.scale)
if let image = renderedCIImage(in: rect) {
let commandBuffer = commandQueue?.makeCommandBuffer()
guard let drawable = view.currentDrawable else {
return
}
let heightDifference = (view.drawableSize.height - image.extent.size.height) / 2
let destination = CIRenderDestination(width: Int(view.drawableSize.width),
height: Int(view.drawableSize.height - heightDifference),
pixelFormat: view.colorPixelFormat,
commandBuffer: commandBuffer,
mtlTextureProvider: { () -> MTLTexture in
return drawable.texture
})
_ = try? context?.startTask(toRender: image, to: destination)
commandBuffer?.present(drawable)
commandBuffer?.commit()
}
}
}
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {}
}
final class StoriesSwipeableImageView: StoriesImageView {
private let scrollView: UIScrollView = UIScrollView()
private let preprocessingFilter: Filter? = nil
var isRefreshingAutomaticallyWhenScrolling: Bool = true
var filters: [Filter]? {
didSet {
updateScrollViewContentSize()
updateCurrentSelected(notify: true)
}
}
var selectedFilter: Filter? {
didSet {
if selectedFilter != oldValue {
setNeedsLayout()
}
}
}
override init(frame: CGRect) {
super.init(frame: frame)
setup()
}
required init?(coder: NSCoder) {
super.init(coder: coder)
setup()
}
override func layoutSubviews() {
super.layoutSubviews()
scrollView.frame = bounds
updateScrollViewContentSize()
}
private func setup() {
scrollView.delegate = self
scrollView.isPagingEnabled = true
scrollView.showsHorizontalScrollIndicator = false
scrollView.showsVerticalScrollIndicator = false
scrollView.bounces = true
scrollView.alwaysBounceVertical = true
scrollView.alwaysBounceHorizontal = true
scrollView.backgroundColor = .clear
addSubview(scrollView)
}
private func updateScrollViewContentSize() {
let filterCount = filters?.count ?? 0
scrollView.contentSize = CGSize(width: filterCount * Int(frame.size.width) * 3,
height: Int(frame.size.height))
if let selectedFilter = selectedFilter {
scroll(to: selectedFilter, animated: false)
}
}
private func scroll(to filter: Filter, animated: Bool) {
if let index = filters?.firstIndex(where: { $0 === filter }) {
let contentOffset = CGPoint(x: scrollView.contentSize.width / 3 + scrollView.frame.size.width * CGFloat(index), y: 0)
scrollView.setContentOffset(contentOffset, animated: animated)
updateCurrentSelected(notify: false)
} else {
fatalError("Filter is not available in filters collection")
}
}
private func updateCurrentSelected(notify: Bool) {
guard frame.size.width != 0 else { return }
let filterCount = filters?.count ?? 0
let selectedIndex = Int(scrollView.contentOffset.x + scrollView.frame.size.width / 2) / Int(scrollView.frame.size.width) % filterCount
var newFilterGroup: Filter?
if selectedIndex >= 0 && selectedIndex < filterCount {
newFilterGroup = filters?[selectedIndex]
} else {
fatalError("Invalid contentOffset")
}
if selectedFilter != newFilterGroup {
selectedFilter = newFilterGroup
if notify {
// Notify delegate?
}
}
}
override func renderedCIImage(in rect: CGRect) -> CIImage? {
guard var image = super.renderedCIImage(in: rect) else {
print("Failed to render image")
return nil
}
let timeinterval: CFTimeInterval = 0
if let preprocessingFilter = self.preprocessingFilter {
image = preprocessingFilter.imageByProcessingImage(image, at: timeinterval)!
}
let extent = image.extent
let contentSize = scrollView.bounds.size
if contentSize.width == 0 {
return image
}
let filtersCount = filters?.count ?? 0
if filtersCount == 0 {
return image
}
let ratio = scrollView.contentOffset.x / contentSize.width
var index = Int(ratio)
let upIndex = Int(ceil(ratio))
let remainingRatio = ratio - CGFloat(index)
var xImage = extent.size.width * -remainingRatio
var outputImage: CIImage? = CIImage(color: CIColor(red: 0, green: 0, blue: 0))
while index <= upIndex {
let currentIndex = index % filtersCount
let filter = filters?[currentIndex]
var filteredImage = filter?.imageByProcessingImage(image, at: timeinterval)
filteredImage = filteredImage?.cropped(to:
CGRect(x: extent.origin.x + xImage,
y: extent.origin.y,
width: extent.size.width,
height: extent.size.height)
)
guard let output = outputImage else { return nil }
outputImage = filteredImage?.composited(over: output)
xImage += extent.size.width
index += 1
}
outputImage = outputImage?.cropped(to: extent)
return outputImage
}
}
extension StoriesSwipeableImageView: UIScrollViewDelegate {
func scrollViewDidScroll(_ scrollView: UIScrollView) {
let width = scrollView.frame.size.width
let contentOffsetX = scrollView.contentOffset.x
let contentSizeWidth = scrollView.contentSize.width
let normalWidth = CGFloat(filters?.count ?? 0) * width
if width > 0 && contentSizeWidth > 0 {
if contentOffsetX <= 0 {
scrollView.contentOffset = CGPoint(x: contentOffsetX + normalWidth, y: scrollView.contentOffset.y)
} else if contentOffsetX + width >= contentSizeWidth {
scrollView.contentOffset = CGPoint(x: contentOffsetX - normalWidth, y: scrollView.contentOffset.y)
}
}
if isRefreshingAutomaticallyWhenScrolling {
setNeedsDisplay()
}
}
func scrollViewDidScrollToTop(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndScrollingAnimation(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDecelerating(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDragging(_ scrollView: UIScrollView, willDecelerate decelerate: Bool) {
if !decelerate {
updateCurrentSelected(notify: true)
}
}
}
These three classes do the magic for the image part. Does anyone have a suggestion or a starting point for this? I tried looking over https://github.com/rFlex/SCRecorder but I get a bit lost in Obj-C.
In iOS 9 / OS X 10.11 / tvOS, there's a convenience method for applying CIFilters to video. It works on an AVVideoComposition, so you can use it both for playback and for file-to-file import/export. See AVVideoComposition.init(asset:applyingCIFiltersWithHandler:) for the method docs.
There's an example in Apple's Core Image Programming Guide, too:
let filter = CIFilter(name: "CIGaussianBlur")!
let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
// Clamp to avoid blurring transparent pixels at the image edges
let source = request.sourceImage.clampingToExtent()
filter.setValue(source, forKey: kCIInputImageKey)
// Vary filter parameters based on video timing
let seconds = CMTimeGetSeconds(request.compositionTime)
filter.setValue(seconds * 10.0, forKey: kCIInputRadiusKey)
// Crop the blurred output to the bounds of the original image
let output = filter.outputImage!.cropping(to: request.sourceImage.extent)
// Provide the filter output to the composition
request.finish(with: output, context: nil)
})
That part sets up the composition. After you've done that, you can either play it by assigning it to an AVPlayer or write it to a file with AVAssetExportSession. Since you're after the latter, here's an example of that:
let export = AVAssetExportSession(asset: asset, presetName: AVAssetExportPreset1920x1080)
export.outputFileType = AVFileTypeQuickTimeMovie
export.outputURL = outURL
export.videoComposition = composition
export.exportAsynchronously { /*...*/ }
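For completeness, the playback route is just as short - a sketch reusing the asset and composition from above, inside a view controller:

let playerItem = AVPlayerItem(asset: asset)
playerItem.videoComposition = composition
let player = AVPlayer(playerItem: playerItem)
// attach an AVPlayerLayer to display it
let playerLayer = AVPlayerLayer(player: player)
playerLayer.frame = view.bounds
view.layer.addSublayer(playerLayer)
player.play()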
There's a bit more about this in the WWDC15 session on Core Image, starting around 20 minutes in.

Crop CGRect from UIImage taken from camera

I have a view controller which takes a photo with a circular view in the center.
After taking a photo, I need to crop the CGRect with which I created the circular view. I need to crop the rectangle, not the circle.
I tried https://stackoverflow.com/a/57258806/12411655 and many other solutions, but none of them crop the CGRect that I need.
How do I convert the CGRect in the view's coordinates to UIImage's coordinates?
class CircularCameraViewController: UIViewController {
var captureSession: AVCaptureSession!
var capturePhotoOutput: AVCapturePhotoOutput!
var cropRect: CGRect!
public lazy var shutterButton: ShutterButton = {
let button = ShutterButton()
button.translatesAutoresizingMaskIntoConstraints = false
button.addTarget(self, action: #selector(capturePhoto), for: .touchUpInside)
return button
}()
private lazy var cancelButton: UIButton = {
let button = UIButton()
button.setTitle("Cancel", for: .normal)
button.translatesAutoresizingMaskIntoConstraints = false
button.addTarget(self, action: #selector(dismissCamera), for: .touchUpInside)
return button
}()
private lazy var flashButton: UIButton = {
let image = UIImage(named: "flash", in: Bundle(for: ScannerViewController.self), compatibleWith: nil)?.withRenderingMode(.alwaysTemplate)
let button = UIButton()
button.setImage(image, for: .normal)
button.translatesAutoresizingMaskIntoConstraints = false
button.addTarget(self, action: #selector(toggleFlash), for: .touchUpInside)
button.tintColor = .white
return button
}()
override func viewDidLoad() {
super.viewDidLoad()
setupCamera()
setupPhotoOutput()
setupViews()
setupConstraints()
captureSession.startRunning()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
}
override func viewWillDisappear(_ animated: Bool) {
captureSession.stopRunning()
}
private func setupCamera() {
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
var input: AVCaptureDeviceInput
do {
input = try AVCaptureDeviceInput(device: captureDevice!)
} catch {
fatalError("Error configuring capture device: \(error)");
}
captureSession = AVCaptureSession()
captureSession.addInput(input)
// Setup the preview view.
let videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer.frame = view.layer.bounds
view.layer.addSublayer(videoPreviewLayer)
let camPreviewBounds = view.bounds
cropRect = CGRect(
x: camPreviewBounds.minX + (camPreviewBounds.width - 150) * 0.5,
y: camPreviewBounds.minY + (camPreviewBounds.height - 150) * 0.5,
width: 150,
height: 150
)
let path = UIBezierPath(roundedRect: camPreviewBounds, cornerRadius: 0)
path.append(UIBezierPath(ovalIn: cropRect))
let layer = CAShapeLayer()
layer.path = path.cgPath
layer.fillRule = CAShapeLayerFillRule.evenOdd;
layer.fillColor = UIColor.black.cgColor
layer.opacity = 0.5;
view.layer.addSublayer(layer)
}
private func setupViews() {
view.addSubview(shutterButton)
view.addSubview(flashButton)
view.addSubview(cancelButton)
}
private func setupConstraints() {
var cancelButtonConstraints = [NSLayoutConstraint]()
var shutterButtonConstraints = [NSLayoutConstraint]()
var flashConstraints = [NSLayoutConstraint]()
shutterButtonConstraints = [
shutterButton.centerXAnchor.constraint(equalTo: view.centerXAnchor),
shutterButton.widthAnchor.constraint(equalToConstant: 65.0),
shutterButton.heightAnchor.constraint(equalToConstant: 65.0)
]
flashConstraints = [
flashButton.leftAnchor.constraint(equalTo: view.leftAnchor, constant: 24.0),
flashButton.topAnchor.constraint(equalTo: view.topAnchor, constant: 30)
]
if #available(iOS 11.0, *) {
cancelButtonConstraints = [
cancelButton.leftAnchor.constraint(equalTo: view.safeAreaLayoutGuide.leftAnchor, constant: 24.0),
view.safeAreaLayoutGuide.bottomAnchor.constraint(equalTo: cancelButton.bottomAnchor, constant: (65.0 / 2) - 10.0)
]
let shutterButtonBottomConstraint = view.safeAreaLayoutGuide.bottomAnchor.constraint(equalTo: shutterButton.bottomAnchor, constant: 8.0)
shutterButtonConstraints.append(shutterButtonBottomConstraint)
} else {
cancelButtonConstraints = [
cancelButton.leftAnchor.constraint(equalTo: view.leftAnchor, constant: 24.0),
view.bottomAnchor.constraint(equalTo: cancelButton.bottomAnchor, constant: (65.0 / 2) - 10.0)
]
let shutterButtonBottomConstraint = view.bottomAnchor.constraint(equalTo: shutterButton.bottomAnchor, constant: 8.0)
shutterButtonConstraints.append(shutterButtonBottomConstraint)
}
NSLayoutConstraint.activate(cancelButtonConstraints + shutterButtonConstraints + flashConstraints)
}
private func setupPhotoOutput() {
capturePhotoOutput = AVCapturePhotoOutput()
capturePhotoOutput.isHighResolutionCaptureEnabled = true
captureSession.addOutput(capturePhotoOutput!)
}
@objc func dismissCamera() {
self.dismiss(animated: true, completion: nil)
}
@objc private func toggleFlash() {
if let avDevice = AVCaptureDevice.default(for: AVMediaType.video) {
if (avDevice.hasTorch) {
do {
try avDevice.lockForConfiguration()
} catch {
print("aaaa")
}
if avDevice.isTorchActive {
avDevice.torchMode = AVCaptureDevice.TorchMode.off
} else {
avDevice.torchMode = AVCaptureDevice.TorchMode.on
}
}
// unlock your device
avDevice.unlockForConfiguration()
}
}
}
extension CircularCameraViewController : AVCapturePhotoCaptureDelegate {
@objc private func capturePhoto() {
let photoSettings = AVCapturePhotoSettings()
photoSettings.isAutoStillImageStabilizationEnabled = true
photoSettings.isHighResolutionPhotoEnabled = true
photoSettings.flashMode = .auto
// Set ourselves as the delegate for `capturePhoto`.
capturePhotoOutput?.capturePhoto(with: photoSettings, delegate: self)
}
@available(iOS 11.0, *)
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishProcessingPhoto photo: AVCapturePhoto,
error: Error?) {
guard error == nil else {
fatalError("Failed to capture photo: \(String(describing: error))")
}
guard let imageData = photo.fileDataRepresentation() else {
fatalError("Failed to convert pixel buffer")
}
guard let image = UIImage(data: imageData) else {
fatalError("Failed to convert image data to UIImage")
}
guard let croppedImg = image.cropToRect(rect: cropRect) else {
fatalError("Failed to crop image")
}
UIImageWriteToSavedPhotosAlbum(croppedImg, nil, nil, nil);
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
guard error == nil, let photoSample = photoSampleBuffer else {
fatalError("Failed to capture photo: \(String(describing: error))")
}
guard let imgData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: photoSample, previewPhotoSampleBuffer: previewPhotoSampleBuffer) else {
fatalError("Failed to get image data: \(String(describing: error))")
}
guard let image = UIImage(data: imgData) else {
fatalError("Failed to convert image data to UIImage: \(String(describing: error))")
}
}
}
UIImage extension:
func cropToRect(rect: CGRect!) -> UIImage? {
let scaledRect = CGRect(x: rect.origin.x * self.scale, y: rect.origin.y * self.scale, width: rect.size.width * self.scale, height: rect.size.height * self.scale);
guard let imageRef: CGImage = self.cgImage?.cropping(to:scaledRect)
else {
return nil
}
let croppedImage: UIImage = UIImage(cgImage: imageRef, scale: self.scale, orientation: self.imageOrientation)
return croppedImage
}
When cropping an image, you need to scale the "crop rect" from view coordinates up to the image's coordinates.
Also, when capturing from the camera, you need to take .imageOrientation into account.
Try changing your UIImage extension to this:
extension UIImage {
func cropToRect(rect: CGRect, viewSize: CGSize) -> UIImage? {
var cr = rect
switch self.imageOrientation {
case .right, .rightMirrored, .left, .leftMirrored:
// rotate the crop rect if needed
cr.origin.x = rect.origin.y
cr.origin.y = rect.origin.x
cr.size.width = rect.size.height
cr.size.height = rect.size.width
default:
break
}
let imageViewScale = max(self.size.width / viewSize.width,
self.size.height / viewSize.height)
// scale the crop rect
let cropZone = CGRect(x:cr.origin.x * imageViewScale,
y:cr.origin.y * imageViewScale,
width:cr.size.width * imageViewScale,
height:cr.size.height * imageViewScale)
// Perform cropping in Core Graphics
guard let cutImageRef: CGImage = self.cgImage?.cropping(to:cropZone)
else {
return nil
}
// Return image to UIImage
let croppedImage: UIImage = UIImage(cgImage: cutImageRef, scale: self.scale, orientation: self.imageOrientation)
return croppedImage
}
}
and change your call in photoOutput() to:
guard let croppedImg = image.cropToRect(rect: cropRect, viewSize: view.frame.size) else {
fatalError("Failed to crop image")
}
Since your code is using the full view, that should work fine. If you change it to use a different-sized view as your videoPreviewLayer, then use that size instead of view.frame.size.
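As an alternative to the manual math, AVFoundation can do the view-to-capture conversion for you if you keep a reference to the preview layer (in the question it is local to setupCamera(), so store it in a property). A sketch - note the returned rect is normalised to the capture device's coordinate space, so the orientation handling above may still apply:

// normalised (0...1) rect in capture-device coordinates
let normalized = videoPreviewLayer.metadataOutputRectConverted(fromLayerRect: cropRect)
if let cg = image.cgImage {
    // scale the normalised rect up to the captured image's pixel size
    let pixelRect = CGRect(x: normalized.origin.x * CGFloat(cg.width),
                           y: normalized.origin.y * CGFloat(cg.height),
                           width: normalized.width * CGFloat(cg.width),
                           height: normalized.height * CGFloat(cg.height))
    if let cropped = cg.cropping(to: pixelRect) {
        let croppedImage = UIImage(cgImage: cropped, scale: image.scale, orientation: image.imageOrientation)
        // use croppedImage...
    }
}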

How to make a save button in Swift 5?

I'm creating a wallpaper app for iOS. I've created a UIImageView, but am stuck on saving the image. I have solved the permissions, but am unable to let the user save the image. I created the save button itself, but I don't know how to save an image from the image array to the user's image gallery.
Here is my code so far:
class ViewController: UIViewController {
#IBOutlet var imageview: [UIScrollView]!
#IBOutlet weak var saveButton: UIButton!
#IBAction func saveButtonPressed(_ sender: UIButton) {
// TODO: - How to save the image here
}
let scrollView: UIScrollView = {
let scroll = UIScrollView()
scroll.isPagingEnabled = true
scroll.showsVerticalScrollIndicator = false
scroll.showsHorizontalScrollIndicator = false
scroll.frame = CGRect(x: 0, y: 0, width: UIScreen.main.bounds.width, height: UIScreen.main.bounds.height)
return scroll
}()
var imageArray = [UIImage]()
func setupImages(_ images: [UIImage]){
for i in 0..<images.count {
let imageView = UIImageView()
imageView.image = images[i]
let xPosition = UIScreen.main.bounds.width * CGFloat(i)
imageView.frame = CGRect(x: xPosition, y: 0, width: scrollView.frame.width, height: scrollView.frame.height)
imageView.contentMode = .scaleAspectFit
scrollView.contentSize.width = scrollView.frame.width * CGFloat(i + 1)
scrollView.addSubview(imageView)
//scrollView.delegate = (self as! UIScrollViewDelegate)
}
}
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view.
view.addSubview(scrollView)
imageArray = [#imageLiteral(resourceName: "1"),#imageLiteral(resourceName: "10"),#imageLiteral(resourceName: "9"),#imageLiteral(resourceName: "8"),#imageLiteral(resourceName: "3")]
setupImages(imageArray)
}
}
You will need to add a saveImage function:
func saveImage(image: UIImage) -> Bool {
guard let data = image.jpegData(compressionQuality: 1) ?? image.pngData() else {
return false
}
guard let directory = try? FileManager.default.url(for: .documentDirectory, in: .userDomainMask, appropriateFor: nil, create: false) as NSURL else {
return false
}
do {
try data.write(to: directory.appendingPathComponent("fileName.png")!)
return true
} catch {
print(error.localizedDescription)
return false
}
}
And then in saveButtonPressed:
let success = saveImage(image: imageArray[0])
print("Did \(success ? "" : "not ")store image successfully")
You'll need to add some logic to actually select the image.
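Note that saveImage above writes into the app's own Documents directory. If the goal is the user's photo gallery, as the question asks, a sketch using UIImageWriteToSavedPhotosAlbum instead (this requires an NSPhotoLibraryAddUsageDescription entry in Info.plist):

@IBAction func saveButtonPressed(_ sender: UIButton) {
    // index 0 is just a placeholder; add logic to pick the visible page
    UIImageWriteToSavedPhotosAlbum(imageArray[0], self,
        #selector(image(_:didFinishSavingWithError:contextInfo:)), nil)
}

@objc func image(_ image: UIImage, didFinishSavingWithError error: Error?, contextInfo: UnsafeRawPointer) {
    print(error == nil ? "Saved to photo library" : "Save failed: \(error!.localizedDescription)")
}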

Drawing on previewLayer: AVCaptureVideoPreviewLayer

I have a small app, SimpleCamera, that shows a live (video) preview with a button on the screen to take a photo. The photo is then displayed and you can save it or discard it. It all works, and I have used this code to draw a grey border around the screen preview. That too works fine. But that's all I can draw on that preview screen. I can't work out how to add the next bit of code, shown below this first code block.
// Provide a camera preview
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(cameraPreviewLayer!)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.frame = view.layer.frame
//Add preview layer for drawing
let previewLayer: AVCaptureVideoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
previewLayer.frame = self.view.layer.frame
previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.view.layer.addSublayer(previewLayer)
//Add Rectangle
let cgRect = CGRect(x: 0, y: 0, width: self.view.bounds.width, height: self.view.bounds.height)
let myView = UIImageView()
myView.frame = cgRect
myView.backgroundColor = UIColor.clear
myView.isOpaque = false
myView.layer.cornerRadius = 10
myView.layer.borderColor = UIColor.lightGray.cgColor
myView.layer.borderWidth = 3
myView.layer.masksToBounds = true
previewLayer.addSublayer(myView.layer)
// Bring the camera button to front
view.bringSubview(toFront: cameraButton)
captureSession.startRunning()
No matter where I put this code, it simply doesn't show up.
//Add circles
let midX = screenWidth / 2
let midY = screenHeight / 2
let w = screenWidth
var circlePath = UIBezierPath(arcCenter: CGPoint(x: midX,y: midY), radius: CGFloat(w * 0.010), startAngle: CGFloat(0), endAngle:CGFloat(Double.pi * 2), clockwise: true)
let circleRads = [ 0.07, 0.13, 0.17, 0.22, 0.29, 0.36, 0.40, 0.48, 0.60, 0.75 ]
for pct in circleRads {
let rad = w * CGFloat(pct)
circlePath = UIBezierPath(arcCenter: CGPoint(x: midX, y: midY), radius: CGFloat(rad), startAngle: CGFloat(0), endAngle:CGFloat(Double.pi * 2), clockwise: true)
circlePath.lineWidth = 2.5
circlePath.stroke()
}
// draw text time stamp on image
let now = Date()
let formatter = DateFormatter()
formatter.timeZone = TimeZone.current
formatter.dateFormat = "yyyy-MM-dd HH:mm"
let dateString = formatter.string(from: now)
let paragraphStyle = NSMutableParagraphStyle()
paragraphStyle.alignment = .center
let attrs = [NSAttributedStringKey.font: UIFont(name: "HelveticaNeue-Thin", size: 26)!, NSAttributedStringKey.paragraphStyle: paragraphStyle]
let string = dateString
string.draw(with: CGRect(x: 12, y: 38, width: 448, height: 448), options: .usesLineFragmentOrigin, attributes: attrs, context: nil)
Part answered. I can draw a border around the entire screen. This is the SimpleCamera app from the AppCoda Swift 4 Intermediate iOS 11 book. This is the code for the CameraController.swift file, and the border drawing section is Line 176 to Line 192 when opened in Xcode.
But I still can't figure out how to make the commented section draw a set of circles, put a date stamp on the image, and save it.
//
// CameraController.swift
// Camera
//
// Created by Simon Ng on 16/10/2016.
// Copyright © 2016 AppCoda. All rights reserved.
//
import UIKit
import AVFoundation
import Foundation
class CameraController: UIViewController {
@IBOutlet var cameraButton:UIButton!
//===================================
@IBOutlet weak var navigationBar: UINavigationBar!
@IBOutlet weak var imgOverlay: UIImageView!
@IBOutlet weak var btnCapture: UIButton!
@IBOutlet weak var btnInfo: UIButton!
@IBOutlet weak var btnSocial: UIButton!
@IBOutlet weak var shapeLayer: UIView!
@IBOutlet weak var btnRed: UIButton!
@IBOutlet weak var btnGreen: UIButton!
@IBOutlet weak var btnBlue: UIButton!
@IBOutlet weak var btnYellow: UIButton!
@IBOutlet weak var btnWhite: UIButton!
//===================================
var backFacingCamera: AVCaptureDevice?
var frontFacingCamera: AVCaptureDevice?
var currentDevice: AVCaptureDevice!
var stillImageOutput: AVCapturePhotoOutput!
var stillImage: UIImage?
var cameraPreviewLayer: AVCaptureVideoPreviewLayer?
let captureSession = AVCaptureSession()
var toggleCameraGestureRecognizer = UISwipeGestureRecognizer()
var zoomInGestureRecognizer = UISwipeGestureRecognizer()
var zoomOutGestureRecognizer = UISwipeGestureRecognizer()
//===============================
//let stillImageOutput = AVCaptureStillImageOutput()
var previewLayer : AVCaptureVideoPreviewLayer?
let screenWidth = UIScreen.main.bounds.size.width
let screenHeight = UIScreen.main.bounds.size.height
var aspectRatio: CGFloat = 1.0
var viewFinderHeight: CGFloat = 0.0
var viewFinderWidth: CGFloat = 0.0
var viewFinderMarginLeft: CGFloat = 0.0
var viewFinderMarginTop: CGFloat = 0.0
var lineColor : UIColor?
var color: Int = 0
//==============================
override func viewDidLoad() {
super.viewDidLoad()
configure()
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
// MARK: - Action methods
@IBAction func capture(sender: UIButton) {
// Set photo settings
let photoSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
photoSettings.isAutoStillImageStabilizationEnabled = true
photoSettings.isHighResolutionPhotoEnabled = true
photoSettings.flashMode = .off
stillImageOutput.isHighResolutionCaptureEnabled = true
stillImageOutput.capturePhoto(with: photoSettings, delegate: self)
}
// MARK: - Segues
@IBAction func unwindToCameraView(segue: UIStoryboardSegue) {
}
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
// Get the new view controller using segue.destinationViewController.
// Pass the selected object to the new view controller.
if segue.identifier == "showPhoto" {
let photoViewController = segue.destination as! PhotoViewController
photoViewController.image = stillImage
}
}
// MARK: - Helper methods
private func configure() {
// Preset the session for taking photo in full resolution
captureSession.sessionPreset = AVCaptureSession.Preset.photo
// Get the front and back-facing camera for taking photos
let deviceDiscoverySession = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .unspecified)
for device in deviceDiscoverySession.devices {
if device.position == .back {
backFacingCamera = device
} else if device.position == .front {
frontFacingCamera = device
}
}
currentDevice = backFacingCamera
guard let captureDeviceInput = try? AVCaptureDeviceInput(device: currentDevice) else {
return
}
// Configure the session with the output for capturing still images
stillImageOutput = AVCapturePhotoOutput()
// Configure the session with the input and the output devices
captureSession.addInput(captureDeviceInput)
captureSession.addOutput(stillImageOutput)
// Provide a camera preview
cameraPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(cameraPreviewLayer!)
cameraPreviewLayer?.videoGravity = AVLayerVideoGravity.resizeAspectFill
cameraPreviewLayer?.frame = view.layer.frame
//////////////
//Add circles
// red circles - radius in %
/*
let midX = screenWidth / 2
let midY = screenHeight / 2
let w = screenWidth
//let h = screenHeight
var circlePath = UIBezierPath(arcCenter: CGPoint(x: midX,y: midY), radius: CGFloat(w * 0.010), startAngle: CGFloat(0), endAngle:CGFloat(Double.pi * 2), clockwise: true)
let circleRads = [ 0.07, 0.13, 0.17, 0.22, 0.29, 0.36, 0.40, 0.48, 0.60, 0.75 ]
for pct in circleRads {
let rad = w * CGFloat(pct)
circlePath = UIBezierPath(arcCenter: CGPoint(x: midX, y: midY), radius: CGFloat(rad), startAngle: CGFloat(0), endAngle:CGFloat(Double.pi * 2), clockwise: true)
circlePath.lineWidth = 2.5
circlePath.stroke()
}
// draw text time stamp on image
let now = Date()
let formatter = DateFormatter()
formatter.timeZone = TimeZone.current
formatter.dateFormat = "yyyy-MM-dd HH:mm"
let dateString = formatter.string(from: now)
// print(dateString)
let paragraphStyle = NSMutableParagraphStyle()
paragraphStyle.alignment = .center
let attrs = [NSAttributedStringKey.font: UIFont(name: "HelveticaNeue-Thin", size: 26)!, NSAttributedStringKey.paragraphStyle: paragraphStyle]
let string = dateString
string.draw(with: CGRect(x: 22, y: 18, width: 448, height: 448), options: .usesLineFragmentOrigin, attributes: attrs, context: nil)
print("Did the date")
*/
//Add Rectangular border
let previewLayer: AVCaptureVideoPreviewLayer = AVCaptureVideoPreviewLayer(session: self.captureSession)
previewLayer.frame = self.view.layer.frame
previewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
self.view.layer.addSublayer(previewLayer)
let cgRect = CGRect(x: 0, y: 0, width: self.view.bounds.width, height: self.view.bounds.height)
let myView = UIImageView()
myView.frame = cgRect
myView.backgroundColor = UIColor.clear
myView.isOpaque = false
myView.layer.cornerRadius = 10
myView.layer.borderColor = UIColor.lightGray.cgColor
myView.layer.borderWidth = 3
myView.layer.masksToBounds = true
previewLayer.addSublayer(myView.layer)
///////////////
// Bring the camera button to front
view.bringSubview(toFront: cameraButton)
captureSession.startRunning()
print("so far 2")
// Toggle Camera recognizer
toggleCameraGestureRecognizer.direction = .up
toggleCameraGestureRecognizer.addTarget(self, action: #selector(toggleCamera))
view.addGestureRecognizer(toggleCameraGestureRecognizer)
// Zoom In recognizer
zoomInGestureRecognizer.direction = .right
zoomInGestureRecognizer.addTarget(self, action: #selector(zoomIn))
view.addGestureRecognizer(zoomInGestureRecognizer)
// Zoom Out recognizer
zoomOutGestureRecognizer.direction = .left
zoomOutGestureRecognizer.addTarget(self, action: #selector(zoomOut))
view.addGestureRecognizer(zoomOutGestureRecognizer)
}
@objc func toggleCamera() {
captureSession.beginConfiguration()
// Change the device based on the current camera
guard let newDevice = (currentDevice?.position == AVCaptureDevice.Position.back) ? frontFacingCamera : backFacingCamera else {
return
}
// Remove all inputs from the session
for input in captureSession.inputs {
captureSession.removeInput(input as! AVCaptureDeviceInput)
}
// Change to the new input
let cameraInput:AVCaptureDeviceInput
do {
cameraInput = try AVCaptureDeviceInput(device: newDevice)
} catch {
print(error)
return
}
if captureSession.canAddInput(cameraInput) {
captureSession.addInput(cameraInput)
}
currentDevice = newDevice
captureSession.commitConfiguration()
}
@objc func zoomIn() {
if let zoomFactor = currentDevice?.videoZoomFactor {
if zoomFactor < 5.0 {
let newZoomFactor = min(zoomFactor + 1.0, 5.0)
do {
try currentDevice?.lockForConfiguration()
currentDevice?.ramp(toVideoZoomFactor: newZoomFactor, withRate: 1.0)
currentDevice?.unlockForConfiguration()
} catch {
print(error)
}
}
}
}
@objc func zoomOut() {
if let zoomFactor = currentDevice?.videoZoomFactor {
if zoomFactor > 1.0 {
let newZoomFactor = max(zoomFactor - 1.0, 1.0)
do {
try currentDevice?.lockForConfiguration()
currentDevice?.ramp(toVideoZoomFactor: newZoomFactor, withRate: 1.0)
currentDevice?.unlockForConfiguration()
} catch {
print(error)
}
}
}
}
}
extension CameraController: AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard error == nil else {
return
}
// Get the image from the photo buffer
guard let imageData = photo.fileDataRepresentation() else {
return
}
stillImage = UIImage(data: imageData)
performSegue(withIdentifier: "showPhoto", sender: self)
}
}
You need a CAShapeLayer to add the bezier path to.
let circleLayer = CAShapeLayer()
circleLayer.path = circlePath.cgPath
self.view.layer.addSublayer(circleLayer)
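The circles and the date stamp never show up in the question's code because UIBezierPath.stroke() and String.draw(...) only work inside a current graphics context (e.g. in draw(_:) or between UIGraphicsBeginImageContext and UIGraphicsEndImageContext); called from configure(), there is no context, so nothing is drawn. Layers avoid that. A minimal sketch that builds the commented-out circles and date stamp as layers, reusing the question's circleRads values:

let midX = view.bounds.midX
let midY = view.bounds.midY
let w = view.bounds.width
let circleRads: [CGFloat] = [0.07, 0.13, 0.17, 0.22, 0.29, 0.36, 0.40, 0.48, 0.60, 0.75]
for pct in circleRads {
    let path = UIBezierPath(arcCenter: CGPoint(x: midX, y: midY),
                            radius: w * pct,
                            startAngle: 0, endAngle: .pi * 2, clockwise: true)
    let ring = CAShapeLayer()
    ring.path = path.cgPath
    ring.fillColor = UIColor.clear.cgColor
    ring.strokeColor = UIColor.red.cgColor
    ring.lineWidth = 2.5
    view.layer.addSublayer(ring)
}
// the date stamp as a CATextLayer instead of String.draw(...)
let formatter = DateFormatter()
formatter.dateFormat = "yyyy-MM-dd HH:mm"
let textLayer = CATextLayer()
textLayer.string = formatter.string(from: Date())
textLayer.font = "HelveticaNeue-Thin" as CFString // font by name; size comes from fontSize
textLayer.fontSize = 26
textLayer.alignmentMode = .center // kCAAlignmentCenter before Swift 4.2
textLayer.foregroundColor = UIColor.white.cgColor
textLayer.frame = CGRect(x: 0, y: 40, width: view.bounds.width, height: 32)
textLayer.contentsScale = UIScreen.main.scale
view.layer.addSublayer(textLayer)

To save the result, snapshot the whole view, or composite the overlay onto the captured photo as in the first answer on this page.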

Set up camera on the background of UIView

I'm trying to set the camera as the background of a UIView in a UIViewController, in order to be able to draw on it.
How to do that?
UPDATED TO SWIFT 5
You could try something like this:
I add two UIViews to my UIViewController's main view: one called previewView (for the camera) and another UIView called boxView (which sits above the camera view).
class ViewController: UIViewController {
var previewView : UIView!
var boxView:UIView!
//Camera Capture requiered properties
var videoDataOutput: AVCaptureVideoDataOutput!
var videoDataOutputQueue: DispatchQueue!
var previewLayer:AVCaptureVideoPreviewLayer!
var captureDevice : AVCaptureDevice!
let session = AVCaptureSession()
var currentFrame: CIImage!
var done = false
override func viewDidLoad() {
super.viewDidLoad()
previewView = UIView(frame: CGRect(x: 0, y: 0, width: UIScreen.main.bounds.size.width, height: UIScreen.main.bounds.size.height))
previewView.contentMode = .scaleAspectFit
view.addSubview(previewView)
//Add a box view
boxView = UIView(frame: CGRect(x: 0, y: 0, width: 100, height: 200))
boxView.backgroundColor = UIColor.green
boxView.alpha = 0.3
view.addSubview(boxView)
self.setupAVCapture()
}
override func viewWillAppear(_ animated: Bool) {
if !done {
session.startRunning()
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
}
override var shouldAutorotate: Bool {
if (UIDevice.current.orientation == UIDeviceOrientation.landscapeLeft ||
UIDevice.current.orientation == UIDeviceOrientation.landscapeRight ||
UIDevice.current.orientation == UIDeviceOrientation.unknown) {
return false
}
else {
return true
}
}
}
// AVCaptureVideoDataOutputSampleBufferDelegate protocol and related methods
extension ViewController: AVCaptureVideoDataOutputSampleBufferDelegate{
func setupAVCapture(){
session.sessionPreset = AVCaptureSession.Preset.vga640x480
guard let device = AVCaptureDevice
.default(AVCaptureDevice.DeviceType.builtInWideAngleCamera,
for: .video,
position: AVCaptureDevice.Position.front) else{
return
}
captureDevice = device
beginSession()
done = true
}
func beginSession(){
var deviceInput: AVCaptureDeviceInput!
do {
deviceInput = try AVCaptureDeviceInput(device: captureDevice)
guard deviceInput != nil else {
print("error: cant get deviceInput")
return
}
if self.session.canAddInput(deviceInput){
self.session.addInput(deviceInput)
}
videoDataOutput = AVCaptureVideoDataOutput()
videoDataOutput.alwaysDiscardsLateVideoFrames=true
videoDataOutputQueue = DispatchQueue(label: "VideoDataOutputQueue")
videoDataOutput.setSampleBufferDelegate(self, queue:self.videoDataOutputQueue)
if session.canAddOutput(self.videoDataOutput){
session.addOutput(self.videoDataOutput)
}
videoDataOutput.connection(with: AVMediaType.video)?.isEnabled = true
self.previewLayer = AVCaptureVideoPreviewLayer(session: self.session)
self.previewLayer.videoGravity = AVLayerVideoGravity.resizeAspect
let rootLayer: CALayer = self.previewView.layer
rootLayer.masksToBounds = true
self.previewLayer.frame = rootLayer.bounds
rootLayer.addSublayer(self.previewLayer)
session.startRunning()
} catch let error as NSError {
deviceInput = nil
print("error: \(error.localizedDescription)")
}
}
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
currentFrame = self.convertImageFromCMSampleBufferRef(sampleBuffer)
}
// clean up AVCapture
func stopCamera(){
session.stopRunning()
done = false
}
func convertImageFromCMSampleBufferRef(_ sampleBuffer:CMSampleBuffer) -> CIImage{
let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)!
let ciImage:CIImage = CIImage(cvImageBuffer: pixelBuffer)
return ciImage
}
}
You can replace the boxView's frame with the main view's frame and not set its background property. This way you can use this view to add more subviews.
IMPORTANT
Remember that since iOS 10 you need to ask the user for permission before accessing the camera. You do this by adding a usage key to your app's Info.plist together with a purpose string; if you fail to declare the usage, your app will crash when it first tries to access the camera.
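For example, in Info.plist (the key is NSCameraUsageDescription; the purpose string below is just illustrative):

<key>NSCameraUsageDescription</key>
<string>This app uses the camera to show a live preview you can draw on.</string>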
Here's a screenshot to show the Camera access request
I hope this can help!
Another way: an SCNView is useful for augmented reality applications.
Create a preview layer with AVFoundation or UIView, then add the preview layer to the view's sublayers.
Create and customize a scene view. Then add the scene view to the view's subviews.
Create and customize a scene. Finally, set it as the scene view's scene.
// 1. Create a preview layer with AVFoundation or UIView, then add the preview layer to the view's sublayers.
self.previewLayer!.frame = view.layer.bounds
view.clipsToBounds = true
view.layer.addSublayer(self.previewLayer!)
// 2. Create and customize a scene view. Then add the scene view to the view's subviews.
let sceneView = SCNView()
sceneView.frame = view.bounds
sceneView.backgroundColor = UIColor.clear
self.previewLayer!.frame = view.bounds
view.addSubview(sceneView)
// 3. Create and customize a scene. Finally, set it as the scene view's scene.
let scene = SCNScene()
sceneView.autoenablesDefaultLighting = true
sceneView.allowsCameraControl = true
let boxGeometry = SCNBox(width: 800 , height: 400, length: 1.0, chamferRadius: 1.0)
let yellow = UIColor.yellow
let semi = yellow.withAlphaComponent(0.3)
boxGeometry.firstMaterial?.diffuse.contents = semi
let boxNode = SCNNode(geometry: boxGeometry)
scene.rootNode.addChildNode(boxNode)
sceneView.scene = scene
One easy way of doing this is to add an overlay view on a UIImagePickerController and hide the default controls (see the sketch below).
The other way is to use the AV framework, which gives you many more options and freedom.
Choice depends on your needs.
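A minimal sketch of the UIImagePickerController overlay approach (assuming a device with a camera):

let picker = UIImagePickerController()
picker.sourceType = .camera
picker.showsCameraControls = false          // hide the default capture UI
let overlay = UIView(frame: picker.view.bounds)
overlay.backgroundColor = .clear
// add custom drawing / buttons to `overlay` here
picker.cameraOverlayView = overlay
present(picker, animated: true)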
