How to rotate masked image? - ios

Right now my code displays an image like this.
It would work if I could just rotate the image 90 degrees clockwise. How would I do this? The code is listed below; I took out some of the unnecessary lines.
import UIKit
class ViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {
var screenView: UIImageView!
var image1 = UIImage(named: "w")
func cropImageIntoQuarterSquare(image: UIImage) -> UIImage? {
//shape of mask
let maskingImage = UIImage(named: "mask5")
//where the masking code starts
photoDispaly.image = maskImage(image: image, mask: maskingImage!)
photoDispaly.autoresizingMask = [.flexibleWidth, .flexibleHeight, .flexibleBottomMargin, .flexibleRightMargin, .flexibleLeftMargin, .flexibleTopMargin]
photoDispaly.contentMode = .scaleAspectFit // OR .scaleAspectFill
photoDispaly.clipsToBounds = true
let originalImageSize: CGSize = image.size
let smallImageSize = CGSize(width: (originalImageSize.width + 40), height: (originalImageSize.height + 40))
UIGraphicsBeginImageContext(smallImageSize)
image.draw(at: CGPoint.zero)
let imageResult = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return imageResult
}
@IBOutlet var photoDispaly: UIImageView!
func maskImage(image: UIImage, mask: UIImage) -> UIImage {
let imageReference = image.cgImage
let maskReference = mask.cgImage
let imageMask = CGImage(maskWidth: maskReference!.width,
height: maskReference!.height,
bitsPerComponent: maskReference!.bitsPerComponent,
bitsPerPixel: maskReference!.bitsPerPixel,
bytesPerRow: maskReference!.bytesPerRow,
provider: maskReference!.dataProvider!, decode: nil, shouldInterpolate: true)
let maskedReference = imageReference!.masking(imageMask!)
let maskedImage = UIImage(cgImage:maskedReference!)
return maskedImage
}
func image(_ image: UIImage, didFinishSavingWithError error: Error?, contextInfo: UnsafeRawPointer) {
if let error = error {
print("Error saving image: \(error)")
}
}

You could run a transform on your UIImageView in your viewDidLoad function:
photoDispaly.transform = CGAffineTransform(rotationAngle: .pi / 2) // 90 degrees clockwise
Also, you may want to rename Dispaly to Display ;)
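If you need the underlying UIImage rotated (rather than just transforming the view), a minimal sketch, assuming a plain UIKit graphics context, is to redraw the image into a rotated context (the extension name is illustrative, not from the post):

import UIKit

extension UIImage {
    // Rotate 90 degrees clockwise by redrawing into a rotated context.
    func rotated90Clockwise() -> UIImage? {
        // The rotated canvas swaps width and height.
        let newSize = CGSize(width: size.height, height: size.width)
        UIGraphicsBeginImageContextWithOptions(newSize, false, scale)
        guard let context = UIGraphicsGetCurrentContext() else { return nil }
        // Rotate around the center of the new canvas, then draw centered.
        context.translateBy(x: newSize.width / 2, y: newSize.height / 2)
        context.rotate(by: .pi / 2)
        draw(in: CGRect(x: -size.width / 2, y: -size.height / 2,
                        width: size.width, height: size.height))
        let rotated = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return rotated
    }
}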

Related

Text Classification / Extraction from Image: How to get a single text frame and string using Core ML from an image

I need to mark the recognition boxes around each string and then get that string after tapping it.
import UIKit
import Vision
class ViewController: UIViewController, ImageGet {
//MARK: OUTLETS
@IBOutlet weak var selectButton: UIButton!
//MARK: VARIABLES
var objU = UtilityClass()
var image:UIImage?
var str:String?
var uiButton : UIButton?
var arrayString = [String]()
var imageView : UIImageView = UIImageView()
//MARK: DELEGATE FUNCTION
func img(image: UIImage) {
self.image = image
imageView.image = image
setUp()
}
override func viewDidLoad() {
super.viewDidLoad()
imageView.isUserInteractionEnabled = true
// Do any additional setup after loading the view.
}
//MARK: SETUPUI
func setUp() {
let realImg = resizeImage(image: (imageView.image!) , targetSize:CGSize(width: view.frame.width, height: view.frame.height) )
self.image = realImg
self.imageView.image = self.image
imageView.isUserInteractionEnabled = true
self.imageView.frame = CGRect(x: 0, y: 0, width: realImg.size.width, height: realImg.size.height)
view.addSubview(imageView)
guard let cgimg = realImg.cgImage else {return}
let requestHandler = VNImageRequestHandler(cgImage: cgimg)
let req = VNRecognizeTextRequest(completionHandler: recognizeTextHandler)
req.recognitionLevel = .accurate
do {
try requestHandler.perform([req])
} catch {
print("Unable to perform the request: \(error)")
}
}
//MARK: SELECT THE IMAGE
@IBAction func selectButtontapped(_ sender: Any) {
objU.delegate = self
objU.obj = self
objU.ImageGet()
}
func recognizeTextHandler(request : VNRequest , error:Error?) {
guard let observation = request.results as? [VNRecognizedTextObservation], error == nil else {
return
}
_ = observation.compactMap({
$0.topCandidates(1).first?.string
}).joined(separator: "\n")
for subView in imageView.subviews {
subView.removeFromSuperview()
}
let boundingRect :[CGRect] = observation.compactMap{
observation in
guard let candidate = observation.topCandidates(1).first else {return .zero}
//find the bounding box observation
let stringRange = candidate.string.startIndex..<candidate.string.endIndex
let boxObservation = try? candidate.boundingBox(for: stringRange)
let boundingBox = boxObservation?.boundingBox ?? .zero
str = candidate.string
self.arrayString.append(str!)
let rectInImg = VNImageRectForNormalizedRect(boundingBox, Int((imageView.frame.size.width)), Int((imageView.frame.size.height)))
let convertedRect = self.getConvertedRect(boundingBox: observation.boundingBox, inImage:image!.size , containedIn: (imageView.bounds.size))
drawBoundBox(rect: convertedRect)
return rectInImg
}
print(arrayString)
print(boundingRect)
}
func drawBoundBox(rect: CGRect) {
uiButton = UIButton(type: .custom)
uiButton?.frame = rect
uiButton?.layer.borderColor = UIColor.systemPink.cgColor
uiButton?.setTitle("", for: .normal)
uiButton?.layer.borderWidth = 2
uiButton?.tag = arrayString.count
imageView.addSubview(uiButton ?? UIButton())
uiButton?.addTarget(self, action: #selector(pressed(_:)), for: .touchUpInside)
}
@objc func pressed(_ sender : UIButton) {
alert(key: arrayString[sender.tag - 1])
}
//MARK: CONVERT THE NORMALISED BOUNDING RECT
func getConvertedRect(boundingBox: CGRect, inImage imageSize: CGSize, containedIn containerSize: CGSize) -> CGRect {
let rectOfImage: CGRect
let imageAspect = imageSize.width / imageSize.height
let containerAspect = containerSize.width / containerSize.height
if imageAspect > containerAspect { /// image extends left and right
let newImageWidth = containerSize.height * imageAspect /// the width of the overflowing image
let newX = -(newImageWidth - containerSize.width) / 2
rectOfImage = CGRect(x: newX, y: 0, width: newImageWidth, height: containerSize.height)
} else { /// image extends top and bottom
let newImageHeight = containerSize.width * (1 / imageAspect) /// the height of the overflowing image
let newY = -(newImageHeight - containerSize.height) / 2
rectOfImage = CGRect(x: 0, y: newY, width: containerSize.width, height: newImageHeight)
}
let newOriginBoundingBox = CGRect(
x: boundingBox.origin.x,
y: 1 - boundingBox.origin.y - boundingBox.height,
width: boundingBox.width,
height: boundingBox.height
)
var convertedRect = VNImageRectForNormalizedRect(newOriginBoundingBox, Int(rectOfImage.width), Int(rectOfImage.height))
/// add the margins
convertedRect.origin.x += rectOfImage.origin.x
convertedRect.origin.y += rectOfImage.origin.y
return convertedRect
}
//MARK: RESIZE THE IMAGE ACCORD TO DEVICE
func resizeImage(image: UIImage, targetSize: CGSize) -> UIImage {
let size = image.size
let widthRatio = targetSize.width / image.size.width
let heightRatio = targetSize.height / image.size.height
// Figure out what our orientation is, and use that to form the rectangle
var newSize: CGSize
if(widthRatio > heightRatio) {
newSize = CGSize(width: size.width * heightRatio, height: size.height * heightRatio)
} else {
newSize = CGSize(width: size.width * widthRatio, height: size.height * widthRatio)
}
// This is the rect that we've calculated out and this is what is actually used below
let rect = CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height)
// Actually do the resizing to the rect using the ImageContext stuff
UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
image.draw(in: rect)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage!
}
//MARK: POPPING ALERT WITH STRING
func alert(key:String){
let alertController = UIAlertController(title: "String", message: key, preferredStyle: .alert)
let OKAction = UIAlertAction(title: "OK", style: .default) {
(action: UIAlertAction!) in
// Code in this block will trigger when OK button tapped.
}
let copyAction = UIAlertAction(title: "Copy", style: .default) {
(action: UIAlertAction!) in
UIPasteboard.general.string = key
}
alertController.addAction(copyAction)
alertController.addAction(OKAction)
self.present(alertController, animated: true, completion: nil)
}
}
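One detail worth noting in the code above: Vision's boundingBox values are normalized to [0, 1] with a bottom-left origin, which is why getConvertedRect flips the y coordinate before scaling. A minimal sketch of just that flip, as an illustrative helper (not part of the post), assuming the image exactly fills the given size:

import UIKit
import Vision

// Convert a Vision normalized rect (bottom-left origin) to a UIKit
// rect (top-left origin) inside a container of the given size.
func uikitRect(for normalized: CGRect, in size: CGSize) -> CGRect {
    let flipped = CGRect(x: normalized.origin.x,
                         y: 1 - normalized.origin.y - normalized.height,
                         width: normalized.width,
                         height: normalized.height)
    return VNImageRectForNormalizedRect(flipped, Int(size.width), Int(size.height))
}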

Crop CGRect from UIImage taken from camera

I have a view controller which takes a photo with a circular view in the center.
After taking a photo, I need to crop the CGRect with which I created the circular view. I need to crop the rectangle, not the circle.
I tried https://stackoverflow.com/a/57258806/12411655 and many other solutions, but none of them crop the CGRect that I need.
How do I convert the CGRect in the view's coordinates to UIImage's coordinates?
class CircularCameraViewController: UIViewController {
var captureSession: AVCaptureSession!
var capturePhotoOutput: AVCapturePhotoOutput!
var cropRect: CGRect!
public lazy var shutterButton: ShutterButton = {
let button = ShutterButton()
button.translatesAutoresizingMaskIntoConstraints = false
button.addTarget(self, action: #selector(capturePhoto), for: .touchUpInside)
return button
}()
private lazy var cancelButton: UIButton = {
let button = UIButton()
button.setTitle("Cancel", for: .normal)
button.translatesAutoresizingMaskIntoConstraints = false
button.addTarget(self, action: #selector(dismissCamera), for: .touchUpInside)
return button
}()
private lazy var flashButton: UIButton = {
let image = UIImage(named: "flash", in: Bundle(for: ScannerViewController.self), compatibleWith: nil)?.withRenderingMode(.alwaysTemplate)
let button = UIButton()
button.setImage(image, for: .normal)
button.translatesAutoresizingMaskIntoConstraints = false
button.addTarget(self, action: #selector(toggleFlash), for: .touchUpInside)
button.tintColor = .white
return button
}()
override func viewDidLoad() {
super.viewDidLoad()
setupCamera()
setupPhotoOutput()
setupViews()
setupConstraints()
captureSession.startRunning()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
}
override func viewWillDisappear(_ animated: Bool) {
captureSession.stopRunning()
}
private func setupCamera() {
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
var input: AVCaptureDeviceInput
do {
input = try AVCaptureDeviceInput(device: captureDevice!)
} catch {
fatalError("Error configuring capture device: \(error)");
}
captureSession = AVCaptureSession()
captureSession.addInput(input)
// Setup the preview view.
let videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer.frame = view.layer.bounds
view.layer.addSublayer(videoPreviewLayer)
let camPreviewBounds = view.bounds
cropRect = CGRect(
x: camPreviewBounds.minX + (camPreviewBounds.width - 150) * 0.5,
y: camPreviewBounds.minY + (camPreviewBounds.height - 150) * 0.5,
width: 150,
height: 150
)
let path = UIBezierPath(roundedRect: camPreviewBounds, cornerRadius: 0)
path.append(UIBezierPath(ovalIn: cropRect))
let layer = CAShapeLayer()
layer.path = path.cgPath
layer.fillRule = CAShapeLayerFillRule.evenOdd;
layer.fillColor = UIColor.black.cgColor
layer.opacity = 0.5;
view.layer.addSublayer(layer)
}
private func setupViews() {
view.addSubview(shutterButton)
view.addSubview(flashButton)
view.addSubview(cancelButton)
}
private func setupConstraints() {
var cancelButtonConstraints = [NSLayoutConstraint]()
var shutterButtonConstraints = [NSLayoutConstraint]()
var flashConstraints = [NSLayoutConstraint]()
shutterButtonConstraints = [
shutterButton.centerXAnchor.constraint(equalTo: view.centerXAnchor),
shutterButton.widthAnchor.constraint(equalToConstant: 65.0),
shutterButton.heightAnchor.constraint(equalToConstant: 65.0)
]
flashConstraints = [
flashButton.leftAnchor.constraint(equalTo: view.leftAnchor, constant: 24.0),
flashButton.topAnchor.constraint(equalTo: view.topAnchor, constant: 30)
]
if #available(iOS 11.0, *) {
cancelButtonConstraints = [
cancelButton.leftAnchor.constraint(equalTo: view.safeAreaLayoutGuide.leftAnchor, constant: 24.0),
view.safeAreaLayoutGuide.bottomAnchor.constraint(equalTo: cancelButton.bottomAnchor, constant: (65.0 / 2) - 10.0)
]
let shutterButtonBottomConstraint = view.safeAreaLayoutGuide.bottomAnchor.constraint(equalTo: shutterButton.bottomAnchor, constant: 8.0)
shutterButtonConstraints.append(shutterButtonBottomConstraint)
} else {
cancelButtonConstraints = [
cancelButton.leftAnchor.constraint(equalTo: view.leftAnchor, constant: 24.0),
view.bottomAnchor.constraint(equalTo: cancelButton.bottomAnchor, constant: (65.0 / 2) - 10.0)
]
let shutterButtonBottomConstraint = view.bottomAnchor.constraint(equalTo: shutterButton.bottomAnchor, constant: 8.0)
shutterButtonConstraints.append(shutterButtonBottomConstraint)
}
NSLayoutConstraint.activate(cancelButtonConstraints + shutterButtonConstraints + flashConstraints)
}
private func setupPhotoOutput() {
capturePhotoOutput = AVCapturePhotoOutput()
capturePhotoOutput.isHighResolutionCaptureEnabled = true
captureSession.addOutput(capturePhotoOutput!)
}
@objc func dismissCamera() {
self.dismiss(animated: true, completion: nil)
}
@objc private func toggleFlash() {
if let avDevice = AVCaptureDevice.default(for: AVMediaType.video) {
if (avDevice.hasTorch) {
do {
try avDevice.lockForConfiguration()
} catch {
print("aaaa")
}
if avDevice.isTorchActive {
avDevice.torchMode = AVCaptureDevice.TorchMode.off
} else {
avDevice.torchMode = AVCaptureDevice.TorchMode.on
}
}
// unlock your device
avDevice.unlockForConfiguration()
}
}
}
extension CircularCameraViewController : AVCapturePhotoCaptureDelegate {
@objc private func capturePhoto() {
let photoSettings = AVCapturePhotoSettings()
photoSettings.isAutoStillImageStabilizationEnabled = true
photoSettings.isHighResolutionPhotoEnabled = true
photoSettings.flashMode = .auto
// Set ourselves as the delegate for `capturePhoto`.
capturePhotoOutput?.capturePhoto(with: photoSettings, delegate: self)
}
@available(iOS 11.0, *)
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishProcessingPhoto photo: AVCapturePhoto,
error: Error?) {
guard error == nil else {
fatalError("Failed to capture photo: \(String(describing: error))")
}
guard let imageData = photo.fileDataRepresentation() else {
fatalError("Failed to convert pixel buffer")
}
guard let image = UIImage(data: imageData) else {
fatalError("Failed to convert image data to UIImage")
}
guard let croppedImg = image.cropToRect(rect: cropRect) else {
fatalError("Failed to crop image")
}
UIImageWriteToSavedPhotosAlbum(croppedImg, nil, nil, nil);
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
guard error == nil, let photoSample = photoSampleBuffer else {
fatalError("Failed to capture photo: \(String(describing: error))")
}
guard let imgData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: photoSample, previewPhotoSampleBuffer: previewPhotoSampleBuffer) else {
fatalError("Failed to get image data: \(String(describing: error))")
}
guard let image = UIImage(data: imgData) else {
fatalError("Failed to convert image data to UIImage: \(String(describing: error))")
}
}
}
UIImage extension:
func cropToRect(rect: CGRect!) -> UIImage? {
let scaledRect = CGRect(x: rect.origin.x * self.scale, y: rect.origin.y * self.scale, width: rect.size.width * self.scale, height: rect.size.height * self.scale);
guard let imageRef: CGImage = self.cgImage?.cropping(to:scaledRect)
else {
return nil
}
let croppedImage: UIImage = UIImage(cgImage: imageRef, scale: self.scale, orientation: self.imageOrientation)
return croppedImage
}
When cropping an image, you need to scale the "crop rect" from the view's coordinates to the image's coordinates.
Also, when capturing from the camera, you need to take .imageOrientation into account.
Try changing your UIImage extension to this:
extension UIImage {
func cropToRect(rect: CGRect, viewSize: CGSize) -> UIImage? {
var cr = rect
switch self.imageOrientation {
case .right, .rightMirrored, .left, .leftMirrored:
// rotate the crop rect if needed
cr.origin.x = rect.origin.y
cr.origin.y = rect.origin.x
cr.size.width = rect.size.height
cr.size.height = rect.size.width
default:
break
}
let imageViewScale = max(self.size.width / viewSize.width,
self.size.height / viewSize.height)
// scale the crop rect
let cropZone = CGRect(x:cr.origin.x * imageViewScale,
y:cr.origin.y * imageViewScale,
width:cr.size.width * imageViewScale,
height:cr.size.height * imageViewScale)
// Perform cropping in Core Graphics
guard let cutImageRef: CGImage = self.cgImage?.cropping(to:cropZone)
else {
return nil
}
// Return image to UIImage
let croppedImage: UIImage = UIImage(cgImage: cutImageRef, scale: self.scale, orientation: self.imageOrientation)
return croppedImage
}
}
and change your call in photoOutput() to:
guard let croppedImg = image.cropToRect(rect: cropRect, viewSize: view.frame.size) else {
fatalError("Failed to crop image")
}
Since your code is using the full view, that should work fine. If you change it to use a different-sized view for your videoPreviewLayer, use that view's size instead of view.frame.size.
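For example, if the preview layer lived in a dedicated container view, the call might look like this (previewContainerView is a hypothetical name, for illustration only):

// previewContainerView is assumed to host the videoPreviewLayer
guard let croppedImg = image.cropToRect(rect: cropRect,
                                        viewSize: previewContainerView.bounds.size) else {
    fatalError("Failed to crop image")
}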

Swift | Show image in Custom GMSMarker

I want to show the user's profile picture in my custom marker for Google Maps. Like many others, I have tried this code in the init of my custom marker:
self.location = location
position = location
icon = UIImage(named: "live location")
groundAnchor = CGPoint(x: 0.5, y: 1)
Is there any way to show another image in the circle of the marker icon, e.g. using a xib?
You can use the following two methods:
func drawImageWithProfilePic(pp: UIImage, image: UIImage) -> UIImage {
let imgView = UIImageView(image: image)
let picImgView = UIImageView(image: pp)
picImgView.frame = CGRect(x: 0, y: 0, width: 30, height: 30)
imgView.addSubview(picImgView)
picImgView.center.x = imgView.center.x
picImgView.center.y = imgView.center.y - 7
picImgView.layer.cornerRadius = picImgView.frame.width/2
picImgView.clipsToBounds = true
imgView.setNeedsLayout()
picImgView.setNeedsLayout()
let newImage = imageWithView(view: imgView)
return newImage
}
func imageWithView(view: UIView) -> UIImage {
var image: UIImage?
UIGraphicsBeginImageContextWithOptions(view.bounds.size, false, 0.0)
if let context = UIGraphicsGetCurrentContext() {
view.layer.render(in: context)
image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
}
return image ?? UIImage()
}
Here pp is your profile picture and image is the marker icon.
You can set the profile picture's frame however you like.
Edit: I have tried this:
let marker = GMSMarker(position: coordinate)
marker.icon = drawImageWithProfilePic(pp: imgPP, image: img)
marker.appearAnimation = GMSMarkerAnimation.pop
marker.map = viewGoogleMap
and here is the output:
// put this code where you receive the image
let url = URL(string: "your_imageUrlString")
let data = try? Data(contentsOf: url!) // note: synchronous download; consider a background queue
let marker = GMSMarker()
marker.position = CLLocationCoordinate2D(latitude: Double(lat)!, longitude: Double(long)!)
marker.icon = self.drawImageWithProfilePic(pp: UIImage.init(data:data!)!, image: UIImage.init(named: "red")!)
marker.title = location
marker.snippet = name + " " + mobile
marker.appearAnimation = GMSMarkerAnimation.pop
marker.map = self.mapView
//put this code in your viewController class
func drawImageWithProfilePic(pp: UIImage, image: UIImage) -> UIImage {
let imgView = UIImageView(image: image)
imgView.frame = CGRect(x: 0, y: 0, width: 100, height: 100)
let picImgView = UIImageView(image: pp)
picImgView.frame = CGRect(x: 0, y: 0, width: 40, height: 40)
imgView.addSubview(picImgView)
picImgView.center.x = imgView.center.x
picImgView.center.y = imgView.center.y - 7
picImgView.layer.cornerRadius = picImgView.frame.width/2
picImgView.clipsToBounds = true
imgView.setNeedsLayout()
picImgView.setNeedsLayout()
let newImage = imageWithView(view: imgView)
return newImage
}
func imageWithView(view: UIView) -> UIImage {
var image: UIImage?
UIGraphicsBeginImageContextWithOptions(view.bounds.size, false, 0.0)
if let context = UIGraphicsGetCurrentContext() {
view.layer.render(in: context)
image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
}
return image ?? UIImage()
}
//here "red" is dummy background image
You can create/design a custom view, either in a nib or programmatically, and then assign it to your marker like this:
let someView = YourCustomView()
someView.markerImageView.image = UIImage(named: "markerImage")
someView.userImageView.image = UIImage(named: "userImage")
let marker = GMSMarker()
marker.map = yourMapView
marker.position = somePositionCoordinate;
marker.iconView = someView
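A minimal programmatic sketch of such a custom view, assuming the markerImageView and userImageView names used above (the post does not show YourCustomView itself):

import UIKit

class YourCustomView: UIView {
    let markerImageView = UIImageView()
    let userImageView = UIImageView()

    override init(frame: CGRect) {
        super.init(frame: CGRect(x: 0, y: 0, width: 60, height: 70))
        // Marker artwork fills the whole view.
        markerImageView.frame = bounds
        addSubview(markerImageView)
        // Circular profile picture sits in the marker's "head";
        // the exact frame is an assumption you would tune to your artwork.
        userImageView.frame = CGRect(x: 15, y: 8, width: 30, height: 30)
        userImageView.layer.cornerRadius = 15
        userImageView.clipsToBounds = true
        addSubview(userImageView)
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
}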

Combining Images in CameraView with Overlay. (Swift 3)?

I just about have this solved, thanks to some brilliant help getting me on the right track.
This is the code I have now.
Basically, I can now make an image out of the drawn overlay and out of the camera preview, but I can't yet combine them. I can find very little code that does this simply.
The important parts are the extension block right at the top and the additions to func saveToCamera() near the bottom of the code.
In short, I think I now have the two images I need. The snapshot of myImage appears on a white background; I'm not sure whether that's expected. That's how it appears in the Simulator, so it may just be normal.
Image 1. A screen capture.
Image 2. The saved image of myImage, as per the explanation.
import UIKit
import AVFoundation
import Foundation
// extension must be outside class
extension UIImage {
convenience init(view: UIView) {
UIGraphicsBeginImageContext(view.frame.size)
view.layer.render(in: UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
self.init(cgImage: (image?.cgImage)!)
}
}
class ViewController: UIViewController {
@IBOutlet weak var navigationBar: UINavigationBar!
@IBOutlet weak var imgOverlay: UIImageView!
@IBOutlet weak var btnCapture: UIButton!
@IBOutlet weak var shapeLayer: UIView!
let captureSession = AVCaptureSession()
let stillImageOutput = AVCaptureStillImageOutput()
var previewLayer : AVCaptureVideoPreviewLayer?
//var shapeLayer : CALayer?
// If we find a device we'll store it here for later use
var captureDevice : AVCaptureDevice?
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
//=======================
let midX = self.view.bounds.midX
let midY = self.view.bounds.midY
for index in 1...10 {
let circlePath = UIBezierPath(arcCenter: CGPoint(x: midX,y: midY), radius: CGFloat((index * 10)), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true)
let shapeLayerPath = CAShapeLayer()
shapeLayerPath.path = circlePath.cgPath
//change the fill color
shapeLayerPath.fillColor = UIColor.clear.cgColor
//you can change the stroke color
shapeLayerPath.strokeColor = UIColor.blue.cgColor
//you can change the line width
shapeLayerPath.lineWidth = 0.5
// add the blue-circle layer to the shapeLayer ImageView
shapeLayer.layer.addSublayer(shapeLayerPath)
}
print("Shape layer drawn")
//=====================
captureSession.sessionPreset = AVCaptureSessionPresetHigh
if let devices = AVCaptureDevice.devices() as? [AVCaptureDevice] {
// Loop through all the capture devices on this phone
for device in devices {
// Make sure this particular device supports video
if (device.hasMediaType(AVMediaTypeVideo)) {
// Finally check the position and confirm we've got the back camera
if(device.position == AVCaptureDevicePosition.back) {
captureDevice = device
if captureDevice != nil {
print("Capture device found")
beginSession()
}
}
}
}
}
}
@IBAction func actionCameraCapture(_ sender: AnyObject) {
print("Camera button pressed")
saveToCamera()
}
func beginSession() {
do {
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
stillImageOutput.outputSettings = [AVVideoCodecKey:AVVideoCodecJPEG]
if captureSession.canAddOutput(stillImageOutput) {
captureSession.addOutput(stillImageOutput)
}
}
catch {
print("error: \(error.localizedDescription)")
}
guard let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) else {
print("no preview layer")
return
}
// this is what displays the camera view. But it's on TOP of the drawn view, and under the overlay. ??
self.view.layer.addSublayer(previewLayer)
previewLayer.frame = self.view.layer.frame
captureSession.startRunning()
print("Capture session running")
self.view.addSubview(navigationBar)
//self.view.addSubview(imgOverlay)
self.view.addSubview(btnCapture)
// shapeLayer ImageView is already a subview created in IB
// but this will bring it to the front
self.view.addSubview(shapeLayer)
}
func saveToCamera() {
if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (CMSampleBuffer, Error) in
if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(CMSampleBuffer) {
if let cameraImage = UIImage(data: imageData) {
// cameraImage is the camera preview image.
// I need to combine/merge it with the myImage that is actually the blue circles.
// This converts the UIView of the blue circles to an image. Uses the 'extension' at the top of the code.
let myImage = UIImage(view: self.shapeLayer)
print("converting myImage to an image")
UIImageWriteToSavedPhotosAlbum(cameraImage, nil, nil, nil)
}
}
})
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
Give this a try... instead of combining your overlay view, it draws the circles and combines the output:
import UIKit
import AVFoundation
import Foundation
class CameraWithTargetViewController: UIViewController {
@IBOutlet weak var navigationBar: UINavigationBar!
@IBOutlet weak var imgOverlay: UIImageView!
@IBOutlet weak var btnCapture: UIButton!
@IBOutlet weak var shapeLayer: UIView!
let captureSession = AVCaptureSession()
let stillImageOutput = AVCaptureStillImageOutput()
var previewLayer : AVCaptureVideoPreviewLayer?
//var shapeLayer : CALayer?
// If we find a device we'll store it here for later use
var captureDevice : AVCaptureDevice?
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
//=======================
captureSession.sessionPreset = AVCaptureSessionPresetHigh
if let devices = AVCaptureDevice.devices() as? [AVCaptureDevice] {
// Loop through all the capture devices on this phone
for device in devices {
// Make sure this particular device supports video
if (device.hasMediaType(AVMediaTypeVideo)) {
// Finally check the position and confirm we've got the back camera
if(device.position == AVCaptureDevicePosition.back) {
captureDevice = device
if captureDevice != nil {
print("Capture device found")
beginSession()
}
}
}
}
}
}
@IBAction func actionCameraCapture(_ sender: AnyObject) {
print("Camera button pressed")
saveToCamera()
}
func beginSession() {
do {
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
stillImageOutput.outputSettings = [AVVideoCodecKey:AVVideoCodecJPEG]
if captureSession.canAddOutput(stillImageOutput) {
captureSession.addOutput(stillImageOutput)
}
}
catch {
print("error: \(error.localizedDescription)")
}
guard let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) else {
print("no preview layer")
return
}
// this is what displays the camera view. But it's on TOP of the drawn view, and under the overlay. ??
self.view.layer.addSublayer(previewLayer)
previewLayer.frame = self.view.layer.frame
imgOverlay.frame = self.view.frame
imgOverlay.image = self.drawCirclesOnImage(fromImage: nil, targetSize: imgOverlay.bounds.size)
self.view.bringSubview(toFront: navigationBar)
self.view.bringSubview(toFront: imgOverlay)
self.view.bringSubview(toFront: btnCapture)
// don't use shapeLayer anymore...
// self.view.bringSubview(toFront: shapeLayer)
captureSession.startRunning()
print("Capture session running")
}
func getImageWithColor(color: UIColor, size: CGSize) -> UIImage {
let rect = CGRect(origin: CGPoint(x: 0, y: 0), size: CGSize(width: size.width, height: size.height))
UIGraphicsBeginImageContextWithOptions(size, false, 0)
color.setFill()
UIRectFill(rect)
let image: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return image
}
func drawCirclesOnImage(fromImage: UIImage? = nil, targetSize: CGSize? = CGSize.zero) -> UIImage? {
if fromImage == nil && targetSize == CGSize.zero {
return nil
}
var tmpimg: UIImage?
if targetSize == CGSize.zero {
tmpimg = fromImage
} else {
tmpimg = getImageWithColor(color: UIColor.clear, size: targetSize!)
}
guard let img = tmpimg else {
return nil
}
let imageSize = img.size
let scale: CGFloat = 0
UIGraphicsBeginImageContextWithOptions(imageSize, false, scale)
img.draw(at: CGPoint.zero)
let w = imageSize.width
let midX = imageSize.width / 2
let midY = imageSize.height / 2
// red circles - radius in %
let circleRads = [ 0.07, 0.13, 0.17, 0.22, 0.29, 0.36, 0.40, 0.48, 0.60, 0.75 ]
// center "dot" - radius is 1.5%
var circlePath = UIBezierPath(arcCenter: CGPoint(x: midX,y: midY), radius: CGFloat(w * 0.015), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true)
UIColor.red.setFill()
circlePath.stroke()
circlePath.fill()
// blue circle is between first and second red circles
circlePath = UIBezierPath(arcCenter: CGPoint(x: midX,y: midY), radius: w * CGFloat((circleRads[0] + circleRads[1]) / 2.0), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true)
UIColor.blue.setStroke()
circlePath.lineWidth = 2.5
circlePath.stroke()
UIColor.red.setStroke()
for pct in circleRads {
let rad = w * CGFloat(pct)
circlePath = UIBezierPath(arcCenter: CGPoint(x: midX, y: midY), radius: CGFloat(rad), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true)
circlePath.lineWidth = 2.5
circlePath.stroke()
}
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage
}
func saveToCamera() {
if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (CMSampleBuffer, Error) in
if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(CMSampleBuffer) {
if let cameraImage = UIImage(data: imageData) {
if let nImage = self.drawCirclesOnImage(fromImage: cameraImage, targetSize: CGSize.zero) {
UIImageWriteToSavedPhotosAlbum(nImage, nil, nil, nil)
}
}
}
})
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
Ok, solved it pretty much. The important code is here. The resulting image is slightly askew, but I'll keep working on fixing that, unless someone can see a good fix for it.
func saveToCamera() {
if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (CMSampleBuffer, Error) in
if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(CMSampleBuffer) {
if let cameraImage = UIImage(data: imageData) {
// cameraImage is the camera preview image.
// I need to combine/merge it with the myImage that is actually the blue circles.
// This converts the UIView of the blue circles to an image. Uses the 'extension' at the top of the code.
let myImage = UIImage(view: self.shapeLayer)
print("converting myImage to an image")
let newImage = self.composite(image:cameraImage, overlay:(myImage), scaleOverlay:true)
UIImageWriteToSavedPhotosAlbum(newImage!, nil, nil, nil)
}
}
})
}
}
func composite(image: UIImage, overlay: UIImage, scaleOverlay: Bool = false) -> UIImage? {
UIGraphicsBeginImageContext(image.size)
var rect = CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height)
image.draw(in: rect)
if scaleOverlay == false {
rect = CGRect(x: 0, y: 0, width: overlay.size.width, height: overlay.size.height)
}
overlay.draw(in: rect)
// end the context before returning, so the graphics stack is balanced
let result = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return result
}
The resulting saved image.
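The "slightly askew" result is most likely the overlay being stretched to the photo's aspect ratio, which rarely matches the screen's. A sketch of an aspect-preserving variant (illustrative, not from the post):

import UIKit

// Composite the overlay over the photo, scaling it uniformly
// (aspect-fill) instead of stretching it to the photo's size.
func compositeAspectFill(image: UIImage, overlay: UIImage) -> UIImage? {
    UIGraphicsBeginImageContextWithOptions(image.size, false, image.scale)
    defer { UIGraphicsEndImageContext() }
    image.draw(in: CGRect(origin: .zero, size: image.size))
    let scale = max(image.size.width / overlay.size.width,
                    image.size.height / overlay.size.height)
    let overlaySize = CGSize(width: overlay.size.width * scale,
                             height: overlay.size.height * scale)
    let origin = CGPoint(x: (image.size.width - overlaySize.width) / 2,
                         y: (image.size.height - overlaySize.height) / 2)
    overlay.draw(in: CGRect(origin: origin, size: overlaySize))
    return UIGraphicsGetImageFromCurrentImageContext()
}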

PIP Camera Effect iOS Swift

How can I achieve a PIP (picture-in-picture) effect in an iOS Swift app? I tried, but the results are not accurate in terms of the placement and movement of the internal picture.
Here is my attempt:
@IBOutlet var galleryImageView: UIImageView!
@IBOutlet var background: UIImageView!
@IBOutlet var behindView: UIView!
override func viewDidLoad() {
super.viewDidLoad()
let image = UIImage(named: "ph3")
let maskingImage = UIImage(named: "mask")
galleryImageView.image = maskImage(image: image!, mask: maskingImage!)
galleryImageView.contentMode = .scaleAspectFill
background.image = UIImage(named: "ph3")?.stackBlur(10)
let maskingLayer = CALayer()
maskingLayer.frame = behindView.bounds
maskingLayer.contents = (galleryImageView.image?.cgImage! as Any)
behindView.layer.mask = maskingLayer
let panGestureRecognizer = UIPanGestureRecognizer(target: self, action: #selector(self.panGestureDetected))
panGestureRecognizer.delegate = self
self.galleryImageView.addGestureRecognizer(panGestureRecognizer)
}
func maskImage(image: UIImage, mask: UIImage) -> UIImage {
let imageReference = image.cgImage
let maskReference = mask.cgImage
let imageMask = CGImage(maskWidth: maskReference!.width,
height: maskReference!.height,
bitsPerComponent: maskReference!.bitsPerComponent,
bitsPerPixel: maskReference!.bitsPerPixel,
bytesPerRow: maskReference!.bytesPerRow,
provider: maskReference!.dataProvider!, decode: nil, shouldInterpolate: true)
let maskedReference = imageReference!.masking(imageMask!)
let maskedImage = UIImage(cgImage:maskedReference!)
return maskedImage
}
galleryImageView is the internal imageView
The result should look like this:
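The code above wires a UIPanGestureRecognizer to a panGestureDetected selector that isn't shown. A minimal sketch of what that handler might look like, assuming it simply drags galleryImageView with the finger:

// Illustrative pan handler (the post does not include its implementation).
@objc func panGestureDetected(_ gesture: UIPanGestureRecognizer) {
    guard let movedView = gesture.view else { return }
    let translation = gesture.translation(in: view)
    movedView.center = CGPoint(x: movedView.center.x + translation.x,
                               y: movedView.center.y + translation.y)
    // Reset so each callback delivers an incremental translation.
    gesture.setTranslation(.zero, in: view)
}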
