How to crop an area from the camera - iOS

I have drawn a rectangle within the native camera view, and I'm trying to use it as a guide or crop area so that only the business card is captured. However, I'm unable to crop the captured image to the rectangle drawn over the native camera view.
extension UIScreen {
    func fullScreenSquare() -> CGRect {
        var hw: CGFloat = 0
        var isLandscape = false
        if UIScreen.main.bounds.size.width < UIScreen.main.bounds.size.height {
            hw = UIScreen.main.bounds.size.width
        } else {
            isLandscape = true
            hw = UIScreen.main.bounds.size.height
        }
        var x: CGFloat = 0
        var y: CGFloat = 0
        if isLandscape {
            x = (UIScreen.main.bounds.size.width / 2) - (hw / 2)
        } else {
            y = (UIScreen.main.bounds.size.height / 2) - (hw / 2)
        }
        // Note: despite the name, this returns a 3:2 rect (height hw / 3 * 2), not a square.
        return CGRect(x: x, y: y, width: hw, height: hw / 3 * 2)
    }
    func isLandscape() -> Bool {
        return UIScreen.main.bounds.size.width > UIScreen.main.bounds.size.height
    }
}
func guideForCameraOverlay() -> UIView {
    let guide = UIView(frame: UIScreen.main.fullScreenSquare())
    guide.backgroundColor = UIColor.clear
    guide.layer.borderWidth = 4
    guide.layer.borderColor = UIColor.orange.cgColor
    guide.isUserInteractionEnabled = false
    return guide
}
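For context, a guide like this is presumably attached to the picker as its camera overlay, along these lines (a sketch; the picker setup isn't shown in the original):
let picker = UIImagePickerController()
picker.sourceType = .camera
picker.cameraOverlayView = guideForCameraOverlay()
present(picker, animated: true)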
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
    if setPhoto == 1 {
        if let image = info[UIImagePickerController.InfoKey.editedImage] as? UIImage {
            let size = CGSize(width: 600, height: 400)
            //let imageCroped = image.cgImage?.cropping(to: size) // cropping(to:) takes a CGRect, not a CGSize
            let imageCroped = image.crop(to: size) // crop(to:) is a custom extension, not a UIKit API
            frontPhotoImageView.image = UIImage(cgImage: imageCroped as! CGImage) // force-cast crashes unless crop(to:) returns a CGImage
            setPhoto = 0
            frontPhotoImage.setTitle("", for: UIControl.State.normal)
        } else {
            // Error message
        }
        self.dismiss(animated: true, completion: nil)
    }
    if setPhoto == 2 {
        if let image = info[UIImagePickerController.InfoKey.editedImage] as? UIImage {
            backPhotoImageView.image = image
            setPhoto = 0
            backPhotoImage.setTitle("", for: UIControl.State.normal)
        } else {
            // Error message
        }
        self.dismiss(animated: true, completion: nil)
    }
}
I expect to get the image from within the drawn rectangle, but that does not happen.
I expect to crop the image to the area inside the orange guide rectangle.
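For what it's worth, cropping to the guide usually means converting the overlay's rect from preview points into the captured image's pixel coordinates before cropping the CGImage; a bare CGSize isn't enough, because CGImage's cropping(to:) needs a rectangle with an origin. A minimal sketch of that mapping (a hypothetical helper, not a UIKit API; it assumes the camera preview fills the screen and ignores image orientation, both of which need care in production):
extension UIImage {
    /// Hypothetical helper: crop this image to `guideRect`, where `guideRect`
    /// is expressed in the coordinate space of `previewSize` (e.g. the screen).
    func cropped(toGuide guideRect: CGRect, previewSize: CGSize) -> UIImage? {
        guard let cgImage = cgImage else { return nil }
        // Scale factors from preview points to image pixels.
        let scaleX = CGFloat(cgImage.width) / previewSize.width
        let scaleY = CGFloat(cgImage.height) / previewSize.height
        let pixelRect = CGRect(x: guideRect.origin.x * scaleX,
                               y: guideRect.origin.y * scaleY,
                               width: guideRect.width * scaleX,
                               height: guideRect.height * scaleY)
        guard let croppedCG = cgImage.cropping(to: pixelRect) else { return nil }
        return UIImage(cgImage: croppedCG, scale: scale, orientation: imageOrientation)
    }
}
With a helper like this, the picker callback could pass UIScreen.main.fullScreenSquare() and UIScreen.main.bounds.size instead of the bare 600x400 CGSize.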

Related

Text classification/extraction from an image: how to get a single text frame and string using Core ML from an image

I need to mark rectangular boxes around each recognized string and then get that string after tapping its box.
import UIKit
import Vision
class ViewController: UIViewController, ImageGet {
//MARK: OUTLETS
@IBOutlet weak var selectButton: UIButton!
//MARK: VARIABLES
var objU = UtilityClass()
var image:UIImage?
var str:String?
var uiButton : UIButton?
var arrayString = [String]()
var imageView : UIImageView = UIImageView()
//MARK: DELEGATE FUNCTION
func img(image: UIImage) {
self.image = image
imageView.image = image
setUp()
}
override func viewDidLoad() {
super.viewDidLoad()
imageView.isUserInteractionEnabled = true
// Do any additional setup after loading the view.
}
//MARK: SETUPUI
func setUp() {
let realImg = resizeImage(image: (imageView.image!) , targetSize:CGSize(width: view.frame.width, height: view.frame.height) )
self.image = realImg
self.imageView.image = self.image
imageView.isUserInteractionEnabled = true
self.imageView.frame = CGRect(x: 0, y: 0, width: realImg.size.width, height: realImg.size.height)
view.addSubview(imageView)
guard let cgimg = realImg.cgImage else {return}
let requestHandler = VNImageRequestHandler(cgImage: cgimg)
let req = VNRecognizeTextRequest(completionHandler: recognizeTextHandler)
req.recognitionLevel = .accurate
do {
try requestHandler.perform([req])
} catch {
print("Unable to perform the request: \(error)")
}
}
//MARK: SELECT THE IMAGE
@IBAction func selectButtontapped(_ sender: Any) {
objU.delegate = self
objU.obj = self
objU.ImageGet()
}
func recognizeTextHandler(request : VNRequest , error:Error?) {
guard let observation = request.results as? [VNRecognizedTextObservation], error == nil else {
return
}
_ = observation.compactMap({
$0.topCandidates(1).first?.string
}).joined(separator: "\n")
for subView in imageView.subviews {
subView.removeFromSuperview()
}
let boundingRect :[CGRect] = observation.compactMap{
observation in
guard let candidate = observation.topCandidates(1).first else {return .zero}
//find the bounding box observation
let stringRange = candidate.string.startIndex..<candidate.string.endIndex
let boxObservation = try? candidate.boundingBox(for: stringRange)
let boundingBox = boxObservation?.boundingBox ?? .zero
str = candidate.string
self.arrayString.append(str!)
let rectInImg = VNImageRectForNormalizedRect(boundingBox, Int((imageView.frame.size.width)), Int((imageView.frame.size.height)))
let convertedRect = self.getConvertedRect(boundingBox: observation.boundingBox, inImage:image!.size , containedIn: (imageView.bounds.size))
drawBoundBox(rect: convertedRect)
return rectInImg
}
print(arrayString)
print(boundingRect)
}
func drawBoundBox(rect: CGRect) {
uiButton = UIButton(type: .custom)
uiButton?.frame = rect
uiButton?.layer.borderColor = UIColor.systemPink.cgColor
uiButton?.setTitle("", for: .normal)
uiButton?.layer.borderWidth = 2
uiButton?.tag = arrayString.count
imageView.addSubview(uiButton ?? UIButton())
uiButton?.addTarget(self, action: #selector(pressed(_:)), for: .touchUpInside)
}
@objc func pressed(_ sender: UIButton) {
alert(key: arrayString[sender.tag - 1])
}
//MARK: CONVERT THE NORMALISED BOUNDING RECT
func getConvertedRect(boundingBox: CGRect, inImage imageSize: CGSize, containedIn containerSize: CGSize) -> CGRect {
let rectOfImage: CGRect
let imageAspect = imageSize.width / imageSize.height
let containerAspect = containerSize.width / containerSize.height
if imageAspect > containerAspect { /// image extends left and right
let newImageWidth = containerSize.height * imageAspect /// the width of the overflowing image
let newX = -(newImageWidth - containerSize.width) / 2
rectOfImage = CGRect(x: newX, y: 0, width: newImageWidth, height: containerSize.height)
} else { /// image extends top and bottom
let newImageHeight = containerSize.width * (1 / imageAspect) /// the height of the overflowing image
let newY = -(newImageHeight - containerSize.height) / 2
rectOfImage = CGRect(x: 0, y: newY, width: containerSize.width, height: newImageHeight)
}
let newOriginBoundingBox = CGRect(
x: boundingBox.origin.x,
y: 1 - boundingBox.origin.y - boundingBox.height,
width: boundingBox.width,
height: boundingBox.height
)
var convertedRect = VNImageRectForNormalizedRect(newOriginBoundingBox, Int(rectOfImage.width), Int(rectOfImage.height))
/// add the margins
convertedRect.origin.x += rectOfImage.origin.x
convertedRect.origin.y += rectOfImage.origin.y
return convertedRect
}
//MARK: RESIZE THE IMAGE ACCORD TO DEVICE
func resizeImage(image: UIImage, targetSize: CGSize) -> UIImage {
let size = image.size
let widthRatio = targetSize.width / image.size.width
let heightRatio = targetSize.height / image.size.height
// Figure out what our orientation is, and use that to form the rectangle
var newSize: CGSize
if(widthRatio > heightRatio) {
newSize = CGSize(width: size.width * heightRatio, height: size.height * heightRatio)
} else {
newSize = CGSize(width: size.width * widthRatio, height: size.height * widthRatio)
}
// This is the rect that we've calculated out and this is what is actually used below
let rect = CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height)
// Actually do the resizing to the rect using the ImageContext stuff
UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
image.draw(in: rect)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage!
}
//MARK: POPPING ALERT WITH STRING
func alert(key:String){
let alertController = UIAlertController(title: "String", message: key, preferredStyle: .alert)
let OKAction = UIAlertAction(title: "OK", style: .default) {
(action: UIAlertAction!) in
// Code in this block will trigger when OK button tapped.
}
let copyAction = UIAlertAction(title: "Copy", style: .default) {
(action: UIAlertAction!) in
UIPasteboard.general.string = key
}
alertController.addAction(copyAction)
alertController.addAction(OKAction)
self.present(alertController, animated: true, completion: nil)
}
}

Swipeable CIFilter over video

I am currently trying to implement something similar to Instagram's story feature, where you take a picture or a video and, by swiping left or right, change the current filter over the content (here is an example of what I managed to do in my app for images: https://imgur.com/a/pYKrPkA).
As you can see in the example, I got it working for images, but now my problem is that I am trying to make it work for videos as well, and I am a bit lost as to where to start.
final class Filter: NSObject {
var isEnabled: Bool = true
var overlayImage: CIImage?
var ciFilter: CIFilter?
init(ciFilter: CIFilter?) {
self.ciFilter = ciFilter
super.init()
}
/// Empty filter for the original photo
static func emptyFilter() -> Filter {
return Filter(ciFilter: nil)
}
func imageByProcessingImage(_ image: CIImage, at time: CFTimeInterval) -> CIImage? {
guard isEnabled else { return image }
var image = image
if let overlayImage = overlayImage {
image = overlayImage.composited(over: image)
}
guard let ciFilter = ciFilter else {
return image
}
ciFilter.setValue(image, forKey: kCIInputImageKey)
return ciFilter.value(forKey: kCIOutputImageKey) as? CIImage
}
}
class StoriesImageView: UIView {
private var metalView: MTKView?
private var ciImage: CIImage?
private var preferredCIImageTransform: CGAffineTransform?
private let device = MTLCreateSystemDefaultDevice()
private var commandQueue: MTLCommandQueue?
private var context: CIContext?
override func layoutSubviews() {
super.layoutSubviews()
metalView?.frame = bounds
}
override func setNeedsDisplay() {
super.setNeedsDisplay()
metalView?.setNeedsDisplay()
}
func setImage(with image: UIImage) {
preferredCIImageTransform = preferredCIImageTransform(from: image)
if let cgImage = image.cgImage {
ciImage = CIImage(cgImage: cgImage)
loadContextIfNeeded()
}
setNeedsDisplay()
}
/// Return the image fitted to 1080x1920.
func renderedUIImage() -> UIImage? {
return renderedUIImage(in: CGRect(origin: .zero, size: CGSize(width: 1080, height: 1920)))
}
/// Returns a CIImage fitted to the main screen bounds.
func renderedCIIImage() -> CIImage? {
return renderedCIImage(in: CGRect(rect: bounds, contentScale: UIScreen.main.scale))
}
func renderedUIImage(in rect: CGRect) -> UIImage? {
if let image = renderedCIImage(in: rect), let context = context {
if let imageRef = context.createCGImage(image, from: image.extent) {
return UIImage(cgImage: imageRef)
}
}
return nil
}
func renderedCIImage(in rect: CGRect) -> CIImage? {
if var image = ciImage, let transform = preferredCIImageTransform {
image = image.transformed(by: transform)
return scaleAndResize(image, for: rect)
}
return nil
}
private func cleanupContext() {
metalView?.removeFromSuperview()
metalView?.releaseDrawables()
metalView = nil
}
private func loadContextIfNeeded() {
setContext()
}
private func setContext() {
let mView = MTKView(frame: bounds, device: device)
mView.clearColor = MTLClearColor(red: 0, green: 0, blue: 0, alpha: 0)
mView.framebufferOnly = false
mView.enableSetNeedsDisplay = true
mView.contentScaleFactor = contentScaleFactor
mView.delegate = self
metalView = mView
commandQueue = device?.makeCommandQueue()
context = CIContext(mtlDevice: device!)
insertSubview(metalView!, at: 0)
}
private func scaleAndResize(_ image: CIImage, for rect: CGRect) -> CIImage {
let imageSize = image.extent.size
let horizontalScale = rect.size.width / imageSize.width
let verticalScale = rect.size.height / imageSize.height
let scale = min(horizontalScale, verticalScale)
return image.transformed(by: CGAffineTransform(scaleX: scale, y: scale))
}
private func preferredCIImageTransform(from image: UIImage) -> CGAffineTransform {
if image.imageOrientation == .up {
return .identity
}
var transform: CGAffineTransform = .identity
switch image.imageOrientation {
case .down, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: image.size.height)
transform = transform.rotated(by: .pi)
case .left, .leftMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.rotated(by: .pi / 2)
case .right, .rightMirrored:
transform = transform.translatedBy(x: 0, y: image.size.height)
transform = transform.rotated(by: .pi / -2)
case .up, .upMirrored: break
@unknown default: fatalError("Unknown image orientation")
}
switch image.imageOrientation {
case .upMirrored, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .leftMirrored, .rightMirrored:
transform = transform.translatedBy(x: image.size.height, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .up, .down, .left, .right: break
@unknown default: fatalError("Unknown image orientation")
}
return transform
}
}
extension StoriesImageView: MTKViewDelegate {
func draw(in view: MTKView) {
autoreleasepool {
let rect = CGRect(rect: view.bounds, contentScale: UIScreen.main.scale)
if let image = renderedCIImage(in: rect) {
let commandBuffer = commandQueue?.makeCommandBuffer()
guard let drawable = view.currentDrawable else {
return
}
let heightDifference = (view.drawableSize.height - image.extent.size.height) / 2
let destination = CIRenderDestination(width: Int(view.drawableSize.width),
height: Int(view.drawableSize.height - heightDifference),
pixelFormat: view.colorPixelFormat,
commandBuffer: commandBuffer,
mtlTextureProvider: { () -> MTLTexture in
return drawable.texture
})
_ = try? context?.startTask(toRender: image, to: destination)
commandBuffer?.present(drawable)
commandBuffer?.commit()
}
}
}
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {}
}
final class StoriesSwipeableImageView: StoriesImageView {
private let scrollView: UIScrollView = UIScrollView()
private let preprocessingFilter: Filter? = nil
var isRefreshingAutomaticallyWhenScrolling: Bool = true
var filters: [Filter]? {
didSet {
updateScrollViewContentSize()
updateCurrentSelected(notify: true)
}
}
var selectedFilter: Filter? {
didSet {
if selectedFilter != oldValue {
setNeedsLayout()
}
}
}
override init(frame: CGRect) {
super.init(frame: frame)
setup()
}
required init?(coder: NSCoder) {
super.init(coder: coder)
setup()
}
override func layoutSubviews() {
super.layoutSubviews()
scrollView.frame = bounds
updateScrollViewContentSize()
}
private func setup() {
scrollView.delegate = self
scrollView.isPagingEnabled = true
scrollView.showsHorizontalScrollIndicator = false
scrollView.showsVerticalScrollIndicator = false
scrollView.bounces = true
scrollView.alwaysBounceVertical = true
scrollView.alwaysBounceHorizontal = true
scrollView.backgroundColor = .clear
addSubview(scrollView)
}
private func updateScrollViewContentSize() {
let filterCount = filters?.count ?? 0
scrollView.contentSize = CGSize(width: filterCount * Int(frame.size.width) * 3,
height: Int(frame.size.height))
if let selectedFilter = selectedFilter {
scroll(to: selectedFilter, animated: false)
}
}
private func scroll(to filter: Filter, animated: Bool) {
if let index = filters?.firstIndex(where: { $0 === filter }) {
let contentOffset = CGPoint(x: scrollView.contentSize.width / 3 + scrollView.frame.size.width * CGFloat(index), y: 0)
scrollView.setContentOffset(contentOffset, animated: animated)
updateCurrentSelected(notify: false)
} else {
fatalError("Filter is not available in filters collection")
}
}
private func updateCurrentSelected(notify: Bool) {
guard frame.size.width != 0 else { return }
let filterCount = filters?.count ?? 0
let selectedIndex = Int(scrollView.contentOffset.x + scrollView.frame.size.width / 2) / Int(scrollView.frame.size.width) % filterCount
var newFilterGroup: Filter?
if selectedIndex >= 0 && selectedIndex < filterCount {
newFilterGroup = filters?[selectedIndex]
} else {
fatalError("Invalid contentOffset")
}
if selectedFilter != newFilterGroup {
selectedFilter = newFilterGroup
if notify {
// Notify delegate?
}
}
}
override func renderedCIImage(in rect: CGRect) -> CIImage? {
guard var image = super.renderedCIImage(in: rect) else {
print("Failed to render image")
return nil
}
let timeinterval: CFTimeInterval = 0
if let preprocessingFilter = self.preprocessingFilter {
image = preprocessingFilter.imageByProcessingImage(image, at: timeinterval)!
}
let extent = image.extent
let contentSize = scrollView.bounds.size
if contentSize.width == 0 {
return image
}
let filtersCount = filters?.count ?? 0
if filtersCount == 0 {
return image
}
let ratio = scrollView.contentOffset.x / contentSize.width
var index = Int(ratio)
let upIndex = Int(ceil(ratio))
let remaningRatio = ratio - CGFloat(index)
var xImage = extent.size.width * -remaningRatio
var outputImage: CIImage? = CIImage(color: CIColor(red: 0, green: 0, blue: 0))
while index <= upIndex {
let currentIndex = index % filtersCount
let filter = filters?[currentIndex]
var filteredImage = filter?.imageByProcessingImage(image, at: timeinterval)
filteredImage = filteredImage?.cropped(to:
CGRect(x: extent.origin.x + xImage,
y: extent.origin.y,
width: extent.size.width,
height: extent.size.height)
)
guard let output = outputImage else { return nil }
outputImage = filteredImage?.composited(over: output)
xImage += extent.size.width
index += 1
}
outputImage = outputImage?.cropped(to: extent)
return outputImage
}
}
extension StoriesSwipeableImageView: UIScrollViewDelegate {
func scrollViewDidScroll(_ scrollView: UIScrollView) {
let width = scrollView.frame.size.width
let contentOffsetX = scrollView.contentOffset.x
let contentSizeWidth = scrollView.contentSize.width
let normalWidth = CGFloat(filters?.count ?? 0) * width
if width > 0 && contentSizeWidth > 0 {
if contentOffsetX <= 0 {
scrollView.contentOffset = CGPoint(x: contentOffsetX + normalWidth, y: scrollView.contentOffset.y)
} else if contentOffsetX + width >= contentSizeWidth {
scrollView.contentOffset = CGPoint(x: contentOffsetX - normalWidth, y: scrollView.contentOffset.y)
}
}
if isRefreshingAutomaticallyWhenScrolling {
setNeedsDisplay()
}
}
func scrollViewDidScrollToTop(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndScrollingAnimation(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDecelerating(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDragging(_ scrollView: UIScrollView, willDecelerate decelerate: Bool) {
if !decelerate {
updateCurrentSelected(notify: true)
}
}
}
These three classes do the magic for the image part. Does anyone have a suggestion or a starting point for this? I tried looking at https://github.com/rFlex/SCRecorder but I get a bit lost in the Obj-C.
In iOS 9 / OS X 10.11 / tvOS, there's a convenience method for applying CIFilters to video. It works on an AVVideoComposition, so you can use it both for playback and for file-to-file import/export. See AVVideoComposition.init(asset:applyingCIFiltersWithHandler:) for the method docs.
There's an example in Apple's Core Image Programming Guide, too:
let filter = CIFilter(name: "CIGaussianBlur")!
let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
// Clamp to avoid blurring transparent pixels at the image edges
let source = request.sourceImage.clampedToExtent()
filter.setValue(source, forKey: kCIInputImageKey)
// Vary filter parameters based on video timing
let seconds = CMTimeGetSeconds(request.compositionTime)
filter.setValue(seconds * 10.0, forKey: kCIInputRadiusKey)
// Crop the blurred output to the bounds of the original image
let output = filter.outputImage!.cropped(to: request.sourceImage.extent)
// Provide the filter output to the composition
request.finish(with: output, context: nil)
})
That part sets up the composition. After you've done that, you can either play it by assigning it to an AVPlayer or write it to a file with AVAssetExportSession. Since you're after the latter, here's an example of that:
let export = AVAssetExportSession(asset: asset, presetName: AVAssetExportPreset1920x1080)
export?.outputFileType = .mov
export?.outputURL = outURL
export?.videoComposition = composition
export?.exportAsynchronously { /*...*/ }
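If you ever want the playback route instead, the same composition drops onto an AVPlayerItem; a minimal sketch, reusing asset and composition from above:
let playerItem = AVPlayerItem(asset: asset)
playerItem.videoComposition = composition
let player = AVPlayer(playerItem: playerItem)
// Attach the player to an AVPlayerLayer or AVPlayerViewController to display it.
player.play()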
There's a bit more about this in the WWDC15 session on Core Image, starting around 20 minutes in.

Strange Issue With Transparent PNG Files From iOS Photo Library

I'm having a very strange issue with transparent PNG files, sourced from the Photos app.
I am writing an app that allows the user to bring up an instance of UIImagePickerController, select an image, and have that image added to a UIImageView via its image property.
Pretty straightforward, eh? The issue is when the image in the library is a transparent PNG.
For whatever reason, whenever I try to render the image, it always has a white background.
As far as I can tell, the image is stored in the library as a transparent PNG. When I drag it out, and examine it with an image editor, it's fine. Just what I expect.
But when I extract it programmatically, it has a white background. I can't seem to get it to be transparent.
Here's the code that I use to extract the image (It's a picker callback):
func imagePickerController(_ inPicker: UIImagePickerController, didFinishPickingMediaWithInfo inInfo: [UIImagePickerController.InfoKey: Any]) {
let info = Dictionary(uniqueKeysWithValues: inInfo.map { key, value in (key.rawValue, value) })
guard let image = (info[UIImagePickerController.InfoKey.editedImage.rawValue] as? UIImage ?? info[UIImagePickerController.InfoKey.originalImage.rawValue] as? UIImage)?.resizeThisImage(toNewWidth: Self.maximumImageWidthAndHeightInPixels) else { return }
organization?.icon = image
inPicker.dismiss(animated: true) { DispatchQueue.main.async { [weak self] in
self?.imageButton?.image = image
self?.imageButton?.alpha = 1.0
self?.imageButton?.tintColor = self?.view.tintColor
self?.updateUI()
}
}
}
It's not actually a UIButton. It's a UIImageView, with an attached tap recognizer.
The resizeThisImage() method is in an extension that I wrote for UIImage. It works fine. I've been using it forever:
func resizeThisImage(toNewWidth inNewWidth: CGFloat? = nil, toNewHeight inNewHeight: CGFloat? = nil) -> UIImage? {
guard nil == inNewWidth,
nil == inNewHeight else {
var scaleX: CGFloat = (inNewWidth ?? size.width) / size.width
var scaleY: CGFloat = (inNewHeight ?? size.height) / size.height
scaleX = nil == inNewWidth ? scaleY : scaleX
scaleY = nil == inNewHeight ? scaleX : scaleY
let destinationSize = CGSize(width: size.width * scaleX, height: size.height * scaleY)
let destinationRect = CGRect(origin: .zero, size: destinationSize)
UIGraphicsBeginImageContextWithOptions(destinationSize, false, 0)
defer { UIGraphicsEndImageContext() } // This makes sure that we get rid of the offscreen context.
draw(in: destinationRect, blendMode: .normal, alpha: 1)
return UIGraphicsGetImageFromCurrentImageContext()
}
return nil
}
In any case, it happens whether or not I use the resizeThisImage() method. That's not the issue.
Does anyone have any ideas what may be causing the issue?
UPDATE: I implemented @DonMag's example, and here's what I got:
Note that the generated "A" is surrounded by white.
I should note that I'm using a classic storyboard UIKit app (no scene stuff). I don't think that should be an issue, but I'm happy to provide my little sample app. I don't think it's worth creating a GH repo for.
There doesn't seem to be anything wrong with your code, so I have to wonder if your images really, truly have transparency?
Here's a simple example to check.
The code creates Red and Blue image views, with .contentMode = .center.
Tapping the "Create" button will generate a UIImage using SF Symbol -- green with transparent background, the size of the Red image view -- and save it to Photos in PNG format with transparency.
Tapping the "Load" button will bring up the image picker. Selecting an image (such as the one just created and saved) will load the image and - using your extension - resize it to 80 x 80 and assign it to the .image property of the Blue image view.
As you can see, the image loaded from the Photo Picker still has its transparency.
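If you want to verify in code whether a picked image actually carries an alpha channel, checking the underlying CGImage's alphaInfo is a quick sanity test (a small sketch; if it returns false for an image you saved as a transparent PNG, something re-encoded it without alpha along the way):
func imageHasAlpha(_ image: UIImage) -> Bool {
    guard let alphaInfo = image.cgImage?.alphaInfo else { return false }
    switch alphaInfo {
    case .first, .last, .premultipliedFirst, .premultipliedLast, .alphaOnly:
        return true  // pixel format carries an alpha component
    default:
        return false // .none, .noneSkipFirst, .noneSkipLast
    }
}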
Your UIImage extension for resizing
extension UIImage {
func resizeThisImage(toNewWidth inNewWidth: CGFloat? = nil, toNewHeight inNewHeight: CGFloat? = nil) -> UIImage? {
guard nil == inNewWidth,
nil == inNewHeight else {
var scaleX: CGFloat = (inNewWidth ?? size.width) / size.width
var scaleY: CGFloat = (inNewHeight ?? size.height) / size.height
scaleX = nil == inNewWidth ? scaleY : scaleX
scaleY = nil == inNewHeight ? scaleX : scaleY
let destinationSize = CGSize(width: size.width * scaleX, height: size.height * scaleY)
let destinationRect = CGRect(origin: .zero, size: destinationSize)
UIGraphicsBeginImageContextWithOptions(destinationSize, false, 0)
defer { UIGraphicsEndImageContext() } // This makes sure that we get rid of the offscreen context.
draw(in: destinationRect, blendMode: .normal, alpha: 1)
return UIGraphicsGetImageFromCurrentImageContext()
}
return nil
}
}
UIImage extension to save to Photos in PNG format with transparency
extension UIImage {
// save to Photos in PNG format with transparency
func saveToPhotos(completion: @escaping (_ success: Bool) -> ()) {
if let pngData = self.pngData() {
PHPhotoLibrary.shared().performChanges({ () -> Void in
let creationRequest = PHAssetCreationRequest.forAsset()
let options = PHAssetResourceCreationOptions()
creationRequest.addResource(with: PHAssetResourceType.photo, data: pngData, options: options)
}, completionHandler: { (success, error) -> Void in
if success == false {
if let errorString = error?.localizedDescription {
print("Photo could not be saved: \(errorString))")
}
completion(false)
} else {
print("Photo saved!")
completion(true)
}
})
} else {
completion(false)
}
}
}
Example view controller uses (essentially) your func imagePickerController for loading a photo
class TestImageViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {
var imgViewA: UIImageView = UIImageView()
var imgViewB: UIImageView = UIImageView()
override func viewDidLoad() {
super.viewDidLoad()
let vStack = UIStackView()
vStack.axis = .vertical
vStack.spacing = 20
let btnStack = UIStackView()
btnStack.axis = .horizontal
btnStack.distribution = .fillEqually
btnStack.spacing = 20
let btnCreate = UIButton()
let btnLoad = UIButton()
btnCreate.setTitle("Create", for: [])
btnLoad.setTitle("Load", for: [])
[btnCreate, btnLoad].forEach { b in
b.setTitleColor(.white, for: .normal)
b.setTitleColor(.lightGray, for: .highlighted)
b.backgroundColor = UIColor(red: 0.0, green: 0.5, blue: 0.75, alpha: 1.0)
btnStack.addArrangedSubview(b)
}
vStack.translatesAutoresizingMaskIntoConstraints = false
view.addSubview(vStack)
[btnStack, imgViewA, imgViewB].forEach { v in
vStack.addArrangedSubview(v)
}
[imgViewA, imgViewB].forEach { v in
v.contentMode = .center
}
let g = view.safeAreaLayoutGuide
NSLayoutConstraint.activate([
vStack.centerXAnchor.constraint(equalTo: g.centerXAnchor),
vStack.centerYAnchor.constraint(equalTo: g.centerYAnchor),
vStack.widthAnchor.constraint(equalToConstant: 200.0),
imgViewA.heightAnchor.constraint(equalTo: imgViewA.widthAnchor),
imgViewB.heightAnchor.constraint(equalTo: imgViewB.widthAnchor),
])
imgViewA.backgroundColor = .red
imgViewB.backgroundColor = .blue
btnCreate.addTarget(self, action: #selector(self.createAndSave(_:)), for: .touchUpInside)
btnLoad.addTarget(self, action: #selector(importPicture(_:)), for: .touchUpInside)
}
@objc func createAndSave(_ sender: Any) {
let w = imgViewA.frame.width
// create a Green image with transparent background
if let img = drawSystemImage("a.circle.fill", at: 80, centeredIn: CGSize(width: w, height: w)) {
imgViewA.image = img
// save it to Photos in PNG format with transparency
img.saveToPhotos { (success) in
if success {
// image saved to photos
print("saved")
}
else {
// image not saved
fatalError("save failed")
}
}
}
}
// create UIImage from SF Symbol system image
// at Point Size
// centered in CGSize
// will draw symbol in Green on transparent background
private func drawSystemImage(_ sysName: String, at pointSize: CGFloat, centeredIn size: CGSize) -> UIImage? {
let cfg = UIImage.SymbolConfiguration(pointSize: pointSize)
guard let img = UIImage(systemName: sysName, withConfiguration: cfg)?.withTintColor(.green, renderingMode: .alwaysOriginal) else { return nil }
let x = (size.width - img.size.width) * 0.5
let y = (size.height - img.size.height) * 0.5
let renderer = UIGraphicsImageRenderer(size: size)
return renderer.image { context in
img.draw(in: CGRect(origin: CGPoint(x: x, y: y), size: img.size))
}
}
@objc func importPicture(_ sender: Any) {
let picker = UIImagePickerController()
picker.allowsEditing = true
picker.delegate = self
present(picker, animated: true)
}
func imagePickerController(_ inPicker: UIImagePickerController, didFinishPickingMediaWithInfo inInfo: [UIImagePickerController.InfoKey: Any]) {
let info = Dictionary(uniqueKeysWithValues: inInfo.map { key, value in (key.rawValue, value) })
guard let image = (info[UIImagePickerController.InfoKey.editedImage.rawValue] as? UIImage ?? info[UIImagePickerController.InfoKey.originalImage.rawValue] as? UIImage)?.resizeThisImage(toNewWidth: 80) else { return }
// organization?.icon = image
inPicker.dismiss(animated: true) {
DispatchQueue.main.async { [weak self] in
self?.imgViewB.image = image
//self?.imageButton?.image = image
//self?.imageButton?.alpha = 1.0
//self?.imageButton?.tintColor = self?.view.tintColor
//self?.updateUI()
}
}
}
}

Using Vision to scan images from photo library

Is there a way that I can use the Vision framework to scan an existing image from the user's photo library? As in, not taking a new picture using the camera, but just choosing an image that the user already has?
Yes, you can. Adding on to @Zulqarnayn's answer, here's a working example that detects a rectangle and draws a bounding box around it.
1. Set up the image view where the image will be displayed
@IBOutlet weak var imageView: UIImageView!
@IBAction func pickImage(_ sender: Any) {
let picker = UIImagePickerController()
picker.delegate = self
self.present(picker, animated: true)
}
override func viewDidLoad() {
super.viewDidLoad()
imageView.layer.borderWidth = 4
imageView.layer.borderColor = UIColor.blue.cgColor
imageView.contentMode = .scaleAspectFill
imageView.backgroundColor = UIColor.green.withAlphaComponent(0.3)
imageView.layer.masksToBounds = false /// allow image to overflow, for testing purposes
}
2. Get the image from the image picker
extension ViewController: UIImagePickerControllerDelegate, UINavigationControllerDelegate {
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {
guard let image = info[.originalImage] as? UIImage else { return }
/// set the imageView's image
imageView.image = image
/// start the request & request handler
detectCard()
/// dismiss the picker
dismiss(animated: true)
}
}
3. Start the vision request
func detectCard() {
guard let cgImage = imageView.image?.cgImage else { return }
/// perform on background thread, so the main screen is not frozen
DispatchQueue.global(qos: .userInitiated).async {
let request = VNDetectRectanglesRequest { request, error in
/// this function will be called when the Vision request finishes
self.handleDetectedRectangle(request: request, error: error)
}
request.minimumAspectRatio = 0.0
request.maximumAspectRatio = 1.0
request.maximumObservations = 1 /// only look for 1 rectangle
let imageRequestHandler = VNImageRequestHandler(cgImage: cgImage, orientation: .up)
do {
try imageRequestHandler.perform([request])
} catch let error {
print("Error: \(error)")
}
}
}
4. Get the result from the Vision request
func handleDetectedRectangle(request: VNRequest?, error: Error?) {
if let results = request?.results {
if let observation = results.first as? VNRectangleObservation {
/// get back to the main thread
DispatchQueue.main.async {
guard let image = self.imageView.image else { return }
let convertedRect = self.getConvertedRect(
boundingBox: observation.boundingBox,
inImage: image.size,
containedIn: self.imageView.bounds.size
)
self.drawBoundingBox(rect: convertedRect)
}
}
}
}
5. Convert observation.boundingBox to the UIKit coordinates of the image view, then draw a border around the detected rectangle
I explain this more in detail in this answer.
func getConvertedRect(boundingBox: CGRect, inImage imageSize: CGSize, containedIn containerSize: CGSize) -> CGRect {
let rectOfImage: CGRect
let imageAspect = imageSize.width / imageSize.height
let containerAspect = containerSize.width / containerSize.height
if imageAspect > containerAspect { /// image extends left and right
let newImageWidth = containerSize.height * imageAspect /// the width of the overflowing image
let newX = -(newImageWidth - containerSize.width) / 2
rectOfImage = CGRect(x: newX, y: 0, width: newImageWidth, height: containerSize.height)
} else { /// image extends top and bottom
let newImageHeight = containerSize.width * (1 / imageAspect) /// the height of the overflowing image
let newY = -(newImageHeight - containerSize.height) / 2
rectOfImage = CGRect(x: 0, y: newY, width: containerSize.width, height: newImageHeight)
}
let newOriginBoundingBox = CGRect(
x: boundingBox.origin.x,
y: 1 - boundingBox.origin.y - boundingBox.height,
width: boundingBox.width,
height: boundingBox.height
)
var convertedRect = VNImageRectForNormalizedRect(newOriginBoundingBox, Int(rectOfImage.width), Int(rectOfImage.height))
/// add the margins
convertedRect.origin.x += rectOfImage.origin.x
convertedRect.origin.y += rectOfImage.origin.y
return convertedRect
}
/// draw an orange frame around the detected rectangle, on top of the image view
func drawBoundingBox(rect: CGRect) {
let uiView = UIView(frame: rect)
imageView.addSubview(uiView)
uiView.backgroundColor = UIColor.clear
uiView.layer.borderColor = UIColor.orange.cgColor
uiView.layer.borderWidth = 3
}
Yes, you can. First, create an instance of UIImagePickerController and present it:
let picker = UIImagePickerController()
picker.delegate = self
picker.sourceType = .photoLibrary
present(picker, animated: true, completion: nil)
Then implement the delegate method to take the desired image:
extension YourViewController: UIImagePickerControllerDelegate, UINavigationControllerDelegate {
    func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
        if let pickedImage = info[.originalImage] as? UIImage {
            // start your request & request handler here
        }
        picker.dismiss(animated: true, completion: nil)
    }
}

How to find the new center of an UIImageView after scaling?

I am creating an app where the user can add an image to a "canvas" and resize and move the image around using pinch and pan gesture recognizers. The image view is a custom one I created using this article:
Bezier Paths and Gesture Recognizers
This works really nicely. The image resizes and moves very smoothly. I can capture the center and the size of the image after the pan and pinch gestures. The problem is that, after I save the size and coordinates of the image, they aren't respected when I load it back into the "canvas". It is as if the center is offset by some number of pixels.
Here is my code for making the resizable and movable image view:
import UIKit
import Foundation
class MovableImage: UIImageView {
let size: CGFloat = 150.0
var imageMovedHandler:((_ x: CGFloat, _ y: CGFloat) -> ())?
var imageDeletedHandler:((_ delete: Bool) -> ())?
var longPressHandler:((_ selected: Bool) -> ())?
var imageSizeChangedHandler:((_ newImageView: MovableImage) -> ())?
let deleteButton = UIButton(type: .close)
init(origin: CGPoint) {
super.init(frame: CGRect(x: origin.x, y: origin.y, width: size, height: size))
debugCenterDot()
initGestureRecognizers()
}
//added a dot to try and understand what is happening with the "center" of the imageview, but it didn't show in the center of the imageview
func debugCenterDot() {
let dot = UIBezierPath(ovalIn: CGRect(x: self.center.x, y: self.center.y, width: 15, height: 15))
let dotLayer = CAShapeLayer()
dotLayer.path = dot.cgPath
dotLayer.strokeColor = UIColor.yellow.cgColor
self.layer.addSublayer(dotLayer)
self.setNeedsDisplay()
}
internal func addButton() {
deleteButton.tintColor = UIColor.red
deleteButton.backgroundColor = UIColor.white
deleteButton.addTarget(self, action: #selector(deleteSelf(sender:)), for: .touchUpInside)
deleteButton.frame = .zero //CGRect(x: 8, y: 8, width: 15, height: 15)
deleteButton.translatesAutoresizingMaskIntoConstraints = false
self.addSubview(deleteButton)
NSLayoutConstraint.activate([
deleteButton.widthAnchor.constraint(equalToConstant: 15),
deleteButton.widthAnchor.constraint(equalTo: deleteButton.heightAnchor),
deleteButton.leadingAnchor.constraint(equalTo: self.safeAreaLayoutGuide.leadingAnchor, constant: 8),
deleteButton.topAnchor.constraint(equalTo: self.safeAreaLayoutGuide.topAnchor, constant: 8),
])
}
@objc func deleteSelf(sender: UIButton) {
imageDeletedHandler?(true)
self.removeFromSuperview()
}
func initGestureRecognizers() {
let panGR = UIPanGestureRecognizer(target: self, action: #selector(didPan(panGR:)))
addGestureRecognizer(panGR)
let pinchGR = UIPinchGestureRecognizer(target: self, action: #selector(didPinch(pinchGR:)))
addGestureRecognizer(pinchGR)
let longPressGR = UILongPressGestureRecognizer(target: self, action: #selector(didLongPress(longPressGR:)))
longPressGR.minimumPressDuration = 1
addGestureRecognizer(longPressGR)
}
@objc func didLongPress(longPressGR: UILongPressGestureRecognizer) {
self.superview!.bringSubviewToFront(self)
self.layer.borderWidth = 2
self.layer.borderColor = UIColor.red.cgColor
addButton()
longPressHandler?(true)
}
@objc func didPan(panGR: UIPanGestureRecognizer) {
self.superview!.bringSubviewToFront(self)
if self.layer.borderWidth == 2 {
let translation = panGR.translation(in: self)
print("BEFORE PAN: \(self.center)")
self.center.x += translation.x
self.center.y += translation.y
print("AFTER PAN: \(self.center)")
panGR.setTranslation(CGPoint.zero, in: self)
if panGR.state == .ended {
imageMovedHandler?(self.center.x, self.center.y)
self.layer.borderWidth = 0
self.layer.borderColor = nil
self.deleteButton.removeFromSuperview()
}
}
}
@objc func didPinch(pinchGR: UIPinchGestureRecognizer) {
self.superview?.bringSubviewToFront(self)
if self.layer.borderWidth == 2 {
let scale = pinchGR.scale
self.transform = CGAffineTransform(scaleX: scale, y: scale)
if pinchGR.state == .ended {
imageSizeChangedHandler?(self)
}
}
}
func scaleOf(transform: CGAffineTransform) -> CGPoint {
let xscale = sqrt(transform.a * transform.a + transform.c * transform.c)
let yscale = sqrt(transform.b * transform.b + transform.d * transform.d)
return CGPoint(x: xscale, y: yscale)
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
And here is how I am loading it back into the "canvas" and saving it back to CoreData (save functions in closures near the bottom of the load function):
func loadImages(new: Bool, enableInteraction: Bool) {
let pagePhotos = LoadPagePhotos()
var pageImages: [NSManagedObject] = []
var x: CGFloat?
var y: CGFloat?
var width: CGFloat?
var height: CGFloat?
var image: UIImage?
if isYear {
pageImages = pagePhotos.resetYearPhotosPositions(journalName: journalName, yearPosition: yearPosition)
} else {
pageImages = pagePhotos.resetPhotosPositions(journalName: journalName, monthName: monthName, weekPosition: positionWeek)
}
scrollView.mainView.newImages.forEach { i in
i.removeFromSuperview()
}
scrollView.mainView.newImages.removeAll()
if pageImages.count > 0 {
pageImages.forEach{ (photo) in
x = CGFloat((photo.value(forKey: "pageImageX") as? Float)!)
y = CGFloat((photo.value(forKey: "pageImageY") as? Float)!)
height = CGFloat((photo.value(forKey: "pageImageSizeHeight") as? Float)!)
width = CGFloat((photo.value(forKey: "pageImageSizeWidth") as? Float)!)
image = photo.value(forKey: "image") as? UIImage
let thisImage: MovableImage = MovableImage(origin: CGPoint.zero)
thisImage.contentMode = .scaleAspectFit
thisImage.center = CGPoint(x: x!, y: y!)
thisImage.image = image!
thisImage.frame.size.height = height!
thisImage.frame.size.width = width!
scrollView.mainView.addSubview(thisImage)
scrollView.mainView.newImages.append(thisImage)
if enableInteraction {
thisImage.isUserInteractionEnabled = true
} else {
thisImage.isUserInteractionEnabled = false
}
thisImage.layer.zPosition = 1
thisImage.layer.borderWidth = 0
if new {
imageOptionsMenuView.isHidden = false
} else {
imageOptionsMenuView.isHidden = true
}
movableImage = thisImage
//for clarity sake I moved the save functions to separate block here in stack overflow so it is easier to read
}
}
The closures imageMovedHandler and imageSizeChangedHandler are used to detect when moving and resizing are done, at which point I save the image to Core Data. Here they are:
if movableImage != nil {
movableImage?.imageMovedHandler = { [unowned self] (x, y) in
if self.isYear {
if self.scrollView.mainView.newImages.count > 0 {
for (idx, i) in self.scrollView.mainView.newImages.enumerated() {
if i.layer.borderWidth == 2 {
let id = idx + 1
_ = LoadPagePhotos().updateYearPhoto(journalName: self.journalName, yearPosition: self.yearPosition, pageImageId: id, imageHeight: Float(i.frame.size.height), imageWidth: Float(i.frame.size.width), imageX: Float(i.center.x), imageY: Float(i.center.y), pagePhoto: i.image!, photoPath: nil)
}
}
}
} else {
if self.scrollView.mainView.newImages.count > 0 {
for (idx, i) in self.scrollView.mainView.newImages.enumerated() {
if i.layer.borderWidth == 2 {
let id = idx + 1
_ = LoadPagePhotos().updatePhoto(journalName: self.journalName, monthName: self.monthName, weekPosition: self.positionWeek, pageImageId: id, imageHeight: Float(i.frame.size.height), imageWidth: Float(i.frame.size.width), imageX: Float(i.center.x), imageY: Float(i.center.y), pagePhoto: i.image!, photoPath: nil)
}
}
}
}
self.loadImages(new: false, enableInteraction: true)
}
movableImage?.imageSizeChangedHandler = { [unowned self] (newImageView) in
var id = 0
var img = 0
if self.isYear {
if self.scrollView.mainView.newImages.count > 0 {
for (idx, i) in self.scrollView.mainView.newImages.enumerated() {
if i.layer.borderWidth == 2 {
id = idx + 1
img = idx
}
}
self.scrollView.mainView.newImages[img] = newImageView
_ = LoadPagePhotos().updateYearPhoto(journalName: self.journalName, yearPosition: self.yearPosition, pageImageId: id, imageHeight: Float(newImageView.frame.size.height), imageWidth: Float(newImageView.frame.size.width), imageX: Float(newImageView.center.x), imageY: Float(newImageView.center.y), pagePhoto: newImageView.image!, photoPath: nil)
}
} else {
if self.scrollView.mainView.newImages.count > 0 {
for (idx, i) in self.scrollView.mainView.newImages.enumerated() {
if i.layer.borderWidth == 2 {
id = idx + 1
img = idx
}
}
self.scrollView.mainView.newImages[img] = newImageView
_ = LoadPagePhotos().updatePhoto(journalName: self.journalName, monthName: self.monthName, weekPosition: self.positionWeek, pageImageId: id, imageHeight: Float(newImageView.frame.size.height), imageWidth: Float(newImageView.frame.size.width), imageX: Float(newImageView.center.x), imageY: Float(newImageView.center.y), pagePhoto: newImageView.image!, photoPath: nil)
}
}
self.loadImages(new: false, enableInteraction: true)
}
}
}
Two screenshots (omitted here) show what is happening when I move the image around the canvas: the first shows where I stopped moving the image, and the second shows where the image ended up after saving the size and coordinates and loading them back.
The desired outcome is:
When pinching, panning, and saving the image and then loading, the image retains its current coordinates and size in the canvas.
EDIT:
It should also be noted that the offset of the image when moving it only happens after scaling.
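One likely culprit, sketched below rather than offered as a drop-in fix: didPinch above rebuilds the transform from identity using the recognizer's current scale on every callback, and didPan reads its translation in the transformed view's own coordinate space, so the deltas are themselves scaled. The conventional pattern is to compose the scale into the existing transform, reset the recognizer each time, and take pan translations in the superview's space. Relatedly, UIKit documents a view's frame as undefined when its transform is not the identity, so setting frame.size in loadImages on a scaled view can produce exactly this kind of offset; persisting bounds.size together with the transform avoids it.
@objc func didPinch(pinchGR: UIPinchGestureRecognizer) {
    // Accumulate scale into the current transform, then reset the
    // recognizer so each callback applies only the incremental change.
    self.transform = self.transform.scaledBy(x: pinchGR.scale, y: pinchGR.scale)
    pinchGR.scale = 1.0
}
@objc func didPan(panGR: UIPanGestureRecognizer) {
    // Use the superview's coordinate space; the view's own space is
    // distorted by its transform, which skews the translation deltas.
    let translation = panGR.translation(in: self.superview)
    self.center = CGPoint(x: self.center.x + translation.x,
                          y: self.center.y + translation.y)
    panGR.setTranslation(.zero, in: self.superview)
}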
