iOS: How to save an image from a UIView without losing image quality - ios

I have images that are drawn in a UIView, and then lines are drawn on top of them. How can I save the image with the painted lines to a UIImage object at the same size it began with, without losing any image quality?
For example, I have an image of size (3264, 2448). It is drawn in a UIView (size 375, 281) with Aspect Fit content mode,
and then a line is painted on the image. How can I save the image from the UIView to a UIImage of size (3264, 2448) without losing image quality?
If this is not the best approach, please recommend a better way to accomplish this.
class DrawingView: UIImageView {
private var pts = [CGPoint](count: 5, repeatedValue: CGPoint())
private var ctr: UInt!
var lineWidth: CGFloat = 4.0
var lineColor: UIColor = UIColor.darkGrayColor()
required init(coder aDecoder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
init(frame: CGRect, image: UIImage) {
super.init(frame: frame)
self.image = image
beginDrawingView()
}
private func beginDrawingView() {
userInteractionEnabled = true
}
override func touchesBegan(touches: Set<NSObject>, withEvent event: UIEvent) {
ctr = 0
if let touch = touches.first as? UITouch {
pts[0] = touch.locationInView(self) as CGPoint
}
}
override func touchesMoved(touches: Set<NSObject>, withEvent event: UIEvent) {
if let touch = touches.first as? UITouch {
let p: CGPoint = touch.locationInView(self)
ctr = ctr + 1
pts[Int(ctr)] = p
if ctr == 4 {
pts[3] = CGPointMake((pts[2].x + pts[4].x)/2.0, (pts[2].y + pts[4].y)/2.0);
UIGraphicsBeginImageContextWithOptions(bounds.size, false, 0.0)
let context = UIGraphicsGetCurrentContext()
image!.drawInRect(CGRect(x: 0, y: 0, width: bounds.width, height: bounds.height))
CGContextSaveGState(context)
CGContextSetShouldAntialias(context, true)
CGContextSetLineCap(context, kCGLineCapRound)
CGContextSetLineWidth(context, lineWidth)
CGContextSetStrokeColorWithColor(context, lineColor.CGColor)
let path = CGPathCreateMutable()
CGPathMoveToPoint(path, nil, pts[0].x, pts[0].y)
CGPathAddCurveToPoint(path, nil, pts[1].x, pts[1].y, pts[2].x, pts[2].y, pts[3].x, pts[3].y)
CGContextSetBlendMode(context, kCGBlendModeNormal)
CGContextAddPath(context, path)
CGContextStrokePath(context)
image = UIGraphicsGetImageFromCurrentImageContext()
CGContextRestoreGState(context)
UIGraphicsEndImageContext()
pts[0] = pts[3]
pts[1] = pts[4]
ctr = 1
}
}
}
}
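For reference, one way to get a full-resolution result (a sketch in current Swift syntax, assuming you also accumulate the stroke points in view coordinates, and that the view's aspect ratio matches the image's as in the question) is to replay the strokes into a context sized to the original image:
func renderFullResolution(originalImage: UIImage, strokes: [[CGPoint]],
                          viewSize: CGSize, lineWidth: CGFloat, lineColor: UIColor) -> UIImage? {
    // Aspect Fit with matching aspect ratios: one scale factor for both axes (assumption)
    let scale = originalImage.size.width / viewSize.width
    UIGraphicsBeginImageContextWithOptions(originalImage.size, false, 1.0) // 1.0 = pixel-for-pixel
    defer { UIGraphicsEndImageContext() }
    originalImage.draw(in: CGRect(origin: .zero, size: originalImage.size))
    guard let context = UIGraphicsGetCurrentContext() else { return nil }
    context.setLineCap(.round)
    context.setLineWidth(lineWidth * scale) // keep the stroke visually proportional
    context.setStrokeColor(lineColor.cgColor)
    for stroke in strokes {
        guard let first = stroke.first else { continue }
        context.move(to: CGPoint(x: first.x * scale, y: first.y * scale))
        for point in stroke.dropFirst() {
            context.addLine(to: CGPoint(x: point.x * scale, y: point.y * scale))
        }
    }
    context.strokePath()
    return UIGraphicsGetImageFromCurrentImageContext()
}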

I'm not sure if this is exactly what you are looking for, but if you already have a UIImage object, you can just save it as a Base64String. Here is some code that I use for this (you'll notice that I also perform compression on any JPG images because I'm saving the string to a database, but of course you wouldn't need that component since you don't want to lose image quality):
//Convert the image to Base64String with optional JPG compression
//
-(NSString*)convertImageToBase64String:(UIImage*)image withImageType:(NSString*)imageType
{
NSData* data;
if ([imageType isEqualToString:@"PNG"]) {
data = UIImagePNGRepresentation(image);
} else if ([imageType isEqualToString:@"JPG"] || [imageType isEqualToString:@"JPEG"]) {
data = UIImageJPEGRepresentation(image, 0.9);
}
return [data base64EncodedStringWithOptions:NSDataBase64Encoding64CharacterLineLength];
}
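Going the other way is symmetric. A minimal decoding sketch (in Swift, which this page also uses; .ignoreUnknownCharacters tolerates the 64-character line breaks inserted by the encoding option above):
func imageFromBase64(_ string: String) -> UIImage? {
    guard let data = Data(base64Encoded: string, options: .ignoreUnknownCharacters) else {
        return nil
    }
    return UIImage(data: data)
}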
If you don't have the UIImage object yet, though, you could get it by taking a screenshot like this:
+ (UIImage *) imageWithView:(UIView *)view
{
UIGraphicsBeginImageContextWithOptions(view.bounds.size, view.opaque, 0.0);
[view.layer renderInContext:UIGraphicsGetCurrentContext()];
UIImage * img = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return img;
}
I haven't tested the last block of code, but since the context is created with view.bounds.size and only that view's layer is rendered, it should capture just the view you pass in rather than the entire screen. If you want a smaller region, you would need to adjust the view.bounds.size component to the exact area you are looking for. Hope that helps!
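If you do need only a sub-region of a view, one option (a sketch, assuming region is given in the view's own coordinates) is to translate the context before rendering the layer:
func image(of view: UIView, region: CGRect) -> UIImage? {
    UIGraphicsBeginImageContextWithOptions(region.size, view.isOpaque, 0)
    defer { UIGraphicsEndImageContext() }
    guard let ctx = UIGraphicsGetCurrentContext() else { return nil }
    ctx.translateBy(x: -region.origin.x, y: -region.origin.y) // shift the region to the origin
    view.layer.render(in: ctx)
    return UIGraphicsGetImageFromCurrentImageContext()
}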

Related

CIImage display MTKView vs GLKView performance

I have a series of UIImages (made from incoming JPEG data from a server) that I wish to render using MTKView. The problem is that it is too slow compared to GLKView: there is a lot of buffering and delay when I have a series of images to display in MTKView, but no delay in GLKView.
Here is MTKView display code:
private lazy var context: CIContext = {
return CIContext(mtlDevice: self.device!, options: [CIContextOption.workingColorSpace : NSNull()])
}()
var ciImg: CIImage? {
didSet {
syncQueue.sync {
internalCoreImage = ciImg
}
}
}
func displayCoreImage(_ ciImage: CIImage) {
self.ciImg = ciImage
}
override func draw(_ rect: CGRect) {
var ciImage: CIImage?
syncQueue.sync {
ciImage = internalCoreImage
}
drawCIImage(ciImage)
}
func drawCIImage(_ ciImage:CIImage?) {
guard let image = ciImage,
let currentDrawable = currentDrawable,
let commandBuffer = commandQueue?.makeCommandBuffer()
else {
return
}
let currentTexture = currentDrawable.texture
let drawingBounds = CGRect(origin: .zero, size: drawableSize)
let scaleX = drawableSize.width / image.extent.width
let scaleY = drawableSize.height / image.extent.height
let scaledImage = image.transformed(by: CGAffineTransform(scaleX: scaleX, y: scaleY))
context.render(scaledImage, to: currentTexture, commandBuffer: commandBuffer, bounds: drawingBounds, colorSpace: CGColorSpaceCreateDeviceRGB())
commandBuffer.present(currentDrawable)
commandBuffer.commit()
}
And here is the code for GLKView, which is lag-free and fast:
private var videoPreviewView:GLKView!
private var eaglContext:EAGLContext!
private var context:CIContext!
override init(frame: CGRect) {
super.init(frame: frame)
initCommon()
}
required init?(coder: NSCoder) {
super.init(coder: coder)
initCommon()
}
func initCommon() {
eaglContext = EAGLContext(api: .openGLES3)!
videoPreviewView = GLKView(frame: self.bounds, context: eaglContext)
context = CIContext(eaglContext: eaglContext, options: nil)
self.addSubview(videoPreviewView)
videoPreviewView.bindDrawable()
videoPreviewView.clipsToBounds = true
videoPreviewView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
}
func displayCoreImage(_ ciImage: CIImage) {
let sourceExtent = ciImage.extent
let sourceAspect = sourceExtent.size.width / sourceExtent.size.height
let videoPreviewWidth = CGFloat(videoPreviewView.drawableWidth)
let videoPreviewHeight = CGFloat(videoPreviewView.drawableHeight)
let previewAspect = videoPreviewWidth/videoPreviewHeight
// we want to maintain the aspect ratio of the screen size, so we clip the video image
var drawRect = sourceExtent
if sourceAspect > previewAspect
{
// use full height of the video image, and center crop the width
drawRect.origin.x = drawRect.origin.x + (drawRect.size.width - drawRect.size.height * previewAspect) / 2.0
drawRect.size.width = drawRect.size.height * previewAspect
}
else
{
// use full width of the video image, and center crop the height
drawRect.origin.y = drawRect.origin.y + (drawRect.size.height - drawRect.size.width / previewAspect) / 2.0
drawRect.size.height = drawRect.size.width / previewAspect
}
var videoRect = CGRect(x: 0, y: 0, width: videoPreviewWidth, height: videoPreviewHeight)
if sourceAspect < previewAspect
{
// use full height of the video image, and center crop the width
videoRect.origin.x += (videoRect.size.width - videoRect.size.height * sourceAspect) / 2.0;
videoRect.size.width = videoRect.size.height * sourceAspect;
}
else
{
// use full width of the video image, and center crop the height
videoRect.origin.y += (videoRect.size.height - videoRect.size.width / sourceAspect) / 2.0;
videoRect.size.height = videoRect.size.width / sourceAspect;
}
videoPreviewView.bindDrawable()
if eaglContext != EAGLContext.current() {
EAGLContext.setCurrent(eaglContext)
}
// clear eagl view to black
glClearColor(0, 0, 0, 1)
glClear(GLbitfield(GL_COLOR_BUFFER_BIT))
glEnable(GLenum(GL_BLEND))
glBlendFunc(GLenum(GL_ONE), GLenum(GL_ONE_MINUS_SRC_ALPHA))
context.draw(ciImage, in: videoRect, from: sourceExtent)
videoPreviewView.display()
}
I really want to find out where the bottleneck is in the Metal code. Is Metal not capable of displaying 640x360 UIImages 20 times per second?
EDIT: Setting colorPixelFormat of MTKView to rgba16Float solves the delay issue, but the reproduced colors are not accurate, so this seems to be a colorspace conversion issue with Core Image. But how does GLKView render so fast without delay when MTKView does not?
EDIT2: Setting colorPixelFormat of MTKView to bgra_xr10 mostly solves the delay issue. But the problem is that we cannot use the CIRenderDestination API with this pixel format.
I am still wondering how GLKView/CIContext render the images so quickly without any delay, while in MTKView we need to set colorPixelFormat to bgra_xr10 to get comparable performance. And setting bgra_xr10 on an iPad Mini 2 causes a crash:
-[MTLRenderPipelineDescriptorInternal validateWithDevice:], line 2590: error 'pixelFormat, for color render target(0), is not a valid MTLPixelFormat.
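For reference, a minimal MTKView configuration sketch reflecting the workarounds above (hedged: the pixel-format choice is the asker's empirical finding, not a general fix; framebufferOnly must be false for CIContext to render into the drawable's texture):
import MetalKit

let mtkView = MTKView(frame: .zero, device: MTLCreateSystemDefaultDevice())
mtkView.framebufferOnly = false          // allow CIContext to write to the drawable texture
mtkView.colorPixelFormat = .rgba16Float  // per EDIT: avoids the stutter, at some color accuracy cost
mtkView.enableSetNeedsDisplay = true     // draw on demand rather than on a display-link timer
mtkView.isPaused = true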

Drawing on UIImageView within UIScrollView

About my app: The user can view a PDF file within a UIWebView. I have an option for the user to choose whether they want to scroll through the pdf or take notes on it. When they take notes, the scrolling is disabled, and vice-versa. However, when the user is drawing, the lines move up and become hazy as shown:
(The red boxes are text within the pdf)
Here is my code:
Switching between the pen and scroll:
var usingPen = false
@IBAction func usePen(sender: AnyObject) {
usingPen = true
webView.userInteractionEnabled = false
UIView.animateWithDuration(0.3) { () -> Void in
self.popUpView.alpha = 0
}
}
@IBAction func useScroll(sender: AnyObject) {
usingPen = false
webView.userInteractionEnabled = true
UIView.animateWithDuration(0.3) { () -> Void in
self.popUpView.alpha = 0
}
}
The imageView the user draws on (objectView):
var objectView = UIImageView()
override func viewDidAppear(animated: Bool) {
objectView.frame.size = webView.scrollView.contentSize
webView.scrollView.addSubview(objectView)
}
Drawing on the image view:
var start = CGPoint()
let size: CGFloat = 3
var color = UIColor.blackColor()
func draw(start: CGPoint, end: CGPoint) {
if usingPen == true {
UIGraphicsBeginImageContext(self.objectView.frame.size)
let context = UIGraphicsGetCurrentContext()
objectView.image?.drawInRect(CGRect(x: 0, y: 0, width: objectView.frame.width, height: objectView.frame.height))
CGContextSetFillColorWithColor(context, color.CGColor)
CGContextSetStrokeColorWithColor(context, color.CGColor)
CGContextSetLineWidth(context, size)
CGContextBeginPath(context)
CGContextMoveToPoint(context, start.x, start.y)
CGContextAddLineToPoint(context, end.x, end.y)
CGContextStrokePath(context)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
objectView.image = newImage
}
}
override func touchesBegan(touches: Set<UITouch>, withEvent event: UIEvent?) {
start = (touches.first?.locationInView(self.objectView))!
}
override func touchesMoved(touches: Set<UITouch>, withEvent event: UIEvent?) {
draw(start, end: (touches.first?.locationInView(self.objectView))!)
start = (touches.first?.locationInView(self.objectView))!
}
How can I prevent the haziness and movement of the drawings? Thanks for your help.
This probably occurs due to an image scaling issue. Since you're drawing into an image with a scale factor of 1 (which is the default) and your screen has a scale factor of 2 or 3, lines will continue to blur slightly every time they're copied and drawn. The solution is to specify the screen's scale when you create your image context:
UIGraphicsBeginImageContextWithOptions(self.objectView.frame.size, false, UIScreen.mainScreen().scale)
Note that the way you're drawing the line is fairly inefficient. Instead, you probably want to create a CGBitmapContext and continually draw to that; it'd be much faster and would also eliminate the "generation loss" problem you have.
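A rough sketch of that suggestion in current Swift syntax (hypothetical CanvasView name; the context is created once, each touch strokes only the newest segment, and draw just blits the cached bitmap):
class CanvasView: UIView {
    private var cache: CGContext?

    private func makeCacheIfNeeded() {
        guard cache == nil else { return }
        let scale = UIScreen.main.scale
        // Pixel-sized backing store; bytesPerRow 0 lets CoreGraphics choose the stride
        cache = CGContext(data: nil,
                          width: Int(bounds.width * scale),
                          height: Int(bounds.height * scale),
                          bitsPerComponent: 8,
                          bytesPerRow: 0,
                          space: CGColorSpaceCreateDeviceRGB(),
                          bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)
        cache?.scaleBy(x: scale, y: scale) // draw in points, store in pixels
    }

    func stroke(from start: CGPoint, to end: CGPoint) {
        makeCacheIfNeeded()
        guard let ctx = cache else { return }
        ctx.setLineWidth(3)
        ctx.setLineCap(.round)
        ctx.setStrokeColor(UIColor.black.cgColor)
        ctx.move(to: start)
        ctx.addLine(to: end)
        ctx.strokePath()
        setNeedsDisplay() // or pass just the dirty rect for even less work
    }

    override func draw(_ rect: CGRect) {
        guard let image = cache?.makeImage(),
              let ui = UIGraphicsGetCurrentContext() else { return }
        // The bitmap's bottom-left origin and CGContext's image flip cancel out here
        ui.draw(image, in: bounds)
    }
}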

Draw a shape over a high res image?

I want to be able to draw a line over a high-resolution photo (e.g. an 8-megapixel image) in a specific place.
That is a simple enough thing, and there are many posts about it already, but my problem is that the CGContext "drawing space" doesn't seem to match the high-res image.
I can draw the line and save an image, but my problem is with drawing the line in a specific location. The coordinate spaces seem to differ from each other. I think there must be a scale factor that I am missing, or my understanding is just messed up.
So my question is:
How do I draw onto an image that is "aspect fit" to the screen (but is much higher resolution) and have the drawing (in this case a line) be in the same position on the screen and in the final full-resolution composited image?
example image:
The red line is the line I am drawing. It should go from the center of the start target (theTarget) to the center of the end target (theEnd).
I have simplified my drawing function down for posting here, but I suspect it is my whole thinking/approach that is wrong.
import UIKit
class ViewController: UIViewController {
@IBOutlet weak var imageView: UIImageView!
@IBOutlet weak var theTarget: UIImageView!
@IBOutlet weak var theEnd: UIImageView!
var lineColor = UIColor.redColor()
var targetPos : CGPoint!
var endPos : CGPoint!
var originalImage : UIImage!
override func viewDidLoad() {
super.viewDidLoad()
imageView.image = UIImage(named: "reference.jpg")
originalImage = imageView.image
drawTheLine()
}
func drawTheLine () {
UIGraphicsBeginImageContext(originalImage!.size);
// Draw the original image as the background
originalImage?.drawAtPoint(CGPointMake(0, 0))
// Pass 2: Draw the line on top of original image
let context = UIGraphicsGetCurrentContext();
CGContextSetLineWidth(context, 10.0);
targetPos = theTarget.frame.origin
endPos = theEnd.frame.origin
CGContextMoveToPoint(context, targetPos.x, targetPos.y);
CGContextAddLineToPoint(context, endPos.x, endPos.y)
CGContextSetStrokeColorWithColor(context, lineColor.CGColor)
CGContextStrokePath(context);
imageView.image = UIGraphicsGetImageFromCurrentImageContext()
}
@IBAction func saveButton(sender: AnyObject) {
// Create new image
let newImage = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
UIImageWriteToSavedPhotosAlbum(newImage!, nil, nil, nil )
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
@IBAction func handlePan(recognizer:UIPanGestureRecognizer) {
let translation = recognizer.translationInView(self.view)
if let view = recognizer.view {
view.center = CGPoint(x:view.center.x + translation.x,
y:view.center.y + translation.y)
}
recognizer.setTranslation(CGPointZero, inView: self.view)
//redraw the line
drawTheLine()
print("the start pos of the line is: ", theTarget.frame.origin, " and end pos is: ", theEnd.frame.origin)
}
}
I had this exact problem a while ago, so I wrote a UIImageView extension that maps the image view's coordinates into the image's coordinates when the content mode is .ScaleAspectFit.
extension UIImageView {
func pointInAspectScaleFitImageCoordinates(point:CGPoint) -> CGPoint {
if let img = image {
let imageSize = img.size
let imageViewSize = frame.size
let imgRatio = imageSize.width/imageSize.height // The ratio of the image before scaling.
let imgViewRatio = imageViewSize.width/imageViewSize.height // The ratio of the image view
let ratio = (imgRatio > imgViewRatio) ? imageSize.width/imageViewSize.width:imageSize.height/imageViewSize.height // The ratio of the image before scaling to after scaling.
let xOffset = (imageSize.width-(imageViewSize.width*ratio))*0.5 // The x-offset of the image on-screen (as it gets centered)
let yOffset = (imageSize.height-(imageViewSize.height*ratio))*0.5 // The y-offset of the image on-screen (as it gets centered)
let subImgOrigin = CGPoint(x: point.x*ratio, y: point.y*ratio); // The origin of the image (relative to the origin of the view)
return CGPoint(x: subImgOrigin.x+xOffset, y: subImgOrigin.y+yOffset);
}
return CGPointZero
}
}
You should be able to use this in your drawTheLine function quite easily:
func drawTheLine () {
UIGraphicsBeginImageContext(originalImage!.size);
// Draw the original image as the background
originalImage?.drawAtPoint(CGPointMake(0, 0))
// Pass 2: Draw the line on top of original image
let context = UIGraphicsGetCurrentContext();
CGContextSetLineWidth(context, 10.0);
targetPos = imageView.pointInAspectScaleFitImageCoordinates(theTarget.frame.origin)
endPos = imageView.pointInAspectScaleFitImageCoordinates(theEnd.frame.origin)
CGContextMoveToPoint(context, targetPos.x, targetPos.y);
CGContextAddLineToPoint(context, endPos.x, endPos.y)
CGContextSetStrokeColorWithColor(context, lineColor.CGColor)
CGContextStrokePath(context);
imageView.image = UIGraphicsGetImageFromCurrentImageContext()
}
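One caveat: the line is stroked in image coordinates, so a width of 10 that looks right on screen will render hairline-thin on an 8-megapixel image. A small adjustment inside drawTheLine (a sketch, assuming a width-limited aspect fit as in the extension above):
// Scale the on-screen stroke width up into image coordinates
let widthRatio = originalImage.size.width / imageView.frame.size.width
CGContextSetLineWidth(context, 10.0 * widthRatio);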

CGBitmapContextCreate: unsupported parameter combination. How to pass kCGImageAlphaNoneSkipFirst [duplicate]

This question already has answers here:
kCGImageAlphaNone unresolved identifier in swift
(2 answers)
Closed 7 years ago.
I originally wrote this App (GitHub) in Obj-C, but need to convert it to Swift. While converting, I've been having trouble creating the context for the bitmap.
Error Message:
Whiteboard[2833] <Error>: CGBitmapContextCreate: unsupported parameter combination: 8 integer bits/component; 24 bits/pixel; 3-component color space; kCGImageAlphaNone; 1500 bytes/row.
Originally I had this:
self.cacheContext = CGBitmapContextCreate (self.cacheBitmap, size.width, size.height, 8, bitmapBytesPerRow, CGColorSpaceCreateDeviceRGB(), kCGImageAlphaNoneSkipFirst);
And now I have:
self.cacheContext = CGBitmapContextCreate(self.cacheBitmap!, UInt(size.width), UInt(size.height), 8, bitmapBytesPerRow, CGColorSpaceCreateDeviceRGB(), CGBitmapInfo.ByteOrder32Little);
I believe the issue has to do with CGBitmapInfo.ByteOrder32Little, but I'm not sure what to pass. Is there a way to pass kCGImageAlphaNoneSkipFirst as a CGBitmapInfo?
Full Source:
//
// WhiteBoard.swift
// Whiteboard
//
import Foundation
import UIKit
class WhiteBoard: UIView {
var hue: CGFloat
var cacheBitmap: UnsafeMutablePointer<Void>?
var cacheContext: CGContextRef?
override init(frame: CGRect) {
self.hue = 0.0;
// Create a UIView with the size of the parent view
super.init(frame: frame);
// Initialize the Cache Context of the bitmap
self.initContext(frame);
// Set the background color of the view to be White
self.backgroundColor = UIColor.whiteColor();
// Add a Save Button to the bottom right corner of the screen
let buttonFrame = CGRectMake(frame.size.width - 50, frame.size.height - 30, 40, 25);
let button = UIButton();
button.frame = buttonFrame;
button.setTitle("Save", forState: .Normal);
button.setTitleColor(UIColor.blueColor(), forState: .Normal);
button.addTarget(self, action: "downloadImage", forControlEvents: .TouchUpInside);
// Add the button to the view
self.addSubview(button);
}
required init(coder aDecoder: NSCoder) {
self.hue = 0.0;
super.init(coder: aDecoder)
}
func initContext(frame: CGRect)-> Bool {
let size = frame.size; // Get the size of the UIView
var bitmapByteCount: UInt!
var bitmapBytesPerRow: UInt!
// Calculate the number of bytes per row. 4 bytes per pixel: red, green, blue, alpha
bitmapBytesPerRow = UInt(size.width * 4);
// Total Bytes in the bitmap
bitmapByteCount = UInt(CGFloat(bitmapBytesPerRow) * size.height);
// Allocate memory for image data. This is the destination in memory where any
// drawing to the bitmap context will be rendered
self.cacheBitmap = malloc(bitmapByteCount);
// Create the Cache Context from the Bitmap
self.cacheContext = CGBitmapContextCreate(self.cacheBitmap!, UInt(size.width), UInt(size.height), 8, bitmapBytesPerRow, CGColorSpaceCreateDeviceRGB(), CGBitmapInfo.ByteOrder32Little);
// Set the background as white
CGContextSetRGBFillColor(self.cacheContext, 1.0, 1.0, 1.0, 1.0);
CGContextFillRect(self.cacheContext, frame);
CGContextSaveGState(self.cacheContext);
return true;
}
// Fired every time a touch event is dragged
override func touchesMoved(touches: NSSet, withEvent event: UIEvent) {
let touch = touches.anyObject() as UITouch;
self.drawToCache(touch);
}
// Draw the new touch event to the cached Bitmap
func drawToCache(touch: UITouch) {
self.hue += 0.005;
if(self.hue > 1.0) {
self.hue = 0.0;
}
// Create a color object of the line color
let color = UIColor(hue: CGFloat(self.hue), saturation: CGFloat(0.7), brightness: CGFloat(1.0), alpha: CGFloat(1.0));
// Set the line size, type, and color
CGContextSetStrokeColorWithColor(self.cacheContext, color.CGColor);
CGContextSetLineCap(self.cacheContext, kCGLineCapRound);
CGContextSetLineWidth(self.cacheContext, CGFloat(15));
// Get the current and last touch point
let lastPoint = touch.previousLocationInView(self) as CGPoint;
let newPoint = touch.locationInView(self) as CGPoint;
// Draw the line
CGContextMoveToPoint(self.cacheContext, lastPoint.x, lastPoint.y);
CGContextAddLineToPoint(self.cacheContext, newPoint.x, newPoint.y);
CGContextStrokePath(self.cacheContext);
// Calculate the dirty regions that need to be updated
let dirtyPoint1 = CGRectMake(lastPoint.x-10, lastPoint.y-10, 20, 20);
let dirtyPoint2 = CGRectMake(newPoint.x-10, newPoint.y-10, 20, 20);
self.setNeedsDisplay();
// Only update the dirty pixels to improve performance
//self.setNeedsDisplayInRect(dirtyPoint1);
//self.setNeedsDisplayInRect(dirtyPoint2);
}
// Draw the cachedBitmap to the UIView
override func drawRect(rect: CGRect) {
// Get the current Graphics Context
let context = UIGraphicsGetCurrentContext();
// Get the Image to draw
let cacheImage = CGBitmapContextCreateImage(self.cacheContext);
// Draw the ImageContext to the screen
CGContextDrawImage(context, self.bounds, cacheImage);
}
// Download the image to the camera roll
func downloadImage() {
// Get the Image from the CGContext
let image = UIImage(CGImage: CGBitmapContextCreateImage(self.cacheContext));
// Save the Image to their Camera Roll
UIImageWriteToSavedPhotosAlbum(image, self, "image:didFinishSavingWithError:contextInfo:", nil);
}
func image(image: UIImage, didFinishSavingWithError error: NSError, contextInfo: UnsafeMutablePointer<Void>) {
if(!error.localizedDescription.isEmpty) {
UIAlertView(title: "Error", message: "Error Saving Photo", delegate: nil, cancelButtonTitle: "Ok").show();
}
}
}
In Objective-C, you would simply cast to the other enum type, like this:
(CGBitmapInfo)kCGImageAlphaNoneSkipFirst
In Swift, you have to do it like this:
CGBitmapInfo(CGImageAlphaInfo.NoneSkipFirst.rawValue)
Welcome to the wild and wacky world of Swift numerics. You have to pull the numeric value out of the original CGImageAlphaInfo enumeration with rawValue; now you can use that numeric value in the initializer of the CGBitmapInfo enumeration.
EDIT It's much simpler in iOS 9 / Swift 2.0, where you can just pass CGImageAlphaInfo.NoneSkipFirst.rawValue directly into CGBitmapContextCreate, which now just expects an integer at this spot.
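For reference, in Swift 3 and later the same call reads as follows (a sketch; names mirror the question's variables):
// The same bitmap context with the CGContext initializer
let context = CGContext(data: cacheBitmap,
                        width: Int(size.width),
                        height: Int(size.height),
                        bitsPerComponent: 8,
                        bytesPerRow: Int(bitmapBytesPerRow),
                        space: CGColorSpaceCreateDeviceRGB(),
                        bitmapInfo: CGImageAlphaInfo.noneSkipFirst.rawValue)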

Allowing users to draw rect on a UIImage, with the intention of cropping the image

I'm sure this has been asked a number of times from various different perspectives, but I'm unable to find an answer on here as yet.
What I want to achieve
What I would like to do is to display a UIImage, and allow the user to draw a rectangle on the image, and eventually crop their selection.
Research so far
I've found previous questions here on SO that handle the cropping; however, they often deal with static cropping areas that don't change, which leads to the following constraints of such a mechanism:
The area of the image you're interested in may be positioned anywhere; for example, if you're trying to crop a road sign, it may be centered in one image but aligned left in another, so you can't predict which area to crop.
The size and scale of the area of interest may change; for example, one image may be a close-up of that road sign, so the cropping area would be larger, while another image may have been taken from a distance, meaning the cropping area would be smaller.
With the combination of the above two variables, it's almost impossible to accurately predict where the area of interest in the image will be, so I'm relying on the user to define it by being able to "draw" a box around the area we're interested in, in this case, a road sign.
This is all peachy on Android, since you can delegate all the hard work out with a nice intent, such as :
Intent intent = new Intent("com.android.camera.action.CROP");
However, I can't find an equivalent for iOS.
I've found this bit of code from this source:
- (UIImage *)imageByDrawingCircleOnImage:(UIImage *)image
{
// begin a graphics context of sufficient size
UIGraphicsBeginImageContext(image.size);
// draw original image into the context
[image drawAtPoint:CGPointZero];
// get the context for CoreGraphics
CGContextRef ctx = UIGraphicsGetCurrentContext();
// set stroking color and draw circle
[[UIColor redColor] setStroke];
// make circle rect 5 px from border
CGRect circleRect = CGRectMake(0, 0,
image.size.width,
image.size.height);
circleRect = CGRectInset(circleRect, 5, 5);
// draw circle
CGContextStrokeEllipseInRect(ctx, circleRect);
// make image out of bitmap context
UIImage *retImage = UIGraphicsGetImageFromCurrentImageContext();
// free the context
UIGraphicsEndImageContext();
return retImage;
}
I believe this is a good starting point (although it draws a circle rather than cropping); it does rely on predefining the area you want when calling CGRectMake, though.
This previous question also details how to do the actual cropping.
I'm assuming that to allow the user to draw the rect, I'd need to integrate with Gestures?
The Question :
How can I allow the user, to draw a rect over an image view, with the intent of cropping that area?
You could give BJImageCropper a try:
A simple UIView subclass that allows a user to crop an image. If you use it, I'd love to know! Twitter: @barrettjacobsen
This post is already 5 years old, but for future reference, this is how I managed to get it done. The following code is a combination of Rob's answer and some image cropping.
Xcode 9 and Swift 4 are being used here.
Add 2 view controllers
Add an image view and 2 buttons to the first view controller, and another image view to the second view controller
Link all views to the source file
View controller
import UIKit
extension UIView {
func snapshot(afterScreenUpdates: Bool = false) -> UIImage {
UIGraphicsBeginImageContextWithOptions(bounds.size, isOpaque, 0)
drawHierarchy(in: bounds, afterScreenUpdates: afterScreenUpdates)
let image = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return image
}
}
extension UIImage {
func crop( rect: CGRect) -> UIImage {
var rect = rect
rect.origin.x*=self.scale
rect.origin.y*=self.scale
rect.size.width*=self.scale
rect.size.height*=self.scale
let imageRef = self.cgImage!.cropping(to: rect)
let image = UIImage(cgImage: imageRef!, scale: self.scale, orientation: self.imageOrientation)
return image
}
}
class ViewController: UIViewController {
var rec: CGRect!
var cropImage: UIImage!
@IBOutlet weak var imageView: UIImageView!
private let shapeLayer: CAShapeLayer = {
let _shapeLayer = CAShapeLayer()
_shapeLayer.fillColor = UIColor.clear.cgColor
_shapeLayer.strokeColor = UIColor.green.cgColor
_shapeLayer.lineWidth = 2
return _shapeLayer
}()
private var startPoint: CGPoint!
override func viewDidLoad() {
super.viewDidLoad()
imageView.layer.addSublayer(shapeLayer)
}
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
clear()
startPoint = touches.first?.location(in: imageView)
}
func clear() {
imageView.layer.sublayers = nil
imageView.image = UIImage(named: "aa")
imageView.layer.addSublayer(shapeLayer)
}
override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
guard let startPoint = startPoint, let touch = touches.first else { return }
let point: CGPoint
if let predictedTouch = event?.predictedTouches(for: touch)?.last {
point = predictedTouch.location(in: imageView)
} else {
point = touch.location(in: imageView)
}
updatePath(from: startPoint, to: point)
}
override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
guard let startPoint = startPoint, let touch = touches.first else { return }
let point = touch.location(in: imageView)
updatePath(from: startPoint, to: point)
imageView.image = imageView.snapshot(afterScreenUpdates: true)
shapeLayer.path = nil
}
override func touchesCancelled(_ touches: Set<UITouch>, with event: UIEvent?) {
shapeLayer.path = nil
}
private func updatePath(from startPoint: CGPoint, to point: CGPoint) {
let size = CGSize(width: point.x - startPoint.x, height: point.y - startPoint.y)
rec = CGRect(origin: startPoint, size: size)
shapeLayer.path = UIBezierPath(rect: rec).cgPath
}
@IBAction func btnTapped(_ sender: Any) {
clear()
}
@IBAction func btnCropTapped(_ sender: Any) {
let newRec = CGRect(origin: CGPoint(x: rec.origin.x, y: rec.origin.y), size: rec.size)
cropImage = imageView.image?.crop(rect: newRec)
print(rec)
print(newRec)
self.performSegue(withIdentifier: "toImage", sender: nil)
}
override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
if segue.identifier == "toImage" {
if let destination = segue.destination as? ImageViewController {
destination.croppedImage = cropImage
}
}
}
}
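One caveat with the code above: if the user drags up or left, CGRect(origin:size:) produces a rect with negative width or height, and cgImage.cropping(to:) can fail on it. A small hardening sketch:
// Normalize the rect so drags in any direction crop correctly
private func updatePath(from startPoint: CGPoint, to point: CGPoint) {
    let size = CGSize(width: point.x - startPoint.x, height: point.y - startPoint.y)
    rec = CGRect(origin: startPoint, size: size).standardized // flips negative sizes
    shapeLayer.path = UIBezierPath(rect: rec).cgPath
}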
