Draw a shape over a high res image? - iOS

I want to be able to draw a line over a high-resolution photo (e.g. an 8-megapixel image) in a specific place.
That is simple enough, and there are many posts about it already, but my problem is that the CGContext "drawing space" doesn't seem to be the same as the high-res image.
I can draw the line and save an image, but my problem is with drawing the line in a specific location. My coordinate spaces don't seem to match each other. I think there must be a scale factor that I am missing, or my understanding is just messed up.
So my question is:
How do I draw onto an image that is "aspect fit" to the screen (but is much higher resolution) and have the drawing (in this case a line) appear in the same position both on the screen and in the final full-resolution composited image?
example image:
The red line is the line I am drawing. It should go from the center of the start target (theTarget) to the center of the end target (theEnd).
I have simplified my drawing function down for posting here, but I suspect it is my whole thinking/approach that is wrong.
import UIKit

class ViewController: UIViewController {

    @IBOutlet weak var imageView: UIImageView!
    @IBOutlet weak var theTarget: UIImageView!
    @IBOutlet weak var theEnd: UIImageView!

    var lineColor = UIColor.redColor()
    var targetPos: CGPoint!
    var endPos: CGPoint!
    var originalImage: UIImage!

    override func viewDidLoad() {
        super.viewDidLoad()
        imageView.image = UIImage(named: "reference.jpg")
        originalImage = imageView.image
        drawTheLine()
    }

    func drawTheLine() {
        UIGraphicsBeginImageContext(originalImage!.size)
        // Draw the original image as the background
        originalImage?.drawAtPoint(CGPointMake(0, 0))
        // Pass 2: Draw the line on top of original image
        let context = UIGraphicsGetCurrentContext()
        CGContextSetLineWidth(context, 10.0)
        targetPos = theTarget.frame.origin
        endPos = theEnd.frame.origin
        CGContextMoveToPoint(context, targetPos.x, targetPos.y)
        CGContextAddLineToPoint(context, endPos.x, endPos.y)
        CGContextSetStrokeColorWithColor(context, lineColor.CGColor)
        CGContextStrokePath(context)
        imageView.image = UIGraphicsGetImageFromCurrentImageContext()
    }

    @IBAction func saveButton(sender: AnyObject) {
        // Create new image
        let newImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        UIImageWriteToSavedPhotosAlbum(newImage!, nil, nil, nil)
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    @IBAction func handlePan(recognizer: UIPanGestureRecognizer) {
        let translation = recognizer.translationInView(self.view)
        if let view = recognizer.view {
            view.center = CGPoint(x: view.center.x + translation.x,
                                  y: view.center.y + translation.y)
        }
        recognizer.setTranslation(CGPointZero, inView: self.view)
        // redraw the line
        drawTheLine()
        print("the start pos of the line is: ", theTarget.frame.origin, " and end pos is: ", theEnd.frame.origin)
    }
}

I had this exact problem a while ago, so I wrote a UIImageView extension that maps the image view's coordinates into the image's coordinates when the content mode is .ScaleAspectFit.
extension UIImageView {
    func pointInAspectScaleFitImageCoordinates(point: CGPoint) -> CGPoint {
        if let img = image {
            let imageSize = img.size
            let imageViewSize = frame.size
            let imgRatio = imageSize.width/imageSize.height             // Aspect ratio of the image
            let imgViewRatio = imageViewSize.width/imageViewSize.height // Aspect ratio of the image view
            // Scale factor from view coordinates to image coordinates.
            let ratio = (imgRatio > imgViewRatio) ? imageSize.width/imageViewSize.width : imageSize.height/imageViewSize.height
            // Offsets of the displayed image within the view (it gets centered), in image coordinates.
            let xOffset = (imageSize.width - (imageViewSize.width * ratio)) * 0.5
            let yOffset = (imageSize.height - (imageViewSize.height * ratio)) * 0.5
            let subImgOrigin = CGPoint(x: point.x * ratio, y: point.y * ratio) // The point scaled into image space
            return CGPoint(x: subImgOrigin.x + xOffset, y: subImgOrigin.y + yOffset)
        }
        return CGPointZero
    }
}
You should be able to use this in your drawTheLine function quite easily:
func drawTheLine() {
    UIGraphicsBeginImageContext(originalImage!.size)
    // Draw the original image as the background
    originalImage?.drawAtPoint(CGPointMake(0, 0))
    // Pass 2: Draw the line on top of original image
    let context = UIGraphicsGetCurrentContext()
    CGContextSetLineWidth(context, 10.0)
    // Convert the targets' view coordinates into image coordinates.
    targetPos = imageView.pointInAspectScaleFitImageCoordinates(theTarget.frame.origin)
    endPos = imageView.pointInAspectScaleFitImageCoordinates(theEnd.frame.origin)
    CGContextMoveToPoint(context, targetPos.x, targetPos.y)
    CGContextAddLineToPoint(context, endPos.x, endPos.y)
    CGContextSetStrokeColorWithColor(context, lineColor.CGColor)
    CGContextStrokePath(context)
    imageView.image = UIGraphicsGetImageFromCurrentImageContext()
}
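If you later need to map the other way (image coordinates back to view coordinates), for example to place an overlay view over a feature of the image, the inverse is just a matter of undoing the offset and the scale. A minimal sketch, assuming the same .ScaleAspectFit setup (the function name here is mine, not part of the original extension):
extension UIImageView {
    // Hypothetical inverse of pointInAspectScaleFitImageCoordinates:
    // maps a point in the image's coordinates back into the view's coordinates.
    func pointInViewCoordinatesFromImage(point: CGPoint) -> CGPoint {
        if let img = image {
            let imageSize = img.size
            let imageViewSize = frame.size
            let imgRatio = imageSize.width/imageSize.height
            let imgViewRatio = imageViewSize.width/imageViewSize.height
            let ratio = (imgRatio > imgViewRatio) ? imageSize.width/imageViewSize.width : imageSize.height/imageViewSize.height
            let xOffset = (imageSize.width - (imageViewSize.width * ratio)) * 0.5
            let yOffset = (imageSize.height - (imageViewSize.height * ratio)) * 0.5
            // Subtract the centering offset, then divide by the scale factor.
            return CGPoint(x: (point.x - xOffset) / ratio, y: (point.y - yOffset) / ratio)
        }
        return CGPointZero
    }
}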

Related

Crop Image from Camera in Swift without move to another ViewController

I have an image overlay inside CameraViewController:
I want to get the image from inside this red square.
I don't want to move to another view controller to set up a CropViewController; the crop should be done inside this controller.
The code below almost works. The problem is that the image generated from the camera is 1080x1920 while self.cropView.bounds is (0, 0, 185, 120), which of course does not represent the same scale used to take the image:
extension UIImage {
    func crop(rect: CGRect) -> UIImage {
        var rect = rect
        rect.origin.x *= self.scale
        rect.origin.y *= self.scale
        rect.size.width *= self.scale
        rect.size.height *= self.scale
        let imageRef = self.cgImage!.cropping(to: rect)
        let image = UIImage(cgImage: imageRef!, scale: self.scale, orientation: self.imageOrientation)
        return image
    }
}
You can always visually crop any image in a quadrilateral (a four-sided shape; it doesn't have to be a rectangle) using a Core Image filter called CIPerspectiveCorrection.
Let's say you have an image view frame that is 414 wide by 716 high, with an image that is 1600 wide by 900 high. (You are using a content mode of .aspectFit, right?) Let's say you want to crop a four-sided shape whose corners, in (X, Y) coordinates in the image view, are (50,50), (75,75), (100,300), and (25,200). Note that I'm listing the points in top left (TL), top right (TR), bottom right (BR), bottom left (BL) order. Also note that this is not a straightforward rectangle.
What you need to do is this:
Convert the UIImage to a CIImage where the "extent" is the UIImage size,
Convert those UIImageView coordinates to CIImage coordinates,
Pass them and the CIImage into the CIPerspectiveCorrection filter for cropping, and
Render the CIImage output into a UIImageView.
The below code is a little rough around the edges, but hopefully you get the concept:
class ViewController: UIViewController {

    let uiTL = CGPoint(x: 50, y: 50)
    let uiTR = CGPoint(x: 75, y: 75)
    let uiBL = CGPoint(x: 100, y: 300)
    let uiBR = CGPoint(x: 25, y: 200)

    var ciImage: CIImage!
    var ctx: CIContext!

    @IBOutlet weak var imageView: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        ctx = CIContext(options: nil)
        ciImage = CIImage(image: imageView.image!)
    }

    override func viewWillLayoutSubviews() {
        let ciTL = createVector(createScaledPoint(uiTL))
        let ciTR = createVector(createScaledPoint(uiTR))
        let ciBR = createVector(createScaledPoint(uiBR))
        let ciBL = createVector(createScaledPoint(uiBL))
        imageView.image = doPerspectiveCorrection(CIImage(image: imageView.image!)!,
                                                  context: ctx,
                                                  topLeft: ciTL,
                                                  topRight: ciTR,
                                                  bottomRight: ciBR,
                                                  bottomLeft: ciBL)
    }

    func doPerspectiveCorrection(
        _ image: CIImage,
        context: CIContext,
        topLeft: AnyObject,
        topRight: AnyObject,
        bottomRight: AnyObject,
        bottomLeft: AnyObject)
        -> UIImage {
        let filter = CIFilter(name: "CIPerspectiveCorrection")
        filter?.setValue(topLeft, forKey: "inputTopLeft")
        filter?.setValue(topRight, forKey: "inputTopRight")
        filter?.setValue(bottomRight, forKey: "inputBottomRight")
        filter?.setValue(bottomLeft, forKey: "inputBottomLeft")
        filter!.setValue(image, forKey: kCIInputImageKey)
        let cgImage = context.createCGImage((filter?.outputImage)!, from: (filter?.outputImage!.extent)!)
        return UIImage(cgImage: cgImage!)
    }

    func createScaledPoint(_ pt: CGPoint) -> CGPoint {
        // Scale a UIImageView point proportionally into the CIImage extent.
        let x = (pt.x / imageView.frame.width) * ciImage.extent.width
        let y = (pt.y / imageView.frame.height) * ciImage.extent.height
        return CGPoint(x: x, y: y)
    }

    func createVector(_ point: CGPoint) -> CIVector {
        // Core Image's origin is the bottom left, so flip the y-axis.
        return CIVector(x: point.x, y: ciImage.extent.height - point.y)
    }

    func createPoint(_ vector: CGPoint) -> CGPoint {
        return CGPoint(x: vector.x, y: ciImage.extent.height - vector.y)
    }
}
EDIT: I'm putting this here to explain things. The two of us swapped projects, and there was an issue with the questioner's code where a nil return was happening. First, here's the corrected code, which should be in the cropImage() function:
let ciTL = createVector(createScaledPoint(topLeft, overlay: cameraView, image: image), image: image)
let ciTR = createVector(createScaledPoint(topRight, overlay: cameraView, image: image), image: image)
let ciBR = createVector(createScaledPoint(bottomRight, overlay: cameraView, image: image), image: image)
let ciBL = createVector(createScaledPoint(bottomLeft, overlay: cameraView, image: image), image: image)
The issue was with the last two lines, which were transposed: bottomLeft was passed where bottomRight should have been, and vice versa. (An easy mistake to make; I've done it too!)
Some explanation to help those who use CIPerspectiveCorrection (and other filters that use CIVectors).
A CIVector can have anywhere from, I think, two to an almost infinite number of components; it depends on the filter. In this case there are two components (X, Y). Simple enough, but the twist is that the four CIVectors describe four points inside the CIImage extent where the origin is the bottom left, not the top left.
Note I did not say a four-sided shape. You can actually have a "figure 8"-like shape where the "bottom right" point is left of the "bottom left" point! This would result in a shape where two sides cross each other.
All that matters is that all four points lie within the CIImage extent. If they don't, the filter will return nil for its output image.
One last note for those who haven't worked with CIImage filters before: the filters will not execute until you ask for the outputImage. You can instantiate one, fill in the parameters, chain them, whatever. You can even make a typo in the filter name (or any of the keys). Until your code asks for filter.outputImage, nothing happens.
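If your selection is a plain axis-aligned rectangle rather than an arbitrary quadrilateral, you may not need CIPerspectiveCorrection at all. A minimal sketch, assuming the crop view and the image have the same aspect ratio, which reuses the crop(rect:) extension from the question (cropRect(in:selection:) is an illustrative name, not an existing API):
func cropRect(in imageView: UIImageView, selection: CGRect) -> UIImage? {
    guard let image = imageView.image else { return nil }
    // Proportional scale from view points to image points,
    // mirroring what createScaledPoint does for single points.
    let scaleX = image.size.width / imageView.frame.width
    let scaleY = image.size.height / imageView.frame.height
    let imageRect = CGRect(x: selection.origin.x * scaleX,
                           y: selection.origin.y * scaleY,
                           width: selection.width * scaleX,
                           height: selection.height * scaleY)
    return image.crop(rect: imageRect)
}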

Move UIImageView Independently from its Mask in Swift

I'm trying to mask a UIImageView in such a way that it would allow the user to drag the image around without moving its mask. The effect would be similar to how one can position an image within the Instagram app essentially allowing the user to define the crop region of the image.
Here's an animated gif to demonstrate what I'm after.
Here's how I'm currently masking the image and repositioning it on drag/pan events.
import UIKit

class ViewController: UIViewController {

    var dragDelta = CGPoint()
    @IBOutlet weak var imageView: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        attachMask()
        // listen for pan/drag events
        let pan = UIPanGestureRecognizer(target: self, action: #selector(onPanGesture))
        pan.maximumNumberOfTouches = 1
        pan.minimumNumberOfTouches = 1
        self.view.addGestureRecognizer(pan)
    }

    func onPanGesture(gesture: UIPanGestureRecognizer) {
        let point: CGPoint = gesture.locationInView(self.view)
        if (gesture.state == .Began) {
            print("begin", point)
            // capture our drag start position
            dragDelta = CGPoint(x: point.x - imageView.frame.origin.x, y: point.y - imageView.frame.origin.y)
        } else if (gesture.state == .Changed) {
            // update image position based on how far we've dragged from drag start
            imageView.frame.origin.y = point.y - dragDelta.y
        } else if (gesture.state == .Ended) {
            print("ended", point)
        }
    }

    func attachMask() {
        let mask = CAShapeLayer()
        mask.path = UIBezierPath(roundedRect: CGRect(x: 0, y: 100, width: imageView.frame.size.width, height: 400), cornerRadius: 5).CGPath
        mask.anchorPoint = CGPoint(x: 0, y: 0)
        mask.fillColor = UIColor.redColor().CGColor
        view.layer.addSublayer(mask)
        imageView.layer.mask = mask
    }
}
This results in both the image and mask moving together as you see below.
Any suggestions on how to "lock" the mask so the image can be moved independently underneath it would be very much appreciated.
Moving a mask and frame separately from each other to reach this effect isn't the best way to go about doing this. Most apps that do this sort of effect do the following:
Add a UIScrollView to the root view (with panning/zooming enabled)
Add a UIImageView to the UIScrollView
Size the UIImageView such that it has a 1:1 ratio with the image
Set the contentSize of the UIScrollView to match that of the UIImageView
The user can now pan around and zoom into the UIImageView as needed.
Next, if you're, say, cropping the image:
Get the visible rectangle (taken from Getting the visible rect of an UIScrollView's content)
CGRect visibleRect = [scrollView convertRect:scrollView.bounds toView:zoomedSubview];
Use whatever cropping method you'd like on the UIImage to get the necessary content.
This is the smoothest way to handle this kind of interaction and the code stays pretty simple!
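A rough Swift sketch of that same visible-rect crop, assuming imageView is the zoomed subview and is sized 1:1 with the image as described above (add scale handling if the image's scale factor isn't 1):
let visibleRect = scrollView.convertRect(scrollView.bounds, toView: imageView)
// Crop the underlying CGImage to the visible rect.
if let cgImage = imageView.image?.CGImage,
   let cropped = CGImageCreateWithImageInRect(cgImage, visibleRect) {
    let croppedImage = UIImage(CGImage: cropped)
}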
Just figured it out. Setting the CAShapeLayer's position property to the inverse of the UIImageView's position as it's dragged will lock the CAShapeLayer in its original position; however, Core Animation will by default attempt to animate the layer whenever its position is reassigned.
This can be disabled by wrapping both position assignments in a CATransaction, as shown below (the mask is now stored in a property so the gesture handler can reach it).
func onPanGesture(gesture: UIPanGestureRecognizer) {
    let point: CGPoint = gesture.locationInView(self.view)
    if (gesture.state == .Began) {
        print("begin", point)
        // capture our drag start position
        dragDelta = CGPoint(x: point.x - imageView.frame.origin.x, y: point.y - imageView.frame.origin.y)
    } else if (gesture.state == .Changed) {
        // update image & mask positions based on the distance dragged
        // and wrap both assignments in a CATransaction to disable animations
        CATransaction.begin()
        CATransaction.setDisableActions(true)
        mask.position.y = dragDelta.y - point.y
        imageView.frame.origin.y = point.y - dragDelta.y
        CATransaction.commit()
    } else if (gesture.state == .Ended) {
        print("ended", point)
    }
}
UPDATE
Here's an implementation of what I believe AlexKoren is suggesting. This approach nests a UIImageView within a UIScrollView and uses the UIScrollView to mask the image.
class ViewController: UIViewController, UIScrollViewDelegate {

    @IBOutlet weak var scrollView: UIScrollView!
    var imageView: UIImageView = UIImageView()

    override func viewDidLoad() {
        super.viewDidLoad()
        let image = UIImage(named: "point-bonitas")
        imageView.image = image
        imageView.frame = CGRectMake(0, 0, image!.size.width, image!.size.height)
        scrollView.delegate = self
        scrollView.contentMode = UIViewContentMode.Center
        scrollView.addSubview(imageView)
        scrollView.contentSize = imageView.frame.size
        let scale = scrollView.frame.size.width / scrollView.contentSize.width
        scrollView.minimumZoomScale = scale
        scrollView.maximumZoomScale = scale // set to 1 to allow zoom out to 100% of image size
        scrollView.zoomScale = scale
        // center image vertically in scrollview
        let offsetY: CGFloat = (scrollView.contentSize.height - scrollView.frame.size.height) / 2
        scrollView.contentOffset = CGPointMake(0, offsetY)
    }

    func scrollViewDidZoom(scrollView: UIScrollView) {
        print("zoomed")
    }

    func viewForZoomingInScrollView(scrollView: UIScrollView) -> UIView? {
        return imageView
    }
}
The other, perhaps simpler way would be to put the image view in a scroll view and let the scroll view manage it for you. It handles everything.

How to fix the gradient on text when using an image in Swift, as the gradient restarts

I'm trying to create a gradient on text. I have used UIGraphics with a gradient image to create this, but the problem I'm having is that the gradient restarts. Does anyone know how I can scale the gradient to stretch across the text?
The text is on a wireframe and will be altered a couple of times. Sometimes it will be perfect, but other times it is not.
The gradient should go from yellow to blue, but it restarts; see the photo below:
import UIKit

func colourTextWithGrad(label: UILabel) {
    UIGraphicsBeginImageContext(label.frame.size)
    UIImage(named: "testt.png")?.drawInRect(label.bounds)
    let myGradient: UIImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    label.textColor = UIColor(patternImage: myGradient)
}
You'll have to redraw the image each time the label size changes.
This is because a patterned UIColor is only ever tiled. From the documentation:
During drawing, the image in the pattern color is tiled as necessary to cover the given area.
Therefore, you'll need to regenerate the image yourself whenever the bounds of the label change, as pattern images don't support stretching. To do this, you can subclass UILabel and override the layoutSubviews method. Something like this should achieve the desired result:
class GradientLabel: UILabel {

    let gradientImage = UIImage(named: "gradient.png")

    override func layoutSubviews() {
        guard let grad = gradientImage else { // skip re-drawing gradient if it doesn't exist
            return
        }
        // redraw your gradient image
        UIGraphicsBeginImageContext(frame.size)
        grad.drawInRect(bounds)
        let myGradient = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        // update text color
        textColor = UIColor(patternImage: myGradient)
    }
}
Although it's worth noting that I'd always prefer to draw the gradient myself, as you get much more flexibility (say you want to add another color later). Also, the quality of your image may be degraded when you redraw it at different sizes (although due to the nature of gradients, this should be fairly minimal).
You can draw your own gradient fairly simply by overriding the drawRect method of your UILabel subclass. For example:
override func drawRect(rect: CGRect) {
    // begin new image context to let the superclass draw the text in (so we can use it as a mask)
    UIGraphicsBeginImageContextWithOptions(bounds.size, false, 0.0)
    do {
        // get your image context
        let ctx = UIGraphicsGetCurrentContext()
        // flip context
        CGContextScaleCTM(ctx, 1, -1)
        CGContextTranslateCTM(ctx, 0, -bounds.size.height)
        // get the superclass to draw text
        super.drawRect(rect)
    }
    // get image and end context
    let img = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    // get drawRect context
    let ctx = UIGraphicsGetCurrentContext()
    // clip context to image
    CGContextClipToMask(ctx, bounds, img.CGImage)
    // define your colors and locations
    let colors = [UIColor.orangeColor().CGColor, UIColor.redColor().CGColor, UIColor.purpleColor().CGColor, UIColor.blueColor().CGColor]
    let locs: [CGFloat] = [0.0, 0.3, 0.6, 1.0]
    // create your gradient
    let grad = CGGradientCreateWithColors(CGColorSpaceCreateDeviceRGB(), colors, locs)
    // draw gradient
    CGContextDrawLinearGradient(ctx, grad, CGPoint(x: 0, y: bounds.size.height*0.5), CGPoint(x: bounds.size.width, y: bounds.size.height*0.5), CGGradientDrawingOptions(rawValue: 0))
}
Output:
Swift 4, as a subclass:
class GradientLabel: UILabel {

    // MARK: - Colors to create gradient from
    @IBInspectable open var gradientFrom: UIColor?
    @IBInspectable open var gradientTo: UIColor?

    override func draw(_ rect: CGRect) {
        // begin new image context to let the superclass draw the text in (so we can use it as a mask)
        UIGraphicsBeginImageContextWithOptions(bounds.size, false, 0.0)
        do {
            // get your image context
            guard let ctx = UIGraphicsGetCurrentContext() else { super.draw(rect); return }
            // flip context
            ctx.scaleBy(x: 1, y: -1)
            ctx.translateBy(x: 0, y: -bounds.size.height)
            // get the superclass to draw text
            super.draw(rect)
        }
        // get image and end context
        guard let img = UIGraphicsGetImageFromCurrentImageContext(), img.cgImage != nil else { return }
        UIGraphicsEndImageContext()
        // get drawRect context
        guard let ctx = UIGraphicsGetCurrentContext() else { return }
        // clip context to image
        ctx.clip(to: bounds, mask: img.cgImage!)
        // define your colors and locations: use the inspectable colors when both
        // are set, otherwise fall back to a fixed set (the original answer declared
        // gradientFrom/gradientTo but never used them)
        let colors: [CGColor]
        let locs: [CGFloat]
        if let from = gradientFrom, let to = gradientTo {
            colors = [from.cgColor, to.cgColor]
            locs = [0.0, 1.0]
        } else {
            colors = [UIColor.orange.cgColor, UIColor.red.cgColor, UIColor.purple.cgColor, UIColor.blue.cgColor]
            locs = [0.0, 0.3, 0.6, 1.0]
        }
        // create your gradient
        guard let grad = CGGradient(colorsSpace: CGColorSpaceCreateDeviceRGB(), colors: colors as CFArray, locations: locs) else { return }
        // draw gradient
        ctx.drawLinearGradient(grad, start: CGPoint(x: 0, y: bounds.size.height*0.5), end: CGPoint(x: bounds.size.width, y: bounds.size.height*0.5), options: CGGradientDrawingOptions(rawValue: 0))
    }
}
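A minimal usage sketch; the two inspectable colors are picked up by draw(_:), falling back to the fixed four-color gradient when they aren't set:
// e.g. inside a view controller's viewDidLoad
let label = GradientLabel(frame: CGRect(x: 0, y: 0, width: 300, height: 60))
label.text = "Gradient text"
label.font = UIFont.boldSystemFont(ofSize: 40)
label.gradientFrom = .yellow
label.gradientTo = .blue
view.addSubview(label)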

Programmatic "fuzzy" style background for UIView

Of course, it's trivial to set a plain color for a background:
These days, instead of using "plain gray", it is popular to use a "fuzzy" or "cloudy" background, as a design feature in apps.
For example, here's a couple "fuzzy" backgrounds - it's just a plain color with perhaps some noise and maybe blur on that.
You can see backgrounds like this all over; consider popular feed apps (WhatsApp etc.). It's a "fad" of our day.
It occurred to me that it would be fantastic if you could do this in code in Swift.
Note: starting with a PNG is not an elegant solution; hopefully it is possible to generate everything programmatically from scratch.
It would be great if the Inspector had a slider in the IBDesignable style, "Add faddish 'grainy' background..." - it should be possible in the new era!
This will get you started, based on something I wrote a long time ago:
@IBInspectable properties:
noiseColor: the noise/grain color, this is applied over the view's backgroundColor
noiseMinAlpha: the minimum alpha the randomized noise can be
noiseMaxAlpha: the maximum alpha the randomized noise can be
noisePasses: how many times to apply the noise, more passes will be slower but can result in a better noise effect
noiseSpacing: how common the randomized noise occurs, higher spacing means the noise will be less frequent
Explanation:
When any of the designable noise properties change, the view is flagged for redraw. In the draw function the UIImage is generated (or pulled from NSCache if available).
In the generation method, each pixel is iterated over, and if the pixel should be noise (depending on the spacing parameter), the noise color is applied with a randomized alpha channel. This is done as many times as the number of passes.
// NoiseView.swift

import UIKit

let noiseImageCache = NSCache()

@IBDesignable class NoiseView: UIView {

    let noiseImageSize = CGSizeMake(128, 128)

    @IBInspectable var noiseColor: UIColor = UIColor.blackColor() {
        didSet { setNeedsDisplay() }
    }

    @IBInspectable var noiseMinAlpha: CGFloat = 0 {
        didSet { setNeedsDisplay() }
    }

    @IBInspectable var noiseMaxAlpha: CGFloat = 1 {
        didSet { setNeedsDisplay() }
    }

    @IBInspectable var noisePasses: Int = 1 {
        didSet {
            noisePasses = max(0, noisePasses)
            setNeedsDisplay()
        }
    }

    @IBInspectable var noiseSpacing: Int = 1 {
        didSet {
            noiseSpacing = max(1, noiseSpacing)
            setNeedsDisplay()
        }
    }

    override func drawRect(rect: CGRect) {
        super.drawRect(rect)
        UIColor(patternImage: currentUIImage()).set()
        UIRectFillUsingBlendMode(bounds, .Normal)
    }

    private func currentUIImage() -> UIImage {
        // Key based on all parameters
        let cacheKey = "\(noiseImageSize),\(noiseColor),\(noiseMinAlpha),\(noiseMaxAlpha),\(noisePasses)"
        var image = noiseImageCache.objectForKey(cacheKey) as! UIImage!
        if image == nil {
            image = generatedUIImage()
            #if !TARGET_INTERFACE_BUILDER
                noiseImageCache.setObject(image, forKey: cacheKey)
            #endif
        }
        return image
    }

    private func generatedUIImage() -> UIImage {
        UIGraphicsBeginImageContextWithOptions(noiseImageSize, false, 0)
        let accuracy: CGFloat = 1000.0
        for _ in 0..<noisePasses {
            for y in 0..<Int(noiseImageSize.height) {
                for x in 0..<Int(noiseImageSize.width) {
                    if random() % noiseSpacing == 0 {
                        let alpha = (CGFloat(random() % Int((noiseMaxAlpha - noiseMinAlpha) * accuracy)) / accuracy) + noiseMinAlpha
                        noiseColor.colorWithAlphaComponent(alpha).set()
                        UIRectFill(CGRectMake(CGFloat(x), CGFloat(y), 1, 1))
                    }
                }
            }
        }
        let image = UIGraphicsGetImageFromCurrentImageContext() as UIImage
        UIGraphicsEndImageContext()
        return image
    }
}
In Swift 3:
import UIKit

let noiseImageCache = NSCache<AnyObject, AnyObject>()

@IBDesignable class NoiseView: UIView {

    let noiseImageSize = CGSize(width: 128.0, height: 128.0)

    @IBInspectable var noiseColor: UIColor = UIColor.black {
        didSet { setNeedsDisplay() }
    }

    @IBInspectable var noiseMinAlpha: CGFloat = 0 {
        didSet { setNeedsDisplay() }
    }

    @IBInspectable var noiseMaxAlpha: CGFloat = 0.5 {
        didSet { setNeedsDisplay() }
    }

    @IBInspectable var noisePasses: Int = 3 {
        didSet {
            noisePasses = max(0, noisePasses)
            setNeedsDisplay()
        }
    }

    @IBInspectable var noiseSpacing: Int = 1 {
        didSet {
            noiseSpacing = max(1, noiseSpacing)
            setNeedsDisplay()
        }
    }

    override func draw(_ rect: CGRect) {
        super.draw(rect)
        UIColor(patternImage: currentUIImage()).set()
        UIRectFillUsingBlendMode(bounds, .normal)
    }

    private func currentUIImage() -> UIImage {
        // Key based on all parameters
        let cacheKey = "\(noiseImageSize),\(noiseColor),\(noiseMinAlpha),\(noiseMaxAlpha),\(noisePasses)"
        var image = noiseImageCache.object(forKey: cacheKey as AnyObject) as? UIImage
        if image == nil {
            image = generatedUIImage()
            #if !TARGET_INTERFACE_BUILDER
                noiseImageCache.setObject(image!, forKey: cacheKey as AnyObject)
            #endif
        }
        return image!
    }

    private func generatedUIImage() -> UIImage {
        UIGraphicsBeginImageContextWithOptions(noiseImageSize, false, 0)
        let accuracy: CGFloat = 1000.0
        for _ in 0..<noisePasses {
            for y in 0..<Int(noiseImageSize.height) {
                for x in 0..<Int(noiseImageSize.width) {
                    if Int(arc4random()) % noiseSpacing == 0 {
                        let alpha = (CGFloat(arc4random() % UInt32((noiseMaxAlpha - noiseMinAlpha) * accuracy)) / accuracy) + noiseMinAlpha
                        noiseColor.withAlphaComponent(alpha).set()
                        UIRectFill(CGRect(x: x, y: y, width: 1, height: 1))
                    }
                }
            }
        }
        let image = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return image!
    }
}
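A minimal usage sketch of the Swift 3 version (the same properties can also be set in Interface Builder thanks to @IBDesignable):
// e.g. inside a view controller's viewDidLoad
let noiseView = NoiseView(frame: view.bounds)
noiseView.backgroundColor = .lightGray // base color; the noise is drawn over it
noiseView.noiseColor = .white
noiseView.noiseMinAlpha = 0.05
noiseView.noiseMaxAlpha = 0.2
noiseView.noisePasses = 2
view.addSubview(noiseView)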
You could easily build something up using GPUImage. It comes with a huge set of blurs, noise generators and filters, and you can connect them together in sequence to build up complex GPU-accelerated effects.
To give you a good starting point, here's a quick, dirty prototype of a function that uses GPUImage to do something like what you want. If you set 'orUseNoise' to YES it will create a blurred image based on Perlin noise instead of the image. Tweak the values pointed out to change the desired effect.
- (UIImage *)blurWithGPUImage:(UIImage *)sourceImage orUseNoise:(bool)useNoise {
    GPUImagePicture *stillImageSource = [[GPUImagePicture alloc] initWithImage:sourceImage];

    GPUImageGaussianBlurFilter *gaussFilter = [[GPUImageGaussianBlurFilter alloc] init];
    [gaussFilter setBlurRadiusInPixels:6]; //<<-------TWEAK
    [gaussFilter setBlurPasses:1];         //<<-------TWEAK

    if (useNoise) {
        GPUImagePerlinNoiseFilter *perlinNoise = [[GPUImagePerlinNoiseFilter alloc] init];
        [perlinNoise setColorStart:(GPUVector4){1.0, 1.0, 1.0f, 1.0}];  //<<-------TWEAK
        [perlinNoise setColorFinish:(GPUVector4){0.5, 0.5, 0.5f, 1.0}]; //<<-------TWEAK
        [perlinNoise setScale:200];                                     //<<-------TWEAK
        [stillImageSource addTarget:perlinNoise];
        [perlinNoise addTarget:gaussFilter];
    } else {
        [stillImageSource addTarget:gaussFilter];
    }

    [gaussFilter useNextFrameForImageCapture];
    [stillImageSource processImage];
    UIImage *outputImage = [gaussFilter imageFromCurrentFramebuffer];

    // Set up output context.
    UIGraphicsBeginImageContext(self.view.frame.size);
    CGContextRef outputContext = UIGraphicsGetCurrentContext();

    // Invert image coordinates
    CGContextScaleCTM(outputContext, 1.0, -1.0);
    CGContextTranslateCTM(outputContext, 0, -self.view.frame.size.height);

    // Draw base image.
    CGContextDrawImage(outputContext, self.view.frame, outputImage.CGImage);

    // Apply tint
    CGContextSaveGState(outputContext);
    UIColor *tint = [UIColor colorWithWhite:1.0f alpha:0.6]; //<<-------TWEAK
    CGContextSetFillColorWithColor(outputContext, tint.CGColor);
    CGContextFillRect(outputContext, self.view.frame);
    CGContextRestoreGState(outputContext);

    // Output image
    outputImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    return outputImage;
}
This is a simple stack of:
GPUImagePicture -> GPUImagePerlinNoiseFilter -> GPUImageGaussianBlurFilter
..with a bit of handling code to make into an image properly.
You can try changing the stack to use some of the many other filters.
NOTE: Even if you use the noise instead of the image, you will still need to provide an image until you cut that part out.
We use the great KGNoise component. It is really easy to use, and I think it can help you.
KGNoise generates random black and white pixels into a static 128x128 image that is then tiled to fill the space. The random pixels are seeded with a value that has been chosen to look the most random; this also means that the noise will look consistent between app launches.
I agree with the answer about GPUImage, and since you don't want to provide an image, you could create a blank image like this:
func createNoiseImage(size: CGSize, color: UIColor) -> UIImage {
    UIGraphicsBeginImageContext(size)
    let context = UIGraphicsGetCurrentContext()
    CGContextSetFillColorWithColor(context, color.CGColor)
    CGContextFillRect(context, CGRectMake(0, 0, size.width, size.height))
    let image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    let filter = GPUImagePerlinNoiseFilter()
    return filter.imageByFilteringImage(image)
}
The main advantage of using GPUImage is speed.
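For example, a sketch of tiling the generated noise as a view background:
let noise = createNoiseImage(CGSizeMake(128, 128), color: UIColor.lightGrayColor())
view.backgroundColor = UIColor(patternImage: noise)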
While the question asks for a "programmatic" solution, it comes to mind that what you are trying to do and refer to as "fuzzy" sounds a lot like UIBlurEffect, UIVisualEffectView and UIVibrancyEffect, which were introduced in iOS 8.
In order to use these, you can drag a UIVisualEffectView onto your Storyboard scene to add a blur or vibrancy effect to a specific part of the screen.
If you would like to have an entire scene appear with the visual effect on top of the previous scene, you should configure the following:
Set either the view controller or the presentation segue to Presentation = Over Current Context.
Set the background color of the presented ("fuzzy") view controller to clearColor.
Embed the entire content of the presented view controller inside a UIVisualEffectView.
With that, you can get effects like this:
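If you prefer to set the effect view up in code instead of the Storyboard, a minimal sketch is:
// Blur whatever is rendered behind this view.
let blurView = UIVisualEffectView(effect: UIBlurEffect(style: .Light))
blurView.frame = view.bounds
blurView.autoresizingMask = [.FlexibleWidth, .FlexibleHeight]
view.addSubview(blurView)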

Allowing users to draw rect on a UIImage, with the intention of cropping the image

I'm sure this has been asked a number of times from various different perspectives, but I'm unable to find an answer on here as yet.
What I want to achieve
What I would like to do is to display a UIImage, and allow the user to draw a rectangle on the image, and eventually crop their selection.
Research so far
I've found previous questions here on SO that handle the cropping; however, they often deal with static cropping areas that don't change, which leads to the following constraints of such a mechanism:
The area of the image you're interested in may be positioned anywhere; for example, if you're trying to crop a road sign, it may be centered in one image but aligned left in another, so you can't predict which area to crop.
The size and scale of the area of interest may change; for example, one image may be a close-up of that road sign, so the cropping area would be larger, while another image may have been taken from a distance, meaning the cropping area would be smaller.
With the combination of the above two variables, it's almost impossible to accurately predict where the area of interest in the image will be, so I'm relying on the user to define this by being able to "draw" a box around the area we're interested in, in this case, a road sign.
This is all peachy on Android, since you can delegate all the hard work out with a nice intent, such as :
Intent intent = new Intent("com.android.camera.action.CROP");
However, I can't find an equivalent for iOS.
I've found this bit of code from this source :
- (UIImage *)imageByDrawingCircleOnImage:(UIImage *)image
{
    // begin a graphics context of sufficient size
    UIGraphicsBeginImageContext(image.size);

    // draw original image into the context
    [image drawAtPoint:CGPointZero];

    // get the context for CoreGraphics
    CGContextRef ctx = UIGraphicsGetCurrentContext();

    // set stroking color and draw circle
    [[UIColor redColor] setStroke];

    // make circle rect 5 px from border
    CGRect circleRect = CGRectMake(0, 0,
                                   image.size.width,
                                   image.size.height);
    circleRect = CGRectInset(circleRect, 5, 5);

    // draw circle
    CGContextStrokeEllipseInRect(ctx, circleRect);

    // make image out of bitmap context
    UIImage *retImage = UIGraphicsGetImageFromCurrentImageContext();

    // free the context
    UIGraphicsEndImageContext();

    return retImage;
}
I believe this is a good starting point (it draws a circle on the image rather than cropping, however), but it does rely on predefining the area you want when calling CGRectMake.
This previous question also details how to do the actual cropping.
I'm assuming that to allow the user to draw the rect, I'd need to integrate with Gestures?
The Question :
How can I allow the user, to draw a rect over an image view, with the intent of cropping that area?
You could give BJImageCropper a try:
A simple UIView subclass that allows a user to crop an image. If you use it, I'd love to know! Twitter: @barrettjacobsen
This post is already 5 years old, but for future reference, this is how I managed to get it done. The following code is a combination of Rob's answer and some image cropping.
Xcode 9 and Swift 4 are used here.
Add 2 view controllers
Add an image view and 2 buttons to the first view controller, and another image view to the last view controller
Link all views to the source file
View controller
import UIKit

extension UIView {
    func snapshot(afterScreenUpdates: Bool = false) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(bounds.size, isOpaque, 0)
        drawHierarchy(in: bounds, afterScreenUpdates: afterScreenUpdates)
        let image = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return image
    }
}

extension UIImage {
    func crop(rect: CGRect) -> UIImage {
        var rect = rect
        rect.origin.x *= self.scale
        rect.origin.y *= self.scale
        rect.size.width *= self.scale
        rect.size.height *= self.scale
        let imageRef = self.cgImage!.cropping(to: rect)
        let image = UIImage(cgImage: imageRef!, scale: self.scale, orientation: self.imageOrientation)
        return image
    }
}

class ViewController: UIViewController {

    var rec: CGRect!
    var cropImage: UIImage!

    @IBOutlet weak var imageView: UIImageView!

    private let shapeLayer: CAShapeLayer = {
        let _shapeLayer = CAShapeLayer()
        _shapeLayer.fillColor = UIColor.clear.cgColor
        _shapeLayer.strokeColor = UIColor.green.cgColor
        _shapeLayer.lineWidth = 2
        return _shapeLayer
    }()

    private var startPoint: CGPoint!

    override func viewDidLoad() {
        super.viewDidLoad()
        imageView.layer.addSublayer(shapeLayer)
    }

    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
        clear()
        startPoint = touches.first?.location(in: imageView)
    }

    func clear() {
        imageView.layer.sublayers = nil
        imageView.image = UIImage(named: "aa")
        imageView.layer.addSublayer(shapeLayer)
    }

    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
        guard let startPoint = startPoint, let touch = touches.first else { return }
        let point: CGPoint
        if let predictedTouch = event?.predictedTouches(for: touch)?.last {
            point = predictedTouch.location(in: imageView)
        } else {
            point = touch.location(in: imageView)
        }
        updatePath(from: startPoint, to: point)
    }

    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
        guard let startPoint = startPoint, let touch = touches.first else { return }
        let point = touch.location(in: imageView)
        updatePath(from: startPoint, to: point)
        imageView.image = imageView.snapshot(afterScreenUpdates: true)
        shapeLayer.path = nil
    }

    override func touchesCancelled(_ touches: Set<UITouch>, with event: UIEvent?) {
        shapeLayer.path = nil
    }

    private func updatePath(from startPoint: CGPoint, to point: CGPoint) {
        let size = CGSize(width: point.x - startPoint.x, height: point.y - startPoint.y)
        rec = CGRect(origin: startPoint, size: size)
        shapeLayer.path = UIBezierPath(rect: rec).cgPath
    }

    @IBAction func btnTapped(_ sender: Any) {
        clear()
    }

    @IBAction func btnCropTapped(_ sender: Any) {
        let newRec = CGRect(origin: CGPoint(x: rec.origin.x, y: rec.origin.y), size: rec.size)
        cropImage = imageView.image?.crop(rect: newRec)
        print(rec)
        print(newRec)
        self.performSegue(withIdentifier: "toImage", sender: nil)
    }

    override func prepare(for segue: UIStoryboardSegue, sender: Any?) {
        if segue.identifier == "toImage" {
            if let destination = segue.destination as? ImageViewController {
                destination.croppedImage = cropImage
            }
        }
    }
}
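The ImageViewController used in prepare(for:) isn't shown above; a minimal sketch would be a controller with a croppedImage property feeding its image view (the outlet name is illustrative):
import UIKit

class ImageViewController: UIViewController {

    var croppedImage: UIImage!
    @IBOutlet weak var imageView: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Show the image passed in from the cropping view controller.
        imageView.image = croppedImage
    }
}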
