In iOS, implementing a 9-patch image shows strange white vertical lines - ios

Here is the case:
Raw image: gift.9
Image after processing: there are two strange white vertical lines.
Code:
view.backgroundColor = UIColor.white
let imgViewWidth: CGFloat = 300
let imgView = UIImageView(frame: CGRect(origin: CGPoint(x: 50, y: 250), size: CGSize(width: imgViewWidth, height: 100)))
view.addSubview(imgView)
if let image = UIImage(named: "gift.9"), let img = stretch(image: image, newWidth: imgViewWidth) {
    imgView.image = img
}
The logic is simple: crop the image into two halves, then stretch each half.
Image crop:
func crop(img: UIImage, with rect: CGRect) -> UIImage? {
    guard let cgImg = img.cgImage else { return nil }
    // Create bitmap image from context using the rect
    if let imageRef: CGImage = cgImg.cropping(to: rect) {
        // Create a new image based on the imageRef and rotate back to the original orientation
        let image: UIImage = UIImage(cgImage: imageRef, scale: img.scale, orientation: img.imageOrientation)
        return image
    } else {
        return nil
    }
}
Image stretch:
func stretch(image: UIImage, newWidth: CGFloat) -> UIImage? {
    guard image.size.width < newWidth else {
        return image
    }
    let originalWidth = image.size.width
    let offset: CGFloat = 1
    // crop the left half
    let left = crop(img: image, with: CGRect(x: 0, y: 0, width: originalWidth/2, height: image.size.height))
    // crop the right half
    let right = crop(img: image, with: CGRect(x: originalWidth/2, y: 0, width: originalWidth/2, height: image.size.height))
    UIGraphicsBeginImageContextWithOptions(CGSize(width: newWidth, height: image.size.height), false, image.scale)
    let lhsResizable = left?.resizableImage(withCapInsets: UIEdgeInsets(top: 0,
                                                                        left: originalWidth/2 - offset,
                                                                        bottom: 0,
                                                                        right: 0),
                                            resizingMode: .stretch)
    // draw the stretched left half first
    lhsResizable?.draw(in: CGRect(x: 0, y: 0, width: newWidth/2, height: image.size.height))
    let rhsResizable = right?.resizableImage(withCapInsets: UIEdgeInsets(top: 0,
                                                                         left: 0,
                                                                         bottom: 0,
                                                                         right: originalWidth/2 - offset),
                                             resizingMode: .stretch)
    // then draw the stretched right half
    rhsResizable?.draw(in: CGRect(x: newWidth/2, y: 0, width: newWidth/2, height: image.size.height))
    let fullImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return fullImage
}
PS: I marked the two strange vertical white lines in the screenshot.
How do I get rid of them?
Any other method is welcome.
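Since any other method is welcome: the usual UIKit way to get 9-patch style stretching is to give the whole image stretchable cap insets and let the image view do the drawing, instead of cropping and drawing two halves into a context. Below is a minimal sketch, assuming gift.9 should stretch only a 1-point column in its middle; adjust the insets to wherever the stretchable pixels actually sit in your asset.

func ninePatchStretched(image: UIImage) -> UIImage {
    // Everything outside the insets keeps its natural size;
    // only the narrow column between them is stretched.
    let capInsets = UIEdgeInsets(top: 0,
                                 left: image.size.width / 2 - 0.5,
                                 bottom: 0,
                                 right: image.size.width / 2 - 0.5)
    return image.resizableImage(withCapInsets: capInsets, resizingMode: .stretch)
}

// Usage: assign the resizable image directly; the image view stretches it itself.
if let image = UIImage(named: "gift.9") {
    imgView.image = ninePatchStretched(image: image)
}

Because there is only one image and one stretchable region, nothing is drawn twice, so there is no joint between two halves that could show up as a seam.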

Related

Swift 3 draw uiimage programmatically

I don't have experience in Core Graphics, but I need to draw a dynamic UIImage that looks like these:
left
whole
(Actually I want the grey area to be clear, so the red color will look like it's floating.)
This is the code I tried:
public extension UIImage {
    public convenience init?(color: UIColor, size: CGSize = CGSize(width: 27, height: 5), isWhole: Bool = true) {
        let totalHeight: CGFloat = 5.0
        let topRectHeight: CGFloat = 1.0
        //if (isWhole) {
        let topRect = CGRect(origin: .zero, size: CGSize(width: size.width, height: topRectHeight))
        UIGraphicsBeginImageContextWithOptions(topRect.size, false, 0.0)
        color.setFill()
        UIRectFill(topRect)
        let bottomRect = CGRect(origin: CGPoint(x: 0, y: topRectHeight), size: CGSize(width: size.width, height: totalHeight - topRectHeight))
        UIGraphicsBeginImageContextWithOptions(bottomRect.size, false, 0.0)
        UIColor.blue.setFill()
        UIRectFill(bottomRect)
        let image = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        guard let cgImage = image?.cgImage else { return nil }
        self.init(cgImage: cgImage)
    }
}
Here is an example: you get the first image if you set the isWhole property to false and the second image if you set it to true. You can paste this code into viewDidLoad to test and play with it.
var isWhole = false
UIGraphicsBeginImageContextWithOptions(CGSize(width: 27, height: 5), false, 0.0)
var context = UIGraphicsGetCurrentContext()
context?.setFillColor(UIColor.red.cgColor)
if context != nil {
    if isWhole {
        context?.fill(CGRect(x: 0, y: 0, width: 27, height: 2.5))
    } else {
        context?.fill(CGRect(x: 0, y: 0, width: 13.5, height: 2.5))
    }
    context?.setFillColor(UIColor.gray.cgColor)
    context?.fill(CGRect(x: 0, y: 2.5, width: 27, height: 2.5))
}
let newImage = UIGraphicsGetImageFromCurrentImageContext()
var imageView = UIImageView(frame: CGRect(x: 100, y: 200, width: 27, height: 5))
imageView.image = newImage
self.view.addSubview(imageView)
UIGraphicsEndImageContext()
If you need your red rectangle to have rounded corners, just replace the fill(_:) call with a path, like this:
if isWhole {
    context?.addPath(CGPath(roundedRect: CGRect(x: 0, y: 0, width: 27, height: 2.5), cornerWidth: 1, cornerHeight: 1, transform: nil))
    context?.fillPath()
}
I'd recommend creating two paths in the first context that you create, i.e.
let topPath = UIBezierPath(rect: topRect)
color.setFill()
topPath.fill()
let bottomPath = UIBezierPath(rect: bottomRect)
UIColor.blue.setFill()
bottomPath.fill()
Then you can get the image from the current context.
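Putting that advice together, here is a sketch of the initializer drawn into a single image context. Treat it as a starting point: the half-width top bar when isWhole is false is an assumption based on the "left"/"whole" images in the question.

public extension UIImage {
    public convenience init?(color: UIColor, size: CGSize = CGSize(width: 27, height: 5), isWhole: Bool = true) {
        let topRectHeight: CGFloat = 1.0
        // One context for the whole image; both rects are drawn into it.
        UIGraphicsBeginImageContextWithOptions(size, false, 0.0)
        // Top bar: full width for the "whole" variant, half width for the "left" variant.
        let topWidth = isWhole ? size.width : size.width / 2
        color.setFill()
        UIBezierPath(rect: CGRect(x: 0, y: 0, width: topWidth, height: topRectHeight)).fill()
        // Bottom bar; skip this fill if you want that area to stay clear.
        UIColor.blue.setFill()
        UIBezierPath(rect: CGRect(x: 0, y: topRectHeight, width: size.width, height: size.height - topRectHeight)).fill()
        let image = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        guard let cgImage = image?.cgImage else { return nil }
        self.init(cgImage: cgImage)
    }
}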
I know you said in your comments that you can't use UIViews, but what you want "looks" easily done through views. Why not do that, then simply turn it into a UIImage?
override func viewDidLoad() {
    super.viewDidLoad()
    let imageContainer = UIView(frame: CGRect(x: 30, y: 40, width: 110, height: 60))
    imageContainer.backgroundColor = view.backgroundColor
    view.addSubview(imageContainer)
    let redView = UIView(frame: CGRect(x: 0, y: 0, width: 100, height: 30))
    redView.backgroundColor = UIColor.red
    let grayView = UIView(frame: CGRect(x: 10, y: 30, width: 100, height: 30))
    grayView.backgroundColor = UIColor.gray
    imageContainer.addSubview(redView)
    imageContainer.addSubview(grayView)
    let imageView = UIImageView(frame: CGRect(x: 100, y: 140, width: 200, height: 200))
    imageView.contentMode = .scaleAspectFit
    imageView.image = createImage(imageContainer)
    view.addSubview(imageView)
}

func createImage(_ view: UIView) -> UIImage {
    UIGraphicsBeginImageContextWithOptions(CGSize(width: view.frame.width, height: view.frame.height), true, 1)
    view.layer.render(in: UIGraphicsGetCurrentContext()!)
    let image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return image!
}
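On iOS 10 and later, UIGraphicsImageRenderer is an alternative to the manual begin/end context calls. A sketch of the same helper, not part of the original answer:

func createImage(_ view: UIView) -> UIImage {
    // The renderer owns the context and always hands back a non-optional UIImage.
    let renderer = UIGraphicsImageRenderer(size: view.bounds.size)
    return renderer.image { context in
        view.layer.render(in: context.cgContext)
    }
}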

How to merge two UIImages while keeping the aspect ratio and size?

The code is on GitHub so you can understand the real problem.
This is the hierarchy:
-- ViewController.View P [width: 375, height: 667]
---- UIImageView A [width: 375, height: 667] Name: imgBackground
[A is holding an image of size(1287,1662)]
---- UIImageView B [width: 100, height: 100] Name: imgForeground
[B is holding an image of size(2400,982)]
I am trying to merge A with B but the result is stretched.
This is the merge code:
func mixImagesWith(frontImage: UIImage?, backgroundImage: UIImage?, atPoint point: CGPoint, ofSize signatureSize: CGSize) -> UIImage {
    let size = self.imgBackground.frame.size
    UIGraphicsBeginImageContextWithOptions(size, false, UIScreen.main.scale)
    backgroundImage?.draw(in: CGRect(x: 0, y: 0, width: size.width, height: size.height))
    frontImage?.draw(in: CGRect(x: point.x, y: point.y, width: signatureSize.width, height: signatureSize.height))
    let newImage: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
    UIGraphicsEndImageContext()
    return newImage
}
Note: .contentMode = .scaleAspectFit
The code works, but the result is stretched.
See this line in the code: let size = self.imgBackground.frame.size – I need to change this to fix the problem, i.e. find the origin of the subview with respect to the UIImage size.
Here's the screenshot to understand the problem:
What should I do to get the proper output of the merge function?
You have two bugs in your code:
You should also calculate the aspect-fit frame for the document image so that it fits into the UIImageView. In mergeImages(), replace:
img.draw(in: CGRect(x: 0, y: 0, width: size.width, height: size.height))
with:
img.draw(in: getAspectFitFrame(sizeImgView: size, sizeImage: img.size))
When calculating the aspect-fit frame you center the image horizontally/vertically if its width/height is less than the UIImageView's width/height. But instead of comparing newWidth and newHeight you should compare the factors:
if hfactor > vfactor {
    y = (sizeImgView.height - newHeight) / 2
} else {
    x = (sizeImgView.width - newWidth) / 2
}
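For reference, this is roughly what the aspect-fit helper looks like with that change applied (same signature as the getAspectFitFrame shown in the next answer; only the centering check differs):

func getAspectFitFrame(sizeImgView: CGSize, sizeImage: CGSize) -> CGRect {
    let hfactor = sizeImage.width / sizeImgView.width
    let vfactor = sizeImage.height / sizeImgView.height
    // Shrink by the larger factor so the whole image fits inside the view.
    let factor = max(hfactor, vfactor)
    let newWidth = sizeImage.width / factor
    let newHeight = sizeImage.height / factor
    var x: CGFloat = 0.0
    var y: CGFloat = 0.0
    // Compare the factors, not the resulting sizes, to pick the axis to center on.
    if hfactor > vfactor {
        y = (sizeImgView.height - newHeight) / 2
    } else {
        x = (sizeImgView.width - newWidth) / 2
    }
    return CGRect(x: x, y: y, width: newWidth, height: newHeight)
}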
Try the code below; it works for me, and hopefully it works for you too:
func addWaterMarkToImage(img: UIImage, sizeWaterMark: CGRect, waterMarkImage: UIImage, completion: ((UIImage) -> ())?) {
    handler = completion
    let img2: UIImage = waterMarkImage
    let rect = CGRect(x: 0, y: 0, width: img.size.width, height: img.size.height)
    UIGraphicsBeginImageContext(img.size)
    img.draw(in: rect)
    let frameAspect: CGRect = getAspectFitFrame(sizeImgView: sizeWaterMark.size, sizeImage: waterMarkImage.size)
    let frameOrig: CGRect = CGRect(x: sizeWaterMark.origin.x + frameAspect.origin.x, y: sizeWaterMark.origin.y + frameAspect.origin.y, width: frameAspect.size.width, height: frameAspect.size.height)
    img2.draw(in: frameOrig, blendMode: .normal, alpha: 1)
    let result = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    if handler != nil {
        handler!(result!)
    }
}
// MARK: - Get the aspect-fit frame of a UIImage
func getAspectFitFrame(sizeImgView: CGSize, sizeImage: CGSize) -> CGRect {
    let imageSize: CGSize = sizeImage
    let viewSize: CGSize = sizeImgView
    let hfactor: CGFloat = imageSize.width / viewSize.width
    let vfactor: CGFloat = imageSize.height / viewSize.height
    let factor: CGFloat = max(hfactor, vfactor)
    // Divide the size by the greater of the vertical or horizontal shrinkage factor
    let newWidth: CGFloat = imageSize.width / factor
    let newHeight: CGFloat = imageSize.height / factor
    var x: CGFloat = 0.0
    var y: CGFloat = 0.0
    if newWidth > newHeight {
        y = (sizeImgView.height - newHeight) / 2
    }
    if newHeight > newWidth {
        x = (sizeImgView.width - newWidth) / 2
    }
    let newRect: CGRect = CGRect(x: x, y: y, width: newWidth, height: newHeight)
    return newRect
}

How to put a black color with opacity on an image in Swift 3

I'd like to know how I can put a black rectangle with opacity on top of an image in Swift 3. For example:
You can create a black UIView with some opacity, make it the same size as your UIImageView, and add it as a subview. Try something like:
let tintView = UIView()
tintView.backgroundColor = UIColor(white: 0, alpha: 0.5) // change to your liking
tintView.frame = CGRect(x: 0, y: 0, width: imageView.frame.width, height: imageView.frame.height)
imageView.addSubview(tintView) // imageView is your UIImageView
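If the image view can change size later, a flexible autoresizing mask keeps the tint covering it; a small addition, not part of the original answer:

tintView.autoresizingMask = [.flexibleWidth, .flexibleHeight]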
func drawRectOn(image: UIImage) -> UIImage {
    UIGraphicsBeginImageContext(image.size)
    image.draw(at: CGPoint.zero)
    let context = UIGraphicsGetCurrentContext()
    // set fill gray and alpha
    context!.setFillColor(gray: 0, alpha: 0.5)
    context!.move(to: CGPoint(x: 0, y: 0))
    context!.fill(CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height))
    context!.strokePath()
    let resultImage = UIGraphicsGetImageFromCurrentImageContext()
    // end the graphics context
    UIGraphicsEndImageContext()
    return resultImage!
}

how to place a scrolling image behind a stationary image in Swift

I am trying to port an artificial horizon app I wrote for a PC in C# to Swift. It has a bezel image which does not move, and behind it is a horizon image which can move up and down behind the bezel. The "window" part of the bezel is yellow, so in C# I just made the yellow opaque.
In Swift I started with the horizon inside a UIScrollView, but I'm not sure how to get that to work with a second image that should not scroll.
I'm not all that up to speed on this Swift stuff; can someone point me in the right direction?
let view: UIView = UIView.init(frame: CGRect(x: 0, y: 0, width: 500, height: 500))
let scrollView = UIScrollView.init(frame: view.bounds)
view.addSubview(scrollView)
let backImage: UIImage = fromColor(UIColor.redColor(), size: CGSize(width: 1000, height: 1000))
let backImageView: UIImageView = UIImageView.init(image: backImage)
scrollView.addSubview(backImageView)
scrollView.contentSize = CGSize.init(width: backImage.size.width, height: backImage.size.height)
let frontImage: UIImage = fromColor(UIColor.blueColor(), size: CGSize(width: 100, height: 100))
let layer: CALayer = CALayer.init()
layer.frame = CGRect.init(x: view.center.x - 50, y: view.center.y - 50, width: 100, height: 100)
layer.contents = frontImage.CGImage
view.layer.addSublayer(layer)
func fromColor(color: UIColor, size: CGSize) -> UIImage {
    let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    UIGraphicsBeginImageContext(rect.size)
    let context = UIGraphicsGetCurrentContext()
    CGContextSetFillColorWithColor(context, color.CGColor)
    CGContextFillRect(context, rect)
    let img = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return img
}
fromColor is a helper method.
Result of the code
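To move the horizon from code rather than by dragging, one option is to drive the scroll view's content offset. A sketch in the same Swift 2-era syntax as the answer; pitchOffset is a hypothetical value you would map from the pitch angle:

// Let only the app move the horizon, not the user's finger.
scrollView.scrollEnabled = false
// Scroll the background image up or down behind the stationary front layer.
let pitchOffset: CGFloat = 120
scrollView.setContentOffset(CGPoint(x: 0, y: pitchOffset), animated: false)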

Resize and Crop 2 Images affected the original image quality

Suppose that I have a UIImage object on the UIViewController, and I want to set the image from the controller. Basically what I want to do is merge two images together: the first image is the 5 stars in blue:
and the second image is the 5 stars in grey:
It's intended as a rating image. Since the maximum rating is 5, I multiply it by 20 to get a 100-point scale and make the calculation easier. Please see the code for the detailed logic.
So I have this (BM_RatingHelper.swift) :
static func getRatingImageBasedOnRating(rating: CGFloat, width: CGFloat, height: CGFloat) -> UIImage {
    // available maximum rating is 5.0, so we have to multiply it by 20 to achieve 100.0 points
    let ratingImageWidth = ( width / 100.0 ) * ( rating * 20.0 )
    // get active rating image
    let activeRatingImage = BM_ImageHelper.resize(UIImage(named: "StarRatingFullActive")!, targetSize: CGSize(width: width, height: height))
    let activeRatingImageView = UIImageView(frame: CGRectMake(0, 0, ratingImageWidth, height))
    activeRatingImageView.image = BM_ImageHelper.crop(activeRatingImage, x: 0, y: 0, width: ratingImageWidth, height: height)
    // get inactive rating image
    let inactiveRatingImage = BM_ImageHelper.resize(UIImage(named: "StarRatingFullInactive")!, targetSize: CGSize(width: width, height: height))
    let inactiveRatingImageView = UIImageView(frame: CGRectMake(ratingImageWidth, 0, ( 100.0 - ratingImageWidth ), height))
    inactiveRatingImageView.image = BM_ImageHelper.crop(inactiveRatingImage, x: ratingImageWidth, y: 0, width: ( 100.0 - ratingImageWidth ), height: height)
    // combine the images
    let ratingView = UIView(frame: CGRect(x: 0, y: 0, width: width, height: height))
    ratingView.backgroundColor = BM_Color.colorForType(BM_ColorType.ColorWhiteTransparent)
    ratingView.addSubview(activeRatingImageView)
    ratingView.addSubview(inactiveRatingImageView)
    return ratingView.capture()
}
The BM_ImageHelper.swift :
import UIKit

class BM_ImageHelper: NSObject {

    // http://stackoverflow.com/questions/158914/cropping-an-uiimage
    static func crop(image: UIImage, x: CGFloat, y: CGFloat, width: CGFloat, height: CGFloat) -> UIImage {
        let rect = CGRect(x: x, y: y, width: width, height: height)
        let imageRef = CGImageCreateWithImageInRect(image.CGImage, rect)!
        let croppedImage = UIImage(CGImage: imageRef)
        return croppedImage
    }

    // http://iosdevcenters.blogspot.com/2015/12/how-to-resize-image-in-swift-in-ios.html
    static func resize(image: UIImage, targetSize: CGSize) -> UIImage {
        let size = image.size
        let widthRatio = targetSize.width / image.size.width
        let heightRatio = targetSize.height / image.size.height
        // Figure out what our orientation is, and use that to form the rectangle
        var newSize: CGSize
        if widthRatio > heightRatio {
            newSize = CGSizeMake(size.width * heightRatio, size.height * heightRatio)
        } else {
            newSize = CGSizeMake(size.width * widthRatio, size.height * widthRatio)
        }
        // This is the rect that we've calculated out and this is what is actually used below
        let rect = CGRectMake(0, 0, newSize.width, newSize.height)
        // Actually do the resizing to the rect using the ImageContext stuff
        UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
        image.drawInRect(rect)
        let newImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return newImage
    }
}

extension UIView {

    // http://stackoverflow.com/a/34895760/897733
    func capture() -> UIImage {
        UIGraphicsBeginImageContextWithOptions(self.frame.size, self.opaque, UIScreen.mainScreen().scale)
        self.layer.renderInContext(UIGraphicsGetCurrentContext()!)
        let image = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return image
    }
}
I call that function like this (suppose the image that needs to be filled is ratingImage):
self.ratingImage.image = BM_RatingHelper.getRatingImageBasedOnRating(3.7, width: 100.0, height: 20.0)
The code works perfectly, but the merged image is very low in quality even though I used high-quality source images. This is the image for a 3.7 rating:
What should I do to merge the images without losing the original quality? Thanks.
In your BM_ImageHelper.resize method you're passing a scale of 1.0. It should be the device screen's scale.
Change it to:
UIGraphicsBeginImageContextWithOptions(newSize, false, UIScreen.mainScreen().scale)
UPDATE
Also change your crop method to account for the scale, like:
static func crop(image: UIImage, x: CGFloat, y: CGFloat, width: CGFloat, height: CGFloat) -> UIImage {
    let transform = CGAffineTransformMakeScale(image.scale, image.scale)
    let rect = CGRect(x: x, y: y, width: width, height: height)
    let transformedCropRect = CGRectApplyAffineTransform(rect, transform)
    let imageRef = CGImageCreateWithImageInRect(image.CGImage, transformedCropRect)!
    let croppedImage = UIImage(CGImage: imageRef, scale: image.scale, orientation: image.imageOrientation)
    return croppedImage
}
