How to archive a UITextField whose text color is a gradient? - ios

I am trying to archive a text field whose text is a gradient color. I archive it to store the data in my Core Data database, but the archive method gives me this error: *** Terminating app due to uncaught exception 'NSInternalInconsistencyException', reason: 'Only RGBA or White color spaces are supported in this situation.' The archive code is let data = NSKeyedArchiver.archivedData(withRootObject: testerText). The viewDidLoad code is:
override func viewDidLoad() {
    super.viewDidLoad()
    testerText.textColor = UIColor(patternImage: gradientImage(size: testerText.frame.size, color1: CIColor(color: UIColor.green), color2: CIColor(color: UIColor.red), direction: .Left))
    let data = NSKeyedArchiver.archivedData(withRootObject: testerText)
    //print(toshow)
}
And the function that makes the text color a gradient is given below.
// GradientDirection is a custom enum with cases .Up, .Left, .UpLeft, .UpRight (definition not shown)
func gradientImage(size: CGSize, color1: CIColor, color2: CIColor, direction: GradientDirection = .Up) -> UIImage {
    let context = CIContext(options: nil)
    let filter = CIFilter(name: "CILinearGradient")
    var startVector: CIVector
    var endVector: CIVector
    filter!.setDefaults()
    switch direction {
    case .Up:
        startVector = CIVector(x: size.width * 0.5, y: 0)
        endVector = CIVector(x: size.width * 0.5, y: size.height)
    case .Left:
        startVector = CIVector(x: size.width, y: size.height * 0.5)
        endVector = CIVector(x: 0, y: size.height * 0.5)
    case .UpLeft:
        startVector = CIVector(x: size.width, y: 0)
        endVector = CIVector(x: 0, y: size.height)
    case .UpRight:
        startVector = CIVector(x: 0, y: 0)
        endVector = CIVector(x: size.width, y: size.height)
    }
    filter!.setValue(startVector, forKey: "inputPoint0")
    filter!.setValue(endVector, forKey: "inputPoint1")
    filter!.setValue(color1, forKey: "inputColor0")
    filter!.setValue(color2, forKey: "inputColor1")
    let image = UIImage(cgImage: context.createCGImage(filter!.outputImage!, from: CGRect(x: 0, y: 0, width: size.width, height: size.height))!)
    return image
}
Now I want to know: how do I archive this text field so that I can store it in my database?

Try this for gradient text in a text field. Archive the field's text rather than the UITextField itself; the pattern-image text color is what raises the color-space exception:
override func viewDidLoad() {
    super.viewDidLoad()
    testerText.textColor = UIColor(patternImage: gradientImage(size: testerText.frame.size, color1: CIColor(color: UIColor(red: 1.0, green: 1.0, blue: 1.0, alpha: 1.0)), color2: CIColor(color: UIColor(red: 1.0, green: 1.0, blue: 1.0, alpha: 0.2))))
    let data = NSKeyedArchiver.archivedData(withRootObject: testerText.text as Any)
}

func gradientImage(size: CGSize, color1: CIColor, color2: CIColor) -> UIImage {
    let context = CIContext(options: nil)
    let filter = CIFilter(name: "CILinearGradient")
    var startVector: CIVector
    var endVector: CIVector
    filter!.setDefaults()
    startVector = CIVector(x: size.width * 0.5, y: 0)
    endVector = CIVector(x: size.width * 0.5, y: size.height * 0.8)
    filter!.setValue(startVector, forKey: "inputPoint0")
    filter!.setValue(endVector, forKey: "inputPoint1")
    filter!.setValue(color1, forKey: "inputColor0")
    filter!.setValue(color2, forKey: "inputColor1")
    let image = UIImage(cgImage: context.createCGImage(filter!.outputImage!, from: CGRect(x: 0, y: 0, width: size.width, height: size.height))!)
    return image
}
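Since the gradient is a display attribute rather than data, you can unarchive the stored text later and reapply the gradient. A minimal sketch, assuming the same testerText outlet and the gradientImage(size:color1:color2:) helper above:
// Store: archive only the text; the pattern-image text color cannot be archived.
let data = NSKeyedArchiver.archivedData(withRootObject: testerText.text as Any)

// Restore: unarchive the text, then reapply the gradient as a pattern color.
if let restoredText = NSKeyedUnarchiver.unarchiveObject(with: data) as? String {
    testerText.text = restoredText
    testerText.textColor = UIColor(patternImage: gradientImage(size: testerText.frame.size, color1: CIColor(color: .white), color2: CIColor(color: UIColor(white: 1.0, alpha: 0.2))))
}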

Related

Add rounding for the outline - swift

I have code that adds an outline of the required width and color to an image, and it works well.
But I still need to add rounding to the outline; please tell me in which direction I should start.
The two functions below (methods in a UIImage extension) do the job of drawing the outline.
func colorized(with color: UIColor = .white) -> UIImage {
    UIGraphicsBeginImageContextWithOptions(size, false, scale)
    defer {
        UIGraphicsEndImageContext()
    }
    guard let context = UIGraphicsGetCurrentContext(), let cgImage = cgImage else { return self }
    let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    color.setFill()
    context.translateBy(x: 0, y: size.height)
    context.scaleBy(x: 1.0, y: -1.0)
    context.clip(to: rect, mask: cgImage)
    context.fill(rect)
    guard let colored = UIGraphicsGetImageFromCurrentImageContext() else { return self }
    return colored
}
/**
 Returns the stroked version of the transparent image with the given stroke color and thickness.
 - Parameters:
   - color: The color to use. Defaults to `UIColor.red`.
   - thickness: The thickness of the border. Defaults to `2`.
   - quality: The number of degrees (out of 360): the smaller the better, but the slower. Defaults to `10`.
 - Returns: The stroked version of the image, or `self` if something went wrong.
 */
func stroked(with color: UIColor = .red, thickness: CGFloat = 2, quality: CGFloat = 10) -> UIImage {
    guard let cgImage = cgImage else { return self }
    // Colorize the stroke image to reflect the border color
    let strokeImage = colorized(with: color)
    guard let strokeCGImage = strokeImage.cgImage else { return self }
    /// Rendering quality of the stroke
    let step = quality == 0 ? 10 : abs(quality)
    let oldRect = CGRect(x: thickness, y: thickness, width: size.width, height: size.height).integral
    let newSize = CGSize(width: size.width + 2 * thickness, height: size.height + 2 * thickness)
    let translationVector = CGPoint(x: thickness, y: 0)
    UIGraphicsBeginImageContextWithOptions(newSize, false, scale)
    guard let context = UIGraphicsGetCurrentContext() else { return self }
    defer {
        UIGraphicsEndImageContext()
    }
    context.translateBy(x: 0, y: newSize.height)
    context.scaleBy(x: 1.0, y: -1.0)
    context.interpolationQuality = .high
    // Stamp the colorized silhouette at `step`-degree intervals around the image
    // (`rotated(around:byDegrees:)` is a custom CGPoint helper, not shown here)
    for angle: CGFloat in stride(from: 0, to: 360, by: step) {
        let vector = translationVector.rotated(around: .zero, byDegrees: angle)
        let transform = CGAffineTransform(translationX: vector.x, y: vector.y)
        context.concatenate(transform)
        context.draw(strokeCGImage, in: oldRect)
        let resetTransform = CGAffineTransform(translationX: -vector.x, y: -vector.y)
        context.concatenate(resetTransform)
    }
    // Draw the original image on top of the stamped outline
    context.draw(cgImage, in: oldRect)
    guard let stroked = UIGraphicsGetImageFromCurrentImageContext() else { return self }
    return stroked
}
I tried many options, but most of the answers just add a cornerRadius to the UIImageView (this doesn't work).
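For context, here is a hypothetical call site for these two helpers (the asset name is illustrative, not from the original post):
// Hypothetical usage: outline a transparent PNG with a 3-point red stroke.
let icon = UIImage(named: "icon")! // illustrative asset name
let outlined = icon.stroked(with: .red, thickness: 3, quality: 10)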

How to remove the blur effect on an image when the user touches it

In my application there are two options: blur and unblur an image by touching it. I am doing this to blur the image when the user touches it:
override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
    var croppedImg: UIImage? = nil
    let touch = touches.first
    var currentPoint = touch!.location(in: imgBackground)
    let ratioW: Double = Double(imgBackground.image!.size.width / imgBackground.frame.size.width)
    let ratioH: Double = Double(imgBackground.image!.size.height / imgBackground.frame.size.height)
    currentPoint.x *= CGFloat(ratioW)
    currentPoint.y *= CGFloat(ratioH)
    let circleSizeW = 25 * ratioW
    let circleSizeH = 25 * ratioH
    currentPoint.x = CGFloat((Double(currentPoint.x) - circleSizeW / 2 < 0) ? 0 : Double(currentPoint.x) - circleSizeW / 2)
    currentPoint.y = CGFloat((Double(currentPoint.y) - circleSizeH / 2 < 0) ? 0 : Double(currentPoint.y) - circleSizeH / 2)
    let cropRect = CGRect(x: currentPoint.x, y: currentPoint.y, width: CGFloat(circleSizeW), height: CGFloat(circleSizeH))
    // croppIngimage, roundedRectImage and addImage are custom helpers (not shown)
    croppedImg = croppIngimage(byImageName: imgBackground?.image, to: cropRect)
    // Blur Effect
    croppedImg = croppedImg?.imageWithGaussianBlur9()
    croppedImg = roundedRectImage(from: croppedImg, withRadious: 4)
    imgBackground.image = addImage(to: imgBackground.image, withImage2: croppedImg, andRect: cropRect)
}
func imageWithGaussianBlur9() -> UIImage? {
    // Blur horizontally
    UIGraphicsBeginImageContextWithOptions(size, false, scale)
    draw(in: CGRect(x: 0, y: 0, width: size.width, height: size.height), blendMode: .normal, alpha: 0.5)
    for x in 1..<5 {
        draw(in: CGRect(x: CGFloat(x), y: 0, width: size.width, height: size.height), blendMode: .normal, alpha: 0.5)
        draw(in: CGRect(x: CGFloat(-x), y: 0, width: size.width, height: size.height), blendMode: .normal, alpha: 0.5)
    }
    let horizBlurredImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    // Blur vertically
    UIGraphicsBeginImageContextWithOptions(size, false, scale)
    horizBlurredImage?.draw(in: CGRect(x: 0, y: 0, width: size.width, height: size.height), blendMode: .normal, alpha: 0.5)
    for y in 1..<5 {
        horizBlurredImage?.draw(in: CGRect(x: 0, y: CGFloat(y), width: size.width, height: size.height), blendMode: .normal, alpha: 0.5)
        horizBlurredImage?.draw(in: CGRect(x: 0, y: CGFloat(-y), width: size.width, height: size.height), blendMode: .normal, alpha: 0.5)
    }
    let blurredImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return blurredImage
}
The above code works perfectly when we blur the image.
What should I do when the user wants to remove the blur effect?
You have to place two UIImageViews on top of each other, with the original image in the bottom one.
@IBOutlet weak var imgBackground: UIImageView!
@IBOutlet weak var bottomViewImage: UIImageView! // original image
Add the code below (inside the same touchesMoved override as above) to unblur the image:
var croppedImg: UIImage? = nil
let touch = touches.first
var currentPoint = touch!.location(in: imgBackground)
let ratioW: Double = Double(imgBackground.image!.size.width / imgBackground.frame.size.width)
let ratioH: Double = Double(imgBackground.image!.size.height / imgBackground.frame.size.height)
currentPoint.x *= CGFloat(ratioW)
currentPoint.y *= CGFloat(ratioH)
let circleSizeW = 25 * ratioW
let circleSizeH = 25 * ratioH
currentPoint.x = CGFloat((Double(currentPoint.x) - circleSizeW / 2 < 0) ? 0 : Double(currentPoint.x) - circleSizeW / 2)
currentPoint.y = CGFloat((Double(currentPoint.y) - circleSizeH / 2 < 0) ? 0 : Double(currentPoint.y) - circleSizeH / 2)
let cropRect = CGRect(x: currentPoint.x, y: currentPoint.y, width: CGFloat(circleSizeW), height: CGFloat(circleSizeH))
// Crop from the sharp original (bottom view) and paste it over the blurred image
croppedImg = croppIngimage(byImageName: bottomViewImage?.image, to: cropRect)
croppedImg = roundedRectImage(from: croppedImg, withRadious: 4)
imgBackground.image = addImage(to: imgBackground.image, withImage2: croppedImg, andRect: cropRect)

CIContext's createCGImage producing wrong CGRect

Code:
enum GradientDirection {
    case up
    case left
    case upLeft
    case upRight
}

extension SKTexture {
    convenience init(size: CGSize, color1: CIColor, color2: CIColor, direction: GradientDirection = .up) {
        let coreImageContext = CIContext(options: nil)
        let gradientFilter = CIFilter(name: "CILinearGradient")
        gradientFilter!.setDefaults()
        var startVector: CIVector
        var endVector: CIVector
        switch direction {
        case .up:
            startVector = CIVector(x: size.width/2, y: 0)
            endVector = CIVector(x: size.width/2, y: size.height)
        case .left:
            startVector = CIVector(x: size.width, y: size.height/2)
            endVector = CIVector(x: 0, y: size.height/2)
        case .upLeft:
            startVector = CIVector(x: size.width, y: 0)
            endVector = CIVector(x: 0, y: size.height)
        case .upRight:
            startVector = CIVector(x: 0, y: 0)
            endVector = CIVector(x: size.width, y: size.height)
        }
        gradientFilter!.setValue(startVector, forKey: "inputPoint0")
        gradientFilter!.setValue(endVector, forKey: "inputPoint1")
        gradientFilter!.setValue(color1, forKey: "inputColor0")
        gradientFilter!.setValue(color2, forKey: "inputColor1")
        let imgRect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        let cgimg = coreImageContext.createCGImage(gradientFilter!.outputImage!, from: imgRect)!
        print("cgimg: ", cgimg) // *** Observe this output: 103.0 width and height ***
        self.init(cgImage: cgimg)
    }
}
Calling Initializer:
// e.g. CGSize(width: 102.69999694824219, height: 102.69999694824219)
let textureSize = CGSize(width: self.frame.width, height: self.frame.height)
let shapeTexture = SKTexture(size: textureSize, color1: bottomColor, color2: topColor, direction: .upRight)
Passing width/height 102.69999694824219 produces a shapeTexture with width/height 103.
It seems coreImageContext.createCGImage is rounding 102.69999694824219 up to 103.0.
This results in minor unexpected output. How can I bypass this rounding? Or is there any other method to generate a gradient image for nodes?
More Code:
class BubbleNode: SKShapeNode {
    private var backgroundNode: SKCropNode!
    var label: SKLabelNode!
    private var state: BubbleNodeState!
    private let BubbleAnimationDuration = 0.2
    private let BubbleIconPercentualInset = 0.4
    var model: BubbleModel! {
        didSet {
            self.label.text = model.name
        }
    }
    override init() {
        super.init()
    }
    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
    convenience init(withRadius radius: CGFloat) {
        // delegate straight to the circle initializer
        self.init(circleOfRadius: radius)
        state = .normal
        self.configure()
    }
    private func configure() {
        self.name = "mybubble"
        physicsBody = SKPhysicsBody(circleOfRadius: 4 + self.path!.boundingBox.size.width / 2.0)
        physicsBody!.isDynamic = true
        physicsBody!.affectedByGravity = false
        physicsBody!.allowsRotation = false
        physicsBody!.mass = 0.3
        physicsBody!.friction = 0.0
        physicsBody!.linearDamping = 3
        backgroundNode = SKCropNode()
        backgroundNode.isUserInteractionEnabled = false
        backgroundNode.position = CGPoint.zero
        backgroundNode.zPosition = 0
        self.addChild(backgroundNode)
        label = SKLabelNode(fontNamed: "")
        label.preferredMaxLayoutWidth = self.frame.size.width - 16
        label.numberOfLines = 0
        label.position = CGPoint.zero
        label.fontColor = .white
        label.fontSize = 10
        label.isUserInteractionEnabled = false
        label.verticalAlignmentMode = .center
        label.horizontalAlignmentMode = .center
        label.zPosition = 2
        self.addChild(label)
    }
    func addGradientNode(withRadius radius: CGFloat) {
        let gradientNode = SKShapeNode(path: self.path!)
        gradientNode.zPosition = 1
        gradientNode.fillColor = .white
        gradientNode.strokeColor = .clear
        let bottomColor = CIColor(red: 0.922, green: 0.256, blue: 0.523, alpha: 1)
        let topColor = CIColor(red: 0.961, green: 0.364, blue: 0.155, alpha: 1)
        let textureSize = CGSize(width: self.frame.width, height: self.frame.height)
        let shapeTexture = SKTexture(size: textureSize, color1: bottomColor, color2: topColor, direction: .upRight)
        gradientNode.fillTexture = shapeTexture
        self.addChild(gradientNode)
        print("path: ", self.path!)
        print("textureSize: ", textureSize)
        print("shapeTexture: ", shapeTexture)
    }
}
Any image/texture will always have integer sizes since there are no sub-pixels in memory. So frameworks like Core Image will always round the given size up to the next integer value.
In contrast, the frame of a view is given in points, which need to be multiplied by the view's contentScaleFactor to get the actual pixel size (that you should use to generate your gradient). UIKit also allows for sub-pixel frame sizes, but under the hood, it will also round up when rendering the views to the screen.
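Following that advice, a minimal sketch (assuming the SKTexture initializer above): scale the size to pixels and round it up yourself, so the rounding happens on your terms rather than inside Core Image:
// Hypothetical: build the gradient at pixel resolution rather than point resolution.
let scale = UIScreen.main.scale // stands in for the backing view's contentScaleFactor
let textureSize = CGSize(width: ceil(self.frame.width * scale),
                         height: ceil(self.frame.height * scale))
let shapeTexture = SKTexture(size: textureSize, color1: bottomColor, color2: topColor, direction: .upRight)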

What's wrong with this SKShapeNode gradient coloring code?

I'm trying to create a gradient-colored SKShapeNode.
I found the code below via Google, but it only works when color1 is black, e.g. defined as follows:
CIColor.init(red: 0, green: 0, blue: 0, alpha: 1.0)
The second color can be anything.
enum GradientDirection {
    case up
    case left
    case upLeft
    case upRight
}

extension SKTexture {
    convenience init(size: CGSize, color1: CIColor, color2: CIColor, direction: GradientDirection = .up) {
        let coreImageContext = CIContext(options: nil)
        let gradientFilter = CIFilter(name: "CILinearGradient")
        gradientFilter!.setDefaults()
        var startVector: CIVector
        var endVector: CIVector
        switch direction {
        case .up:
            startVector = CIVector(x: size.width/2, y: 0)
            endVector = CIVector(x: size.width/2, y: size.height)
        case .left:
            startVector = CIVector(x: size.width, y: size.height/2)
            endVector = CIVector(x: 0, y: size.height/2)
        case .upLeft:
            startVector = CIVector(x: size.width, y: 0)
            endVector = CIVector(x: 0, y: size.height)
        case .upRight:
            startVector = CIVector(x: 0, y: 0)
            endVector = CIVector(x: size.width, y: size.height)
        }
        gradientFilter!.setValue(startVector, forKey: "inputPoint0")
        gradientFilter!.setValue(endVector, forKey: "inputPoint1")
        gradientFilter!.setValue(color1, forKey: "inputColor0")
        gradientFilter!.setValue(color2, forKey: "inputColor1")
        let cgimg = coreImageContext.createCGImage(gradientFilter!.outputImage!, from: CGRect(x: 0, y: 0, width: size.width, height: size.height))!
        self.init(cgImage: cgimg)
    }
}
I want color1 to be something other than black. I can't figure out what's wrong with this code. I define the SKShapeNode as follows:
let textureSize = CGSize(width: shapeNode.frame.width, height: shapeNode.frame.height)
let bottomColor = CIColor.init(red: 0, green: 0, blue: 0, alpha: 1.0)
let topColor = CIColor.init(red: 225, green: 255, blue: 255, alpha: 1.0)
let shapeTexture = SKTexture(size: textureSize, color1: bottomColor, color2: topColor, direction: GradientDirection.up)
shapeNode.fillTexture = shapeTexture
self.addChild(shapeNode)
All help appreciated!
RGB values for CIColor should be in [0, 1]. 255 is not a valid value.
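So, if the intent was a near-white top color specified in 0-255 components, normalize by dividing by 255 first; for example:
// CIColor components must fall in the 0...1 range, so divide 0-255 values by 255.
let bottomColor = CIColor(red: 0, green: 0, blue: 0, alpha: 1.0)
let topColor = CIColor(red: 225.0 / 255.0, green: 1.0, blue: 1.0, alpha: 1.0)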

iOS blend mode multiply

I'm looking for a solution to create this red box:
It uses a 'Multiply' color filter. So far I have found this information:
https://developer.apple.com/library/mac/documentation/GraphicsImaging/Conceptual/drawingwithquartz2d/dq_images/dq_images.html
But how can I apply something like the multiply effect to a UIView, or isn't this possible?
The idea is that the background is a UIImageView and the red box is a UIView with the multiply effect.
This Swift extension did the trick for me.
extension UIImage {
    // creates a static image with a color of the requested size
    static func fromColor(color: UIColor, size: CGSize) -> UIImage {
        let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
        UIGraphicsBeginImageContext(rect.size)
        let context = UIGraphicsGetCurrentContext()
        CGContextSetFillColorWithColor(context, color.CGColor)
        CGContextFillRect(context, rect)
        let img = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return img
    }

    func blendWithColorAndRect(blendMode: CGBlendMode, color: UIColor, rect: CGRect) -> UIImage {
        let imageColor = UIImage.fromColor(color, size: self.size)
        let rectImage = CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height)
        UIGraphicsBeginImageContextWithOptions(self.size, true, 0)
        let context = UIGraphicsGetCurrentContext()
        // fill the background with white so that translucent colors get lighter
        CGContextSetFillColorWithColor(context, UIColor.whiteColor().CGColor)
        CGContextFillRect(context, rectImage)
        self.drawInRect(rectImage, blendMode: .Normal, alpha: 1)
        imageColor.drawInRect(rect, blendMode: blendMode, alpha: 1)
        // grab the finished image and return it
        let result = UIGraphicsGetImageFromCurrentImageContext()
        //self.backgroundImageView.image = result
        UIGraphicsEndImageContext()
        return result
    }
}
Swift 3:
static func fromColor(color: UIColor, size: CGSize) -> UIImage {
    let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    UIGraphicsBeginImageContext(rect.size)
    let context = UIGraphicsGetCurrentContext()
    context!.setFillColor(color.cgColor)
    context!.fill(rect)
    let img = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return img!
}

func blendWithColorAndRect(blendMode: CGBlendMode, color: UIColor, rect: CGRect) -> UIImage {
    let imageColor = UIImage.fromColor(color: color, size: self.size)
    let rectImage = CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height)
    UIGraphicsBeginImageContextWithOptions(self.size, true, 0)
    let context = UIGraphicsGetCurrentContext()
    // fill the background with white so that translucent colors get lighter
    context!.setFillColor(UIColor.white.cgColor)
    context!.fill(rectImage)
    self.draw(in: rectImage, blendMode: .normal, alpha: 1)
    imageColor.draw(in: rect, blendMode: blendMode, alpha: 1)
    // grab the finished image and return it
    let result = UIGraphicsGetImageFromCurrentImageContext()
    //self.backgroundImageView.image = result
    UIGraphicsEndImageContext()
    return result!
}
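For reference, a hypothetical call site for the question's red-box-over-photo effect (the asset name and rect are illustrative, not from the original post):
// Hypothetical usage: multiply a red rectangle onto a photo.
let photo = UIImage(named: "background")! // illustrative asset name
let redBoxRect = CGRect(x: 40, y: 40, width: 120, height: 80)
let newImage = photo.blendWithColorAndRect(blendMode: .multiply, color: .red, rect: redBoxRect)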
But this only works for images; I would like it to work for videos too :|
I expanded @pegpeg's solution and added a gradient overlay to the image instead of a single color.
Swift 4:
static func fromGradient(colors: [UIColor], locations: [CGFloat], horizontal: Bool, size: CGSize) -> UIImage {
    let rect = CGRect(x: 0, y: 0, width: size.width, height: size.height)
    UIGraphicsBeginImageContext(rect.size)
    let context = UIGraphicsGetCurrentContext()
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let cgColors = colors.map { $0.cgColor } as CFArray
    let grad = CGGradient(colorsSpace: colorSpace, colors: cgColors, locations: locations)
    let startPoint = CGPoint(x: 0, y: 0)
    let endPoint = horizontal ? CGPoint(x: size.width, y: 0) : CGPoint(x: 0, y: size.height)
    context?.drawLinearGradient(grad!, start: startPoint, end: endPoint, options: .drawsAfterEndLocation)
    let img = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return img!
}

func blendWithGradientAndRect(blendMode: CGBlendMode, colors: [UIColor], locations: [CGFloat], horizontal: Bool = false, alpha: CGFloat = 1.0, rect: CGRect) -> UIImage {
    let imageColor = UIImage.fromGradient(colors: colors, locations: locations, horizontal: horizontal, size: size)
    let rectImage = CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height)
    UIGraphicsBeginImageContextWithOptions(self.size, true, 0)
    let context = UIGraphicsGetCurrentContext()
    // fill the background with white so that translucent colors get lighter
    context!.setFillColor(UIColor.white.cgColor)
    context!.fill(rectImage)
    self.draw(in: rectImage, blendMode: .normal, alpha: 1)
    imageColor.draw(in: rect, blendMode: blendMode, alpha: alpha)
    // grab the finished image and return it
    let result = UIGraphicsGetImageFromCurrentImageContext()
    //self.backgroundImageView.image = result
    UIGraphicsEndImageContext()
    return result!
}
The colors array should be the same length as the locations array; for example:
let newImage = image.blendWithGradientAndRect(blendMode: .multiply,
                                              colors: [.red, .white],
                                              locations: [0, 1],
                                              horizontal: true,
                                              alpha: 0.8,
                                              rect: imageRect)
