CGBlendMode.clear isn't working for me - iOS

I'm trying to make an eraser function in a project. I can draw the lines OK, but nothing happens when I invoke CGBlendMode.clear.
Can anyone see what I'm doing wrong or missing? I'm posting a code snippet below, but if another piece is required, please let me know.
fileprivate func drawLine(_ stroke: Stroke) -> Void {
    print(#function)
    let properties = stroke.settings
    let array = stroke.points
    if array?.count == 1 {
        let pointStr = array?[0]
        let point = CGPointFromString(pointStr!)
        self.drawLineFrom(point, toPoint: point, properties: properties!)
    }
    for i in 0 ..< (array?.count)! - 1 {
        let pointStr0 = array?[i]
        let pointStr1 = array?[i+1]
        let point0 = CGPointFromString(pointStr0!)
        let point1 = CGPointFromString(pointStr1!)
        self.drawLineFrom(point0, toPoint: point1, properties: properties!)
    }
    self.mergeViews()
}
fileprivate func drawLineFrom(_ fromPoint: CGPoint, toPoint: CGPoint, properties: StrokeSettings) -> Void {
    print(#function)
    UIGraphicsBeginImageContext(self.frame.size)
    let context = UIGraphicsGetCurrentContext()
    context!.move(to: CGPoint(x: fromPoint.x, y: fromPoint.y))
    context!.addLine(to: CGPoint(x: toPoint.x, y: toPoint.y))
    context!.setLineCap(CGLineCap.round)
    context!.setLineWidth(properties.width)
    print(#function, "drawEraseSwitch: ", drawEraseSwitch)
    if drawEraseSwitch == 0 {
        context!.setBlendMode(CGBlendMode.normal)
        context!.setStrokeColor(red: properties.color.red, green: properties.color.green, blue: properties.color.blue, alpha: 1.0)
    } else {
        context!.setStrokeColor(red: properties.color.red, green: properties.color.green, blue: properties.color.blue, alpha: 1.0)
        context!.setBlendMode(CGBlendMode.clear)
    }
    context!.strokePath()
    self.backgroundImageView.image?.draw(in: self.backgroundImageView.frame)
    let image = UIGraphicsGetImageFromCurrentImageContext()
    self.backgroundImageView.image = image
    self.backgroundImageView.alpha = properties.color.alpha
    UIGraphicsEndImageContext()
}
fileprivate func mergeViews() {
    print(#function)
    UIGraphicsBeginImageContext(self.mainImageView.frame.size)
    let image = UIGraphicsGetImageFromCurrentImageContext()
    self.mainImageView.image = image
    UIGraphicsEndImageContext()
    //self.backgroundImageView.image = nil
}
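Note for anyone hitting the same symptom: CGBlendMode.clear erases pixels that are already in the current context, but drawLineFrom strokes into a brand-new, empty context and only afterwards draws backgroundImageView.image on top of the stroke. There is nothing to clear, and the image then covers the line anyway. A minimal sketch of the usual fix (draw the existing image first, then stroke; the same StrokeSettings and drawEraseSwitch from the snippet above are assumed):

fileprivate func drawLineFrom(_ fromPoint: CGPoint, toPoint: CGPoint, properties: StrokeSettings) {
    UIGraphicsBeginImageContext(self.frame.size)
    guard let context = UIGraphicsGetCurrentContext() else { return }
    // Draw the existing image into the fresh context FIRST, so the
    // clear blend mode has pixels to erase.
    self.backgroundImageView.image?.draw(in: CGRect(origin: .zero, size: self.frame.size))
    context.move(to: fromPoint)
    context.addLine(to: toPoint)
    context.setLineCap(.round)
    context.setLineWidth(properties.width)
    if drawEraseSwitch == 0 {
        context.setBlendMode(.normal)
        context.setStrokeColor(red: properties.color.red, green: properties.color.green, blue: properties.color.blue, alpha: 1.0)
    } else {
        // .clear punches transparent holes through what was drawn above.
        context.setBlendMode(.clear)
    }
    // Stroke AFTER the image is already in the context.
    context.strokePath()
    self.backgroundImageView.image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
}

mergeViews has the same problem in miniature: it opens a fresh context and immediately grabs an image from it without drawing anything, so mainImageView ends up with a blank image.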

Related

How can I add a gradient colour for a button tint in Swift iOS

Is there any solution to apply a tint colour as a gradient in Swift? My button has a simple white icon and I want to change it to a gradient colour.
I updated my answer according to Duncan C's comment.
You can modify your image before setting it on the button.
extension UIImage {
    func drawLinearGradient(colors: [CGColor], startingPoint: CGPoint, endPoint: CGPoint) -> UIImage? {
        let renderer = UIGraphicsImageRenderer(size: self.size)
        var shouldReturnNil = false
        let gradientImage = renderer.image { context in
            context.cgContext.translateBy(x: 0, y: self.size.height)
            context.cgContext.scaleBy(x: 1.0, y: -1.0)
            context.cgContext.setBlendMode(.normal)
            let rect = CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height)
            // Create gradient
            let colors = colors as CFArray
            let colorsSpace = CGColorSpaceCreateDeviceRGB()
            guard let gradient = CGGradient(colorsSpace: colorsSpace, colors: colors, locations: nil) else {
                shouldReturnNil = true
                return
            }
            // Apply gradient
            guard let cgImage = self.cgImage else {
                shouldReturnNil = true
                print("Couldn't get cgImage of UIImage.")
                return
            }
            context.cgContext.clip(to: rect, mask: cgImage)
            context.cgContext.drawLinearGradient(
                gradient,
                start: endPoint,
                end: startingPoint,
                options: .init(rawValue: 0)
            )
        }
        return shouldReturnNil ? nil : gradientImage
    }
}
You can use it like this:
guard let image = UIImage(named: "<your_image_name>") else { return }
let gradientImage = image.drawLinearGradient(
    colors: [UIColor.blue.cgColor, UIColor.red.cgColor],
    startingPoint: .init(x: image.size.width / 2, y: 0),
    endPoint: .init(x: image.size.width / 2, y: image.size.height)
)
button.setImage(gradientImage, for: .normal)
This code renders the icon filled with the blue-to-red gradient.
I have the same problem too, but for now, if you want to change the text colour, this works: you can convert the image into a pattern colour with the code below.
class gradiunTitle: UIButton {
    override init(frame: CGRect) {
        super.init(frame: frame)
        setup()
    }
    required init?(coder aDecoder: NSCoder) {
        super.init(coder: aDecoder)
        setup()
    }
    private func setup() {
        let gradient = getGradientLayer(bounds: self.bounds)
        self.setTitleColor(gradientColor(bounds: self.bounds, gradientLayer: gradient), for: .normal)
        self.tintColor = gradientColor(bounds: self.frame, gradientLayer: gradient)
    }
    func getGradientLayer(bounds: CGRect) -> CAGradientLayer {
        let gradient = CAGradientLayer()
        gradient.frame = bounds
        // order of gradient colors
        gradient.colors = [UIColor.red.cgColor, UIColor.blue.cgColor, UIColor.green.cgColor]
        gradient.startPoint = CGPoint(x: 0.0, y: 0.5)
        gradient.endPoint = CGPoint(x: 1.0, y: 0.5)
        return gradient
    }
    func gradientColor(bounds: CGRect, gradientLayer: CAGradientLayer) -> UIColor? {
        UIGraphicsBeginImageContext(gradientLayer.bounds.size)
        gradientLayer.render(in: UIGraphicsGetCurrentContext()!)
        let image = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return UIColor(patternImage: image!)
    }
}
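Usage is just instantiation (class name kept as posted). Note the gradient is rendered once from the frame passed at init, so give the button its final size up front; a minimal sketch:

let button = gradiunTitle(frame: CGRect(x: 0, y: 0, width: 200, height: 44))
button.setTitle("Gradient", for: .normal)
view.addSubview(button)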

CALayer live blur inside AVVideoCompositionCoreAnimationTool

I am trying to come up with a way to perform a live blur during an AVVideoCompositionCoreAnimationTool export. I have tried UIVisualEffectView and stealing the layer of the underlying view. It works in preview, but as soon as you use it inside AVVideoCompositionCoreAnimationTool the layer is black. So I started building a CALayer that does this, but it is not updating often enough. What can I do to make it draw more often, or what might work for combining AVVideoCompositionCoreAnimationTool with a live blur on iOS? Here is the layer I built.
import UIKit
import PlaygroundSupport

class CABlurLayer: CALayer {
    let maxBlurRadius: CGFloat = 20
    var currentImageIndex: Float = 0
    var blur: Int = 10
    var context: CGContext?
    var link: Timer?
    var snap: UIImage?
    var targetLayer: CALayer?
    override init() {
        super.init()
    }
    convenience init(targetLayer: CALayer?) {
        self.init()
        self.targetLayer = targetLayer
        self.drawsAsynchronously = true
        if let tl = targetLayer {
            self.masksToBounds = tl.masksToBounds
        }
        updateSnapShots()
        link = Timer.scheduledTimer(timeInterval: 1/60, target: self, selector: #selector(updateBlur), userInfo: nil, repeats: true)
    }
    @objc func updateBlur() {
        updateSnapShots()
        DispatchQueue.main.async {
            self.setNeedsDisplay()
        }
    }
    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
    func updateSnapShots() {
        guard let tl = targetLayer else { return }
        UIGraphicsBeginImageContextWithOptions(self.bounds.size, false, 0)
        guard let ctx = UIGraphicsGetCurrentContext() else { return }
        tl.render(in: ctx)
        let snapshot = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        snap = snapshot?.applyBlurWithRadius(CGFloat(blur), tintColor: UIColor().withAlphaComponent(0), saturationDeltaFactor: 1.4)
    }
    override func draw(in ctx: CGContext) {
        guard let blurredImage = snap,
              let tl = targetLayer else { return }
        var origin = tl.frame.origin
        if let pres = tl.presentation() {
            origin = pres.frame.origin
        }
        UIGraphicsPushContext(ctx)
        blurredImage.draw(at: origin)
        UIGraphicsPopContext()
    }
}
class MyViewController: UIViewController {
    override func loadView() {
        let view = UIView()
        view.backgroundColor = .white
        self.view = view
        let ur = URL(string: "https://images.pexels.com/photos/457882/pexels-photo-457882.jpeg?auto=compress&cs=tinysrgb&dpr=2&w=500")
        URLSession.shared.dataTask(with: ur!) { (dt, response, error) in
            if let data = dt {
                print("we have a response")
                let img = UIImage(data: data)
                DispatchQueue.main.async {
                    let layer = CALayer()
                    layer.frame = CGRect(x: 0, y: 0, width: 500, height: 500)
                    view.layer.addSublayer(layer)
                    let imageLayer = CALayer()
                    imageLayer.masksToBounds = true
                    imageLayer.frame = CGRect(x: 0, y: 150, width: 400, height: 300)
                    imageLayer.contentsGravity = .resizeAspectFill
                    imageLayer.contents = img?.cgImage
                    layer.addSublayer(imageLayer)
                    let blur = CABlurLayer(targetLayer: imageLayer)
                    blur.frame = layer.bounds
                    layer.addSublayer(blur)
                    blur.blur = 20
                    let pos = CABasicAnimation(keyPath: "position.x")
                    pos.toValue = imageLayer.position.x
                    pos.fromValue = imageLayer.position.x - 100
                    pos.duration = 2
                    pos.repeatCount = 100
                    pos.autoreverses = true
                    imageLayer.add(pos, forKey: nil)
                }
            }
        }.resume()
    }
}
// Present the view controller in the Live View window
PlaygroundPage.current.liveView = MyViewController()
PlaygroundPage.current.needsIndefiniteExecution = true
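The demo above drives updateBlur with a Timer. If the layer is not repainting often enough on screen, a CADisplayLink is the more reliable timing source, since it fires in lockstep with the display refresh; a sketch of the swap (same CABlurLayer, only the timing source changes):

// Hypothetical replacement for the `link` Timer inside CABlurLayer.
var displayLink: CADisplayLink?

func startUpdating() {
    // Fires once per screen refresh, in sync with rendering.
    displayLink = CADisplayLink(target: self, selector: #selector(updateBlur))
    displayLink?.add(to: .main, forMode: .common)
}

func stopUpdating() {
    // invalidate() also releases the strong reference the link keeps on its target.
    displayLink?.invalidate()
    displayLink = nil
}

One caveat: during an actual AVVideoCompositionCoreAnimationTool export, the layer tree is rendered offline at video time rather than on the main run loop, so timer- and display-link-driven updates generally never fire there; only ordinary CAAnimations keyed to the video timeline are honored.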
UIImage Extensions
import UIKit
import Accelerate
public extension UIImage {
    func applyLightEffect() -> UIImage? {
        return applyBlurWithRadius(30, tintColor: UIColor(white: 1.0, alpha: 0.3), saturationDeltaFactor: 1.8)
    }
    func applyExtraLightEffect() -> UIImage? {
        return applyBlurWithRadius(20, tintColor: UIColor(white: 0.97, alpha: 0.82), saturationDeltaFactor: 1.8)
    }
    func applyDarkEffect() -> UIImage? {
        return applyBlurWithRadius(20, tintColor: UIColor(white: 0.11, alpha: 0.73), saturationDeltaFactor: 1.8)
    }
    func applyTintEffectWithColor(_ tintColor: UIColor) -> UIImage? {
        let effectColorAlpha: CGFloat = 0.6
        var effectColor = tintColor
        let componentCount = tintColor.cgColor.numberOfComponents
        if componentCount == 2 {
            var b: CGFloat = 0
            if tintColor.getWhite(&b, alpha: nil) {
                effectColor = UIColor(white: b, alpha: effectColorAlpha)
            }
        } else {
            var red: CGFloat = 0
            var green: CGFloat = 0
            var blue: CGFloat = 0
            if tintColor.getRed(&red, green: &green, blue: &blue, alpha: nil) {
                effectColor = UIColor(red: red, green: green, blue: blue, alpha: effectColorAlpha)
            }
        }
        return applyBlurWithRadius(10, tintColor: effectColor, saturationDeltaFactor: -1.0, maskImage: nil)
    }
    func applyBlurWithRadius(_ blurRadius: CGFloat, tintColor: UIColor?, saturationDeltaFactor: CGFloat, maskImage: UIImage? = nil) -> UIImage? {
        // Check pre-conditions.
        if size.width < 1 || size.height < 1 {
            print("*** error: invalid size: \(size.width) x \(size.height). Both dimensions must be >= 1: \(self)")
            return nil
        }
        guard let cgImage = self.cgImage else {
            print("*** error: image must be backed by a CGImage: \(self)")
            return nil
        }
        if maskImage != nil && maskImage!.cgImage == nil {
            print("*** error: maskImage must be backed by a CGImage: \(String(describing: maskImage))")
            return nil
        }
        let __FLT_EPSILON__ = CGFloat(Float.ulpOfOne)
        let screenScale = UIScreen.main.scale
        let imageRect = CGRect(origin: CGPoint.zero, size: size)
        var effectImage = self
        let hasBlur = blurRadius > __FLT_EPSILON__
        let hasSaturationChange = abs(saturationDeltaFactor - 1.0) > __FLT_EPSILON__
        if hasBlur || hasSaturationChange {
            func createEffectBuffer(_ context: CGContext) -> vImage_Buffer {
                let data = context.data
                let width = vImagePixelCount(context.width)
                let height = vImagePixelCount(context.height)
                let rowBytes = context.bytesPerRow
                return vImage_Buffer(data: data, height: height, width: width, rowBytes: rowBytes)
            }
            UIGraphicsBeginImageContextWithOptions(size, false, screenScale)
            guard let effectInContext = UIGraphicsGetCurrentContext() else { return nil }
            effectInContext.scaleBy(x: 1.0, y: -1.0)
            effectInContext.translateBy(x: 0, y: -size.height)
            effectInContext.draw(cgImage, in: imageRect)
            var effectInBuffer = createEffectBuffer(effectInContext)
            UIGraphicsBeginImageContextWithOptions(size, false, screenScale)
            guard let effectOutContext = UIGraphicsGetCurrentContext() else { return nil }
            var effectOutBuffer = createEffectBuffer(effectOutContext)
            if hasBlur {
                // A description of how to compute the box kernel width from the Gaussian
                // radius (aka standard deviation) appears in the SVG spec:
                // http://www.w3.org/TR/SVG/filters.html#feGaussianBlurElement
                //
                // For larger values of 's' (s >= 2.0), an approximation can be used: Three
                // successive box-blurs build a piece-wise quadratic convolution kernel, which
                // approximates the Gaussian kernel to within roughly 3%.
                //
                // let d = floor(s * 3*sqrt(2*pi)/4 + 0.5)
                //
                // ... if d is odd, use three box-blurs of size 'd', centered on the output pixel.
                //
                let inputRadius = blurRadius * screenScale
                let d = floor(inputRadius * 3.0 * CGFloat(sqrt(2 * .pi)) / 4 + 0.5)
                var radius = UInt32(d)
                if radius % 2 != 1 {
                    radius += 1 // force radius to be odd so that the three box-blur methodology works.
                }
                let imageEdgeExtendFlags = vImage_Flags(kvImageEdgeExtend)
                vImageBoxConvolve_ARGB8888(&effectInBuffer, &effectOutBuffer, nil, 0, 0, radius, radius, nil, imageEdgeExtendFlags)
                vImageBoxConvolve_ARGB8888(&effectOutBuffer, &effectInBuffer, nil, 0, 0, radius, radius, nil, imageEdgeExtendFlags)
                vImageBoxConvolve_ARGB8888(&effectInBuffer, &effectOutBuffer, nil, 0, 0, radius, radius, nil, imageEdgeExtendFlags)
            }
            var effectImageBuffersAreSwapped = false
            if hasSaturationChange {
                let s: CGFloat = saturationDeltaFactor
                let floatingPointSaturationMatrix: [CGFloat] = [
                    0.0722 + 0.9278 * s, 0.0722 - 0.0722 * s, 0.0722 - 0.0722 * s, 0,
                    0.7152 - 0.7152 * s, 0.7152 + 0.2848 * s, 0.7152 - 0.7152 * s, 0,
                    0.2126 - 0.2126 * s, 0.2126 - 0.2126 * s, 0.2126 + 0.7873 * s, 0,
                    0, 0, 0, 1
                ]
                let divisor: CGFloat = 256
                let matrixSize = floatingPointSaturationMatrix.count
                var saturationMatrix = [Int16](repeating: 0, count: matrixSize)
                for i: Int in 0 ..< matrixSize {
                    saturationMatrix[i] = Int16(round(floatingPointSaturationMatrix[i] * divisor))
                }
                if hasBlur {
                    vImageMatrixMultiply_ARGB8888(&effectOutBuffer, &effectInBuffer, saturationMatrix, Int32(divisor), nil, nil, vImage_Flags(kvImageNoFlags))
                    effectImageBuffersAreSwapped = true
                } else {
                    vImageMatrixMultiply_ARGB8888(&effectInBuffer, &effectOutBuffer, saturationMatrix, Int32(divisor), nil, nil, vImage_Flags(kvImageNoFlags))
                }
            }
            if !effectImageBuffersAreSwapped {
                effectImage = UIGraphicsGetImageFromCurrentImageContext()!
            }
            UIGraphicsEndImageContext()
            if effectImageBuffersAreSwapped {
                effectImage = UIGraphicsGetImageFromCurrentImageContext()!
            }
            UIGraphicsEndImageContext()
        }
        // Set up output context.
        UIGraphicsBeginImageContextWithOptions(size, false, screenScale)
        guard let outputContext = UIGraphicsGetCurrentContext() else { return nil }
        outputContext.scaleBy(x: 1.0, y: -1.0)
        outputContext.translateBy(x: 0, y: -size.height)
        // Draw base image.
        outputContext.draw(cgImage, in: imageRect)
        // Draw effect image.
        if hasBlur {
            outputContext.saveGState()
            if let maskCGImage = maskImage?.cgImage {
                outputContext.clip(to: imageRect, mask: maskCGImage)
            }
            outputContext.draw(effectImage.cgImage!, in: imageRect)
            outputContext.restoreGState()
        }
        // Add in color tint.
        if let color = tintColor {
            outputContext.saveGState()
            outputContext.setFillColor(color.cgColor)
            outputContext.fill(imageRect)
            outputContext.restoreGState()
        }
        // Output image is ready.
        let outputImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return outputImage
    }
    func blurImage() -> UIImage? {
        return self.applyBlurWithRadius(20, tintColor: UIColor().withAlphaComponent(0), saturationDeltaFactor: 1.4)
    }
}
This has been around a while, so I thought I would share my solution. I stole the CABackdropLayer from a UIVisualEffectView to achieve a live blur. You can init a layer of this type, but it is private. However, since a public view uses this layer and I am just taking it from that view, I am not accessing a private API in a super direct way.
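A sketch of what pulling the layer out can look like (hedged: this relies on the undocumented internal hierarchy of UIVisualEffectView — on current iOS versions the backdrop is the layer of the effect view's first subview — so verify by class name and expect it to break):

// Hypothetical helper: borrow the live-blur backdrop layer that a
// UIVisualEffectView creates internally, without naming the private class.
func stealBackdropLayer(style: UIBlurEffect.Style = .regular) -> CALayer? {
    let effectView = UIVisualEffectView(effect: UIBlurEffect(style: style))
    guard let candidate = effectView.subviews.first?.layer else { return nil }
    let name = NSStringFromClass(type(of: candidate))
    return name.contains("Backdrop") ? candidate : nil
}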

Swift - Creating a connection between two labels or button using blue drag line like in Xcode

I have managed to create the blue line like in Xcode, but how would you recognise when the end point of the line is dragged and released over another label or button, so a connection can be made?
Code
class ViewController: UIViewController {
    @IBOutlet var label: UILabel!
    @IBOutlet var label2: UILabel!
    private lazy var lineShape: CAShapeLayer = {
        var color = hexStringToUIColor(hex: "#5DBCD2")
        let lineShape = CAShapeLayer()
        lineShape.strokeColor = UIColor.blue.cgColor
        lineShape.fillColor = color.cgColor
        lineShape.lineWidth = 2.0
        return lineShape
    }()
    private var panGestureStartPoint: CGPoint = .zero
    private lazy var panRecognizer: UIPanGestureRecognizer = {
        return UIPanGestureRecognizer(target: self, action: #selector(panGestureCalled(_:)))
    }()
    override func viewDidLoad() {
        super.viewDidLoad()
        self.label.addGestureRecognizer(panRecognizer)
    }
    @objc func panGestureCalled(_: UIPanGestureRecognizer) {
        let currentPanPoint = panRecognizer.location(in: self.view)
        switch panRecognizer.state {
        case .began:
            panGestureStartPoint = currentPanPoint
            self.view.layer.addSublayer(lineShape)
        case .changed:
            let linePath = UIBezierPath()
            linePath.move(to: panGestureStartPoint)
            linePath.addLine(to: currentPanPoint)
            lineShape.path = linePath.cgPath
            lineShape.path = CGPath.barbell(from: panGestureStartPoint, to: currentPanPoint, barThickness: 2.0, bellRadius: 6.0)
        case .ended:
            lineShape.path = nil
            lineShape.removeFromSuperlayer()
        default: break
        }
    }
    func hexStringToUIColor(hex: String) -> UIColor {
        var cString: String = hex.trimmingCharacters(in: .whitespacesAndNewlines).uppercased()
        if cString.hasPrefix("#") {
            cString.remove(at: cString.startIndex)
        }
        if cString.count != 6 {
            return UIColor.gray
        }
        var rgbValue: UInt32 = 0
        Scanner(string: cString).scanHexInt32(&rgbValue)
        return UIColor(
            red: CGFloat((rgbValue & 0xFF0000) >> 16) / 255.0,
            green: CGFloat((rgbValue & 0x00FF00) >> 8) / 255.0,
            blue: CGFloat(rgbValue & 0x0000FF) / 255.0,
            alpha: CGFloat(1.0)
        )
    }
}
extension CGPath {
    class func barbell(from start: CGPoint, to end: CGPoint, barThickness proposedBarThickness: CGFloat, bellRadius proposedBellRadius: CGFloat) -> CGPath {
        let barThickness = max(0, proposedBarThickness)
        let bellRadius = max(barThickness / 2, proposedBellRadius)
        let vector = CGPoint(x: end.x - start.x, y: end.y - start.y)
        let length = hypot(vector.x, vector.y)
        if length == 0 {
            return CGPath(ellipseIn: CGRect(origin: start, size: .zero).insetBy(dx: -bellRadius, dy: -bellRadius), transform: nil)
        }
        var yOffset = barThickness / 2
        var xOffset = sqrt(bellRadius * bellRadius - yOffset * yOffset)
        let halfLength = length / 2
        if xOffset > halfLength {
            xOffset = halfLength
            yOffset = sqrt(bellRadius * bellRadius - xOffset * xOffset)
        }
        let jointRadians = asin(yOffset / bellRadius)
        let path = CGMutablePath()
        path.addArc(center: .zero, radius: bellRadius, startAngle: jointRadians, endAngle: -jointRadians, clockwise: false)
        path.addArc(center: CGPoint(x: length, y: 0), radius: bellRadius, startAngle: .pi + jointRadians, endAngle: .pi - jointRadians, clockwise: false)
        path.closeSubpath()
        let unitVector = CGPoint(x: vector.x / length, y: vector.y / length)
        var transform = CGAffineTransform(a: unitVector.x, b: unitVector.y, c: -unitVector.y, d: unitVector.x, tx: start.x, ty: start.y)
        return path.copy(using: &transform)!
    }
}
The code above replicates the gif animation. I used the questions below to create it if that helps.
Draw a line that can stretch like the Xcode assistant editor in Swift
How to reproduce this Xcode blue drag line
I think the answer is in the above question, but I can't get my head around it as it's for macOS and not iOS.
Any help is much appreciated
UPDATE
From the really good answers given, I added the code below in AddedPanGesture(). This isn't the finished version of the code.
case .changed:
    let linePath = UIBezierPath()
    linePath.move(to: panGestureStartPoint)
    linePath.addLine(to: currentPanPoint)
    lineShape.path = linePath.cgPath
    lineShape.path = CGPath.barbell(from: panGestureStartPoint, to: currentPanPoint, barThickness: 2.0, bellRadius: 6.0)
    let labels = [label2, label3]
    for labelPoint in labels {
        let point = panRecognizer.location(in: labelPoint)
        if labelPoint!.layer.contains(point) {
            labelPoint!.layer.borderWidth = 10
            labelPoint!.layer.borderColor = UIColor.green.cgColor
        } else {
            labelPoint!.layer.borderWidth = 0
            labelPoint!.layer.borderColor = UIColor.clear.cgColor
        }
    }
case .ended:
    let labels = [label2, label3]
    for labelPoint in labels {
        let point = panRecognizer.location(in: labelPoint)
        if labelPoint!.layer.contains(point) {
            labelPoint!.layer.borderWidth = 2
            labelPoint!.layer.borderColor = UIColor.green.cgColor
        } else {
            lineShape.path = nil
            lineShape.removeFromSuperlayer()
            labelPoint!.layer.borderWidth = 0
            labelPoint!.layer.borderColor = UIColor.clear.cgColor
        }
    }
If there is a better way I would love the input.
Thanks!
CGRect implements a contains(_:) method that lets you tell if a rect contains a point.
You need to add logic that makes sure your starting drag point is inside a button, and that your ending point is inside a different button, using the frames of those buttons as the rects you interrogate. If you find that the user has dragged from one button to the next then your .ended handler (as mentioned by M Abubaker Majeed in his/her comment) needs to detect that and add a permanent link somehow. How you do that is up to you. You could use a separate shape layer to hold the lines that you want to persist, and an array of starting/ending points of the connecting lines, or various other approaches.
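A minimal sketch of the "separate shape layer plus an array of endpoints" idea (names hypothetical, reusing the CGPath.barbell helper from the question):

private var connections: [(from: CGPoint, to: CGPoint)] = []
private let permanentLines = CAShapeLayer() // add to view.layer once, in viewDidLoad

private func commitConnection(from start: CGPoint, to end: CGPoint) {
    connections.append((start, end))
    // Rebuild a single path containing every persisted connection.
    let path = CGMutablePath()
    for line in connections {
        path.addPath(CGPath.barbell(from: line.from, to: line.to, barThickness: 2.0, bellRadius: 6.0))
    }
    permanentLines.fillColor = UIColor.blue.cgColor
    permanentLines.path = path
}

Call commitConnection from the .ended branch when the drop-target test passes, then clear the temporary lineShape as before.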
I just updated the code for your convenience.
@objc func panGestureCalled(_: UIPanGestureRecognizer) {
    let currentPanPoint = panRecognizer.location(in: self.view)
    switch panRecognizer.state {
    case .began:
        panGestureStartPoint = currentPanPoint
        self.view.layer.addSublayer(lineShape)
    case .changed:
        let linePath = UIBezierPath()
        linePath.move(to: panGestureStartPoint)
        linePath.addLine(to: currentPanPoint)
        lineShape.path = linePath.cgPath
        lineShape.path = CGPath.barbell(from: panGestureStartPoint, to: currentPanPoint, barThickness: 2.0, bellRadius: 6.0)
        label2.layer.borderWidth = hasDestine(panRecognizer) ? 10 : 0
        label2.layer.borderColor = hasDestine(panRecognizer) ? UIColor.green.cgColor : UIColor.clear.cgColor
    case .ended:
        if !hasDestine(panRecognizer) {
            lineShape.path = nil
            lineShape.removeFromSuperlayer()
        }
        label2.layer.borderWidth = 0
    default: break
    }
}
func hasDestine(_ panRecognizer: UIPanGestureRecognizer) -> Bool {
    let point = panRecognizer.location(in: label2)
    return label2.layer.contains(point)
}

iOS memory warning in Swift

I use a timer that fires a function every 0.1 seconds to get a UIImage using UIGraphicsGetImageFromCurrentImageContext and assign it to an @IBOutlet weak UIImageView's image (coverImageView).
var timer: Timer?
func fireTimer() {
    timer = Timer.scheduledTimer(withTimeInterval: 0.1, repeats: true, block: { [weak self] _ in
        self?.lightTheFier()
    })
}
func invalidateTimer() {
    timer?.invalidate()
    timer = nil
}
func lightTheFier() {
    var points = [CGPoint]()
    for pin in pins {
        let point = mapView.convert(pin.coordinate, toPointTo: coverImageView)
        points.append(point)
    }
    coverImageView.image = getMaskedImage(points: points, zoomLevel: Int(mapView.zoomLevel()))
}
func getOringinalImageFromPath() -> UIImage {
    UIGraphicsBeginImageContextWithOptions(coverImageView.bounds.size, false, 0.0)
    let path = UIBezierPath(rect: coverImageView.bounds)
    UIColor.black.setFill()
    path.fill()
    let image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return image!
}
func getMaskImageFromPath(points: [CGPoint], scale: CGFloat) -> UIImage {
    UIGraphicsBeginImageContextWithOptions(coverImageView.bounds.size, false, 0.0)
    let radius: CGFloat = 10.0
    UIColor.blue.setFill()
    for point in points {
        let rect = CGRect(x: point.x - (scale * radius / 2), y: point.y - (scale * radius / 2), width: scale * radius, height: scale * radius)
        let path = UIBezierPath(ovalIn: rect)
        path.fill()
    }
    let image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return image!
}
func getMaskedImage(points: [CGPoint], zoomLevel: Int) -> UIImage {
    let orImage = getOringinalImageFromPath().cgImage
    let maskImage = getMaskImageFromPath(points: points, scale: CGFloat(zoomLevel)).cgImage
    let mask = CGImage(maskWidth: maskImage!.width, height: maskImage!.height, bitsPerComponent: maskImage!.bitsPerComponent, bitsPerPixel: maskImage!.bitsPerPixel, bytesPerRow: maskImage!.bytesPerRow, provider: maskImage!.dataProvider!, decode: nil, shouldInterpolate: true)
    let maskRef = orImage?.masking(mask!)
    let image = UIImage(cgImage: maskRef!)
    return image
}
All the code above is in a single ViewController.swift for testing.
It's fine that memory increases while the function is running; what I want is for the memory to be released when I invalidate the timer.
How should I resolve this issue?
You're starting an image context: UIGraphicsBeginImageContextWithOptions, but you're never ending it. You need to add UIGraphicsEndImageContext() to your getMaskImageFromPath and getOringinalImageFromPath methods:
func getMaskImageFromPath(points: [CGPoint], scale: CGFloat) -> UIImage {
    UIGraphicsBeginImageContextWithOptions(coverImageView.bounds.size, false, 0.0)
    let radius: CGFloat = 10.0
    UIColor.blue.setFill()
    for point in points {
        let rect = CGRect(x: point.x - (scale * radius / 2), y: point.y - (scale * radius / 2), width: scale * radius, height: scale * radius)
        let path = UIBezierPath(ovalIn: rect)
        path.fill()
    }
    let image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext() // Missing this line
    return image!
}
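On iOS 10 and later you can sidestep the begin/end pairing entirely with UIGraphicsImageRenderer, which opens and closes the context for you; a sketch of the same mask drawing (same points/scale parameters assumed):

func getMaskImageFromPath(points: [CGPoint], scale: CGFloat) -> UIImage {
    let radius: CGFloat = 10.0
    let renderer = UIGraphicsImageRenderer(bounds: coverImageView.bounds)
    return renderer.image { _ in
        // The renderer manages the context; no begin/end calls to forget.
        UIColor.blue.setFill()
        for point in points {
            let rect = CGRect(x: point.x - (scale * radius / 2), y: point.y - (scale * radius / 2), width: scale * radius, height: scale * radius)
            UIBezierPath(ovalIn: rect).fill()
        }
    }
}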

iOS Save image with name

I have an app that takes a picture and merges it with another image. I want to be able to display all the images taken in the app (and only my app) in a gallery view. The problem I am having is in assigning the image a name.
I have tried a number of different ways.
First Attempt
UIImageWriteToSavedPhotosAlbum(newImage, nil, nil, nil)
This saved the image, but I do not know how I can retrieve it.
Second Attempt
PHPhotoLibrary.sharedPhotoLibrary().performChanges({
    let assetRequest = PHAssetChangeRequest.creationRequestForAssetFromImage(newImage)
}, completionHandler: { (success, error) -> Void in
    if success {
        // image saved to photos library.
        print("image saved")
    }
});
Same problem as the first attempt.
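For what it's worth, the Photos route can be made retrievable: the creation request hands back a placeholder whose localIdentifier can be stored and used to fetch the asset again. A sketch in current Swift (how you persist the identifier is up to you):

var savedIdentifier: String?
PHPhotoLibrary.shared().performChanges({
    let request = PHAssetChangeRequest.creationRequestForAsset(from: newImage)
    savedIdentifier = request.placeholderForCreatedAsset?.localIdentifier
}, completionHandler: { success, error in
    guard success, let identifier = savedIdentifier else { return }
    // Later: fetch the exact asset back by its identifier.
    let assets = PHAsset.fetchAssets(withLocalIdentifiers: [identifier], options: nil)
    print("fetched:", assets.firstObject as Any)
})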
Third Attempt
let date: NSDate = NSDate()
let dateFormatter = NSDateFormatter()
dateFormatter.dateFormat = "yyyy-MM-dd'_'HH:mm:ss"
dateFormatter.timeZone = NSTimeZone(name: "GMT")
let imageName = "\(dateFormatter.stringFromDate(date)).jpg"
//var paths: [AnyObject] = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, true)
var documentsDirectoryPath = getDocumentsURL().relativePath
documentsDirectoryPath! += imageName
let settingsData: NSData = UIImageJPEGRepresentation(newImage, 1.0)!
settingsData.writeToFile(documentsDirectoryPath!, atomically: true)
This seems to have the most promise, but the file does not save.
I have looked at various answers on here, but I still can't identify a solution.
Add a slash as the first character of the imageName string, like this:
let imageName = "/\(dateFormatter.stringFromDate(date)).jpg"
Otherwise the path would be "..../Documentsmyimagename" when it should be "..../Documents/myimagename"; i.e. without the additional slash the image is saved to the folder above "Documents".
I also recommend not using the colon character in filenames; on a Mac the colon is not allowed and is replaced automatically.
This is the code I tested in a new Xcode project and that worked for me (I put a file "myimage.png" in the root folder of the project):
let newImage = UIImage.init(named: "myimage")
let date: NSDate = NSDate()
let dateFormatter = NSDateFormatter()
//dateFormatter.dateFormat = "yyyy-MM-dd'_'HH:mm:ss"
dateFormatter.dateFormat = "yyyy-MM-dd'_'HH_mm_ss"
dateFormatter.timeZone = NSTimeZone(name: "GMT")
let imageName = "/\(dateFormatter.stringFromDate(date)).jpg"
print(imageName)
//var paths: [AnyObject] = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, true)
//var documentsDirectoryPath = getDocumentsURL().relativePath
var documentsDirectoryPath = NSSearchPathForDirectoriesInDomains(.DocumentDirectory, .UserDomainMask, true)[0]
documentsDirectoryPath += imageName
print(documentsDirectoryPath)
let settingsData: NSData = UIImageJPEGRepresentation(newImage!, 1.0)!
settingsData.writeToFile(documentsDirectoryPath, atomically: true)
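A slightly safer variant of the same fix: build the path with URL.appendingPathComponent, which inserts the separator for you, so the missing-slash bug cannot occur (current Swift shown):

let newImage = UIImage(named: "myimage")
let documents = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
let formatter = DateFormatter()
formatter.dateFormat = "yyyy-MM-dd'_'HH_mm_ss"
formatter.timeZone = TimeZone(identifier: "GMT")
// appendingPathComponent adds the "/" between "Documents" and the file name.
let fileURL = documents.appendingPathComponent("\(formatter.string(from: Date())).jpg")
if let data = newImage?.jpegData(compressionQuality: 1.0) {
    try? data.write(to: fileURL, options: .atomic)
}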
SWIFT 3 :)
import UIKit
class ShowImageViewController: UIViewController {
    @IBOutlet weak var mainImageView: UIImageView!
    @IBOutlet weak var tempImageView: UIImageView!
    private var lastPoint = CGPoint.zero
    private var red: CGFloat = 0.0
    private var green: CGFloat = 0.0
    private var blue: CGFloat = 0.0
    private var brushWidth: CGFloat = 10.0
    private var opacity: CGFloat = 1.0
    private var swiped = false
    override func viewDidLoad() {
        super.viewDidLoad()
        mainImageView.image = captureImages[selectedImageindex]
        tempImageView.frame = getImageRect(for: mainImageView)
        // Do any additional setup after loading the view.
    }
    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }
    override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
        swiped = false
        if let touch = touches.first {
            lastPoint = touch.location(in: tempImageView)
        }
    }
    override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
        swiped = true
        if let touch = touches.first {
            let currentPoint = touch.location(in: tempImageView)
            drawLineFrom(fromPoint: lastPoint, toPoint: currentPoint)
            lastPoint = currentPoint
        }
    }
    override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
        if !swiped {
            drawLineFrom(fromPoint: lastPoint, toPoint: lastPoint)
        }
        // Merge tempImageView into mainImageView
        UIGraphicsBeginImageContext(mainImageView.frame.size)
        mainImageView.image?.draw(in: CGRect(x: 0, y: 0, width: mainImageView.frame.size.width, height: mainImageView.frame.size.height), blendMode: .normal, alpha: 1.0)
        tempImageView.image?.draw(in: CGRect(x: 0, y: 0, width: mainImageView.frame.size.width, height: mainImageView.frame.size.height), blendMode: .normal, alpha: opacity)
        mainImageView.image = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        tempImageView.image = nil
    }
    func drawLineFrom(fromPoint: CGPoint, toPoint: CGPoint) {
        UIGraphicsBeginImageContext(mainImageView.frame.size)
        let context = UIGraphicsGetCurrentContext()
        tempImageView.image?.draw(in: CGRect(x: 0, y: 0, width: mainImageView.frame.size.width, height: mainImageView.frame.size.height))
        context!.move(to: CGPoint(x: fromPoint.x, y: fromPoint.y))
        context!.addLine(to: CGPoint(x: toPoint.x, y: toPoint.y))
        context!.setLineCap(.round)
        context!.setLineWidth(brushWidth)
        context!.setStrokeColor(red: red, green: green, blue: blue, alpha: 1.0)
        context!.setBlendMode(.normal)
        context!.strokePath()
        tempImageView.image = UIGraphicsGetImageFromCurrentImageContext()
        tempImageView.alpha = opacity
        UIGraphicsEndImageContext()
    }
    func getImageRect(for imageView: UIImageView) -> CGRect {
        let resVi = (imageView.image?.size.width)! / (imageView.image?.size.height)!
        let resPl = imageView.frame.size.width / imageView.frame.size.height
        if resPl > resVi {
            let imageSize = CGSize(width: CGFloat((imageView.image?.size.width)! * imageView.frame.size.height / (imageView.image?.size.height)!), height: CGFloat(imageView.frame.size.height))
            return CGRect(x: CGFloat((imageView.frame.size.width - imageSize.width) / 2), y: CGFloat((imageView.frame.size.height - imageSize.height) / 2), width: CGFloat(imageSize.width), height: CGFloat(imageSize.height))
        } else {
            let imageSize = CGSize(width: CGFloat(imageView.frame.size.width), height: CGFloat((imageView.image?.size.height)! * imageView.frame.size.width / (imageView.image?.size.width)!))
            return CGRect(x: CGFloat((imageView.frame.size.width - imageSize.width) / 2), y: CGFloat((imageView.frame.size.height - imageSize.height) / 2), width: CGFloat(imageSize.width), height: CGFloat(imageSize.height))
        }
    }
}
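Incidentally, the aspect-fit math in getImageRect(for:) is essentially what AVFoundation's AVMakeRect(aspectRatio:insideRect:) computes; a sketch of the shorter equivalent (assuming, as in the code above, the image view's origin is treated as zero):

import AVFoundation

func getImageRect(for imageView: UIImageView) -> CGRect {
    guard let size = imageView.image?.size else { return imageView.bounds }
    // Largest rect with the image's aspect ratio that fits inside the view.
    return AVMakeRect(aspectRatio: size, insideRect: imageView.bounds)
}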
