I was recently attempting to convert the code from here into Swift. However, I keep getting a white color, no matter the image. Here's my code:
// Playground - noun: a place where people can play

import UIKit

extension UIImage {
    func averageColor() -> UIColor {
        var colorSpace = CGColorSpaceCreateDeviceRGB()
        var rgba: [CGFloat] = [0, 0, 0, 0]
        var context = CGBitmapContextCreate(&rgba, 1, 1, 8, 4, colorSpace, CGBitmapInfo.fromRaw(CGImageAlphaInfo.PremultipliedLast.toRaw())!)
        rgba
        CGContextDrawImage(context, CGRectMake(0, 0, 1, 1), self.CGImage)

        if rgba[3] > 0 {
            var alpha = rgba[3] / 255
            var multiplier = alpha / 255
            return UIColor(red: rgba[0] * multiplier, green: rgba[1] * multiplier, blue: rgba[2] * multiplier, alpha: alpha)
        } else {
            return UIColor(red: rgba[0] / 255, green: rgba[1] / 255, blue: rgba[2] / 255, alpha: rgba[3] / 255)
        }
    }
}

var img = UIImage(data: NSData(contentsOfURL: NSURL(string: "http://upload.wikimedia.org/wikipedia/commons/c/c3/Aurora_as_seen_by_IMAGE.PNG")))
img.averageColor()
Thanks in advance.
CoreImage in iOS 9: use the CIAreaAverage filter and pass the extent of your entire image to be averaged.
Plus, it's much faster since it'll either be running on the GPU or as a highly-optimized CPU CIKernel.
import UIKit

extension UIImage {
    func areaAverage() -> UIColor {
        var bitmap = [UInt8](count: 4, repeatedValue: 0)

        if #available(iOS 9.0, *) {
            // Get average color.
            let context = CIContext()
            let inputImage = CIImage ?? CoreImage.CIImage(CGImage: CGImage!)
            let extent = inputImage.extent
            let inputExtent = CIVector(x: extent.origin.x, y: extent.origin.y, z: extent.size.width, w: extent.size.height)
            let filter = CIFilter(name: "CIAreaAverage", withInputParameters: [kCIInputImageKey: inputImage, kCIInputExtentKey: inputExtent])!
            let outputImage = filter.outputImage!
            let outputExtent = outputImage.extent
            assert(outputExtent.size.width == 1 && outputExtent.size.height == 1)

            // Render to bitmap.
            context.render(outputImage, toBitmap: &bitmap, rowBytes: 4, bounds: CGRect(x: 0, y: 0, width: 1, height: 1), format: kCIFormatRGBA8, colorSpace: CGColorSpaceCreateDeviceRGB())
        } else {
            // Create 1x1 context that interpolates pixels when drawing to it.
            let context = CGBitmapContextCreate(&bitmap, 1, 1, 8, 4, CGColorSpaceCreateDeviceRGB(), CGBitmapInfo.ByteOrderDefault.rawValue | CGImageAlphaInfo.PremultipliedLast.rawValue)!
            let inputImage = CGImage ?? CIContext().createCGImage(CIImage!, fromRect: CIImage!.extent)

            // Render to bitmap.
            CGContextDrawImage(context, CGRect(x: 0, y: 0, width: 1, height: 1), inputImage)
        }

        // Compute result.
        let result = UIColor(red: CGFloat(bitmap[0]) / 255.0, green: CGFloat(bitmap[1]) / 255.0, blue: CGFloat(bitmap[2]) / 255.0, alpha: CGFloat(bitmap[3]) / 255.0)
        return result
    }
}
Swift 3
func areaAverage() -> UIColor {
    var bitmap = [UInt8](repeating: 0, count: 4)

    if #available(iOS 9.0, *) {
        // Get average color.
        let context = CIContext()
        let inputImage: CIImage = ciImage ?? CoreImage.CIImage(cgImage: cgImage!)
        let extent = inputImage.extent
        let inputExtent = CIVector(x: extent.origin.x, y: extent.origin.y, z: extent.size.width, w: extent.size.height)
        let filter = CIFilter(name: "CIAreaAverage", withInputParameters: [kCIInputImageKey: inputImage, kCIInputExtentKey: inputExtent])!
        let outputImage = filter.outputImage!
        let outputExtent = outputImage.extent
        assert(outputExtent.size.width == 1 && outputExtent.size.height == 1)

        // Render to bitmap.
        context.render(outputImage, toBitmap: &bitmap, rowBytes: 4, bounds: CGRect(x: 0, y: 0, width: 1, height: 1), format: kCIFormatRGBA8, colorSpace: CGColorSpaceCreateDeviceRGB())
    } else {
        // Create 1x1 context that interpolates pixels when drawing to it.
        let context = CGContext(data: &bitmap, width: 1, height: 1, bitsPerComponent: 8, bytesPerRow: 4, space: CGColorSpaceCreateDeviceRGB(), bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)!
        let inputImage = cgImage ?? CIContext().createCGImage(ciImage!, from: ciImage!.extent)

        // Render to bitmap.
        context.draw(inputImage!, in: CGRect(x: 0, y: 0, width: 1, height: 1))
    }

    // Compute result.
    let result = UIColor(red: CGFloat(bitmap[0]) / 255.0, green: CGFloat(bitmap[1]) / 255.0, blue: CGFloat(bitmap[2]) / 255.0, alpha: CGFloat(bitmap[3]) / 255.0)
    return result
}
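A quick usage sketch, assuming the method lives in the UIImage extension shown above (yourImage and someView are placeholder names):

// Usage sketch -- yourImage is a placeholder UIImage.
let average = yourImage.areaAverage()
someView.backgroundColor = average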
Here's a solution:
func averageColor() -> UIColor {
let rgba = UnsafeMutablePointer<CUnsignedChar>.alloc(4)
let colorSpace: CGColorSpaceRef = CGColorSpaceCreateDeviceRGB()
let info = CGBitmapInfo(CGImageAlphaInfo.PremultipliedLast.rawValue)
let context: CGContextRef = CGBitmapContextCreate(rgba, 1, 1, 8, 4, colorSpace, info)
CGContextDrawImage(context, CGRectMake(0, 0, 1, 1), self.CGImage)
if rgba[3] > 0 {
let alpha: CGFloat = CGFloat(rgba[3]) / 255.0
let multiplier: CGFloat = alpha / 255.0
return UIColor(red: CGFloat(rgba[0]) * multiplier, green: CGFloat(rgba[1]) * multiplier, blue: CGFloat(rgba[2]) * multiplier, alpha: alpha)
} else {
return UIColor(red: CGFloat(rgba[0]) / 255.0, green: CGFloat(rgba[1]) / 255.0, blue: CGFloat(rgba[2]) / 255.0, alpha: CGFloat(rgba[3]) / 255.0)
}
}
Swift 3:
func areaAverage() -> UIColor {
var bitmap = [UInt8](repeating: 0, count: 4)
let context = CIContext(options: nil)
let cgImg = context.createCGImage(CoreImage.CIImage(cgImage: self.cgImage!), from: CoreImage.CIImage(cgImage: self.cgImage!).extent)
let inputImage = CIImage(cgImage: cgImg!)
let extent = inputImage.extent
let inputExtent = CIVector(x: extent.origin.x, y: extent.origin.y, z: extent.size.width, w: extent.size.height)
let filter = CIFilter(name: "CIAreaAverage", withInputParameters: [kCIInputImageKey: inputImage, kCIInputExtentKey: inputExtent])!
let outputImage = filter.outputImage!
let outputExtent = outputImage.extent
assert(outputExtent.size.width == 1 && outputExtent.size.height == 1)
// Render to bitmap.
context.render(outputImage, toBitmap: &bitmap, rowBytes: 4, bounds: CGRect(x: 0, y: 0, width: 1, height: 1), format: kCIFormatRGBA8, colorSpace: CGColorSpaceCreateDeviceRGB())
// Compute result.
let result = UIColor(red: CGFloat(bitmap[0]) / 255.0, green: CGFloat(bitmap[1]) / 255.0, blue: CGFloat(bitmap[2]) / 255.0, alpha: CGFloat(bitmap[3]) / 255.0)
return result
}
Are you setting up your context correctly? If I look at the documentation for the CGBitmapContext Reference:
https://developer.apple.com/library/ios/documentation/graphicsimaging/Reference/CGBitmapContext/index.html#//apple_ref/c/func/CGBitmapContextCreate
it looks like you are only allocating enough memory for the image that would be contained in the CGFloat array. It also looks like you are telling the compiler that your image is only going to be one pixel by one pixel.
It looks like that size is also being confirmed as one pixel by one pixel when you are setting your CGRect in CGContextDrawImage.
If the Playground is only creating an image one pixel by one pixel, that would explain why you are only seeing a white screen.
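To illustrate the allocation point, here is a minimal sketch (not from the original post) that keeps the intentional 1x1 averaging context but backs it with four UInt8 components, which is what a premultiplied RGBA8 bitmap context actually writes; the averagePixelColor name is hypothetical:

import UIKit

extension UIImage {
    // Hypothetical helper: draw the whole image into a 1x1 RGBA8 context
    // and read back the single interpolated pixel as the average color.
    func averagePixelColor() -> UIColor? {
        guard let cgImage = cgImage else { return nil }
        var rgba = [UInt8](repeating: 0, count: 4)  // bytes, not CGFloats
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        guard let context = CGContext(data: &rgba, width: 1, height: 1,
                                      bitsPerComponent: 8, bytesPerRow: 4,
                                      space: colorSpace,
                                      bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue) else { return nil }
        // Downsampling the whole image to a single pixel averages its colors.
        context.draw(cgImage, in: CGRect(x: 0, y: 0, width: 1, height: 1))
        return UIColor(red: CGFloat(rgba[0]) / 255,
                       green: CGFloat(rgba[1]) / 255,
                       blue: CGFloat(rgba[2]) / 255,
                       alpha: CGFloat(rgba[3]) / 255)
    }
}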
How do I get the average color of the non-transparent parts of an image?
I get that you can get the average color of an image using the block of code below, but it also factors in the transparent parts to the calculation.
var averageColor: UIColor? {
    guard let inputImage = CIImage(image: self) else { return nil }
    let extentVector = CIVector(x: inputImage.extent.origin.x,
                                y: inputImage.extent.origin.y,
                                z: inputImage.extent.size.width,
                                w: inputImage.extent.size.height)

    guard let filter = CIFilter(name: "CIAreaAverage",
                                withInputParameters: [kCIInputImageKey: inputImage, kCIInputExtentKey: extentVector]) else { return nil }
    guard let outputImage = filter.outputImage else { return nil }

    var bitmap = [UInt8](repeating: 0, count: 4)
    let context = CIContext(options: [kCIContextWorkingColorSpace: kCFNull])
    context.render(outputImage,
                   toBitmap: &bitmap,
                   rowBytes: 4,
                   bounds: CGRect(x: 0, y: 0, width: 1, height: 1),
                   format: kCIFormatRGBA8,
                   colorSpace: nil)

    return UIColor(red: CGFloat(bitmap[0]) / 255,
                   green: CGFloat(bitmap[1]) / 255,
                   blue: CGFloat(bitmap[2]) / 255,
                   alpha: CGFloat(bitmap[3]) / 255)
}
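One way to discount the transparent parts (a sketch under an assumption, not from the original post): Core Image averages premultiplied components, so the RGB that CIAreaAverage returns is already weighted by alpha. Dividing each channel by the averaged alpha un-premultiplies the result, which amounts to averaging pixels in proportion to their opacity, so fully transparent pixels contribute nothing:

// Sketch: un-premultiply the rendered CIAreaAverage result.
// Assumes `bitmap` holds the premultiplied RGBA8 output from the code above.
func unpremultipliedAverage(bitmap: [UInt8]) -> UIColor? {
    let alpha = CGFloat(bitmap[3]) / 255
    guard alpha > 0 else { return nil }  // image is fully transparent
    return UIColor(red: (CGFloat(bitmap[0]) / 255) / alpha,
                   green: (CGFloat(bitmap[1]) / 255) / alpha,
                   blue: (CGFloat(bitmap[2]) / 255) / alpha,
                   alpha: 1)
}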
I know it is possible to change the color of a pixel using a pixel buffer, as in the code below, but I just want to blur a single pixel using a CIFilter rather than changing its color. I don't want to apply the CIFilter to the whole image.
// data pointer – stores an array of the pixel components, e.g. (r0, g0, b0, a0, r1, g1, b1, a1, ..., rn, gn, bn, an)
let data: UnsafeMutablePointer<UInt8> = calloc(bytesPerRow, height)!.assumingMemoryBound(to: UInt8.self)

// index of the pixel (4 components times the x position, plus the y position times the row width)
let pixelIndex = 4 * (Int(location.x) + (Int(location.y) * width))

// set the pixel components to the color components
data[pixelIndex] = red
data[pixelIndex + 1] = green
data[pixelIndex + 2] = blue
data[pixelIndex + 3] = alpha
Also, can the code below be used to apply a CIFilter to a single pixel?
if let pixelData = self.cgImage?.dataProvider?.data {
    let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
    let pixelInfo: Int = ((Int(self.size.width) * Int(pos.y)) + Int(pos.x)) * 4

    let red = CGFloat(data[pixelInfo]) / CGFloat(255.0)
    let green = CGFloat(data[pixelInfo + 1]) / CGFloat(255.0)
    let blue = CGFloat(data[pixelInfo + 2]) / CGFloat(255.0)
    let alpha = CGFloat(data[pixelInfo + 3]) / CGFloat(255.0)
}
Check out the CIBlendWithMask filter. It allows you to create a mask of any shape (even a single pixel) and use that to blend the input with another input. If you make inputBackgroundImage be the original image, and make inputImage be the original image with your desired filter applied, the inputImageMask is an all-black image with only the single pixel you want to change in white.
I typed this up quickly without testing the code, so there could be a few errors. I've done something very similar recently, though, so it shouldn't be too far off. Let me know what you get; even if this doesn't work as-is, I bet it's close.
/*
 Implementation

 Notes:
 - I don't know what kind of `CIFilter` blur you'd like to apply, so I'm just using CIBoxBlur from here:
   https://developer.apple.com/library/archive/documentation/GraphicsImaging/Reference/CoreImageFilterReference/#//apple_ref/doc/filter/ci/CIBoxBlur
 */

// Normal image
let inputImage: UIImage = originalImage

// Image containing only the PIXELS TO BLUR -- we change the rest of the pixels to clear,
// so we can use this both as the blur input and as the mask.
let unblurredImage = getBackgroundImage(ciimage: CIImage(image: inputImage)!)!

let filter = CIFilter(name: "CIBoxBlur")
filter?.setValue(CIImage(image: unblurredImage), forKey: kCIInputImageKey)
let blurredImage = filter?.outputImage

// Now we can blend the two images. Where the mask's alpha is 1 the blend shows
// inputImage (the blurred pixels); where it is 0 it shows the background (the original).
let blendFilter = CIFilter(name: "CIBlendWithAlphaMask")
blendFilter?.setValue(blurredImage, forKey: kCIInputImageKey)
blendFilter?.setValue(CIImage(image: inputImage), forKey: "inputBackgroundImage")
blendFilter?.setValue(blurredImage, forKey: "inputMaskImage")
let finalCIImage = blendFilter!.outputImage!
let finalImage = UIImage(ciImage: finalCIImage)
/*
 Functions used in the process
 */

// Create an image containing only the pixels we want to blur; everything else becomes clear.
func getBackgroundImage(ciimage: CIImage) -> UIImage? {
    guard let inputCGImage = ciimage.convertCIImageToCGImage() else {
        print("Couldn't convert CIImage to CGImage")
        return nil
    }
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let width = inputCGImage.width
    let height = inputCGImage.height
    let bytesPerPixel = 4
    let bitsPerComponent = 8
    let bytesPerRow = bytesPerPixel * width
    let bitmapInfo = RGBA32.bitmapInfo

    guard let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo) else {
        print("Couldn't create CGContext")
        return nil
    }

    context.draw(inputCGImage, in: CGRect(x: 0, y: 0, width: width, height: height))

    guard let buffer = context.data else {
        print("Couldn't get context data")
        return nil
    }

    let pixelBuffer = buffer.bindMemory(to: RGBA32.self, capacity: width * height)

    for row in 0 ..< height {
        for column in 0 ..< width {
            let offset = row * width + column
            /*
             You need to define aPixelIWantBlurred however you desire.
             Also, we don't edit the pixels we want to blur -- we set all the OTHER pixels
             to a transparent value. This lets the result double as the blur input and the mask.
             */
            if pixelBuffer[offset] != aPixelIWantBlurred {
                pixelBuffer[offset] = RGBA32(red: 0, green: 0, blue: 0, alpha: 0)
            }
        }
    }

    let outputCGImage = context.makeImage()!
    let outputImage = UIImage(cgImage: outputCGImage)
    return outputImage
}
extension CIImage {
    func convertCIImageToCGImage() -> CGImage? {
        let context = CIContext(options: nil)
        return context.createCGImage(self, from: self.extent)
    }
}
struct RGBA32: Equatable {
    private var color: UInt32

    var redComponent: UInt8 {
        return UInt8((color >> 24) & 255)
    }

    var greenComponent: UInt8 {
        return UInt8((color >> 16) & 255)
    }

    var blueComponent: UInt8 {
        return UInt8((color >> 8) & 255)
    }

    var alphaComponent: UInt8 {
        return UInt8((color >> 0) & 255)
    }

    init(red: UInt8, green: UInt8, blue: UInt8, alpha: UInt8) {
        let red = UInt32(red)
        let green = UInt32(green)
        let blue = UInt32(blue)
        let alpha = UInt32(alpha)
        color = (red << 24) | (green << 16) | (blue << 8) | (alpha << 0)
    }

    static let red = RGBA32(red: 255, green: 0, blue: 0, alpha: 255)
    static let green = RGBA32(red: 0, green: 255, blue: 0, alpha: 255)
    static let blue = RGBA32(red: 0, green: 0, blue: 255, alpha: 255)
    static let white = RGBA32(red: 255, green: 255, blue: 255, alpha: 255)
    static let black = RGBA32(red: 0, green: 0, blue: 0, alpha: 255)
    static let magenta = RGBA32(red: 255, green: 0, blue: 255, alpha: 255)
    static let yellow = RGBA32(red: 255, green: 255, blue: 0, alpha: 255)
    static let cyan = RGBA32(red: 0, green: 255, blue: 255, alpha: 255)

    static let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Little.rawValue

    static func ==(lhs: RGBA32, rhs: RGBA32) -> Bool {
        return lhs.color == rhs.color
    }
}
I want to find the pixel color in the middle of the screen.
extension UIImage {
    func getPixelColor(atLocation location: CGPoint, withFrameSize size: CGSize) -> UIColor {
        let pixelPlace: CGPoint = CGPoint(x: location.x, y: location.y)
        let pixelData = self.cgImage!.dataProvider!.data
        let data: UnsafePointer<UInt8> = CFDataGetBytePtr(pixelData)
        let pixelIndex: Int = ((Int(self.size.width) * Int(pixelPlace.y)) + Int(pixelPlace.x)) * 4
        let r = CGFloat(data[pixelIndex])     // / CGFloat(255.0)
        let g = CGFloat(data[pixelIndex + 1]) // / CGFloat(255.0)
        let b = CGFloat(data[pixelIndex + 2]) // / CGFloat(255.0)
        let a = CGFloat(data[pixelIndex + 3]) // / CGFloat(255.0)
        return UIColor(red: r, green: g, blue: b, alpha: a)
    }
}
Then I print the color with this.
let place = CGPoint(x: self.view.center.x , y: self.view.center.y)
print(yourImageView.image!.getPixelColor(atLocation: place, withFrameSize: self.view.frame.size))
How do I get the center pixel's color? I keep getting color values that don't match what is in the center. The color detection itself is functional, so I don't believe the problem comes from that.
What I changed (same problem still)
extension UIView {
    func getColor(at point: CGPoint) -> UIColor {
        let pixel = UnsafeMutablePointer<CUnsignedChar>.allocate(capacity: 4)
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedLast.rawValue)
        let context = CGContext(data: pixel, width: 1, height: 1, bitsPerComponent: 8, bytesPerRow: 4, space: colorSpace, bitmapInfo: bitmapInfo.rawValue)!

        context.translateBy(x: -point.x, y: -point.y)
        self.layer.render(in: context)

        let color = UIColor(red: CGFloat(pixel[0]),
                            green: CGFloat(pixel[1]),
                            blue: CGFloat(pixel[2]),
                            alpha: CGFloat(pixel[3]))
        pixel.deallocate(capacity: 4)
        return color
    }
}
And I access it this way
let loc = CGPoint(x: yourImageView.center.x, y: yourImageView.center.y)
var color = yourImageView.getColor(at: loc)
print(color)
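For what it's worth, two things stand out in the code above (the sketch below is an illustration, not the original poster's code): the byte values need to be divided by 255 before building the UIColor, and the point passed to getColor(at:) must be in the view's own coordinate space -- yourImageView.center is expressed in the superview's coordinates, so the view's bounds midpoint is the safer choice:

extension UIView {
    // Hypothetical corrected sampler: same 1x1-context approach, but the
    // components are normalized to 0...1 and the buffer is freed afterwards.
    func colorOfPixel(at point: CGPoint) -> UIColor {
        let pixel = UnsafeMutablePointer<UInt8>.allocate(capacity: 4)
        defer { pixel.deallocate() }
        let colorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGContext(data: pixel, width: 1, height: 1, bitsPerComponent: 8,
                                bytesPerRow: 4, space: colorSpace,
                                bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue)!
        context.translateBy(x: -point.x, y: -point.y)
        layer.render(in: context)
        return UIColor(red: CGFloat(pixel[0]) / 255,
                       green: CGFloat(pixel[1]) / 255,
                       blue: CGFloat(pixel[2]) / 255,
                       alpha: CGFloat(pixel[3]) / 255)
    }
}

// Sample the view's own center, in its own coordinate space:
let loc = CGPoint(x: yourImageView.bounds.midX, y: yourImageView.bounds.midY)
print(yourImageView.colorOfPixel(at: loc))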
For a given multi-color PNG UIImage (with transparency), what is the best/Swift-idiomatic way to:
- create a duplicate UIImage
- find all black pixels in the copy and change them to red
- (return the modified copy)
There are a few related questions on SO but I haven't been able to find something that works.
You have to extract the pixel buffer of the image, at which point you can loop through, changing pixels as you see fit. At the end, create a new image from the buffer.
In Swift 3, this looks like:
func processPixels(in image: UIImage) -> UIImage? {
    guard let inputCGImage = image.cgImage else {
        print("unable to get cgImage")
        return nil
    }
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let width = inputCGImage.width
    let height = inputCGImage.height
    let bytesPerPixel = 4
    let bitsPerComponent = 8
    let bytesPerRow = bytesPerPixel * width
    let bitmapInfo = RGBA32.bitmapInfo

    guard let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo) else {
        print("unable to create context")
        return nil
    }

    context.draw(inputCGImage, in: CGRect(x: 0, y: 0, width: width, height: height))

    guard let buffer = context.data else {
        print("unable to get context data")
        return nil
    }

    let pixelBuffer = buffer.bindMemory(to: RGBA32.self, capacity: width * height)

    for row in 0 ..< height {
        for column in 0 ..< width {
            let offset = row * width + column
            if pixelBuffer[offset] == .black {
                pixelBuffer[offset] = .red
            }
        }
    }

    let outputCGImage = context.makeImage()!
    let outputImage = UIImage(cgImage: outputCGImage, scale: image.scale, orientation: image.imageOrientation)

    return outputImage
}
struct RGBA32: Equatable {
    private var color: UInt32

    var redComponent: UInt8 {
        return UInt8((color >> 24) & 255)
    }

    var greenComponent: UInt8 {
        return UInt8((color >> 16) & 255)
    }

    var blueComponent: UInt8 {
        return UInt8((color >> 8) & 255)
    }

    var alphaComponent: UInt8 {
        return UInt8((color >> 0) & 255)
    }

    init(red: UInt8, green: UInt8, blue: UInt8, alpha: UInt8) {
        let red = UInt32(red)
        let green = UInt32(green)
        let blue = UInt32(blue)
        let alpha = UInt32(alpha)
        color = (red << 24) | (green << 16) | (blue << 8) | (alpha << 0)
    }

    static let red = RGBA32(red: 255, green: 0, blue: 0, alpha: 255)
    static let green = RGBA32(red: 0, green: 255, blue: 0, alpha: 255)
    static let blue = RGBA32(red: 0, green: 0, blue: 255, alpha: 255)
    static let white = RGBA32(red: 255, green: 255, blue: 255, alpha: 255)
    static let black = RGBA32(red: 0, green: 0, blue: 0, alpha: 255)
    static let magenta = RGBA32(red: 255, green: 0, blue: 255, alpha: 255)
    static let yellow = RGBA32(red: 255, green: 255, blue: 0, alpha: 255)
    static let cyan = RGBA32(red: 0, green: 255, blue: 255, alpha: 255)

    static let bitmapInfo = CGImageAlphaInfo.premultipliedLast.rawValue | CGBitmapInfo.byteOrder32Little.rawValue

    static func ==(lhs: RGBA32, rhs: RGBA32) -> Bool {
        return lhs.color == rhs.color
    }
}
For Swift 2 rendition, see previous revision of this answer.
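A quick usage sketch (the asset name and image view are placeholders):

// Usage sketch -- "sample" and imageView are placeholder names.
if let image = UIImage(named: "sample"),
   let recolored = processPixels(in: image) {
    imageView.image = recolored
}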
To get a better result, we can search for a range of colors in the image's pixels. Building on Rob's answer, I made an update, and now the result is better:
func processByPixel(in image: UIImage) -> UIImage? {
    guard let inputCGImage = image.cgImage else { print("unable to get cgImage"); return nil }
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let width = inputCGImage.width
    let height = inputCGImage.height
    let bytesPerPixel = 4
    let bitsPerComponent = 8
    let bytesPerRow = bytesPerPixel * width
    let bitmapInfo = RGBA32.bitmapInfo

    guard let context = CGContext(data: nil, width: width, height: height, bitsPerComponent: bitsPerComponent, bytesPerRow: bytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo) else {
        print("Cannot create context!"); return nil
    }
    context.draw(inputCGImage, in: CGRect(x: 0, y: 0, width: width, height: height))

    guard let buffer = context.data else { print("Cannot get context data!"); return nil }

    let pixelBuffer = buffer.bindMemory(to: RGBA32.self, capacity: width * height)

    for row in 0 ..< height {
        for column in 0 ..< width {
            let offset = row * width + column
            /*
             Here I'm looking for the color RGBA32(red: 231, green: 239, blue: 247, alpha: 255)
             and converting every pixel whose color falls within a small range of it to transparent.
             The comparison is done per channel: pixelRed >= targetRed - 1 && pixelRed <= targetRed + 1,
             and likewise for green and blue.
             */
            if pixelBuffer[offset].redComponent >= 230 && pixelBuffer[offset].redComponent <= 232 &&
               pixelBuffer[offset].greenComponent >= 238 && pixelBuffer[offset].greenComponent <= 240 &&
               pixelBuffer[offset].blueComponent >= 246 && pixelBuffer[offset].blueComponent <= 248 &&
               pixelBuffer[offset].alphaComponent == 255 {
                pixelBuffer[offset] = RGBA32(red: 0, green: 0, blue: 0, alpha: 0) // transparent
            }
        }
    }

    let outputCGImage = context.makeImage()!
    let outputImage = UIImage(cgImage: outputCGImage, scale: image.scale, orientation: image.imageOrientation)

    return outputImage
}
I hope this helps someone 🎉
I am trying to apply a gradient to a UIImage using Core Graphics; however, the result is not very nice. I want a black-to-transparent gradient at the bottom of the image to create contrast so I can place some text over it. The gradient I was able to produce doesn't blend well with the image, though; you can clearly see the separation at the center. The result I am looking for is like this application: http://capptivate.co/2014/02/17/yummly-3/
How should I apply the gradient to achieve this? (I have to apply this to a large quantity of images.)
My Result:
Here is my code:
func imageWithGradient(img: UIImage!) -> UIImage {
    UIGraphicsBeginImageContext(img.size)
    var context = UIGraphicsGetCurrentContext()

    img.drawAtPoint(CGPointMake(0, 0))

    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let locations: [CGFloat] = [0.50, 1.0]

    // 1 = opaque
    // 0 = transparent
    let bottom = UIColor(red: 0, green: 0, blue: 0, alpha: 0.5).CGColor
    let top = UIColor(red: 0, green: 0, blue: 0, alpha: 0).CGColor

    let gradient = CGGradientCreateWithColors(colorSpace, [top, bottom], locations)

    let startPoint = CGPointMake(img.size.width / 2, 0)
    let endPoint = CGPointMake(img.size.width / 2, img.size.height)

    CGContextDrawLinearGradient(context, gradient, startPoint, endPoint, 0)

    let image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()

    return image
}
Swift 3
func imageWithGradient(img: UIImage!) -> UIImage {
    UIGraphicsBeginImageContext(img.size)
    let context = UIGraphicsGetCurrentContext()

    img.draw(at: CGPoint(x: 0, y: 0))

    let colorSpace = CGColorSpaceCreateDeviceRGB()
    let locations: [CGFloat] = [0.0, 1.0]

    let bottom = UIColor(red: 0, green: 0, blue: 0, alpha: 0.5).cgColor
    let top = UIColor(red: 0, green: 0, blue: 0, alpha: 0).cgColor
    let colors = [top, bottom] as CFArray

    let gradient = CGGradient(colorsSpace: colorSpace, colors: colors, locations: locations)

    let startPoint = CGPoint(x: img.size.width / 2, y: 0)
    let endPoint = CGPoint(x: img.size.width / 2, y: img.size.height)

    context!.drawLinearGradient(gradient!, start: startPoint, end: endPoint, options: [])

    let image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()

    return image!
}
I tried this out in a Swift playground and couldn't replicate the exact issue you are describing, but here are some suggestions you could try.
You could change the way you set up the stops in the gradient. Rather than shifting the gradient using the stops, I think it would make more sense to shift the start point, i.e. instead of starting from the top edge, start from the vertical center, something like this:
UIGraphicsBeginImageContext(image.size)
var context = UIGraphicsGetCurrentContext()

image.drawAtPoint(CGPointMake(0, 0))

let colorSpace = CGColorSpaceCreateDeviceRGB()
let locations: [CGFloat] = [0.0, 1.0]

let bottom = UIColor(red: 0, green: 0, blue: 0, alpha: 0.5).CGColor
let top = UIColor(red: 0, green: 0, blue: 0, alpha: 0).CGColor

let gradient = CGGradientCreateWithColors(colorSpace, [top, bottom], locations)

let startPoint = CGPointMake(image.size.width / 2, image.size.height / 2)
let endPoint = CGPointMake(image.size.width / 2, image.size.height)

CGContextDrawLinearGradient(context, gradient, startPoint, endPoint, 0)

let finalImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
Notice locations is now 0 and 1, while the startPoint y coordinate has shifted downwards.
Alternatively, look at passing kCGGradientDrawsBeforeStartLocation as an option to CGContextDrawLinearGradient, as that could also make a difference.
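In the Swift 3 spelling, that option is .drawsBeforeStartLocation. A minimal sketch, reusing the gradient and points from the Swift 3 version above:

// Extend the gradient's first color to cover everything before the start
// point, so there is no hard edge where the gradient begins.
context!.drawLinearGradient(gradient!,
                            start: startPoint,
                            end: endPoint,
                            options: .drawsBeforeStartLocation)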