How can I convert a UIImage to grayscale in Swift using CIFilter?

I am building a scanner component for an iOS app. So far I have the resulting image cropped and in the correct perspective.
Now I need to turn the color image into a black and white "scanned" document.
I tried "CIPhotoEffectNoir", but it produces grayscale rather than true black and white. I want a full-contrast image with 100% black and 100% white.
How can I achieve that?
Thanks

You can use CIColorControls and set the contrast key (kCIInputContrastKey) to increase the black/white contrast, as follows:
Xcode 9 • Swift 4
extension String {
    static let colorControls = "CIColorControls"
}

extension UIImage {
    var coreImage: CIImage? { return CIImage(image: self) }
}

extension CIImage {
    var uiImage: UIImage? { return UIImage(ciImage: self) }
    func applying(contrast value: NSNumber) -> CIImage? {
        return applyingFilter(.colorControls, parameters: [kCIInputContrastKey: value])
    }
    func renderedImage() -> UIImage? {
        guard let image = uiImage else { return nil }
        return UIGraphicsImageRenderer(size: image.size,
                                       format: image.imageRendererFormat).image { _ in
            image.draw(in: CGRect(origin: .zero, size: image.size))
        }
    }
}
let url = URL(string: "https://i.stack.imgur.com/Xs4RX.jpg")!
do {
    if let coreImage = UIImage(data: try Data(contentsOf: url))?.coreImage,
       let increasedContrast = coreImage.applying(contrast: 1.5) {
        imageView.image = increasedContrast.uiImage
        // If you need to convert your image to data (JPEG/PNG), you need to render the CIImage first, using the renderedImage() method above.
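        // For example (a sketch; jpegData(compressionQuality:) is the Swift 4.2+ spelling,
        // older toolchains would use UIImageJPEGRepresentation instead):
        if let jpegData = increasedContrast.renderedImage()?.jpegData(compressionQuality: 0.9) {
            // save or upload jpegData
        }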
    }
} catch {
    print(error)
}
To convert from color to grayscale, set the saturation key (kCIInputSaturationKey) to zero:
extension CIImage {
    func applying(saturation value: NSNumber) -> CIImage? {
        return applyingFilter(.colorControls, parameters: [kCIInputSaturationKey: value])
    }
    var grayscale: CIImage? { return applying(saturation: 0) }
}
let url = URL(string: "https://i.stack.imgur.com/Xs4RX.jpg")!
do {
    if let coreImage = UIImage(data: try Data(contentsOf: url))?.coreImage,
       let grayscale = coreImage.grayscale {
        // use grayscale image here
        imageView.image = grayscale.uiImage
    }
} catch {
    print(error)
}
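To get the full-contrast "scanned" look the question asks for, you can chain both extensions: desaturate first, then boost the contrast. A minimal sketch, assuming image is your cropped UIImage and that a contrast value of 5 suits your documents (tune as needed); on iOS 13+ you could also look at the CIColorThreshold filter for a hard black/white cutoff:
if let coreImage = image.coreImage,
   let blackAndWhite = coreImage.grayscale?.applying(contrast: 5) {
    imageView.image = blackAndWhite.uiImage
}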

Desaturating will convert your image to grayscale. Increasing the contrast will then push those grays out to the extremes, i.e. black and white. You can use CIColorControls for both:
let ciImage = CIImage(image: image)!
let blackAndWhiteImage = ciImage.applyingFilter("CIColorControls", parameters: ["inputSaturation": 0, "inputContrast": 5])
(The original post shows three screenshots: the original image, the result with inputContrast = 1 (the default), and the result with inputContrast = 5.)

In Swift 5.1 I have written an extension for macOS which also converts to and from NSImage. It uses saturation and input contrast to convert the image, and I have factored out a blackAndWhite() helper.
extension NSImage {
    func blackAndWhite() -> NSImage? {
        return applying(saturation: 0, inputContrast: 5)
    }

    func applying(saturation value: NSNumber, inputContrast inputContrastValue: NSNumber) -> NSImage? {
        // Bridge NSImage -> CIImage via its TIFF representation.
        guard let tiffData = tiffRepresentation, let ciImage = CIImage(data: tiffData) else { return nil }
        let blackAndWhiteImage = ciImage.applyingFilter("CIColorControls", parameters: ["inputSaturation": value, "inputContrast": inputContrastValue])
        // Wrap the filtered CIImage in an NSCIImageRep to go back to NSImage.
        let rep = NSCIImageRep(ciImage: blackAndWhiteImage)
        let nsImage = NSImage(size: rep.size)
        nsImage.addRepresentation(rep)
        return nsImage
    }
}
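Usage is then a one-liner (a sketch; "document" is an assumed image name in your bundle):
if let original = NSImage(named: "document"),
   let scanned = original.blackAndWhite() {
    imageView.image = scanned
}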

Related

How do you apply Core Image filters to an onscreen image using Swift and Core Image (macOS or iOS)?

Photos' editing adjustments provide a realtime view of the adjustments as they are applied. I wasn't able to find any samples of how to do this. All the examples seem to apply the filters through a pipeline of sorts, take the resulting image, and then update the screen with the result. See the code below.
Photos seems to show the adjustment applied to the onscreen image. How do they achieve this?
func editImage(inputImage: CGImage) {
    DispatchQueue.global().async {
        let beginImage = CIImage(cgImage: inputImage)
        guard let exposureOutput = self.exposureFilter(beginImage, ev: self.brightness) else {
            return
        }
        guard let vibranceOutput = self.vibranceFilter(exposureOutput, amount: self.vibranceAmount) else {
            return
        }
        guard let unsharpMaskOutput = self.unsharpMaskFilter(vibranceOutput, intensity: self.unsharpMaskIntensity, radius: self.unsharpMaskRadius) else {
            return
        }
        guard let sharpnessOutput = self.sharpenFilter(unsharpMaskOutput, sharpness: self.unsharpMaskIntensity) else {
            return
        }
        if let cgimg = self.context.createCGImage(sharpnessOutput, from: vibranceOutput.extent) {
            DispatchQueue.main.async {
                self.cgImage = cgimg
            }
        }
    }
}
OK, I just found the answer: use MTKView. It works fine, except for getting the image to fill the view correctly!
For the benefit of others, here are the basics. I have yet to figure out how to position the image correctly in the view, but I can see the filter applied in realtime!
class ViewController: NSViewController, MTKViewDelegate {
    ....
    @objc dynamic var cgImage: CGImage? {
        didSet {
            if let cgimg = cgImage {
                ciImage = CIImage(cgImage: cgimg)
            }
        }
    }
    var ciImage: CIImage?

    // Metal resources
    var device: MTLDevice!
    var commandQueue: MTLCommandQueue!
    var sourceTexture: MTLTexture!
    let colorSpace = CGColorSpaceCreateDeviceRGB()
    var context: CIContext!
    var textureLoader: MTKTextureLoader!

    override func viewDidLoad() {
        super.viewDidLoad()
        // Do view setup here.
        let metalView = MTKView()
        metalView.translatesAutoresizingMaskIntoConstraints = false
        self.imageView.addSubview(metalView)
        NSLayoutConstraint.activate([
            metalView.bottomAnchor.constraint(equalTo: view.bottomAnchor),
            metalView.trailingAnchor.constraint(equalTo: view.trailingAnchor),
            metalView.leadingAnchor.constraint(equalTo: view.leadingAnchor),
            metalView.topAnchor.constraint(equalTo: view.topAnchor)
        ])
        device = MTLCreateSystemDefaultDevice()
        commandQueue = device.makeCommandQueue()
        metalView.delegate = self
        metalView.device = device
        metalView.framebufferOnly = false
        context = CIContext()
        textureLoader = MTKTextureLoader(device: device)
    }

    public func draw(in view: MTKView) {
        if let ciImage = self.ciImage {
            if let currentDrawable = view.currentDrawable {
                let commandBuffer = commandQueue.makeCommandBuffer()
                let inputImage = ciImage
                exposureFilter.setValue(inputImage, forKey: kCIInputImageKey)
                exposureFilter.setValue(ev, forKey: kCIInputEVKey)
                context.render(exposureFilter.outputImage!,
                               to: currentDrawable.texture,
                               commandBuffer: commandBuffer,
                               bounds: CGRect(origin: .zero, size: view.drawableSize),
                               colorSpace: colorSpace)
                commandBuffer?.present(currentDrawable)
                commandBuffer?.commit()
            }
        }
    }
}
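On the positioning problem: one approach (an untested sketch) is to aspect-fit and center the CIImage within the drawable before rendering, so the image lands where you want inside the bounds you pass to render:
// Inside draw(in:), before calling context.render:
let output = exposureFilter.outputImage!
let fitScale = min(view.drawableSize.width / output.extent.width,
                   view.drawableSize.height / output.extent.height)
let scaled = output.transformed(by: CGAffineTransform(scaleX: fitScale, y: fitScale))
let originX = (view.drawableSize.width - scaled.extent.width) / 2
let originY = (view.drawableSize.height - scaled.extent.height) / 2
let centered = scaled.transformed(by: CGAffineTransform(translationX: originX, y: originY))
// Then render `centered` instead of exposureFilter.outputImage!, keeping
// bounds: CGRect(origin: .zero, size: view.drawableSize).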

Render an MTIImage

Please don't judge me, I'm just learning Swift.
Recently I installed the MetalPetal framework and followed the instructions:
https://github.com/MetalPetal/MetalPetal#example-code
But I get an error because of MTIContext. Maybe I have to declare something more for MetalPetal?
My code:
import UIKit
import MetalPetal
import CoreGraphics

class ViewController: UIViewController {

    @IBOutlet weak var image1: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()

        weak var image: UIImage?
        image = image1.image
        var ciImage = CIImage(image: image!)
        var cgImage1 = convertCIImageToCGImage(inputImage: ciImage!)
        let imageFromCGImage = MTIImage(cgImage: cgImage1!)
        let inputImage = imageFromCGImage
        let filter = MTISaturationFilter()
        filter.saturation = 1
        filter.inputImage = inputImage
        let outputImage = filter.outputImage
        let context = MTIContext()
        do {
            // `pixelBuffer` is not defined anywhere -- this is where it breaks down.
            try context.render(outputImage, to: pixelBuffer)
            var image3: CIImage? = try context.makeCIImage(from: outputImage!)
            //context.makeCIImage(from: image)
            //context.makeCGImage(from: image)
        } catch {
            print(error)
        }
        // Do any additional setup after loading the view, typically from a nib.
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    func convertCIImageToCGImage(inputImage: CIImage) -> CGImage? {
        let context = CIContext(options: nil)
        if let cgImage = context.createCGImage(inputImage, from: inputImage.extent) {
            return cgImage
        }
        return nil
    }
}
@YuAo
Input Image
A UIImage is backed by either an underlying Quartz image (retrievable with cgImage) or an underlying Core Image (retrievable with ciImage).
MTIImage offers constructors for both types.
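For example (a sketch; the exact initializer labels are assumptions about MetalPetal's API):
let fromCGImage = MTIImage(cgImage: cgImage)   // Quartz-backed
let fromCIImage = MTIImage(ciImage: ciImage)   // Core Image-backed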
MTIContext
An MTIContext must be initialized with a device, which can be retrieved by calling MTLCreateSystemDefaultDevice().
Rendering
Rendering to a pixel buffer is not needed. We can get the result by calling makeCGImage.
Test
I've taken your source code above and adapted it slightly to the points mentioned.
I also added a second UIImageView to see the result of the filtering, and changed the saturation to 0 to verify that the filter works.
If the GPU or shaders are involved, it makes sense to test on a real device, not on the simulator.
The result: in the upper area you see the original JPG, in the lower area the filter is applied.
Swift
The simplified Swift code that produces this result looks like this:
override func viewDidLoad() {
    super.viewDidLoad()

    guard let image = UIImage(named: "regensburg.jpg") else { return }
    guard let cgImage = image.cgImage else { return }
    imageView1.image = image

    let filter = MTISaturationFilter()
    filter.saturation = 0
    filter.inputImage = MTIImage(cgImage: cgImage)

    if let device = MTLCreateSystemDefaultDevice(),
       let outputImage = filter.outputImage {
        do {
            let context = try MTIContext(device: device)
            let filteredImage = try context.makeCGImage(from: outputImage)
            imageView2.image = UIImage(cgImage: filteredImage)
        } catch {
            print(error)
        }
    }
}

Pixellating a UIImage returns a UIImage with a different size

I'm using an extension to pixellate my images like the following:
func pixellated(scale: Int = 8) -> UIImage? {
    guard let ciImage = CIImage(image: self), let filter = CIFilter(name: "CIPixellate") else { return nil }
    filter.setValue(ciImage, forKey: kCIInputImageKey)
    filter.setValue(scale, forKey: kCIInputScaleKey)
    guard let output = filter.outputImage else { return nil }
    return UIImage(ciImage: output)
}
The problem is that the image represented by self here does not have the same size as the one I create using UIImage(ciImage: output).
For example, using that code:
print("image.size BEFORE : \(image.size)")
if let imagePixellated = image.pixellated(scale: 48) {
image = imagePixellated
print("image.size AFTER : \(image.size)")
}
will print:
image.size BEFORE : (400.0, 298.0)
image.size AFTER : (848.0, 644.0)
Not the same size and not the same ratio.
Any idea why?
EDIT:
I added some prints in the extension, as follows:
func pixellated(scale: Int = 8) -> UIImage? {
    guard let ciImage = CIImage(image: self), let filter = CIFilter(name: "CIPixellate") else { return nil }
    print("UIIMAGE : \(self.size)")
    print("ciImage.extent.size : \(ciImage.extent.size)")
    filter.setValue(ciImage, forKey: kCIInputImageKey)
    filter.setValue(scale, forKey: kCIInputScaleKey)
    guard let output = filter.outputImage else { return nil }
    print("output : \(output.extent.size)")
    return UIImage(ciImage: output)
}
And here are the outputs:
UIIMAGE : (250.0, 166.5)
ciImage.extent.size : (500.0, 333.0)
output : (548.0, 381.0)
You have two problems:
1. self.size is measured in points. self's size in pixels is actually self.size multiplied by self.scale.
2. The CIPixellate filter changes the bounds of its image.
To fix problem one, you can simply set the scale property of the returned UIImage to be the same as self.scale:
return UIImage(ciImage: output, scale: self.scale, orientation: imageOrientation)
But you'll find this still isn't quite right. That's because of problem two. For problem two, the simplest solution is to crop the output CIImage:
// Must use self.scale, to disambiguate from the scale parameter
let floatScale = CGFloat(self.scale)
let pixelSize = CGSize(width: size.width * floatScale, height: size.height * floatScale)
let cropRect = CGRect(origin: CGPoint.zero, size: pixelSize)
guard let output = filter.outputImage?.cropped(to: cropRect) else { return nil }
This will give you an image of the size you want.
Now, your next question may be, "why is there a thin, dark border around my pixellated images?" Good question! But ask a new question for that.
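For reference, here is a consolidated sketch of the fixed extension. The clampedToExtent() call is an assumption addressing that dark border: it makes CIPixellate sample edge colors instead of the transparent pixels beyond the image bounds.
func pixellated(scale pixelScale: Int = 8) -> UIImage? {
    guard let ciImage = CIImage(image: self),
          let filter = CIFilter(name: "CIPixellate") else { return nil }
    // Clamp so the filter samples edge colors rather than transparency past the edges.
    filter.setValue(ciImage.clampedToExtent(), forKey: kCIInputImageKey)
    filter.setValue(pixelScale, forKey: kCIInputScaleKey)
    // Crop back to the original extent, which the filter (and the clamp) changed.
    guard let output = filter.outputImage?.cropped(to: ciImage.extent) else { return nil }
    // Preserve the points-vs-pixels scale and the orientation.
    return UIImage(ciImage: output, scale: self.scale, orientation: imageOrientation)
}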

How can I use CIFilter to give a bump effect using Swift?

How can I do this in Swift?
I am trying to apply this effect at a particular position on an image, but I get an error when adding the bump distortion effect.
Please give me simple code to apply this effect.
Thanks in advance.
Try this code:
public typealias Filter = (CIImage) -> CIImage
public typealias CIParameters = [String: Any]

public func bumpDistortion(center: CGPoint, radius: Float, scale: Float) -> Filter {
    return { image in
        let parameters: CIParameters = [
            kCIInputRadiusKey: radius,
            kCIInputCenterKey: CIVector(cgPoint: center),
            kCIInputScaleKey: scale,
            kCIInputImageKey: image]
        let filter = CIFilter(name: "CIBumpDistortion", parameters: parameters)
        return filter!.outputImage!
    }
}
Let's assume you use the code from TastyCat:
if let image = UIImage(named: "YOUR_IMAGE_NAME") {
    let bumpEffect = bumpDistortion(center: YOUR_POINT, radius: YOUR_RADIUS, scale: YOUR_SCALE)
    guard let yourCIImage = CIImage(image: image) else {
        // handle error
        return
    }
    let result = bumpEffect(yourCIImage)
    let theImageWithEffect = UIImage(ciImage: result)
}
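A more compact alternative (a sketch; the center point and values are placeholders to tune) is to call applyingFilter directly on the CIImage:
let result = yourCIImage.applyingFilter("CIBumpDistortion", parameters: [
    kCIInputCenterKey: CIVector(cgPoint: CGPoint(x: 150, y: 150)),
    kCIInputRadiusKey: 300,
    kCIInputScaleKey: 0.5
])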

Is there a way to check if a UIImage has been decompressed?

I'm sure most of you have dealt with forced decompression on a background thread to enhance rendering performance. My question is whether there is a way to check if an image has been decompressed.
The technique below helped me check whether an image has been decompressed. The code is simple to understand:
import UIKit

class ViewController: UIViewController {

    var compressedImage: NSString?
    var decompressedImage: NSString?

    override func viewDidLoad() {
        super.viewDidLoad()
        let image = compressImage()
        var imageView = UIImageView(image: image)
        //self.view.addSubview(imageView)
        let decompressImage = deCompressImage(image: image)
        let imageData = decompressImage.pngData()!
        print("***** Size after decompressed \(imageData.description) **** ")
        imageView = UIImageView(image: decompressImage)
        decompressedImage = imageData.description as NSString?
        let decompressed = checkImageBeenDecompressed(decompressedImage: decompressedImage!, compressedImage: compressedImage!)
        print(decompressed)
        //self.view.addSubview(imageView)
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    func checkImageBeenDecompressed(decompressedImage: NSString, compressedImage: NSString) -> Bool {
        let decompressedSize = Int(decompressedImage.getNumFromString()!)
        let compressedSize = Int(compressedImage.getNumFromString()!)
        if decompressedSize! > compressedSize! {
            print("Image has been decompressed")
            return true
        }
        print("Image has not been decompressed")
        return false
    }

    func compressImage() -> UIImage {
        let oldImage = UIImage(named: "background.jpg")
        var imageData = oldImage!.pngData()!
        print("***** Original Uncompressed Size \(imageData.description) **** ")
        imageData = oldImage!.jpegData(compressionQuality: 0.025)!
        print("***** Compressed Size \(imageData.description) **** ")
        compressedImage = imageData.description as NSString?
        let image = UIImage(data: imageData)
        return image!
    }

    func deCompressImage(image: UIImage) -> UIImage {
        // Drawing the image into a bitmap context forces decoding.
        UIGraphicsBeginImageContextWithOptions(image.size, true, 0)
        image.draw(at: CGPoint.zero)
        let decompressedImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return decompressedImage!
    }
}

extension NSString {
    func getNumFromString() -> String? {
        var numberString: NSString?
        let thisScanner = Scanner(string: self as String)
        let numbers = NSCharacterSet(charactersIn: "0123456789")
        thisScanner.scanUpToCharacters(from: numbers as CharacterSet, into: nil)
        thisScanner.scanCharacters(from: numbers as CharacterSet, into: &numberString)
        return numberString as String?
    }
}
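Note that on iOS 15 and later, UIImage offers preparingForDisplay() (and the async byPreparingForDisplay()) to force decoding explicitly, which sidesteps this size-comparison heuristic. A sketch:
// iOS 15+: request an eagerly decoded (decompressed) copy up front.
if let decoded = UIImage(named: "background.jpg")?.preparingForDisplay() {
    imageView.image = decoded
}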
