How can I add several effects to a picture? I have the following code that adds an effect to a photo:
func applyEffects(name: String, n: Float) {
    filter.setValue(self.cImage, forKeyPath: kCIInputImageKey)
    filter.setValue(n, forKeyPath: name)
    let result = filter.value(forKey: kCIOutputImageKey) as! CIImage
    let cgImage = CIContext(options: nil).createCGImage(result, from: result.extent)
    self.customImage = UIImage(cgImage: cgImage!)
}

func brightness(n: Float) {
    self.applyEffects(name: kCIInputBrightnessKey, n: n)
}

func contrast(n: Float) {
    self.applyEffects(name: kCIInputContrastKey, n: n)
}

func saturation(n: Float) {
    self.applyEffects(name: kCIInputSaturationKey, n: n)
}
But when I want to apply the second effect, the first one disappears. How can I overlay two or more effects on each other?
I'm assuming you are using CIColorControls as your filter.
You need to pass all three values into your call:
// Identity (no-change) values for CIColorControls: brightness 0, contrast 1, saturation 1
var brightness: Float = 0
var contrast: Float = 1
var saturation: Float = 1

func applyEffects() {
    filter.setValue(self.cImage, forKeyPath: kCIInputImageKey)
    filter.setValue(brightness, forKeyPath: kCIInputBrightnessKey)
    filter.setValue(contrast, forKeyPath: kCIInputContrastKey)
    filter.setValue(saturation, forKeyPath: kCIInputSaturationKey)
    let result = filter.value(forKey: kCIOutputImageKey) as! CIImage
    let cgImage = CIContext(options: nil).createCGImage(result, from: result.extent)
    self.customImage = UIImage(cgImage: cgImage!)
}
func brightness(n: Float) {
    self.brightness = n
    applyEffects()
}

func contrast(n: Float) {
    self.contrast = n
    applyEffects()
}

func saturation(n: Float) {
    self.saturation = n
    applyEffects()
}
A suggestion:
If you are trying to use "real-time" updating via UISliders, use a GLKView and send in the CIImage directly. It uses the GPU, and performance on a device is greatly increased. You can always create a UIImage for saving, messaging, etc.
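A minimal sketch of that approach, assuming a view controller that owns the GLKView and the source CIImage (all names here are illustrative, and note that GLKView has since been deprecated in favor of MTKView):

import UIKit
import GLKit
import CoreImage

final class LiveFilterViewController: UIViewController, GLKViewDelegate {
    var baseImage: CIImage?          // the unfiltered source image (set elsewhere)
    var filteredImage: CIImage?
    private var glkView: GLKView!
    private var eaglContext: EAGLContext!
    private var ciContext: CIContext!

    override func viewDidLoad() {
        super.viewDidLoad()
        eaglContext = EAGLContext(api: .openGLES2)
        glkView = GLKView(frame: view.bounds, context: eaglContext)
        glkView.delegate = self
        view.addSubview(glkView)
        // Create the CIContext once; recreating it per frame is expensive.
        ciContext = CIContext(eaglContext: eaglContext)
    }

    // Wired to a UISlider: update the filter output and ask the GLKView to redraw.
    @objc func sliderChanged(_ slider: UISlider) {
        filteredImage = baseImage?.applyingFilter("CIColorControls",
                                                  parameters: [kCIInputContrastKey: slider.value])
        glkView.setNeedsDisplay()
    }

    // GLKViewDelegate: draw the CIImage straight into the GPU-backed drawable.
    func glkView(_ view: GLKView, drawIn rect: CGRect) {
        guard let image = filteredImage ?? baseImage else { return }
        // GLKView drawing is in pixels, not points.
        let drawableRect = CGRect(x: 0, y: 0,
                                  width: view.drawableWidth, height: view.drawableHeight)
        ciContext.draw(image, in: drawableRect, from: image.extent)
    }
}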
Related
I'm building a photo editing app that allows users to change the saturation, contrast and brightness of an image with sliders. I'm doing so by using CIFilter on the UIImage. Here's the code for the adjustment:
extension UIImage {
    func withAdjustments(context: CIContext, saturation: CGFloat, contrast: CGFloat, brightness: CGFloat) -> UIImage {
        guard let cgImage = self.cgImage else { return self }
        guard let filter = CIFilter(name: "CIColorControls") else { return self }
        filter.setValue(CIImage(cgImage: cgImage), forKey: kCIInputImageKey)
        filter.setValue(saturation, forKey: kCIInputSaturationKey)
        filter.setValue(contrast, forKey: kCIInputContrastKey)
        filter.setValue(brightness, forKey: kCIInputBrightnessKey)
        guard let result = filter.value(forKey: kCIOutputImageKey) as? CIImage else { return self }
        guard let newCgImage = context.createCGImage(result, from: result.extent) else { return self }
        return UIImage(cgImage: newCgImage, scale: 1, orientation: imageOrientation)
    }
}
And in my view:
struct EditScreen: View {
    let context = CIContext()
    @State var saturation: Double = 1
    @State var contrast: Double = 1
    @State var brightness: Double = 0
    ...
    var body: some View {
        ...
        Image(uiImage: inputImage!.withAdjustments(
            context: context,
            saturation: saturation,
            contrast: contrast,
            brightness: brightness)).resizable().scaledToFit()
        ...
Those @State properties are modified with Sliders:
Slider(value: $contrast, in: 0...3, step: 0.01)
It works, but it is too laggy and slow in the Xcode simulator. I still haven't been able to try it on a physical device, but it seems to me that it will work fine there. However, I need it to work properly in the simulator, as I don't own a physical device to work with every day. Are there any simulator settings that might be lowering its capabilities and slowing it down?
Please don't judge me, I'm just learning Swift.
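One thing worth noting, independent of any simulator setting: the simulator typically renders Core Image much more slowly than a real device, and the extension above also rebuilds the CIFilter and CIImage on every slider tick. A rough sketch of one common mitigation, creating the context, filter and base CIImage once and only updating the scalar parameters (AdjustmentEngine is a hypothetical helper, not part of the original code):

import UIKit
import CoreImage
import CoreImage.CIFilterBuiltins

final class AdjustmentEngine {
    private let context = CIContext()               // created once and reused
    private let filter = CIFilter.colorControls()   // created once and reused
    private let baseImage: CIImage

    init?(image: UIImage) {
        guard let cgImage = image.cgImage else { return nil }
        baseImage = CIImage(cgImage: cgImage)
        filter.inputImage = baseImage
    }

    // Called from the Slider bindings; only the scalar inputs change per update.
    func adjusted(saturation: Double, contrast: Double, brightness: Double) -> UIImage? {
        filter.saturation = Float(saturation)
        filter.contrast = Float(contrast)
        filter.brightness = Float(brightness)
        guard let output = filter.outputImage,
              let cgImage = context.createCGImage(output, from: output.extent) else { return nil }
        return UIImage(cgImage: cgImage)
    }
}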
Recently I installed the MetalPetal framework and followed the instructions:
https://github.com/MetalPetal/MetalPetal#example-code
But I get an error because of MTIContext. Maybe I have to declare something more for MetalPetal?
My Code:
import UIKit
import MetalPetal
import CoreGraphics

class ViewController: UIViewController {

    @IBOutlet weak var image1: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()

        weak var image: UIImage?
        image = image1.image
        var ciImage = CIImage(image: image!)
        var cgImage1 = convertCIImageToCGImage(inputImage: ciImage!)
        let imageFromCGImage = MTIImage(cgImage: cgImage1!)
        let inputImage = imageFromCGImage

        let filter = MTISaturationFilter()
        filter.saturation = 1
        filter.inputImage = inputImage
        let outputImage = filter.outputImage

        let context = MTIContext()

        do {
            try context.render(outputImage, to: pixelBuffer)
            var image3: CIImage? = try context.makeCIImage(from: outputImage!)
            //context.makeCIImage(from: image)
            //context.makeCGImage(from: image)
        } catch {
            print(error)
        }
        // Do any additional setup after loading the view, typically from a nib.
    }

    override func didReceiveMemoryWarning() {
        super.didReceiveMemoryWarning()
        // Dispose of any resources that can be recreated.
    }

    func convertCIImageToCGImage(inputImage: CIImage) -> CGImage? {
        let context = CIContext(options: nil)
        if let cgImage = context.createCGImage(inputImage, from: inputImage.extent) {
            return cgImage
        }
        return nil
    }
}
@YuAo
Input Image
A UIImage is backed by either an underlying Quartz image (retrievable via cgImage) or an underlying Core Image (retrievable via ciImage).
MTIImage offers constructors for both types.
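For illustration, a small sketch of both paths (using the same single-argument initializers as the code in this question and answer; exact labels can vary between MetalPetal versions):

import UIKit
import MetalPetal

// Hypothetical helper: picks the MTIImage constructor matching the UIImage's backing store.
func makeMTIImage(from uiImage: UIImage) -> MTIImage? {
    if let cgImage = uiImage.cgImage {
        return MTIImage(cgImage: cgImage)      // Quartz-backed UIImage
    }
    if let ciImage = uiImage.ciImage {
        return MTIImage(ciImage: ciImage)      // Core Image-backed UIImage
    }
    return nil
}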
MTIContext
An MTIContext must be initialized with a Metal device, which can be retrieved by calling MTLCreateSystemDefaultDevice().
Rendering
Rendering to a pixel buffer is not needed; we can get the result by calling makeCGImage.
Test
I've taken your source code and adapted it to the points above.
I added a second UIImageView to show the result of the filtering, and changed the saturation to 0 to make the effect obvious.
If the GPU or shaders are involved, it makes sense to test on a real device rather than the simulator.
The result looks like this:
In the upper area you see the original JPEG; in the lower area the filter is applied.
Swift
The simplified Swift code that produces this result looks like this:
override func viewDidLoad() {
    super.viewDidLoad()

    guard let image = UIImage(named: "regensburg.jpg") else { return }
    guard let cgImage = image.cgImage else { return }
    imageView1.image = image

    let filter = MTISaturationFilter()
    filter.saturation = 0
    filter.inputImage = MTIImage(cgImage: cgImage)

    if let device = MTLCreateSystemDefaultDevice(),
       let outputImage = filter.outputImage {
        do {
            let context = try MTIContext(device: device)
            let filteredImage = try context.makeCGImage(from: outputImage)
            imageView2.image = UIImage(cgImage: filteredImage)
        } catch {
            print(error)
        }
    }
}
I'm using an extension to pixellate my images like the following:
func pixellated(scale: Int = 8) -> UIImage? {
    guard let ciImage = CIImage(image: self), let filter = CIFilter(name: "CIPixellate") else { return nil }
    filter.setValue(ciImage, forKey: kCIInputImageKey)
    filter.setValue(scale, forKey: kCIInputScaleKey)
    guard let output = filter.outputImage else { return nil }
    return UIImage(ciImage: output)
}
The problem is that the image represented by self here does not have the same size as the one I create using UIImage(ciImage: output).
For example, using that code:
print("image.size BEFORE : \(image.size)")
if let imagePixellated = image.pixellated(scale: 48) {
image = imagePixellated
print("image.size AFTER : \(image.size)")
}
will print:
image.size BEFORE : (400.0, 298.0)
image.size AFTER : (848.0, 644.0)
Not the same size and not the same ratio.
Any idea why?
EDIT:
I added some prints in the extension as follows:
func pixellated(scale: Int = 8) -> UIImage? {
    guard let ciImage = CIImage(image: self), let filter = CIFilter(name: "CIPixellate") else { return nil }
    print("UIIMAGE : \(self.size)")
    print("ciImage.extent.size : \(ciImage.extent.size)")
    filter.setValue(ciImage, forKey: kCIInputImageKey)
    filter.setValue(scale, forKey: kCIInputScaleKey)
    guard let output = filter.outputImage else { return nil }
    print("output : \(output.extent.size)")
    return UIImage(ciImage: output)
}
And here are the outputs:
UIIMAGE : (250.0, 166.5)
ciImage.extent.size : (500.0, 333.0)
output : (548.0, 381.0)
You have two problems:

1. self.size is measured in points. self's size in pixels is actually self.size multiplied by self.scale.
2. The CIPixellate filter changes the bounds of its image.
To fix problem one, you can simply set the scale property of the returned UIImage to be the same as self.scale:
return UIImage(ciImage: output, scale: self.scale, orientation: imageOrientation)
But you'll find this still isn't quite right. That's because of problem two. For problem two, the simplest solution is to crop the output CIImage:
// Must use self.scale, to disambiguate from the scale parameter
let floatScale = CGFloat(self.scale)
let pixelSize = CGSize(width: size.width * floatScale, height: size.height * floatScale)
let cropRect = CGRect(origin: CGPoint.zero, size: pixelSize)
guard let output = filter.outputImage?.cropping(to: cropRect) else { return nil }
This will give you an image of the size you want.
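Putting both fixes together, the whole extension might look like this (a sketch using the same cropping(to:) call as above; on newer SDKs it is spelled cropped(to:)):

extension UIImage {
    func pixellated(scale: Int = 8) -> UIImage? {
        guard let ciImage = CIImage(image: self),
              let filter = CIFilter(name: "CIPixellate") else { return nil }
        filter.setValue(ciImage, forKey: kCIInputImageKey)
        filter.setValue(scale, forKey: kCIInputScaleKey)

        // Crop back to the original size in pixels (points * scale).
        let floatScale = CGFloat(self.scale)
        let pixelSize = CGSize(width: size.width * floatScale, height: size.height * floatScale)
        let cropRect = CGRect(origin: .zero, size: pixelSize)
        guard let output = filter.outputImage?.cropping(to: cropRect) else { return nil }

        // Preserve the point size by passing the original scale and orientation through.
        return UIImage(ciImage: output, scale: self.scale, orientation: imageOrientation)
    }
}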
Now, your next question may be, "why is there a thin, dark border around my pixellated images?" Good question! But ask a new question for that.
I am building a scanner component for an iOS app. So far I have the result image cropped and in the correct perspective.
Now I need to turn the color image into a black-and-white "scanned" document.
I tried to use CIPhotoEffectNoir, but it is more grayscale than truly black and white. I want a full-contrast image with 100% black and 100% white.
How can I achieve that?
Thanks
You can use CIColorControls and set the contrast key kCIInputContrastKey to increase the black/white contrast as follows:
Xcode 9 • Swift 4
extension String {
    static let colorControls = "CIColorControls"
}

extension UIImage {
    var coreImage: CIImage? { return CIImage(image: self) }
}

extension CIImage {
    var uiImage: UIImage? { return UIImage(ciImage: self) }

    func applying(contrast value: NSNumber) -> CIImage? {
        return applyingFilter(.colorControls, parameters: [kCIInputContrastKey: value])
    }

    func renderedImage() -> UIImage? {
        guard let image = uiImage else { return nil }
        return UIGraphicsImageRenderer(size: image.size,
                                       format: image.imageRendererFormat).image { _ in
            image.draw(in: CGRect(origin: .zero, size: image.size))
        }
    }
}
let url = URL(string: "https://i.stack.imgur.com/Xs4RX.jpg")!
do {
    if let coreImage = UIImage(data: try Data(contentsOf: url))?.coreImage,
       let increasedContrast = coreImage.applying(contrast: 1.5) {
        imageView.image = increasedContrast.uiImage
        // if you need to convert your image to data (JPEG/PNG) you would need to render the CIImage using the renderedImage method on CIImage
    }
} catch {
    print(error)
}
To convert from color to grayscale you can set the saturation key kCIInputSaturationKey to zero:
extension CIImage {
    func applying(saturation value: NSNumber) -> CIImage? {
        return applyingFilter(.colorControls, parameters: [kCIInputSaturationKey: value])
    }

    var grayscale: CIImage? { return applying(saturation: 0) }
}
let url = URL(string: "https://i.stack.imgur.com/Xs4RX.jpg")!
do {
    if let coreImage = UIImage(data: try Data(contentsOf: url))?.coreImage,
       let grayscale = coreImage.grayscale {
        // use the grayscale image here
        imageView.image = grayscale.uiImage
    }
} catch {
    print(error)
}
Desaturating will convert your image to grayscale.
Increasing the contrast will push those grays out to the extremes, i.e. black and white.
You can use CIColorControls:
let ciImage = CIImage(image: image)!
let blackAndWhiteImage = ciImage.applyingFilter("CIColorControls", withInputParameters: ["inputSaturation": 0, "inputContrast": 5])
Original:
With inputContrast = 1 (default):
With inputContrast = 5:
In Swift 5.1 I have written an extension for macOS that also converts to and from NSImage. It uses saturation and input contrast to convert the image, and I have abstracted a func for black and white.
extension NSImage {
    func blackAndWhite() -> NSImage? {
        return applying(saturation: 0, inputContrast: 5, image: self)
    }

    func applying(saturation value: NSNumber, inputContrast inputContrastValue: NSNumber, image: NSImage) -> NSImage? {
        let ciImage = CIImage(data: image.tiffRepresentation!)!
        let blackAndWhiteImage = ciImage.applyingFilter("CIColorControls", parameters: ["inputSaturation": value, "inputContrast": inputContrastValue])
        let rep = NSCIImageRep(ciImage: blackAndWhiteImage)
        let nsImage = NSImage(size: rep.size)
        nsImage.addRepresentation(rep)
        return nsImage
    }
}
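Hypothetical usage, assuming an image named "scan" in the asset catalog and an NSImageView outlet:

if let source = NSImage(named: "scan"),
   let blackAndWhite = source.blackAndWhite() {
    imageView.image = blackAndWhite
}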
I'm trying to blur a scene when I pause a game, and I'm following an example, but I can't get it to work in Swift 2.0.
A lot of tutorials say to just take a screenshot and present it blurred, but I don't think that's a good idea; I'd like to blur the view without a screenshot.
Here is my attempt:
func createlayers() {
    let node = SKEffectNode()
    node.shouldEnableEffects = false
    let filter: CIFilter = CIFilter(name: "CIGaussianBlur", withInputParameters: ["inputRadius": NSNumber(double: 1.0)])!
    node.filter = filter
}

func blurWithCompletion() {
    let duration: CGFloat = 0.5
    scene!.shouldRasterize = true
    scene!.shouldEnableEffects = true
    scene!.runAction(SKAction.customActionWithDuration(0.5, actionBlock: { (node: SKNode, elapsedTime: CGFloat) in
        let radius = (elapsedTime/duration)*10.0
        (node as? SKEffectNode)!.filter!.setValue(radius, forKey: "inputRadius")
    }))
}

func pauseGame() {
    self.blurWithCompletion()
    self.view!.paused = true
}
I get "fatal error: unexpectedly found nil while unwrapping an Optional value"
The createlayers method is not required.
Use this updated blurWithCompletion method:
func blurWithCompletion() {
    let duration: CGFloat = 0.5
    let filter: CIFilter = CIFilter(name: "CIGaussianBlur", withInputParameters: ["inputRadius": NSNumber(double: 1.0)])!
    scene!.filter = filter
    scene!.shouldRasterize = true
    scene!.shouldEnableEffects = true
    scene!.runAction(SKAction.customActionWithDuration(0.5, actionBlock: { (node: SKNode, elapsedTime: CGFloat) in
        let radius = (elapsedTime/duration)*10.0
        (node as? SKEffectNode)!.filter!.setValue(radius, forKey: "inputRadius")
    }))
}
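If you also need to undo the effect when the game resumes, a minimal counterpart (a sketch in the same Swift 2 style) is to switch shouldEnableEffects back off:

func unpauseGame() {
    self.view!.paused = false
    scene!.shouldEnableEffects = false   // turns the blur filter off again
    scene!.shouldRasterize = false
}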