Swipeable CIFilter over video - iOS

I am currently trying to implement something similar to Instagram's story feature, where you take a picture or a video and, by swiping left or right, you change the current filter applied to the content (here is an example of what I managed to do in my app for images: https://imgur.com/a/pYKrPkA).
As you can see in the example, I got it working for images, but now I am trying to make it work for videos as well, and I am a bit lost as to where to start.
final class Filter: NSObject {
var isEnabled: Bool = true
var overlayImage: CIImage?
var ciFilter: CIFilter?
init(ciFilter: CIFilter?) {
self.ciFilter = ciFilter
super.init()
}
/// Empty filter for the original photo
static func emptyFilter() -> Filter {
return Filter(ciFilter: nil)
}
func imageByProcessingImage(_ image: CIImage, at time: CFTimeInterval) -> CIImage? {
guard isEnabled else { return image }
var image = image
if let overlayImage = overlayImage {
image = overlayImage.composited(over: image)
}
guard let ciFilter = ciFilter else {
return image
}
ciFilter.setValue(image, forKey: kCIInputImageKey)
return ciFilter.value(forKey: kCIOutputImageKey) as? CIImage
}
}
class StoriesImageView: UIView {
private var metalView: MTKView?
private var ciImage: CIImage?
private var preferredCIImageTransform: CGAffineTransform?
private let device = MTLCreateSystemDefaultDevice()
private var commandQueue: MTLCommandQueue?
private var context: CIContext?
override func layoutSubviews() {
super.layoutSubviews()
metalView?.frame = bounds
}
override func setNeedsDisplay() {
super.setNeedsDisplay()
metalView?.setNeedsDisplay()
}
func setImage(with image: UIImage) {
preferredCIImageTransform = preferredCIImageTransform(from: image)
if let cgImage = image.cgImage {
ciImage = CIImage(cgImage: cgImage)
loadContextIfNeeded()
}
setNeedsDisplay()
}
/// Return the image fitted to 1080x1920.
func renderedUIImage() -> UIImage? {
return renderedUIImage(in: CGRect(origin: .zero, size: CGSize(width: 1080, height: 1920)))
}
/// Returns the CIImage fitted to the main screen bounds.
func renderedCIImage() -> CIImage? {
return renderedCIImage(in: CGRect(rect: bounds, contentScale: UIScreen.main.scale))
}
func renderedUIImage(in rect: CGRect) -> UIImage? {
if let image = renderedCIImage(in: rect), let context = context {
if let imageRef = context.createCGImage(image, from: image.extent) {
return UIImage(cgImage: imageRef)
}
}
return nil
}
func renderedCIImage(in rect: CGRect) -> CIImage? {
if var image = ciImage, let transform = preferredCIImageTransform {
image = image.transformed(by: transform)
return scaleAndResize(image, for: rect)
}
return nil
}
private func cleanupContext() {
metalView?.removeFromSuperview()
metalView?.releaseDrawables()
metalView = nil
}
private func loadContextIfNeeded() {
setContext()
}
private func setContext() {
let mView = MTKView(frame: bounds, device: device)
mView.clearColor = MTLClearColor(red: 0, green: 0, blue: 0, alpha: 0)
mView.framebufferOnly = false
mView.enableSetNeedsDisplay = true
mView.contentScaleFactor = contentScaleFactor
mView.delegate = self
metalView = mView
commandQueue = device?.makeCommandQueue()
context = CIContext(mtlDevice: device!)
insertSubview(metalView!, at: 0)
}
private func scaleAndResize(_ image: CIImage, for rect: CGRect) -> CIImage {
let imageSize = image.extent.size
let horizontalScale = rect.size.width / imageSize.width
let verticalScale = rect.size.height / imageSize.height
let scale = min(horizontalScale, verticalScale)
return image.transformed(by: CGAffineTransform(scaleX: scale, y: scale))
}
private func preferredCIImageTransform(from image: UIImage) -> CGAffineTransform {
if image.imageOrientation == .up {
return .identity
}
var transform: CGAffineTransform = .identity
switch image.imageOrientation {
case .down, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: image.size.height)
transform = transform.rotated(by: .pi)
case .left, .leftMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.rotated(by: .pi / 2)
case .right, .rightMirrored:
transform = transform.translatedBy(x: 0, y: image.size.height)
transform = transform.rotated(by: .pi / -2)
case .up, .upMirrored: break
@unknown default: fatalError("Unknown image orientation")
}
switch image.imageOrientation {
case .upMirrored, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .leftMirrored, .rightMirrored:
transform = transform.translatedBy(x: image.size.height, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .up, .down, .left, .right: break
@unknown default: fatalError("Unknown image orientation")
}
return transform
}
}
extension StoriesImageView: MTKViewDelegate {
func draw(in view: MTKView) {
autoreleasepool {
let rect = CGRect(rect: view.bounds, contentScale: UIScreen.main.scale)
if let image = renderedCIImage(in: rect) {
let commandBuffer = commandQueue?.makeCommandBuffer()
guard let drawable = view.currentDrawable else {
return
}
let heightDifference = (view.drawableSize.height - image.extent.size.height) / 2
let destination = CIRenderDestination(width: Int(view.drawableSize.width),
height: Int(view.drawableSize.height - heightDifference),
pixelFormat: view.colorPixelFormat,
commandBuffer: commandBuffer,
mtlTextureProvider: { () -> MTLTexture in
return drawable.texture
})
_ = try? context?.startTask(toRender: image, to: destination)
commandBuffer?.present(drawable)
commandBuffer?.commit()
}
}
}
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {}
}
final class StoriesSwipeableImageView: StoriesImageView {
private let scrollView: UIScrollView = UIScrollView()
private let preprocessingFilter: Filter? = nil
var isRefreshingAutomaticallyWhenScrolling: Bool = true
var filters: [Filter]? {
didSet {
updateScrollViewContentSize()
updateCurrentSelected(notify: true)
}
}
var selectedFilter: Filter? {
didSet {
if selectedFilter != oldValue {
setNeedsLayout()
}
}
}
override init(frame: CGRect) {
super.init(frame: frame)
setup()
}
required init?(coder: NSCoder) {
super.init(coder: coder)
setup()
}
override func layoutSubviews() {
super.layoutSubviews()
scrollView.frame = bounds
updateScrollViewContentSize()
}
private func setup() {
scrollView.delegate = self
scrollView.isPagingEnabled = true
scrollView.showsHorizontalScrollIndicator = false
scrollView.showsVerticalScrollIndicator = false
scrollView.bounces = true
scrollView.alwaysBounceVertical = true
scrollView.alwaysBounceHorizontal = true
scrollView.backgroundColor = .clear
addSubview(scrollView)
}
private func updateScrollViewContentSize() {
let filterCount = filters?.count ?? 0
scrollView.contentSize = CGSize(width: filterCount * Int(frame.size.width) * 3,
height: Int(frame.size.height))
if let selectedFilter = selectedFilter {
scroll(to: selectedFilter, animated: false)
}
}
private func scroll(to filter: Filter, animated: Bool) {
if let index = filters?.firstIndex(where: { $0 === filter }) {
let contentOffset = CGPoint(x: scrollView.contentSize.width / 3 + scrollView.frame.size.width * CGFloat(index), y: 0)
scrollView.setContentOffset(contentOffset, animated: animated)
updateCurrentSelected(notify: false)
} else {
fatalError("Filter is not available in filters collection")
}
}
private func updateCurrentSelected(notify: Bool) {
guard frame.size.width != 0 else { return }
let filterCount = filters?.count ?? 0
let selectedIndex = Int(scrollView.contentOffset.x + scrollView.frame.size.width / 2) / Int(scrollView.frame.size.width) % filterCount
var newFilterGroup: Filter?
if selectedIndex >= 0 && selectedIndex < filterCount {
newFilterGroup = filters?[selectedIndex]
} else {
fatalError("Invalid contentOffset")
}
if selectedFilter != newFilterGroup {
selectedFilter = newFilterGroup
if notify {
// Notify delegate?
}
}
}
override func renderedCIImage(in rect: CGRect) -> CIImage? {
guard var image = super.renderedCIImage(in: rect) else {
print("Failed to render image")
return nil
}
let timeinterval: CFTimeInterval = 0
if let preprocessingFilter = self.preprocessingFilter {
image = preprocessingFilter.imageByProcessingImage(image, at: timeinterval)!
}
let extent = image.extent
let contentSize = scrollView.bounds.size
if contentSize.width == 0 {
return image
}
let filtersCount = filters?.count ?? 0
if filtersCount == 0 {
return image
}
let ratio = scrollView.contentOffset.x / contentSize.width
var index = Int(ratio)
let upIndex = Int(ceil(ratio))
let remaningRatio = ratio - CGFloat(index)
var xImage = extent.size.width * -remaningRatio
var outputImage: CIImage? = CIImage(color: CIColor(red: 0, green: 0, blue: 0))
while index <= upIndex {
let currentIndex = index % filtersCount
let filter = filters?[currentIndex]
var filteredImage = filter?.imageByProcessingImage(image, at: timeinterval)
filteredImage = filteredImage?.cropped(to:
CGRect(x: extent.origin.x + xImage,
y: extent.origin.y,
width: extent.size.width,
height: extent.size.height)
)
guard let output = outputImage else { return nil }
outputImage = filteredImage?.composited(over: output)
xImage += extent.size.width
index += 1
}
outputImage = outputImage?.cropped(to: extent)
return outputImage
}
}
extension StoriesSwipeableImageView: UIScrollViewDelegate {
func scrollViewDidScroll(_ scrollView: UIScrollView) {
let width = scrollView.frame.size.width
let contentOffsetX = scrollView.contentOffset.x
let contentSizeWidth = scrollView.contentSize.width
let normalWidth = CGFloat(filters?.count ?? 0) * width
if width > 0 && contentSizeWidth > 0 {
if contentOffsetX <= 0 {
scrollView.contentOffset = CGPoint(x: contentOffsetX + normalWidth, y: scrollView.contentOffset.y)
} else if contentOffsetX + width >= contentSizeWidth {
scrollView.contentOffset = CGPoint(x: contentOffsetX - normalWidth, y: scrollView.contentOffset.y)
}
}
if isRefreshingAutomaticallyWhenScrolling {
setNeedsDisplay()
}
}
func scrollViewDidScrollToTop(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndScrollingAnimation(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDecelerating(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDragging(_ scrollView: UIScrollView, willDecelerate decelerate: Bool) {
if !decelerate {
updateCurrentSelected(notify: true)
}
}
}
These three classes do the magic for the image part. Does anyone have a suggestion or a starting point for the video case? I tried looking over https://github.com/rFlex/SCRecorder, but I get a bit lost in the Obj-C.

In iOS 9 / OS X 10.11 / tvOS, there's a convenience method for applying CIFilters to video. It works on an AVVideoComposition, so you can use it both for playback and for file-to-file import/export. See AVVideoComposition.init(asset:applyingCIFiltersWithHandler:) for the method docs.
There's an example in Apple's Core Image Programming Guide, too:
let filter = CIFilter(name: "CIGaussianBlur")!
let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
// Clamp to avoid blurring transparent pixels at the image edges
let source = request.sourceImage.clampedToExtent()
filter.setValue(source, forKey: kCIInputImageKey)
// Vary filter parameters based on video timing
let seconds = CMTimeGetSeconds(request.compositionTime)
filter.setValue(seconds * 10.0, forKey: kCIInputRadiusKey)
// Crop the blurred output to the bounds of the original image
let output = filter.outputImage!.cropped(to: request.sourceImage.extent)
// Provide the filter output to the composition
request.finish(with: output, context: nil)
})
That part sets up the composition. After you've done that, you can either play it by assigning it to an AVPlayer or write it to a file with AVAssetExportSession. If you want to write it to a file, here's an example of that:
let export = AVAssetExportSession(asset: asset, presetName: AVAssetExportPreset1920x1080)!
export.outputFileType = .mov
export.outputURL = outURL
export.videoComposition = composition
export.exportAsynchronously { /*...*/ }
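If you want the live, swipeable preview instead, the same composition can be played back directly: assign it to an AVPlayerItem and the handler runs for every frame. A minimal sketch, assuming asset is your recorded AVAsset and storyFilter is one of the question's Filter instances (both names are placeholders, not part of the original code):
import AVFoundation
import CoreImage

let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
    // Run each video frame through the question's Filter type
    let seconds = CMTimeGetSeconds(request.compositionTime)
    let filtered = storyFilter.imageByProcessingImage(request.sourceImage, at: seconds)
        ?? request.sourceImage
    // Crop back to the frame's extent before handing the result to the composition
    request.finish(with: filtered.cropped(to: request.sourceImage.extent), context: nil)
})

let playerItem = AVPlayerItem(asset: asset)
playerItem.videoComposition = composition   // the filter is applied during playback
let player = AVPlayer(playerItem: playerItem)
// Attach `player` to an AVPlayerLayer (or AVPlayerViewController) and call player.play()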
There's a bit more about this in the WWDC15 session on Core Image, starting around 20 minutes in.

Related

Crop CGRect from UIImage taken from camera

I have a view controller which takes a photo with a circular view in the center.
After taking a photo, I need to crop the CGRect with which I created the circular view. I need to crop the rectangle, not the circle.
I tried https://stackoverflow.com/a/57258806/12411655 and many other solutions, but none of them crop the CGRect that I need.
How do I convert the CGRect in the view's coordinates to UIImage's coordinates?
class CircularCameraViewController: UIViewController {
var captureSession: AVCaptureSession!
var capturePhotoOutput: AVCapturePhotoOutput!
var cropRect: CGRect!
public lazy var shutterButton: ShutterButton = {
let button = ShutterButton()
button.translatesAutoresizingMaskIntoConstraints = false
button.addTarget(self, action: #selector(capturePhoto), for: .touchUpInside)
return button
}()
private lazy var cancelButton: UIButton = {
let button = UIButton()
button.setTitle("Cancel", for: .normal)
button.translatesAutoresizingMaskIntoConstraints = false
button.addTarget(self, action: #selector(dismissCamera), for: .touchUpInside)
return button
}()
private lazy var flashButton: UIButton = {
let image = UIImage(named: "flash", in: Bundle(for: ScannerViewController.self), compatibleWith: nil)?.withRenderingMode(.alwaysTemplate)
let button = UIButton()
button.setImage(image, for: .normal)
button.translatesAutoresizingMaskIntoConstraints = false
button.addTarget(self, action: #selector(toggleFlash), for: .touchUpInside)
button.tintColor = .white
return button
}()
override func viewDidLoad() {
super.viewDidLoad()
setupCamera()
setupPhotoOutput()
setupViews()
setupConstraints()
captureSession.startRunning()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
}
override func viewWillDisappear(_ animated: Bool) {
captureSession.stopRunning()
}
private func setupCamera() {
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
var input: AVCaptureDeviceInput
do {
input = try AVCaptureDeviceInput(device: captureDevice!)
} catch {
fatalError("Error configuring capture device: \(error)");
}
captureSession = AVCaptureSession()
captureSession.addInput(input)
// Setup the preview view.
let videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer.frame = view.layer.bounds
view.layer.addSublayer(videoPreviewLayer)
let camPreviewBounds = view.bounds
cropRect = CGRect(
x: camPreviewBounds.minX + (camPreviewBounds.width - 150) * 0.5,
y: camPreviewBounds.minY + (camPreviewBounds.height - 150) * 0.5,
width: 150,
height: 150
)
let path = UIBezierPath(roundedRect: camPreviewBounds, cornerRadius: 0)
path.append(UIBezierPath(ovalIn: cropRect))
let layer = CAShapeLayer()
layer.path = path.cgPath
layer.fillRule = CAShapeLayerFillRule.evenOdd;
layer.fillColor = UIColor.black.cgColor
layer.opacity = 0.5;
view.layer.addSublayer(layer)
}
private func setupViews() {
view.addSubview(shutterButton)
view.addSubview(flashButton)
view.addSubview(cancelButton)
}
private func setupConstraints() {
var cancelButtonConstraints = [NSLayoutConstraint]()
var shutterButtonConstraints = [NSLayoutConstraint]()
var flashConstraints = [NSLayoutConstraint]()
shutterButtonConstraints = [
shutterButton.centerXAnchor.constraint(equalTo: view.centerXAnchor),
shutterButton.widthAnchor.constraint(equalToConstant: 65.0),
shutterButton.heightAnchor.constraint(equalToConstant: 65.0)
]
flashConstraints = [
flashButton.leftAnchor.constraint(equalTo: view.leftAnchor, constant: 24.0),
flashButton.topAnchor.constraint(equalTo: view.topAnchor, constant: 30)
]
if #available(iOS 11.0, *) {
cancelButtonConstraints = [
cancelButton.leftAnchor.constraint(equalTo: view.safeAreaLayoutGuide.leftAnchor, constant: 24.0),
view.safeAreaLayoutGuide.bottomAnchor.constraint(equalTo: cancelButton.bottomAnchor, constant: (65.0 / 2) - 10.0)
]
let shutterButtonBottomConstraint = view.safeAreaLayoutGuide.bottomAnchor.constraint(equalTo: shutterButton.bottomAnchor, constant: 8.0)
shutterButtonConstraints.append(shutterButtonBottomConstraint)
} else {
cancelButtonConstraints = [
cancelButton.leftAnchor.constraint(equalTo: view.leftAnchor, constant: 24.0),
view.bottomAnchor.constraint(equalTo: cancelButton.bottomAnchor, constant: (65.0 / 2) - 10.0)
]
let shutterButtonBottomConstraint = view.bottomAnchor.constraint(equalTo: shutterButton.bottomAnchor, constant: 8.0)
shutterButtonConstraints.append(shutterButtonBottomConstraint)
}
NSLayoutConstraint.activate(cancelButtonConstraints + shutterButtonConstraints + flashConstraints)
}
private func setupPhotoOutput() {
capturePhotoOutput = AVCapturePhotoOutput()
capturePhotoOutput.isHighResolutionCaptureEnabled = true
captureSession.addOutput(capturePhotoOutput!)
}
@objc func dismissCamera() {
self.dismiss(animated: true, completion: nil)
}
@objc private func toggleFlash() {
if let avDevice = AVCaptureDevice.default(for: AVMediaType.video) {
if (avDevice.hasTorch) {
do {
try avDevice.lockForConfiguration()
} catch {
print("aaaa")
}
if avDevice.isTorchActive {
avDevice.torchMode = AVCaptureDevice.TorchMode.off
} else {
avDevice.torchMode = AVCaptureDevice.TorchMode.on
}
}
// unlock your device
avDevice.unlockForConfiguration()
}
}
}
extension CircularCameraViewController : AVCapturePhotoCaptureDelegate {
@objc private func capturePhoto() {
let photoSettings = AVCapturePhotoSettings()
photoSettings.isAutoStillImageStabilizationEnabled = true
photoSettings.isHighResolutionPhotoEnabled = true
photoSettings.flashMode = .auto
// Set ourselves as the delegate for `capturePhoto`.
capturePhotoOutput?.capturePhoto(with: photoSettings, delegate: self)
}
@available(iOS 11.0, *)
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishProcessingPhoto photo: AVCapturePhoto,
error: Error?) {
guard error == nil else {
fatalError("Failed to capture photo: \(String(describing: error))")
}
guard let imageData = photo.fileDataRepresentation() else {
fatalError("Failed to convert pixel buffer")
}
guard let image = UIImage(data: imageData) else {
fatalError("Failed to convert image data to UIImage")
}
guard let croppedImg = image.cropToRect(rect: cropRect) else {
fatalError("Failed to crop image")
}
UIImageWriteToSavedPhotosAlbum(croppedImg, nil, nil, nil);
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
guard error == nil, let photoSample = photoSampleBuffer else {
fatalError("Failed to capture photo: \(String(describing: error))")
}
guard let imgData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: photoSample, previewPhotoSampleBuffer: previewPhotoSampleBuffer) else {
fatalError("Failed to get image data: \(String(describing: error))")
}
guard let image = UIImage(data: imgData) else {
fatalError("Failed to convert image data to UIImage: \(String(describing: error))")
}
}
}
UIImage extension:
func cropToRect(rect: CGRect!) -> UIImage? {
let scaledRect = CGRect(x: rect.origin.x * self.scale, y: rect.origin.y * self.scale, width: rect.size.width * self.scale, height: rect.size.height * self.scale);
guard let imageRef: CGImage = self.cgImage?.cropping(to:scaledRect)
else {
return nil
}
let croppedImage: UIImage = UIImage(cgImage: imageRef, scale: self.scale, orientation: self.imageOrientation)
return croppedImage
}
When cropping an image, you need to scale the crop rect to account for the difference between the view's size and the actual image's size.
Also, when capturing from the camera, you need to take .imageOrientation into account.
Try changing your UIImage extension to this:
extension UIImage {
func cropToRect(rect: CGRect, viewSize: CGSize) -> UIImage? {
var cr = rect
switch self.imageOrientation {
case .right, .rightMirrored, .left, .leftMirrored:
// rotate the crop rect if needed
cr.origin.x = rect.origin.y
cr.origin.y = rect.origin.x
cr.size.width = rect.size.height
cr.size.height = rect.size.width
default:
break
}
let imageViewScale = max(self.size.width / viewSize.width,
self.size.height / viewSize.height)
// scale the crop rect
let cropZone = CGRect(x:cr.origin.x * imageViewScale,
y:cr.origin.y * imageViewScale,
width:cr.size.width * imageViewScale,
height:cr.size.height * imageViewScale)
// Perform cropping in Core Graphics
guard let cutImageRef: CGImage = self.cgImage?.cropping(to:cropZone)
else {
return nil
}
// Return image to UIImage
let croppedImage: UIImage = UIImage(cgImage: cutImageRef, scale: self.scale, orientation: self.imageOrientation)
return croppedImage
}
}
and change your call in photoOutput() to:
guard let croppedImg = image.cropToRect(rect: cropRect, viewSize: view.frame.size) else {
fatalError("Failed to crop image")
}
Since your code is using the full view, that should work fine. If you change it to use a different sized view as your videoPreviewLayer, then use that view's size instead of view.frame.size.
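For example, if the preview were hosted in a smaller dedicated view, you would pass that view's size instead; previewView here is hypothetical and not part of the original code:
// previewView: the view hosting your AVCaptureVideoPreviewLayer (hypothetical)
guard let croppedImg = image.cropToRect(rect: cropRect, viewSize: previewView.frame.size) else {
    fatalError("Failed to crop image")
}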

How to find the new center of an UIImageView after scaling?

I am creating an app where the user can add an image to a "canvas" and resize and move the image around using pinch and pan gesture recognizers. The image view is a custom one I created using this article:
Bezier Paths and Gesture Recognizers
This works really nicely. The image resizes and moves very smoothly. I can capture the center and the size of the image after the pan and pinch gestures. The problem is that after I save the size and coordinates of the image, they aren't respected when I load it back into the "canvas". It is as if the center is offset by "X" number of pixels.
Here is my code for making the resizable and movable image view:
import UIKit
import Foundation
class MovableImage: UIImageView {
let size: CGFloat = 150.0
var imageMovedHandler:((_ x: CGFloat, _ y: CGFloat) -> ())?
var imageDeletedHandler:((_ delete: Bool) -> ())?
var longPressHandler:((_ selected: Bool) -> ())?
var imageSizeChangedHandler:((_ newImageView: MovableImage) -> ())?
let deleteButton = UIButton(type: .close)
init(origin: CGPoint) {
super.init(frame: CGRect(x: origin.x, y: origin.y, width: size, height: size)) //
debugCenterDot()
initGestureRecognizers()
}
//added a dot to try and understand what is happening with the "center" of the imageview, but it didn't show in the center of the imageview
func debugCenterDot() {
let dot = UIBezierPath(ovalIn: CGRect(x: self.center.x, y: self.center.y, width: 15, height: 15))
let dotLayer = CAShapeLayer()
dotLayer.path = dot.cgPath
dotLayer.strokeColor = UIColor.yellow.cgColor
self.layer.addSublayer(dotLayer)
self.setNeedsDisplay()
}
internal func addButton() {
deleteButton.tintColor = UIColor.red
deleteButton.backgroundColor = UIColor.white
deleteButton.addTarget(self, action: #selector(deleteSelf(sender:)), for: .touchUpInside)
deleteButton.frame = .zero //CGRect(x: 8, y: 8, width: 15, height: 15)
deleteButton.translatesAutoresizingMaskIntoConstraints = false
self.addSubview(deleteButton)
NSLayoutConstraint.activate([
deleteButton.widthAnchor.constraint(equalToConstant: 15),
deleteButton.widthAnchor.constraint(equalTo: deleteButton.heightAnchor),
deleteButton.leadingAnchor.constraint(equalTo: self.safeAreaLayoutGuide.leadingAnchor, constant: 8),
deleteButton.topAnchor.constraint(equalTo: self.safeAreaLayoutGuide.topAnchor, constant: 8),
])
}
#objc func deleteSelf(sender: UIButton) {
imageDeletedHandler?(true)
self.removeFromSuperview()
}
func initGestureRecognizers() {
let panGR = UIPanGestureRecognizer(target: self, action: #selector(didPan(panGR:)))
addGestureRecognizer(panGR)
let pinchGR = UIPinchGestureRecognizer(target: self, action: #selector(didPinch(pinchGR:)))
addGestureRecognizer(pinchGR)
let longPressGR = UILongPressGestureRecognizer(target: self, action: #selector(didLongPress(longPressGR:)))
longPressGR.minimumPressDuration = 1
addGestureRecognizer(longPressGR)
}
#objc func didLongPress(longPressGR: UILongPressGestureRecognizer) {
self.superview!.bringSubviewToFront(self)
self.layer.borderWidth = 2
self.layer.borderColor = UIColor.red.cgColor
addButton()
longPressHandler?(true)
}
#objc func didPan(panGR: UIPanGestureRecognizer) {
self.superview!.bringSubviewToFront(self)
if self.layer.borderWidth == 2 {
let translation = panGR.translation(in: self)
print("BEFORE PAN: \(self.center)")
self.center.x += translation.x
self.center.y += translation.y
print("AFTER PAN: \(self.center)")
panGR.setTranslation(CGPoint.zero, in: self)
if panGR.state == .ended {
imageMovedHandler?(self.center.x, self.center.y)
self.layer.borderWidth = 0
self.layer.borderColor = nil
self.deleteButton.removeFromSuperview()
}
}
}
#objc func didPinch(pinchGR: UIPinchGestureRecognizer) {
self.superview?.bringSubviewToFront(self)
if self.layer.borderWidth == 2 {
let scale = pinchGR.scale
self.transform = CGAffineTransform(scaleX: scale, y: scale)
if pinchGR.state == .ended {
imageSizeChangedHandler?(self)
}
}
}
func scaleOf(transform: CGAffineTransform) -> CGPoint {
let xscale = sqrt(transform.a * transform.a + transform.c * transform.c)
let yscale = sqrt(transform.b * transform.b + transform.d * transform.d)
return CGPoint(x: xscale, y: yscale)
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
}
And here is how I am loading it back into the "canvas" and saving it back to CoreData (save functions in closures near the bottom of the load function):
func loadImages(new: Bool, enableInteraction: Bool) {
let pagePhotos = LoadPagePhotos()
var pageImages: [NSManagedObject] = []
var x: CGFloat?
var y: CGFloat?
var width: CGFloat?
var height: CGFloat?
var image: UIImage?
if isYear {
pageImages = pagePhotos.resetYearPhotosPositions(journalName: journalName, yearPosition: yearPosition)
} else {
pageImages = pagePhotos.resetPhotosPositions(journalName: journalName, monthName: monthName, weekPosition: positionWeek)
}
scrollView.mainView.newImages.forEach { i in
i.removeFromSuperview()
}
scrollView.mainView.newImages.removeAll()
if pageImages.count > 0 {
pageImages.forEach{ (photo) in
x = CGFloat((photo.value(forKey: "pageImageX") as? Float)!)
y = CGFloat((photo.value(forKey: "pageImageY") as? Float)!)
height = CGFloat((photo.value(forKey: "pageImageSizeHeight") as? Float)!)
width = CGFloat((photo.value(forKey: "pageImageSizeWidth") as? Float)!)
image = photo.value(forKey: "image") as? UIImage
let thisImage: MovableImage = MovableImage(origin: CGPoint.zero)
thisImage.contentMode = .scaleAspectFit
thisImage.center = CGPoint(x: x!, y: y!)
thisImage.image = image!
thisImage.frame.size.height = height!
thisImage.frame.size.width = width!
scrollView.mainView.addSubview(thisImage)
scrollView.mainView.newImages.append(thisImage)
if enableInteraction {
thisImage.isUserInteractionEnabled = true
} else {
thisImage.isUserInteractionEnabled = false
}
thisImage.layer.zPosition = 1
thisImage.layer.borderWidth = 0
if new {
imageOptionsMenuView.isHidden = false
} else {
imageOptionsMenuView.isHidden = true
}
movableImage = thisImage
//for clarity sake I moved the save functions to separate block here in stack overflow so it is easier to read
}
}
The closures "imageMovedHandler" and "imageSizeChangedHandler" are used to detect when moving and resizing is done and I save the image to CoreData. Here they are:
if movableImage != nil {
movableImage?.imageMovedHandler = { [unowned self] (x, y) in
if self.isYear {
if self.scrollView.mainView.newImages.count > 0 {
for (idx, i) in self.scrollView.mainView.newImages.enumerated() {
if i.layer.borderWidth == 2 {
let id = idx + 1
_ = LoadPagePhotos().updateYearPhoto(journalName: self.journalName, yearPosition: self.yearPosition, pageImageId: id, imageHeight: Float(i.frame.size.height), imageWidth: Float(i.frame.size.width), imageX: Float(i.center.x), imageY: Float(i.center.y), pagePhoto: i.image!, photoPath: nil)
}
}
}
} else {
if self.scrollView.mainView.newImages.count > 0 {
for (idx, i) in self.scrollView.mainView.newImages.enumerated() {
if i.layer.borderWidth == 2 {
let id = idx + 1
_ = LoadPagePhotos().updatePhoto(journalName: self.journalName, monthName: self.monthName, weekPosition: self.positionWeek, pageImageId: id, imageHeight: Float(i.frame.size.height), imageWidth: Float(i.frame.size.width), imageX: Float(i.center.x), imageY: Float(i.center.y), pagePhoto: i.image!, photoPath: nil)
}
}
}
}
self.loadImages(new: false, enableInteraction: true)
}
movableImage?.imageSizeChangedHandler = { [unowned self] (newImageView) in
var id = 0
var img = 0
if self.isYear {
if self.scrollView.mainView.newImages.count > 0 {
for (idx, i) in self.scrollView.mainView.newImages.enumerated() {
if i.layer.borderWidth == 2 {
id = idx + 1
img = idx
}
}
self.scrollView.mainView.newImages[img] = newImageView
_ = LoadPagePhotos().updateYearPhoto(journalName: self.journalName, yearPosition: self.yearPosition, pageImageId: id, imageHeight: Float(newImageView.frame.size.height), imageWidth: Float(newImageView.frame.size.width), imageX: Float(newImageView.center.x), imageY: Float(newImageView.center.y), pagePhoto: newImageView.image!, photoPath: nil)
}
} else {
if self.scrollView.mainView.newImages.count > 0 {
for (idx, i) in self.scrollView.mainView.newImages.enumerated() {
if i.layer.borderWidth == 2 {
id = idx + 1
img = idx
}
}
self.scrollView.mainView.newImages[img] = newImageView
_ = LoadPagePhotos().updatePhoto(journalName: self.journalName, monthName: self.monthName, weekPosition: self.positionWeek, pageImageId: id, imageHeight: Float(newImageView.frame.size.height), imageWidth: Float(newImageView.frame.size.width), imageX: Float(newImageView.center.x), imageY: Float(newImageView.center.y), pagePhoto: newImageView.image!, photoPath: nil)
}
}
self.loadImages(new: false, enableInteraction: true)
}
}
}
Here is an image of what is happening when I move the image around the canvas:
This first image shows where I stopped moving the image to:
This image shows where the image was loaded after saving the size and coordinates:
The desired outcome is:
When pinching, panning, and saving the image and then loading, the image retains its current coordinates and size in the canvas.
EDIT:
It should also be noted that the offset of the image when moving it only happens after "scaling

CGDataProvider returning null in Swift 4

I'm trying to fix a problem in SwiftColorPicker (by Matthias Schlemm) that popped up with Swift 4.0. I'm going to include the whole PickerImage class, so you can see the full context.
import UIKit
import ImageIO
open class PickerImage {
var provider:CGDataProvider!
var imageSource:CGImageSource?
var image:UIImage?
var mutableData:CFMutableData
var width:Int
var height:Int
fileprivate func createImageFromData(_ width:Int, height:Int) {
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
let provider = CGDataProvider(data: mutableData)
imageSource = CGImageSourceCreateWithDataProvider(provider!, nil)
let cgimg = CGImage(width: Int(width), height: Int(height), bitsPerComponent: Int(8), bitsPerPixel: Int(32), bytesPerRow: Int(width) * Int(4),
space: colorSpace, bitmapInfo: bitmapInfo, provider: provider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.defaultIntent)
image = UIImage(cgImage: cgimg!)
}
func changeSize(_ width:Int, height:Int) {
self.width = width
self.height = height
let size:Int = width * height * 4
CFDataSetLength(mutableData, size)
createImageFromData(width, height: height)
}
init(width:Int, height:Int) {
self.width = width
self.height = height
let size:Int = width * height * 4
mutableData = CFDataCreateMutable(kCFAllocatorDefault, size)
createImageFromData(width, height: height)
}
open func writeColorData(_ h:CGFloat, a:CGFloat) {
let d = CFDataGetMutableBytePtr(self.mutableData)
if width == 0 || height == 0 {
return
}
var i:Int = 0
let h360:CGFloat = ((h == 1 ? 0 : h) * 360) / 60.0
let sector:Int = Int(floor(h360))
let f:CGFloat = h360 - CGFloat(sector)
let f1:CGFloat = 1.0 - f
var p:CGFloat = 0.0
var q:CGFloat = 0.0
var t:CGFloat = 0.0
let sd:CGFloat = 1.0 / CGFloat(width)
let vd:CGFloat = 1 / CGFloat(height)
var double_s:CGFloat = 0
var pf:CGFloat = 0
let v_range = 0..<height
let s_range = 0..<width
for v in v_range {
pf = 255 * CGFloat(v) * vd
for s in s_range {
i = (v * width + s) * 4
d?[i] = UInt8(255)
if s == 0 {
q = pf
d?[i+1] = UInt8(q)
d?[i+2] = UInt8(q)
d?[i+3] = UInt8(q)
continue
}
double_s = CGFloat(s) * sd
p = pf * (1.0 - double_s)
q = pf * (1.0 - double_s * f)
t = pf * ( 1.0 - double_s * f1)
switch(sector) {
case 0:
d?[i+1] = UInt8(pf)
d?[i+2] = UInt8(t)
d?[i+3] = UInt8(p)
case 1:
d?[i+1] = UInt8(q)
d?[i+2] = UInt8(pf)
d?[i+3] = UInt8(p)
case 2:
d?[i+1] = UInt8(p)
d?[i+2] = UInt8(pf)
d?[i+3] = UInt8(t)
case 3:
d?[i+1] = UInt8(p)
d?[i+2] = UInt8(q)
d?[i+3] = UInt8(pf)
case 4:
d?[i+1] = UInt8(t)
d?[i+2] = UInt8(p)
d?[i+3] = UInt8(pf)
default:
d?[i+1] = UInt8(pf)
d?[i+2] = UInt8(p)
d?[i+3] = UInt8(q)
}
}
}
}
}
In createImageFromData, the line
let provider = CGDataProvider(data: mutableData)
is returning a nil value, which, of course, causes the following line to crash. This was working fine in Swift 3.
Here are the values in the debugger:
Dealing with memory allocation is a bit beyond my current skillset, so I'm struggling with what's actually going on here. Has anything related to this changed in Swift 4.0 that would cause the CGDataProvider call to return a nil value?
Edit:
Here is the ColorPicker class that initializes the PickerImage objects.
import UIKit
import ImageIO
open class ColorPicker: UIView {
fileprivate var pickerImage1:PickerImage?
fileprivate var pickerImage2:PickerImage?
fileprivate var image:UIImage?
fileprivate var data1Shown = false
fileprivate lazy var opQueue:OperationQueue = {return OperationQueue()}()
fileprivate var lock:NSLock = NSLock()
fileprivate var rerender = false
open var onColorChange:((_ color:UIColor, _ finished:Bool)->Void)? = nil
open var a:CGFloat = 1 {
didSet {
if a < 0 || a > 1 {
a = max(0, min(1, a))
}
}
}
open var h:CGFloat = 0 { // // [0,1]
didSet {
if h > 1 || h < 0 {
h = max(0, min(1, h))
}
renderBitmap()
setNeedsDisplay()
}
}
fileprivate var currentPoint:CGPoint = CGPoint.zero
open func saturationFromCurrentPoint() -> CGFloat {
return (1 / bounds.width) * currentPoint.x
}
open func brigthnessFromCurrentPoint() -> CGFloat {
return (1 / bounds.height) * currentPoint.y
}
open var color:UIColor {
set(value) {
var hue:CGFloat = 1
var saturation:CGFloat = 1
var brightness:CGFloat = 1
var alpha:CGFloat = 1
value.getHue(&hue, saturation: &saturation, brightness: &brightness, alpha: &alpha)
a = alpha
if hue != h || pickerImage1 === nil {
self.h = hue
}
currentPoint = CGPoint(x: saturation * bounds.width, y: brightness * bounds.height)
self.setNeedsDisplay()
}
get {
return UIColor(hue: h, saturation: saturationFromCurrentPoint(), brightness: brigthnessFromCurrentPoint(), alpha: a)
}
}
public override init(frame: CGRect) {
super.init(frame: frame)
commonInit()
}
public required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
commonInit()
}
func commonInit() {
isUserInteractionEnabled = true
clipsToBounds = false
self.addObserver(self, forKeyPath: "bounds", options: [NSKeyValueObservingOptions.new, NSKeyValueObservingOptions.initial], context: nil)
}
deinit {
self.removeObserver(self, forKeyPath: "bounds")
}
open override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey : Any]?, context: UnsafeMutableRawPointer?) {
if keyPath == "bounds" {
if let pImage1 = pickerImage1 {
pImage1.changeSize(Int(self.bounds.width), height: Int(self.bounds.height))
}
if let pImage2 = pickerImage2 {
pImage2.changeSize(Int(self.bounds.width), height: Int(self.bounds.height))
}
renderBitmap()
self.setNeedsDisplay()
} else {
super.observeValue(forKeyPath: keyPath, of: object, change: change, context: context)
}
}
open override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
let touch = touches.first! as UITouch
handleTouche(touch, ended: false)
}
open override func touchesMoved(_ touches: Set<UITouch>, with event: UIEvent?) {
let touch = touches.first! as UITouch
handleTouche(touch, ended: false)
}
open override func touchesEnded(_ touches: Set<UITouch>, with event: UIEvent?) {
let touch = touches.first! as UITouch
handleTouche(touch, ended: true)
}
fileprivate func handleColorChange(_ color:UIColor, changing:Bool) {
if color !== self.color {
if let handler = onColorChange {
handler(color, !changing)
}
setNeedsDisplay()
}
}
fileprivate func handleTouche(_ touch:UITouch, ended:Bool) {
// set current point
let point = touch.location(in: self)
if self.bounds.contains(point) {
currentPoint = point
} else {
let x:CGFloat = min(bounds.width, max(0, point.x))
let y:CGFloat = min(bounds.height, max(0, point.y))
currentPoint = CGPoint(x: x, y: y)
}
handleColorChange(pointToColor(point), changing: !ended)
}
fileprivate func pointToColor(_ point:CGPoint) ->UIColor {
let s:CGFloat = min(1, max(0, (1.0 / bounds.width) * point.x))
let b:CGFloat = min(1, max(0, (1.0 / bounds.height) * point.y))
return UIColor(hue: h, saturation: s, brightness: b, alpha:a)
}
fileprivate func renderBitmap() {
if self.bounds.isEmpty {
return
}
if !lock.try() {
rerender = true
return
}
rerender = false
if pickerImage1 == nil {
self.pickerImage1 = PickerImage(width: Int(bounds.width), height: Int(bounds.height))
self.pickerImage2 = PickerImage(width: Int(bounds.width), height: Int(bounds.height))
}
opQueue.addOperation { () -> Void in
// Write colors to data array
if self.data1Shown { self.pickerImage2!.writeColorData(self.h, a:self.a) }
else { self.pickerImage1!.writeColorData(self.h, a:self.a)}
// flip images
// self.image = self.data1Shown ? self.pickerImage2!.image! : self.pickerImage1!.image!
self.data1Shown = !self.data1Shown
// make changes visible
OperationQueue.main.addOperation({ () -> Void in
self.setNeedsDisplay()
self.lock.unlock()
if self.rerender {
self.renderBitmap()
}
})
}
}
open override func draw(_ rect: CGRect) {
if let img = image {
img.draw(in: rect)
}
//// Oval Drawing
let ovalPath = UIBezierPath(ovalIn: CGRect(x: currentPoint.x - 5, y: currentPoint.y - 5, width: 10, height: 10))
UIColor.white.setStroke()
ovalPath.lineWidth = 1
ovalPath.stroke()
//// Oval 2 Drawing
let oval2Path = UIBezierPath(ovalIn: CGRect(x: currentPoint.x - 4, y: currentPoint.y - 4, width: 8, height: 8))
UIColor.black.setStroke()
oval2Path.lineWidth = 1
oval2Path.stroke()
}
}
It's not a problem of Swift 4 but of iOS 11; you may find that your code still works on an iOS 10 simulator.
The original code seemingly worked in iOS 10 just by luck.
In this part of the code:
init(width:Int, height:Int) {
self.width = width
self.height = height
let size:Int = width * height * 4
mutableData = CFDataCreateMutable(kCFAllocatorDefault, size)
createImageFromData(width, height: height)
}
The property mutableData is initialized with a CFMutableData that has a capacity of size but a length of zero (that is, it contains no data).
In iOS 11, the initializer CGDataProvider.init(data:) rejects an empty CFData, since a data provider cannot be backed by empty data.
A quick fix would be something like this:
init(width:Int, height:Int) {
self.width = width
self.height = height
let size:Int = width * height * 4
mutableData = CFDataCreateMutable(kCFAllocatorDefault, size)
CFDataSetLength(mutableData, size) //<-set the length of the data
createImageFromData(width, height: height)
}
But I'm not sure other parts of the code would work as expected in iOS 11.
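To illustrate that behavior in isolation (this check is not part of the original code), CGDataProvider(data:) fails for a CFMutableData that only has a capacity, and succeeds once a length is set:
let data = CFDataCreateMutable(kCFAllocatorDefault, 16)!
print(CGDataProvider(data: data) as Any)   // nil on iOS 11: capacity 16, but length 0
CFDataSetLength(data, 16)
print(CGDataProvider(data: data) as Any)   // non-nil: the data now has 16 bytes of content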
The problem is that you added let to that line, which creates a new local variable called provider inside createImageFromData. That way, it is never assigned to the class's provider property, so the property stays nil. Just remove the let and it should work properly.
fileprivate func createImageFromData(_ width:Int, height:Int) {
let colorSpace = CGColorSpaceCreateDeviceRGB()
let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue)
provider = CGDataProvider(data: mutableData)
imageSource = CGImageSourceCreateWithDataProvider(provider!, nil)
let cgimg = CGImage(width: Int(width), height: Int(height), bitsPerComponent: Int(8), bitsPerPixel: Int(32), bytesPerRow: Int(width) * Int(4),
space: colorSpace, bitmapInfo: bitmapInfo, provider: provider!, decode: nil, shouldInterpolate: true, intent: CGColorRenderingIntent.defaultIntent)
image = UIImage(cgImage: cgimg!)
}

How to make an exact blur

I want to achieve a blur like in the first image.
I wrote some code and got the result shown in the second image.
My code for the blur is:
let blurEffect = UIBlurEffect(style: UIBlurEffectStyle.Dark)
blurEffectView = UIVisualEffectView(effect: blurEffect)
blurEffectView.layer.opacity = 0.8
blurEffectView.alpha = 0.6
blurEffectView.frame = CGRectMake(0, 42, UIScreen.mainScreen().bounds.width, UIScreen.mainScreen().bounds.height - 42)
sourceView.addSubview(blurEffectView)
sourceView is my background view, which I want to blur. Any suggestions?
The alpha and layer.opacity corrections are not necessary; you can also do it with an extension:
extension UIImageView{
func makeBlurImage(imageView:UIImageView?)
{
let blurEffect = UIBlurEffect(style: UIBlurEffectStyle.Dark)
let blurEffectView = UIVisualEffectView(effect: blurEffect)
blurEffectView.frame = imageView!.bounds
blurEffectView.autoresizingMask = [.FlexibleWidth, .FlexibleHeight] // to support device rotation
imageView?.addSubview(blurEffectView)
}
}
Usage:
let imageView = UIImageView(frame: CGRectMake(0, 100, 300, 400))
let image:UIImage = UIImage(named: "photo.png")!
imageView.image = image
//Apply blur effect
imageView.makeBlurImage(imageView)
self.view.addSubview(imageView)
But if you want to apply the blur effect to a UIView, you can use this code:
protocol Blurable
{
var layer: CALayer { get }
var subviews: [UIView] { get }
var frame: CGRect { get }
var superview: UIView? { get }
func addSubview(view: UIView)
func removeFromSuperview()
func blur(blurRadius blurRadius: CGFloat)
func unBlur()
var isBlurred: Bool { get }
}
extension Blurable
{
func blur(blurRadius blurRadius: CGFloat)
{
if self.superview == nil
{
return
}
UIGraphicsBeginImageContextWithOptions(CGSize(width: frame.width, height: frame.height), false, 1)
layer.renderInContext(UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext();
guard let blur = CIFilter(name: "CIGaussianBlur"),
this = self as? UIView else
{
return
}
blur.setValue(CIImage(image: image), forKey: kCIInputImageKey)
blur.setValue(blurRadius, forKey: kCIInputRadiusKey)
let ciContext = CIContext(options: nil)
let result = blur.valueForKey(kCIOutputImageKey) as! CIImage!
let boundingRect = CGRect(x:0,
y: 0,
width: frame.width,
height: frame.height)
let cgImage = ciContext.createCGImage(result, fromRect: boundingRect)
let filteredImage = UIImage(CGImage: cgImage)
let blurOverlay = BlurOverlay()
blurOverlay.frame = boundingRect
blurOverlay.image = filteredImage
blurOverlay.contentMode = UIViewContentMode.Left
if let superview = superview as? UIStackView,
index = (superview as UIStackView).arrangedSubviews.indexOf(this)
{
removeFromSuperview()
superview.insertArrangedSubview(blurOverlay, atIndex: index)
}
else
{
blurOverlay.frame.origin = frame.origin
UIView.transitionFromView(this,
toView: blurOverlay,
duration: 0.2,
options: UIViewAnimationOptions.CurveEaseIn,
completion: nil)
}
objc_setAssociatedObject(this,
&BlurableKey.blurable,
blurOverlay,
objc_AssociationPolicy.OBJC_ASSOCIATION_RETAIN)
}
func unBlur()
{
guard let this = self as? UIView,
blurOverlay = objc_getAssociatedObject(self as? UIView, &BlurableKey.blurable) as? BlurOverlay else
{
return
}
if let superview = blurOverlay.superview as? UIStackView,
index = (blurOverlay.superview as! UIStackView).arrangedSubviews.indexOf(blurOverlay)
{
blurOverlay.removeFromSuperview()
superview.insertArrangedSubview(this, atIndex: index)
}
else
{
this.frame.origin = blurOverlay.frame.origin
UIView.transitionFromView(blurOverlay,
toView: this,
duration: 0.2,
options: UIViewAnimationOptions.CurveEaseIn,
completion: nil)
}
objc_setAssociatedObject(this,
&BlurableKey.blurable,
nil,
objc_AssociationPolicy.OBJC_ASSOCIATION_RETAIN)
}
var isBlurred: Bool
{
return objc_getAssociatedObject(self as? UIView, &BlurableKey.blurable) is BlurOverlay
}
}
extension UIView: Blurable
{
}
class BlurOverlay: UIImageView
{
}
struct BlurableKey
{
static var blurable = "blurable"
}
Swift 4.x
extension UIView {
struct BlurableKey {
static var blurable = "blurable"
}
func blur(radius: CGFloat) {
guard let superview = superview else { return }
UIGraphicsBeginImageContextWithOptions(CGSize(width: frame.width, height: frame.height), false, 1)
layer.render(in: UIGraphicsGetCurrentContext()!)
guard let image = UIGraphicsGetImageFromCurrentImageContext() else { return }
UIGraphicsEndImageContext()
guard let blur = CIFilter(name: "CIGaussianBlur") else { return }
blur.setValue(CIImage(image: image), forKey: kCIInputImageKey)
blur.setValue(radius, forKey: kCIInputRadiusKey)
let ciContext = CIContext(options: nil)
guard let result = blur.value(forKey: kCIOutputImageKey) as? CIImage else { return }
let boundingRect = CGRect(x: 0, y: 0, width: frame.width, height: frame.height)
guard let cgImage = ciContext.createCGImage(result, from: boundingRect) else { return }
let filteredImage = UIImage(cgImage: cgImage)
let blurOverlay = UIImageView()
blurOverlay.frame = boundingRect
blurOverlay.image = filteredImage
blurOverlay.contentMode = UIViewContentMode.left
if let stackView = superview as? UIStackView, let index = stackView.arrangedSubviews.index(of: self) {
removeFromSuperview()
stackView.insertArrangedSubview(blurOverlay, at: index)
} else {
blurOverlay.frame.origin = frame.origin
UIView.transition(from: self,
to: blurOverlay,
duration: 0.2,
options: UIViewAnimationOptions.curveEaseIn,
completion: nil)
}
objc_setAssociatedObject(self,
&BlurableKey.blurable,
blurOverlay,
objc_AssociationPolicy.OBJC_ASSOCIATION_RETAIN)
}
func unBlur() {
guard let blurOverlay = objc_getAssociatedObject(self, &BlurableKey.blurable) as? UIImageView else { return }
if let stackView = blurOverlay.superview as? UIStackView, let index = stackView.arrangedSubviews.index(of: blurOverlay) {
blurOverlay.removeFromSuperview()
stackView.insertArrangedSubview(self, at: index)
} else {
frame.origin = blurOverlay.frame.origin
UIView.transition(from: blurOverlay,
to: self,
duration: 0.2,
options: UIViewAnimationOptions.curveEaseIn,
completion: nil)
}
objc_setAssociatedObject(self,
&BlurableKey.blurable,
nil,
objc_AssociationPolicy.OBJC_ASSOCIATION_RETAIN)
}
var isBlurred: Bool {
return objc_getAssociatedObject(self, &BlurableKey.blurable) is UIImageView
}
}
The usage is, for example (with the Swift 4.x extension above):
segmentedControl.unBlur()
segmentedControl.blur(radius: 2)
This is from the Blurable project; you can find more detail in its GitHub repository.

Display image without scaling to view

In the following code, even small images get enlarged to fill the whole view. Could you please help me display the image at its original size?
override func drawRect(rect: CGRect) {
if let inputCIImage = inputCIImage {
clampFilter.setValue(inputCIImage, forKey: kCIInputImageKey)
blurFilter.setValue(clampFilter.outputImage!, forKey: kCIInputImageKey)
let rect = CGRect(x: 0, y: 0, width: drawableWidth, height: drawableHeight)
ciContext.drawImage(blurFilter.outputImage!, inRect: rect, fromRect: inputCIImage.extent)
}
}
I figured it out the following way:
func aspectFit(fromRect: CGRect, toRect: CGRect) -> CGRect {
let fromAspectRatio = fromRect.size.width / fromRect.size.height;
let toAspectRatio = toRect.size.width / toRect.size.height;
var fitRect = toRect
if (fromAspectRatio > toAspectRatio) {
fitRect.size.height = toRect.size.width / fromAspectRatio;
fitRect.origin.y += (toRect.size.height - fitRect.size.height) * 0.5;
} else {
fitRect.size.width = toRect.size.height * fromAspectRatio;
fitRect.origin.x += (toRect.size.width - fitRect.size.width) * 0.5;
}
return CGRectIntegral(fitRect)
}
func aspectFill(fromRect: CGRect, toRect: CGRect) -> CGRect {
let fromAspectRatio = fromRect.size.width / fromRect.size.height;
let toAspectRatio = toRect.size.width / toRect.size.height;
var fitRect = toRect
if (fromAspectRatio > toAspectRatio) {
fitRect.size.width = toRect.size.height * fromAspectRatio;
fitRect.origin.x += (toRect.size.width - fitRect.size.width) * 0.5;
} else {
fitRect.size.height = toRect.size.width / fromAspectRatio;
fitRect.origin.y += (toRect.size.height - fitRect.size.height) * 0.5;
}
return CGRectIntegral(fitRect)
}
func imageBoundsForContentMode(fromRect: CGRect, toRect: CGRect) -> CGRect {
switch contentMode {
case .ScaleAspectFill:
return aspectFill(fromRect, toRect: toRect)
case .ScaleAspectFit:
return aspectFit(fromRect, toRect: toRect)
default:
return fromRect
}
}
override func drawRect(rect: CGRect) {
if let inputCIImage = inputCIImage {
clampFilter.setValue(inputCIImage, forKey: kCIInputImageKey)
blurFilter.setValue(clampFilter.outputImage!, forKey: kCIInputImageKey)
let rect = CGRect(x: 0, y: 0, width: drawableWidth, height: drawableHeight)
let inputBounds = inputCIImage.extent
let targetBounds = imageBoundsForContentMode(inputBounds, toRect: rect)
ciContext.drawImage(blurFilter.outputImage!, inRect: targetBounds, fromRect: inputCIImage.extent)
}
}
You can get the size of the image. For example, if your image is
let image = UIImage(named: "someImage.png")!
then you can get its width and height with
print(image.size)
and then pass that size to your custom view so it draws the image at the size you want.
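If you would rather keep drawing through the CIContext as in the question's drawRect, a minimal sketch (reusing the question's drawableWidth, drawableHeight and filters, so the same assumptions apply) is to center the image's own extent inside the drawable instead of stretching it:
override func drawRect(rect: CGRect) {
    if let inputCIImage = inputCIImage {
        clampFilter.setValue(inputCIImage, forKey: kCIInputImageKey)
        blurFilter.setValue(clampFilter.outputImage!, forKey: kCIInputImageKey)
        // Keep the image at its original size: center its extent in the drawable
        let imageSize = inputCIImage.extent.size
        let target = CGRect(x: (CGFloat(drawableWidth) - imageSize.width) * 0.5,
                            y: (CGFloat(drawableHeight) - imageSize.height) * 0.5,
                            width: imageSize.width,
                            height: imageSize.height)
        ciContext.drawImage(blurFilter.outputImage!, inRect: target, fromRect: inputCIImage.extent)
    }
}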
