I have a view controller which takes a photo with a circular view in the center.
After taking a photo, I need to crop the CGRect with which I created the circular view. I need to crop the rectangle, not the circle.
I tried https://stackoverflow.com/a/57258806/12411655 and many other solutions, but none of them crops the CGRect that I need.
How do I convert the CGRect in the view's coordinates to UIImage's coordinates?
class CircularCameraViewController: UIViewController {
var captureSession: AVCaptureSession!
var capturePhotoOutput: AVCapturePhotoOutput!
var cropRect: CGRect!
public lazy var shutterButton: ShutterButton = {
let button = ShutterButton()
button.translatesAutoresizingMaskIntoConstraints = false
button.addTarget(self, action: #selector(capturePhoto), for: .touchUpInside)
return button
}()
private lazy var cancelButton: UIButton = {
let button = UIButton()
button.setTitle("Cancel", for: .normal)
button.translatesAutoresizingMaskIntoConstraints = false
button.addTarget(self, action: #selector(dismissCamera), for: .touchUpInside)
return button
}()
private lazy var flashButton: UIButton = {
let image = UIImage(named: "flash", in: Bundle(for: ScannerViewController.self), compatibleWith: nil)?.withRenderingMode(.alwaysTemplate)
let button = UIButton()
button.setImage(image, for: .normal)
button.translatesAutoresizingMaskIntoConstraints = false
button.addTarget(self, action: #selector(toggleFlash), for: .touchUpInside)
button.tintColor = .white
return button
}()
override func viewDidLoad() {
super.viewDidLoad()
setupCamera()
setupPhotoOutput()
setupViews()
setupConstraints()
captureSession.startRunning()
}
override func viewDidAppear(_ animated: Bool) {
super.viewDidAppear(animated)
}
override func viewWillDisappear(_ animated: Bool) {
super.viewWillDisappear(animated)
captureSession.stopRunning()
}
private func setupCamera() {
let captureDevice = AVCaptureDevice.default(for: AVMediaType.video)
var input: AVCaptureDeviceInput
do {
input = try AVCaptureDeviceInput(device: captureDevice!)
} catch {
fatalError("Error configuring capture device: \(error)");
}
captureSession = AVCaptureSession()
captureSession.addInput(input)
// Setup the preview view.
let videoPreviewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
videoPreviewLayer.videoGravity = AVLayerVideoGravity.resizeAspectFill
videoPreviewLayer.frame = view.layer.bounds
view.layer.addSublayer(videoPreviewLayer)
let camPreviewBounds = view.bounds
cropRect = CGRect(
x: camPreviewBounds.minX + (camPreviewBounds.width - 150) * 0.5,
y: camPreviewBounds.minY + (camPreviewBounds.height - 150) * 0.5,
width: 150,
height: 150
)
let path = UIBezierPath(roundedRect: camPreviewBounds, cornerRadius: 0)
path.append(UIBezierPath(ovalIn: cropRect))
let layer = CAShapeLayer()
layer.path = path.cgPath
layer.fillRule = CAShapeLayerFillRule.evenOdd;
layer.fillColor = UIColor.black.cgColor
layer.opacity = 0.5;
view.layer.addSublayer(layer)
}
private func setupViews() {
view.addSubview(shutterButton)
view.addSubview(flashButton)
view.addSubview(cancelButton)
}
private func setupConstraints() {
var cancelButtonConstraints = [NSLayoutConstraint]()
var shutterButtonConstraints = [NSLayoutConstraint]()
var flashConstraints = [NSLayoutConstraint]()
shutterButtonConstraints = [
shutterButton.centerXAnchor.constraint(equalTo: view.centerXAnchor),
shutterButton.widthAnchor.constraint(equalToConstant: 65.0),
shutterButton.heightAnchor.constraint(equalToConstant: 65.0)
]
flashConstraints = [
flashButton.leftAnchor.constraint(equalTo: view.leftAnchor, constant: 24.0),
flashButton.topAnchor.constraint(equalTo: view.topAnchor, constant: 30)
]
if #available(iOS 11.0, *) {
cancelButtonConstraints = [
cancelButton.leftAnchor.constraint(equalTo: view.safeAreaLayoutGuide.leftAnchor, constant: 24.0),
view.safeAreaLayoutGuide.bottomAnchor.constraint(equalTo: cancelButton.bottomAnchor, constant: (65.0 / 2) - 10.0)
]
let shutterButtonBottomConstraint = view.safeAreaLayoutGuide.bottomAnchor.constraint(equalTo: shutterButton.bottomAnchor, constant: 8.0)
shutterButtonConstraints.append(shutterButtonBottomConstraint)
} else {
cancelButtonConstraints = [
cancelButton.leftAnchor.constraint(equalTo: view.leftAnchor, constant: 24.0),
view.bottomAnchor.constraint(equalTo: cancelButton.bottomAnchor, constant: (65.0 / 2) - 10.0)
]
let shutterButtonBottomConstraint = view.bottomAnchor.constraint(equalTo: shutterButton.bottomAnchor, constant: 8.0)
shutterButtonConstraints.append(shutterButtonBottomConstraint)
}
NSLayoutConstraint.activate(cancelButtonConstraints + shutterButtonConstraints + flashConstraints)
}
private func setupPhotoOutput() {
capturePhotoOutput = AVCapturePhotoOutput()
capturePhotoOutput.isHighResolutionCaptureEnabled = true
captureSession.addOutput(capturePhotoOutput!)
}
@objc func dismissCamera() {
self.dismiss(animated: true, completion: nil)
}
@objc private func toggleFlash() {
if let avDevice = AVCaptureDevice.default(for: AVMediaType.video) {
if (avDevice.hasTorch) {
do {
try avDevice.lockForConfiguration()
} catch {
print("aaaa")
}
if avDevice.isTorchActive {
avDevice.torchMode = AVCaptureDevice.TorchMode.off
} else {
avDevice.torchMode = AVCaptureDevice.TorchMode.on
}
}
// unlock your device
avDevice.unlockForConfiguration()
}
}
}
extension CircularCameraViewController : AVCapturePhotoCaptureDelegate {
@objc private func capturePhoto() {
let photoSettings = AVCapturePhotoSettings()
photoSettings.isAutoStillImageStabilizationEnabled = true
photoSettings.isHighResolutionPhotoEnabled = true
photoSettings.flashMode = .auto
// Set ourselves as the delegate for `capturePhoto`.
capturePhotoOutput?.capturePhoto(with: photoSettings, delegate: self)
}
@available(iOS 11.0, *)
func photoOutput(_ output: AVCapturePhotoOutput,
didFinishProcessingPhoto photo: AVCapturePhoto,
error: Error?) {
guard error == nil else {
fatalError("Failed to capture photo: \(String(describing: error))")
}
guard let imageData = photo.fileDataRepresentation() else {
fatalError("Failed to convert pixel buffer")
}
guard let image = UIImage(data: imageData) else {
fatalError("Failed to convert image data to UIImage")
}
guard let croppedImg = image.cropToRect(rect: cropRect) else {
fatalError("Failed to crop image")
}
UIImageWriteToSavedPhotosAlbum(croppedImg, nil, nil, nil);
}
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photoSampleBuffer: CMSampleBuffer?, previewPhoto previewPhotoSampleBuffer: CMSampleBuffer?, resolvedSettings: AVCaptureResolvedPhotoSettings, bracketSettings: AVCaptureBracketedStillImageSettings?, error: Error?) {
guard error == nil, let photoSample = photoSampleBuffer else {
fatalError("Failed to capture photo: \(String(describing: error))")
}
guard let imgData = AVCapturePhotoOutput.jpegPhotoDataRepresentation(forJPEGSampleBuffer: photoSample, previewPhotoSampleBuffer: previewPhotoSampleBuffer) else {
fatalError("Failed to get image data: \(String(describing: error))")
}
guard let image = UIImage(data: imgData) else {
fatalError("Failed to convert image data to UIImage: \(String(describing: error))")
}
}
}
UIImage extension:
func cropToRect(rect: CGRect!) -> UIImage? {
let scaledRect = CGRect(x: rect.origin.x * self.scale, y: rect.origin.y * self.scale, width: rect.size.width * self.scale, height: rect.size.height * self.scale);
guard let imageRef: CGImage = self.cgImage?.cropping(to:scaledRect)
else {
return nil
}
let croppedImage: UIImage = UIImage(cgImage: imageRef, scale: self.scale, orientation: self.imageOrientation)
return croppedImage
}
When cropping an image, you need to scale the "crop rect" from the view's coordinate space to the image's coordinate space.
Also, when capturing from the camera, you need to take .imageOrientation into account.
Try changing your UIImage extension to this:
extension UIImage {
func cropToRect(rect: CGRect, viewSize: CGSize) -> UIImage? {
var cr = rect
switch self.imageOrientation {
case .right, .rightMirrored, .left, .leftMirrored:
// rotate the crop rect if needed
cr.origin.x = rect.origin.y
cr.origin.y = rect.origin.x
cr.size.width = rect.size.height
cr.size.height = rect.size.width
default:
break
}
let imageViewScale = max(self.size.width / viewSize.width,
self.size.height / viewSize.height)
// scale the crop rect
let cropZone = CGRect(x:cr.origin.x * imageViewScale,
y:cr.origin.y * imageViewScale,
width:cr.size.width * imageViewScale,
height:cr.size.height * imageViewScale)
// Perform cropping in Core Graphics
guard let cutImageRef: CGImage = self.cgImage?.cropping(to:cropZone)
else {
return nil
}
// Return image to UIImage
let croppedImage: UIImage = UIImage(cgImage: cutImageRef, scale: self.scale, orientation: self.imageOrientation)
return croppedImage
}
}
and change your call in photoOutput() to:
guard let croppedImg = image.cropToRect(rect: cropRect, viewSize: view.frame.size) else {
fatalError("Failed to crop image")
}
Since your code is using the full view, that should work fine. If you change it to use a different sized view as your videoPreviewLayer, then use that view's size instead of view.frame.size.
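As a side note (not part of the answer above, just a sketch): AVCaptureVideoPreviewLayer can also do the coordinate math for you. metadataOutputRectConverted(fromLayerRect:) maps a rect in the preview layer's coordinates to a normalized (0...1) rect in the capture device's space, already accounting for .resizeAspectFill, and you can scale that by the captured CGImage's pixel size. This assumes the preview layer created in setupCamera() is kept around as a videoPreviewLayer property:
// Sketch: convert the on-screen crop rect to image pixels via the preview layer.
let normalized = videoPreviewLayer.metadataOutputRectConverted(fromLayerRect: cropRect)
if let cg = image.cgImage {
    // The normalized rect is in the sensor's (unrotated) space, the same space as cgImage.
    let pixelRect = CGRect(x: normalized.origin.x * CGFloat(cg.width),
                           y: normalized.origin.y * CGFloat(cg.height),
                           width: normalized.size.width * CGFloat(cg.width),
                           height: normalized.size.height * CGFloat(cg.height))
    if let croppedCG = cg.cropping(to: pixelRect) {
        let croppedImg = UIImage(cgImage: croppedCG, scale: image.scale, orientation: image.imageOrientation)
        UIImageWriteToSavedPhotosAlbum(croppedImg, nil, nil, nil)
    }
}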
I am currently trying to implement something similar to Instagram's story feature where you take a picture or a video and when swiping left or right you change the current filter over the content. ( here is an example of what I managed to do in my app for images https://imgur.com/a/pYKrPkA )
As you can see in the example, I got it done for images, but now my problem is that I am trying to make it work for videos as well, and I am a bit lost as to where to start.
final class Filter: NSObject {
var isEnabled: Bool = true
var overlayImage: CIImage?
var ciFilter: CIFilter?
init(ciFilter: CIFilter?) {
self.ciFilter = ciFilter
super.init()
}
/// Empty filter for the original photo
static func emptyFilter() -> Filter {
return Filter(ciFilter: nil)
}
func imageByProcessingImage(_ image: CIImage, at time: CFTimeInterval) -> CIImage? {
guard isEnabled else { return image }
var image = image
if let overlayImage = overlayImage {
image = overlayImage.composited(over: image)
}
guard let ciFilter = ciFilter else {
return image
}
ciFilter.setValue(image, forKey: kCIInputImageKey)
return ciFilter.value(forKey: kCIOutputImageKey) as? CIImage
}
}
class StoriesImageView: UIView {
private var metalView: MTKView?
private var ciImage: CIImage?
private var preferredCIImageTransform: CGAffineTransform?
private let device = MTLCreateSystemDefaultDevice()
private var commandQueue: MTLCommandQueue?
private var context: CIContext?
override func layoutSubviews() {
super.layoutSubviews()
metalView?.frame = bounds
}
override func setNeedsDisplay() {
super.setNeedsDisplay()
metalView?.setNeedsDisplay()
}
func setImage(with image: UIImage) {
preferredCIImageTransform = preferredCIImageTransform(from: image)
if let cgImage = image.cgImage {
ciImage = CIImage(cgImage: cgImage)
loadContextIfNeeded()
}
setNeedsDisplay()
}
/// Return the image fitted to 1080x1920.
func renderedUIImage() -> UIImage? {
return renderedUIImage(in: CGRect(origin: .zero, size: CGSize(width: 1080, height: 1920)))
}
/// Returns a CIImage fitted to the main screen bounds.
func renderedCIIImage() -> CIImage? {
return renderedCIImage(in: CGRect(rect: bounds, contentScale: UIScreen.main.scale))
}
func renderedUIImage(in rect: CGRect) -> UIImage? {
if let image = renderedCIImage(in: rect), let context = context {
if let imageRef = context.createCGImage(image, from: image.extent) {
return UIImage(cgImage: imageRef)
}
}
return nil
}
func renderedCIImage(in rect: CGRect) -> CIImage? {
if var image = ciImage, let transform = preferredCIImageTransform {
image = image.transformed(by: transform)
return scaleAndResize(image, for: rect)
}
return nil
}
private func cleanupContext() {
metalView?.removeFromSuperview()
metalView?.releaseDrawables()
metalView = nil
}
private func loadContextIfNeeded() {
setContext()
}
private func setContext() {
let mView = MTKView(frame: bounds, device: device)
mView.clearColor = MTLClearColor(red: 0, green: 0, blue: 0, alpha: 0)
mView.framebufferOnly = false
mView.enableSetNeedsDisplay = true
mView.contentScaleFactor = contentScaleFactor
mView.delegate = self
metalView = mView
commandQueue = device?.makeCommandQueue()
context = CIContext(mtlDevice: device!)
insertSubview(metalView!, at: 0)
}
private func scaleAndResize(_ image: CIImage, for rect: CGRect) -> CIImage {
let imageSize = image.extent.size
let horizontalScale = rect.size.width / imageSize.width
let verticalScale = rect.size.height / imageSize.height
let scale = min(horizontalScale, verticalScale)
return image.transformed(by: CGAffineTransform(scaleX: scale, y: scale))
}
private func preferredCIImageTransform(from image: UIImage) -> CGAffineTransform {
if image.imageOrientation == .up {
return .identity
}
var transform: CGAffineTransform = .identity
switch image.imageOrientation {
case .down, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: image.size.height)
transform = transform.rotated(by: .pi)
case .left, .leftMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.rotated(by: .pi / 2)
case .right, .rightMirrored:
transform = transform.translatedBy(x: 0, y: image.size.height)
transform = transform.rotated(by: .pi / -2)
case .up, .upMirrored: break
@unknown default: fatalError("Unknown image orientation")
}
switch image.imageOrientation {
case .upMirrored, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .leftMirrored, .rightMirrored:
transform = transform.translatedBy(x: image.size.height, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .up, .down, .left, .right: break
@unknown default: fatalError("Unknown image orientation")
}
return transform
}
}
extension StoriesImageView: MTKViewDelegate {
func draw(in view: MTKView) {
autoreleasepool {
let rect = CGRect(rect: view.bounds, contentScale: UIScreen.main.scale)
if let image = renderedCIImage(in: rect) {
let commandBuffer = commandQueue?.makeCommandBuffer()
guard let drawable = view.currentDrawable else {
return
}
let heightDifference = (view.drawableSize.height - image.extent.size.height) / 2
let destination = CIRenderDestination(width: Int(view.drawableSize.width),
height: Int(view.drawableSize.height - heightDifference),
pixelFormat: view.colorPixelFormat,
commandBuffer: commandBuffer,
mtlTextureProvider: { () -> MTLTexture in
return drawable.texture
})
_ = try? context?.startTask(toRender: image, to: destination)
commandBuffer?.present(drawable)
commandBuffer?.commit()
}
}
}
func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {}
}
final class StoriesSwipeableImageView: StoriesImageView {
private let scrollView: UIScrollView = UIScrollView()
private let preprocessingFilter: Filter? = nil
var isRefreshingAutomaticallyWhenScrolling: Bool = true
var filters: [Filter]? {
didSet {
updateScrollViewContentSize()
updateCurrentSelected(notify: true)
}
}
var selectedFilter: Filter? {
didSet {
if selectedFilter != oldValue {
setNeedsLayout()
}
}
}
override init(frame: CGRect) {
super.init(frame: frame)
setup()
}
required init?(coder: NSCoder) {
super.init(coder: coder)
setup()
}
override func layoutSubviews() {
super.layoutSubviews()
scrollView.frame = bounds
updateScrollViewContentSize()
}
private func setup() {
scrollView.delegate = self
scrollView.isPagingEnabled = true
scrollView.showsHorizontalScrollIndicator = false
scrollView.showsVerticalScrollIndicator = false
scrollView.bounces = true
scrollView.alwaysBounceVertical = true
scrollView.alwaysBounceHorizontal = true
scrollView.backgroundColor = .clear
addSubview(scrollView)
}
private func updateScrollViewContentSize() {
let filterCount = filters?.count ?? 0
scrollView.contentSize = CGSize(width: filterCount * Int(frame.size.width) * 3,
height: Int(frame.size.height))
if let selectedFilter = selectedFilter {
scroll(to: selectedFilter, animated: false)
}
}
private func scroll(to filter: Filter, animated: Bool) {
if let index = filters?.firstIndex(where: { $0 === filter }) {
let contentOffset = CGPoint(x: scrollView.contentSize.width / 3 + scrollView.frame.size.width * CGFloat(index), y: 0)
scrollView.setContentOffset(contentOffset, animated: animated)
updateCurrentSelected(notify: false)
} else {
fatalError("Filter is not available in filters collection")
}
}
private func updateCurrentSelected(notify: Bool) {
guard frame.size.width != 0 else { return }
let filterCount = filters?.count ?? 0
let selectedIndex = Int(scrollView.contentOffset.x + scrollView.frame.size.width / 2) / Int(scrollView.frame.size.width) % filterCount
var newFilterGroup: Filter?
if selectedIndex >= 0 && selectedIndex < filterCount {
newFilterGroup = filters?[selectedIndex]
} else {
fatalError("Invalid contentOffset")
}
if selectedFilter != newFilterGroup {
selectedFilter = newFilterGroup
if notify {
// Notify delegate?
}
}
}
override func renderedCIImage(in rect: CGRect) -> CIImage? {
guard var image = super.renderedCIImage(in: rect) else {
print("Failed to render image")
return nil
}
let timeinterval: CFTimeInterval = 0
if let preprocessingFilter = self.preprocessingFilter {
image = preprocessingFilter.imageByProcessingImage(image, at: timeinterval)!
}
let extent = image.extent
let contentSize = scrollView.bounds.size
if contentSize.width == 0 {
return image
}
let filtersCount = filters?.count ?? 0
if filtersCount == 0 {
return image
}
let ratio = scrollView.contentOffset.x / contentSize.width
var index = Int(ratio)
let upIndex = Int(ceil(ratio))
let remaningRatio = ratio - CGFloat(index)
var xImage = extent.size.width * -remaningRatio
var outputImage: CIImage? = CIImage(color: CIColor(red: 0, green: 0, blue: 0))
while index <= upIndex {
let currentIndex = index % filtersCount
let filter = filters?[currentIndex]
var filteredImage = filter?.imageByProcessingImage(image, at: timeinterval)
filteredImage = filteredImage?.cropped(to:
CGRect(x: extent.origin.x + xImage,
y: extent.origin.y,
width: extent.size.width,
height: extent.size.height)
)
guard let output = outputImage else { return nil }
outputImage = filteredImage?.composited(over: output)
xImage += extent.size.width
index += 1
}
outputImage = outputImage?.cropped(to: extent)
return outputImage
}
}
extension StoriesSwipeableImageView: UIScrollViewDelegate {
func scrollViewDidScroll(_ scrollView: UIScrollView) {
let width = scrollView.frame.size.width
let contentOffsetX = scrollView.contentOffset.x
let contentSizeWidth = scrollView.contentSize.width
let normalWidth = CGFloat(filters?.count ?? 0) * width
if width > 0 && contentSizeWidth > 0 {
if contentOffsetX <= 0 {
scrollView.contentOffset = CGPoint(x: contentOffsetX + normalWidth, y: scrollView.contentOffset.y)
} else if contentOffsetX + width >= contentSizeWidth {
scrollView.contentOffset = CGPoint(x: contentOffsetX - normalWidth, y: scrollView.contentOffset.y)
}
}
if isRefreshingAutomaticallyWhenScrolling {
setNeedsDisplay()
}
}
func scrollViewDidScrollToTop(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndScrollingAnimation(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDecelerating(_ scrollView: UIScrollView) {
updateCurrentSelected(notify: true)
}
func scrollViewDidEndDragging(_ scrollView: UIScrollView, willDecelerate decelerate: Bool) {
if !decelerate {
updateCurrentSelected(notify: true)
}
}
}
These 3 are the classes that do the magic for the image part. Does anyone have a suggestion or a starting point for this? I tried looking over at https://github.com/rFlex/SCRecorder but I get a bit lost in Obj-C.
In iOS 9 / OS X 10.11 / tvOS, there's a convenience method for applying CIFilters to video. It works on an AVVideoComposition, so you can use it both for playback and for file-to-file import/export. See AVVideoComposition.init(asset:applyingCIFiltersWithHandler:) for the method docs.
There's an example in Apple's Core Image Programming Guide, too:
let filter = CIFilter(name: "CIGaussianBlur")!
let composition = AVVideoComposition(asset: asset, applyingCIFiltersWithHandler: { request in
// Clamp to avoid blurring transparent pixels at the image edges
let source = request.sourceImage.clampingToExtent()
filter.setValue(source, forKey: kCIInputImageKey)
// Vary filter parameters based on video timing
let seconds = CMTimeGetSeconds(request.compositionTime)
filter.setValue(seconds * 10.0, forKey: kCIInputRadiusKey)
// Crop the blurred output to the bounds of the original image
let output = filter.outputImage!.cropping(to: request.sourceImage.extent)
// Provide the filter output to the composition
request.finish(with: output, context: nil)
})
That part sets up the composition. After you've done that, you can either play it by assigning it to an AVPlayer or write it to a file with AVAssetExportSession. Since you're after the latter, here's an example of that:
let export = AVAssetExportSession(asset: asset, presetName: AVAssetExportPreset1920x1080)
export.outputFileType = AVFileTypeQuickTimeMovie
export.outputURL = outURL
export.videoComposition = composition
export.exportAsynchronouslyWithCompletionHandler(/*...*/)
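If you wanted playback instead of export, the same composition attaches to an AVPlayerItem (a minimal sketch, reusing asset and composition from above):
// Play the filtered video live rather than writing it to a file
let playerItem = AVPlayerItem(asset: asset)
playerItem.videoComposition = composition
let player = AVPlayer(playerItem: playerItem)
// Present the player via AVPlayerLayer or AVPlayerViewController, then call player.play()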
There's a bit more about this in the WWDC15 session on Core Image, starting around 20 minutes in.
I am following a code example to make a blurred UILabel, https://stackoverflow.com/a/62224908/2226315.
My requirement is to blur the label right after it is initialized, instead of calling the blur method later at runtime. However, when I call blur right after the label is initialized, the value returned from UIGraphicsGetCurrentContext is nil, so I get a "Fatal error: Unexpectedly found nil while unwrapping an Optional value".
UIGraphicsBeginImageContext(bounds.size)
print("DEBUG: bounds.size", bounds.size)
self.layer.render(in: UIGraphicsGetCurrentContext()!) // <- return nil
var image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
print("DEBUG: image image", image)
I tried adding the call in each of the following places individually. The context can now be fetched, but it does not generate the blur effect as expected.
override func layoutSubviews() {
super.layoutSubviews()
self.blur()
}
// OR
override func draw(_ rect: CGRect) {
super.draw(rect)
self.blur()
}
Full code snippet,
class BlurredLabel: UILabel {
func blur(_ blurRadius: Double = 2.5) {
let blurredImage = getBlurryImage(blurRadius)
let blurredImageView = UIImageView(image: blurredImage)
blurredImageView.translatesAutoresizingMaskIntoConstraints = false
blurredImageView.tag = 100
blurredImageView.contentMode = .center
blurredImageView.backgroundColor = .white
addSubview(blurredImageView)
NSLayoutConstraint.activate([
blurredImageView.centerXAnchor.constraint(equalTo: centerXAnchor),
blurredImageView.centerYAnchor.constraint(equalTo: centerYAnchor)
])
}
func unblur() {
subviews.forEach { subview in
if subview.tag == 100 {
subview.removeFromSuperview()
}
}
}
private func getBlurryImage(_ blurRadius: Double = 2.5) -> UIImage? {
UIGraphicsBeginImageContext(bounds.size)
layer.render(in: UIGraphicsGetCurrentContext()!)
guard let image = UIGraphicsGetImageFromCurrentImageContext(),
let blurFilter = CIFilter(name: "CIGaussianBlur") else {
UIGraphicsEndImageContext()
return nil
}
UIGraphicsEndImageContext()
blurFilter.setDefaults()
blurFilter.setValue(CIImage(image: image), forKey: kCIInputImageKey)
blurFilter.setValue(blurRadius, forKey: kCIInputRadiusKey)
var convertedImage: UIImage?
let context = CIContext(options: nil)
if let blurOutputImage = blurFilter.outputImage,
let cgImage = context.createCGImage(blurOutputImage, from: blurOutputImage.extent) {
convertedImage = UIImage(cgImage: cgImage)
}
return convertedImage
}
}
REFERENCE
Add blur view to label?
How to blur UILabel text
UPDATE
Usage, based on Eugene Dudnyk's answer:
definitionLabel = BlurredLabel()
definitionLabel.numberOfLines = 0
definitionLabel.lineBreakMode = .byWordWrapping
definitionLabel.textColor = UIColor(named: "text")
definitionLabel.text = "Lorem Ipsum is simply dummy text"
definitionLabel.clipsToBounds = false
definitionLabel.isBlurring = true
Here is a better solution - instead of retrieving the blurred image, just let the label blur itself.
When you need it to be blurred, set label.isBlurring = true.
Also, this solution is better for performance, because it reuses the same context and does not need the image view.
class BlurredLabel: UILabel {
var isBlurring = false {
didSet {
setNeedsDisplay()
}
}
var blurRadius: Double = 2.5 {
didSet {
blurFilter?.setValue(blurRadius, forKey: kCIInputRadiusKey)
}
}
lazy var blurFilter: CIFilter? = {
let blurFilter = CIFilter(name: "CIGaussianBlur")
blurFilter?.setDefaults()
blurFilter?.setValue(blurRadius, forKey: kCIInputRadiusKey)
return blurFilter
}()
override init(frame: CGRect) {
super.init(frame: frame)
layer.isOpaque = false
layer.needsDisplayOnBoundsChange = true
layer.contentsScale = UIScreen.main.scale
layer.contentsGravity = .center
isOpaque = false
isUserInteractionEnabled = false
contentMode = .redraw
}
required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}
override func display(_ layer: CALayer) {
let bounds = layer.bounds
guard !bounds.isEmpty && bounds.size.width < CGFloat(UINT16_MAX) else {
layer.contents = nil
return
}
UIGraphicsBeginImageContextWithOptions(layer.bounds.size, layer.isOpaque, layer.contentsScale)
if let ctx = UIGraphicsGetCurrentContext() {
self.layer.draw(in: ctx)
var image = UIGraphicsGetImageFromCurrentImageContext()?.cgImage
if isBlurring, let cgImage = image {
blurFilter?.setValue(CIImage(cgImage: cgImage), forKey: kCIInputImageKey)
let ciContext = CIContext(cgContext: ctx, options: nil)
if let blurOutputImage = blurFilter?.outputImage,
let cgImage = ciContext.createCGImage(blurOutputImage, from: blurOutputImage.extent) {
image = cgImage
}
}
layer.contents = image
}
UIGraphicsEndImageContext()
}
}
I transformed @EugeneDudnyk's answer into a UIView extension so it can also be used with a UITextView.
extension UIView {
struct BlurableKey {
static var blurable = "blurable"
}
func blur(radius: CGFloat) {
guard superview != nil else { return }
UIGraphicsBeginImageContextWithOptions(CGSize(width: frame.width, height: frame.height), false, 1)
layer.render(in: UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
guard
let blur = CIFilter(name: "CIGaussianBlur"),
let image = image
else {
return
}
blur.setValue(CIImage(image: image), forKey: kCIInputImageKey)
blur.setValue(radius, forKey: kCIInputRadiusKey)
let ciContext = CIContext(options: nil)
let boundingRect = CGRect(
x:0,
y: 0,
width: frame.width,
height: frame.height
)
guard
let result = blur.value(forKey: kCIOutputImageKey) as? CIImage,
let cgImage = ciContext.createCGImage(result, from: boundingRect)
else {
return
}
let blurOverlay = UIImageView()
blurOverlay.frame = boundingRect
blurOverlay.image = UIImage(cgImage: cgImage)
blurOverlay.contentMode = .left
addSubview(blurOverlay)
objc_setAssociatedObject(
self,
&BlurableKey.blurable,
blurOverlay,
objc_AssociationPolicy.OBJC_ASSOCIATION_RETAIN
)
}
func unBlur() {
guard
let blurOverlay = objc_getAssociatedObject(self, &BlurableKey.blurable) as? UIImageView
else {
return
}
blurOverlay.removeFromSuperview()
objc_setAssociatedObject(
self,
&BlurableKey.blurable,
nil,
objc_AssociationPolicy.OBJC_ASSOCIATION_RETAIN
)
}
var isBlurred: Bool {
return objc_getAssociatedObject(self, &BlurableKey.blurable) is UIImageView
}
}
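Usage is the same as for the label version, just on any view; for example, with a hypothetical textView property:
textView.blur(radius: 2.5)
// ... later, to remove the blur overlay again
textView.unBlur()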
I have a UIViewController with a UIImageView. If I tap the screen, I want the UIImageView to change its contentMode. But I found out that this is not working with some images, mainly those from AVCaptureSession.
Screenshots:
Aspect fill
Aspect fit
I also found out that it works fine when I change the device orientation to landscape and back. But when I tap the screen, it stops working again.
Aspect fit after changing orientation to landscape and back (this is how I want it to look every time in aspect fit)
My code:
CameraController:
class CameraController: UIViewController {
private var captureSession = AVCaptureSession()
private var captureDevice: AVCaptureDevice!
private var capturePhotoOutput = AVCapturePhotoOutput()
private var previewLayer: AVCaptureVideoPreviewLayer!
override func viewDidLoad() {
super.viewDidLoad()
setupCaptureSession()
setupCaptureDevice()
setupInputAndOutput()
setupPreviewLayer()
startCaptureSession()
setupLayout()
}
private func setupCaptureSession() {
captureSession.sessionPreset = .photo
}
private func setupCaptureDevice() {
guard let device = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: AVMediaType.video, position: .back).devices.first else { return }
captureDevice = device
}
private func setupInputAndOutput() {
do {
let captureDeviceInput = try AVCaptureDeviceInput(device: captureDevice)
captureSession.addInput(captureDeviceInput)
let captureSettings = AVCapturePhotoSettings(format: [AVVideoCodecKey: AVVideoCodecType.jpeg])
capturePhotoOutput.setPreparedPhotoSettingsArray([captureSettings], completionHandler: nil)
captureSession.addOutput(capturePhotoOutput)
} catch {
print(error.localizedDescription)
}
}
private func setupPreviewLayer() {
previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
previewLayer.videoGravity = .resizeAspectFill
previewLayer.connection?.videoOrientation = .portrait
previewLayer.frame = view.frame
view.layer.insertSublayer(previewLayer, at: 0)
}
private func startCaptureSession() {
captureSession.startRunning()
}
private func setupLayout() {
let captureButton = UIButton(frame: CGRect(x: 0, y: 0, width: 44, height: 44))
captureButton.backgroundColor = .white
captureButton.layer.cornerRadius = 22
captureButton.addTarget(self, action: #selector(didPressCaptureButton), for: .touchUpInside)
captureButton.center.x = view.center.x
captureButton.center.y = view.frame.height - 50
view.addSubview(captureButton)
}
@objc private func didPressCaptureButton() {
let settings = AVCapturePhotoSettings()
capturePhotoOutput.capturePhoto(with: settings, delegate: self)
}
}
extension CameraController: AVCapturePhotoCaptureDelegate {
func photoOutput(_ output: AVCapturePhotoOutput, didFinishProcessingPhoto photo: AVCapturePhoto, error: Error?) {
guard let imageData = photo.fileDataRepresentation() else { return }
guard let image = UIImage(data: imageData) else { return }
print("Image size: ", image.size)
let previewController = PreviewController()
previewController.image = image
present(previewController, animated: true, completion: {
self.captureSession.stopRunning()
})
}
}
PreviewController:
class PreviewController: UIViewController {
var imageView: UIImageView!
var image: UIImage!
override func viewDidLoad() {
super.viewDidLoad()
setupImageView()
}
func setupImageView() {
imageView = UIImageView(image: image)
imageView.contentMode = .scaleAspectFill
view.addSubview(imageView)
imageView.addConstraintsToFillSuperview()
}
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
let contentMode: UIViewContentMode = imageView.contentMode == .scaleAspectFill ? .scaleAspectFit : .scaleAspectFill
imageView.contentMode = contentMode
}
}
What am I doing wrong here?
Thank You!
You can change ImageView contentMode on runtime with:
self.imageView.contentMode = newContentMode
self.imageView.setNeedsDisplay()
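Applied to the PreviewController from the question, the tap handler would look roughly like this (a sketch combining the original touchesBegan with the suggested setNeedsDisplay call):
override func touchesBegan(_ touches: Set<UITouch>, with event: UIEvent?) {
    // Toggle between aspect fill and aspect fit, then force a redraw
    imageView.contentMode = imageView.contentMode == .scaleAspectFill ? .scaleAspectFit : .scaleAspectFill
    imageView.setNeedsDisplay()
}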
I just about have this solved. Thanks to some brilliant help getting me on the right track.
This is the code I have now.
Basically, I can now make an image out of the drawn overlay and the camera preview, but I can't yet combine them. There seems to be very little useful code I can find that does this simply.
The important parts are the extension block right at the top and the additions to
func saveToCamera() near the bottom of the code.
In short, I now have the two images I need, I think. The snapshot of myImage appears on a white background, so I'm not sure whether that's expected or not. That's how it appears in the Simulator, so it may just be normal.
Image 1. A screen capture.
Image 2. The saved image of myImage, as per the explanation.
import UIKit
import AVFoundation
import Foundation
// extension must be outside class
extension UIImage {
convenience init(view: UIView) {
UIGraphicsBeginImageContext(view.frame.size)
view.layer.render(in: UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
self.init(cgImage: (image?.cgImage)!)
}
}
class ViewController: UIViewController {
@IBOutlet weak var navigationBar: UINavigationBar!
@IBOutlet weak var imgOverlay: UIImageView!
@IBOutlet weak var btnCapture: UIButton!
@IBOutlet weak var shapeLayer: UIView!
let captureSession = AVCaptureSession()
let stillImageOutput = AVCaptureStillImageOutput()
var previewLayer : AVCaptureVideoPreviewLayer?
//var shapeLayer : CALayer?
// If we find a device we'll store it here for later use
var captureDevice : AVCaptureDevice?
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
//=======================
let midX = self.view.bounds.midX
let midY = self.view.bounds.midY
for index in 1...10 {
let circlePath = UIBezierPath(arcCenter: CGPoint(x: midX,y: midY), radius: CGFloat((index * 10)), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true)
let shapeLayerPath = CAShapeLayer()
shapeLayerPath.path = circlePath.cgPath
//change the fill color
shapeLayerPath.fillColor = UIColor.clear.cgColor
//you can change the stroke color
shapeLayerPath.strokeColor = UIColor.blue.cgColor
//you can change the line width
shapeLayerPath.lineWidth = 0.5
// add the blue-circle layer to the shapeLayer ImageView
shapeLayer.layer.addSublayer(shapeLayerPath)
}
print("Shape layer drawn")
//=====================
captureSession.sessionPreset = AVCaptureSessionPresetHigh
if let devices = AVCaptureDevice.devices() as? [AVCaptureDevice] {
// Loop through all the capture devices on this phone
for device in devices {
// Make sure this particular device supports video
if (device.hasMediaType(AVMediaTypeVideo)) {
// Finally check the position and confirm we've got the back camera
if(device.position == AVCaptureDevicePosition.back) {
captureDevice = device
if captureDevice != nil {
print("Capture device found")
beginSession()
}
}
}
}
}
}
@IBAction func actionCameraCapture(_ sender: AnyObject) {
print("Camera button pressed")
saveToCamera()
}
func beginSession() {
do {
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
stillImageOutput.outputSettings = [AVVideoCodecKey:AVVideoCodecJPEG]
if captureSession.canAddOutput(stillImageOutput) {
captureSession.addOutput(stillImageOutput)
}
}
catch {
print("error: \(error.localizedDescription)")
}
guard let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) else {
print("no preview layer")
return
}
// this is what displays the camera view. But - it's on TOP of the drawn view, and under the overview. ??
self.view.layer.addSublayer(previewLayer)
previewLayer.frame = self.view.layer.frame
captureSession.startRunning()
print("Capture session running")
self.view.addSubview(navigationBar)
//self.view.addSubview(imgOverlay)
self.view.addSubview(btnCapture)
// shapeLayer ImageView is already a subview created in IB
// but this will bring it to the front
self.view.addSubview(shapeLayer)
}
func saveToCamera() {
if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (CMSampleBuffer, Error) in
if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(CMSampleBuffer) {
if let cameraImage = UIImage(data: imageData) {
// cameraImage is the camera preview image.
// I need to combine/merge it with the myImage that is actually the blue circles.
// This converts the UIView of the blue circles to an image. Uses the 'extension' at the top of the code.
let myImage = UIImage(view: self.shapeLayer)
print("converting myImage to an image")
UIImageWriteToSavedPhotosAlbum(cameraImage, nil, nil, nil)
}
}
})
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
Give this a try... instead of combining your overlay view, it draws the circles and combines the output:
import UIKit
import AVFoundation
import Foundation
class CameraWithTargetViewController: UIViewController {
@IBOutlet weak var navigationBar: UINavigationBar!
@IBOutlet weak var imgOverlay: UIImageView!
@IBOutlet weak var btnCapture: UIButton!
@IBOutlet weak var shapeLayer: UIView!
let captureSession = AVCaptureSession()
let stillImageOutput = AVCaptureStillImageOutput()
var previewLayer : AVCaptureVideoPreviewLayer?
//var shapeLayer : CALayer?
// If we find a device we'll store it here for later use
var captureDevice : AVCaptureDevice?
override func viewDidLoad() {
super.viewDidLoad()
// Do any additional setup after loading the view, typically from a nib.
//=======================
captureSession.sessionPreset = AVCaptureSessionPresetHigh
if let devices = AVCaptureDevice.devices() as? [AVCaptureDevice] {
// Loop through all the capture devices on this phone
for device in devices {
// Make sure this particular device supports video
if (device.hasMediaType(AVMediaTypeVideo)) {
// Finally check the position and confirm we've got the back camera
if(device.position == AVCaptureDevicePosition.back) {
captureDevice = device
if captureDevice != nil {
print("Capture device found")
beginSession()
}
}
}
}
}
}
@IBAction func actionCameraCapture(_ sender: AnyObject) {
print("Camera button pressed")
saveToCamera()
}
func beginSession() {
do {
try captureSession.addInput(AVCaptureDeviceInput(device: captureDevice))
stillImageOutput.outputSettings = [AVVideoCodecKey:AVVideoCodecJPEG]
if captureSession.canAddOutput(stillImageOutput) {
captureSession.addOutput(stillImageOutput)
}
}
catch {
print("error: \(error.localizedDescription)")
}
guard let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession) else {
print("no preview layer")
return
}
// this is what displays the camera view. But - it's on TOP of the drawn view, and under the overview. ??
self.view.layer.addSublayer(previewLayer)
previewLayer.frame = self.view.layer.frame
imgOverlay.frame = self.view.frame
imgOverlay.image = self.drawCirclesOnImage(fromImage: nil, targetSize: imgOverlay.bounds.size)
self.view.bringSubview(toFront: navigationBar)
self.view.bringSubview(toFront: imgOverlay)
self.view.bringSubview(toFront: btnCapture)
// don't use shapeLayer anymore...
// self.view.bringSubview(toFront: shapeLayer)
captureSession.startRunning()
print("Capture session running")
}
func getImageWithColor(color: UIColor, size: CGSize) -> UIImage {
let rect = CGRect(origin: CGPoint(x: 0, y: 0), size: CGSize(width: size.width, height: size.height))
UIGraphicsBeginImageContextWithOptions(size, false, 0)
color.setFill()
UIRectFill(rect)
let image: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
UIGraphicsEndImageContext()
return image
}
func drawCirclesOnImage(fromImage: UIImage? = nil, targetSize: CGSize? = CGSize.zero) -> UIImage? {
if fromImage == nil && targetSize == CGSize.zero {
return nil
}
var tmpimg: UIImage?
if targetSize == CGSize.zero {
tmpimg = fromImage
} else {
tmpimg = getImageWithColor(color: UIColor.clear, size: targetSize!)
}
guard let img = tmpimg else {
return nil
}
let imageSize = img.size
let scale: CGFloat = 0
UIGraphicsBeginImageContextWithOptions(imageSize, false, scale)
img.draw(at: CGPoint.zero)
let w = imageSize.width
let midX = imageSize.width / 2
let midY = imageSize.height / 2
// red circles - radius in %
let circleRads = [ 0.07, 0.13, 0.17, 0.22, 0.29, 0.36, 0.40, 0.48, 0.60, 0.75 ]
// center "dot" - radius is 1.5%
var circlePath = UIBezierPath(arcCenter: CGPoint(x: midX,y: midY), radius: CGFloat(w * 0.015), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true)
UIColor.red.setFill()
circlePath.stroke()
circlePath.fill()
// blue circle is between first and second red circles
circlePath = UIBezierPath(arcCenter: CGPoint(x: midX,y: midY), radius: w * CGFloat((circleRads[0] + circleRads[1]) / 2.0), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true)
UIColor.blue.setStroke()
circlePath.lineWidth = 2.5
circlePath.stroke()
UIColor.red.setStroke()
for pct in circleRads {
let rad = w * CGFloat(pct)
circlePath = UIBezierPath(arcCenter: CGPoint(x: midX, y: midY), radius: CGFloat(rad), startAngle: CGFloat(0), endAngle:CGFloat(M_PI * 2), clockwise: true)
circlePath.lineWidth = 2.5
circlePath.stroke()
}
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage
}
func saveToCamera() {
if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (CMSampleBuffer, Error) in
if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(CMSampleBuffer) {
if let cameraImage = UIImage(data: imageData) {
if let nImage = self.drawCirclesOnImage(fromImage: cameraImage, targetSize: CGSize.zero) {
UIImageWriteToSavedPhotosAlbum(nImage, nil, nil, nil)
}
}
}
})
}
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
}
Ok, solved it pretty much. The important code is here. The resulting image is slightly out of skew, but I'll work away and fix that, unless someone can see a good fix for it.
func saveToCamera() {
if let videoConnection = stillImageOutput.connection(withMediaType: AVMediaTypeVideo) {
stillImageOutput.captureStillImageAsynchronously(from: videoConnection, completionHandler: { (CMSampleBuffer, Error) in
if let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(CMSampleBuffer) {
if let cameraImage = UIImage(data: imageData) {
// cameraImage is the camera preview image.
// I need to combine/merge it with the myImage that is actually the blue circles.
// This converts the UIView of the blue circles to an image. Uses the 'extension' at the top of the code.
let myImage = UIImage(view: self.shapeLayer)
print("converting myImage to an image")
let newImage = self.composite(image:cameraImage, overlay:(myImage), scaleOverlay:true)
UIImageWriteToSavedPhotosAlbum(newImage!, nil, nil, nil)
}
}
})
}
}
func composite(image:UIImage, overlay:(UIImage), scaleOverlay: Bool = false)->UIImage?{
UIGraphicsBeginImageContext(image.size)
var rect = CGRect(x: 0, y: 0, width: image.size.width, height: image.size.height)
image.draw(in: rect)
if scaleOverlay == false {
rect = CGRect(x: 0, y: 0, width: overlay.size.width, height: overlay.size.height)
}
overlay.draw(in: rect)
return UIGraphicsGetImageFromCurrentImageContext()
}
The resulting saved image.
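If the slight skew comes from the overlay being stretched to the photo's aspect ratio in composite(image:overlay:scaleOverlay:), one way to avoid it is to scale the overlay uniformly and center it instead (an illustrative sketch, not the final fix from the post):
func compositeKeepingAspect(image: UIImage, overlay: UIImage) -> UIImage? {
    UIGraphicsBeginImageContextWithOptions(image.size, false, image.scale)
    defer { UIGraphicsEndImageContext() }
    image.draw(in: CGRect(origin: .zero, size: image.size))
    // Aspect-fill scale so the overlay keeps its proportions
    let scale = max(image.size.width / overlay.size.width,
                    image.size.height / overlay.size.height)
    let overlaySize = CGSize(width: overlay.size.width * scale,
                             height: overlay.size.height * scale)
    let origin = CGPoint(x: (image.size.width - overlaySize.width) / 2,
                         y: (image.size.height - overlaySize.height) / 2)
    overlay.draw(in: CGRect(origin: origin, size: overlaySize))
    return UIGraphicsGetImageFromCurrentImageContext()
}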
I want the blur to actually look like the first image.
I have written some code, and it currently looks like the second image.
My code for the blur is as follows:
let blurEffect = UIBlurEffect(style: UIBlurEffectStyle.Dark)
blurEffectView = UIVisualEffectView(effect: blurEffect)
blurEffectView.layer.opacity = 0.8
blurEffectView.alpha = 0.6
blurEffectView.frame = CGRectMake(0, 42, UIScreen.mainScreen().bounds.width, UIScreen.mainScreen().bounds.height - 42)
sourceView.addSubview(blurEffectView)
sourceView is my background view, which I want to blur. Any suggestions?
The alpha and layer.opacity corrections are not necessary; you can also do it with an extension:
extension UIImageView{
func makeBlurImage(imageView:UIImageView?)
{
let blurEffect = UIBlurEffect(style: UIBlurEffectStyle.Dark)
let blurEffectView = UIVisualEffectView(effect: blurEffect)
blurEffectView.frame = imageView!.bounds
blurEffectView.autoresizingMask = [.FlexibleWidth, .FlexibleHeight] // to support device rotation
imageView?.addSubview(blurEffectView)
}
}
Usage:
let imageView = UIImageView(frame: CGRectMake(0, 100, 300, 400))
let image:UIImage = UIImage(named: "photo.png")!
imageView.image = image
//Apply blur effect
imageView.makeBlurImage(imageView)
self.view.addSubview(imageView)
But if you want to apply the blur effect to an UIView you can use this code:
protocol Blurable
{
var layer: CALayer { get }
var subviews: [UIView] { get }
var frame: CGRect { get }
var superview: UIView? { get }
func addSubview(view: UIView)
func removeFromSuperview()
func blur(blurRadius blurRadius: CGFloat)
func unBlur()
var isBlurred: Bool { get }
}
extension Blurable
{
func blur(blurRadius blurRadius: CGFloat)
{
if self.superview == nil
{
return
}
UIGraphicsBeginImageContextWithOptions(CGSize(width: frame.width, height: frame.height), false, 1)
layer.renderInContext(UIGraphicsGetCurrentContext()!)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext();
guard let blur = CIFilter(name: "CIGaussianBlur"),
this = self as? UIView else
{
return
}
blur.setValue(CIImage(image: image), forKey: kCIInputImageKey)
blur.setValue(blurRadius, forKey: kCIInputRadiusKey)
let ciContext = CIContext(options: nil)
let result = blur.valueForKey(kCIOutputImageKey) as! CIImage!
let boundingRect = CGRect(x:0,
y: 0,
width: frame.width,
height: frame.height)
let cgImage = ciContext.createCGImage(result, fromRect: boundingRect)
let filteredImage = UIImage(CGImage: cgImage)
let blurOverlay = BlurOverlay()
blurOverlay.frame = boundingRect
blurOverlay.image = filteredImage
blurOverlay.contentMode = UIViewContentMode.Left
if let superview = superview as? UIStackView,
index = (superview as UIStackView).arrangedSubviews.indexOf(this)
{
removeFromSuperview()
superview.insertArrangedSubview(blurOverlay, atIndex: index)
}
else
{
blurOverlay.frame.origin = frame.origin
UIView.transitionFromView(this,
toView: blurOverlay,
duration: 0.2,
options: UIViewAnimationOptions.CurveEaseIn,
completion: nil)
}
objc_setAssociatedObject(this,
&BlurableKey.blurable,
blurOverlay,
objc_AssociationPolicy.OBJC_ASSOCIATION_RETAIN)
}
func unBlur()
{
guard let this = self as? UIView,
blurOverlay = objc_getAssociatedObject(self as? UIView, &BlurableKey.blurable) as? BlurOverlay else
{
return
}
if let superview = blurOverlay.superview as? UIStackView,
index = (blurOverlay.superview as! UIStackView).arrangedSubviews.indexOf(blurOverlay)
{
blurOverlay.removeFromSuperview()
superview.insertArrangedSubview(this, atIndex: index)
}
else
{
this.frame.origin = blurOverlay.frame.origin
UIView.transitionFromView(blurOverlay,
toView: this,
duration: 0.2,
options: UIViewAnimationOptions.CurveEaseIn,
completion: nil)
}
objc_setAssociatedObject(this,
&BlurableKey.blurable,
nil,
objc_AssociationPolicy.OBJC_ASSOCIATION_RETAIN)
}
var isBlurred: Bool
{
return objc_getAssociatedObject(self as? UIView, &BlurableKey.blurable) is BlurOverlay
}
}
extension UIView: Blurable
{
}
class BlurOverlay: UIImageView
{
}
struct BlurableKey
{
static var blurable = "blurable"
}
Swift 4.x
extension UIView {
struct BlurableKey {
static var blurable = "blurable"
}
func blur(radius: CGFloat) {
guard let superview = superview else { return }
UIGraphicsBeginImageContextWithOptions(CGSize(width: frame.width, height: frame.height), false, 1)
layer.render(in: UIGraphicsGetCurrentContext()!)
guard let image = UIGraphicsGetImageFromCurrentImageContext() else { return }
UIGraphicsEndImageContext()
guard let blur = CIFilter(name: "CIGaussianBlur") else { return }
blur.setValue(CIImage(image: image), forKey: kCIInputImageKey)
blur.setValue(radius, forKey: kCIInputRadiusKey)
let ciContext = CIContext(options: nil)
guard let result = blur.value(forKey: kCIOutputImageKey) as? CIImage else { return }
let boundingRect = CGRect(x: 0, y: 0, width: frame.width, height: frame.height)
guard let cgImage = ciContext.createCGImage(result, from: boundingRect) else { return }
let filteredImage = UIImage(cgImage: cgImage)
let blurOverlay = UIImageView()
blurOverlay.frame = boundingRect
blurOverlay.image = filteredImage
blurOverlay.contentMode = UIViewContentMode.left
if let stackView = superview as? UIStackView, let index = stackView.arrangedSubviews.index(of: self) {
removeFromSuperview()
stackView.insertArrangedSubview(blurOverlay, at: index)
} else {
blurOverlay.frame.origin = frame.origin
UIView.transition(from: self,
to: blurOverlay,
duration: 0.2,
options: UIViewAnimationOptions.curveEaseIn,
completion: nil)
}
objc_setAssociatedObject(self,
&BlurableKey.blurable,
blurOverlay,
objc_AssociationPolicy.OBJC_ASSOCIATION_RETAIN)
}
func unBlur() {
guard let blurOverlay = objc_getAssociatedObject(self, &BlurableKey.blurable) as? UIImageView else { return }
if let stackView = blurOverlay.superview as? UIStackView, let index = stackView.arrangedSubviews.index(of: blurOverlay) {
blurOverlay.removeFromSuperview()
stackView.insertArrangedSubview(self, at: index)
} else {
frame.origin = blurOverlay.frame.origin
UIView.transition(from: blurOverlay,
to: self,
duration: 0.2,
options: UIViewAnimationOptions.curveEaseIn,
completion: nil)
}
objc_setAssociatedObject(self,
&BlurableKey.blurable,
nil,
objc_AssociationPolicy.OBJC_ASSOCIATION_RETAIN)
}
var isBlurred: Bool {
return objc_getAssociatedObject(self, &BlurableKey.blurable) is UIImageView
}
}
The usage is for example:
segmentedControl.unBlur()
segmentedControl.blur(blurRadius: 2)
This is the source of the project Blurable.
You can find more detail in his GitHub project here