I have this code to convert a view to an image, but I want the image in JPEG format. What changes do I need to make to this code to get the image as JPEG? I am not showing my view on the device.
extension UIView {
    func createTransparentImage() -> UIImage {
        let renderFormat = UIGraphicsImageRendererFormat.default()
        renderFormat.opaque = false
        self.isOpaque = false
        self.layer.isOpaque = true
        self.backgroundColor = UIColor.clear
        self.layer.backgroundColor = UIColor.clear.cgColor
        let renderer = UIGraphicsImageRenderer(size: bounds.size, format: renderFormat)
        return renderer.image { context in
            layer.render(in: context.cgContext)
        }
    }
}
With your extension you get back a UIImage; if you want, you can save it as JPEG or PNG by converting it to Data (NSData in Objective-C):
let viewSnapshot = view.createTransparentImage()

// save as JPEG
if let data = viewSnapshot.jpegData(compressionQuality: 0.8) {
    // data holds the JPEG image at 80% quality
}

// save as PNG
if let data = viewSnapshot.pngData() {
    // data holds the PNG data of the snapshot
}
If you want to save this image to disk, you can do the following inside the if let data block:
let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
if let documentsDirectory = paths.first {
    let fileName = documentsDirectory.appendingPathComponent("myImage.jpg")
    try? data.write(to: fileName)
}
Given that you don't need a UIImage but only JPEG data, a slight modification to your extension is enough:
extension UIView {
    func createImageData(quality: CGFloat = 0.8) -> Data {
        let renderFormat = UIGraphicsImageRendererFormat.default()
        renderFormat.opaque = false
        self.isOpaque = false
        self.layer.isOpaque = true
        self.backgroundColor = UIColor.clear
        self.layer.backgroundColor = UIColor.clear.cgColor
        let renderer = UIGraphicsImageRenderer(size: bounds.size, format: renderFormat)
        return renderer.jpegData(withCompressionQuality: quality, actions: { context in
            layer.render(in: context.cgContext)
        })
    }
}
Notice the rendering call was changed to:
return renderer.jpegData(withCompressionQuality: quality, actions: { context in
Thus to use this you can simply do:
let viewSnapshotData = view.createImageData(quality: 0.9)
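One caveat worth noting before moving on: JPEG has no alpha channel, so the transparency configured above is discarded at encoding time, and transparent pixels typically come out black. If you would rather flatten onto white, you can fill a background before rendering. A minimal sketch (the helper name createJPEGDataOnWhite is hypothetical, not part of the answer above):

extension UIView {
    // Hypothetical helper: render the view over solid white, then encode as JPEG.
    // JPEG cannot store transparency, so we flatten deliberately instead of by accident.
    func createJPEGDataOnWhite(quality: CGFloat = 0.8) -> Data {
        let format = UIGraphicsImageRendererFormat.default()
        format.opaque = true // no alpha needed for JPEG
        let renderer = UIGraphicsImageRenderer(size: bounds.size, format: format)
        return renderer.jpegData(withCompressionQuality: quality) { context in
            UIColor.white.setFill()
            context.fill(CGRect(origin: .zero, size: bounds.size))
            layer.render(in: context.cgContext)
        }
    }
}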
As per LeoDabus, you can use the following extension to get a snapshot of a UIView with alpha transparency, if that is needed:
extension UIView {
    var snapshot: UIImage {
        isOpaque = false
        backgroundColor = .clear
        let format = UIGraphicsImageRendererFormat.default()
        format.opaque = false
        return UIGraphicsImageRenderer(size: bounds.size, format: format).image { _ in
            drawHierarchy(in: bounds, afterScreenUpdates: true)
        }
    }
}
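Note that drawHierarchy(in:afterScreenUpdates:) captures the view as it is drawn on screen and can return a blank image for a view that has never been added to a window; since the question states the view is not shown on the device, the layer.render(in:) variants are likely the safer option here.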
extension UIView {
    func captureViewAsImage() -> UIImage? {
        var screenshotImage: UIImage?
        let layer = self.layer
        UIGraphicsBeginImageContextWithOptions(layer.frame.size, false, UIScreen.main.scale)
        guard let context = UIGraphicsGetCurrentContext() else { return nil }
        layer.render(in: context)
        screenshotImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return screenshotImage
    }
}
and use this function as:
let img = view.captureViewAsImage()
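Note that UIGraphicsBeginImageContextWithOptions is the pre-iOS-10 API; on iOS 10 and later, the UIGraphicsImageRenderer versions shown above are the preferred approach.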
I want to apply a Gaussian blur to an image, but I also want to keep my image view's scaleAspectFill content mode. I am blurring the image with the following code:
func getImageWithBlur(image: UIImage) -> UIImage? {
    let context = CIContext(options: nil)
    guard let currentFilter = CIFilter(name: "CIGaussianBlur") else {
        return nil
    }
    let beginImage = CIImage(image: image)
    currentFilter.setValue(beginImage, forKey: kCIInputImageKey)
    currentFilter.setValue(6.5, forKey: "inputRadius")
    guard let output = currentFilter.outputImage,
          let cgimg = context.createCGImage(output, from: output.extent) else {
        return nil
    }
    return UIImage(cgImage: cgimg)
}
But this does not work with scaleAspectFill mode. They are both the same image, but when I blur the second one, as you can see, it adds space at the top and bottom. What should I do so the blurred image fits as well as the original?
When you apply a CIGaussianBlur filter, the resulting image is larger than the original, because the blur extends beyond the original edges.
To get back an image at the original size, you need to use the original image extent.
Note, though, the blur is applied both inside and outside the edge, so if you clip only to the original extent, the edge will effectively "fade out". To avoid the edges altogether, you'll need to clip farther in.
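You can see the size change by printing the extents before and after the filter. A quick sketch (inputImage stands for any CIImage here; the exact growth depends on the radius):

let blur = CIFilter(name: "CIGaussianBlur")!
blur.setValue(inputImage, forKey: kCIInputImageKey)
blur.setValue(6.5, forKey: kCIInputRadiusKey)
print(inputImage.extent)        // e.g. (0.0, 0.0, 640.0, 360.0)
print(blur.outputImage!.extent) // origin goes negative and the size grows on every side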
Here is an example, using a UIImage extension to blur either with or without blurred edges:
extension UIImage {
    func blurredImageWithBlurredEdges(inputRadius: CGFloat) -> UIImage? {
        guard let currentFilter = CIFilter(name: "CIGaussianBlur") else {
            return nil
        }
        guard let beginImage = CIImage(image: self) else {
            return nil
        }
        currentFilter.setValue(beginImage, forKey: kCIInputImageKey)
        currentFilter.setValue(inputRadius, forKey: "inputRadius")
        guard let output = currentFilter.outputImage else {
            return nil
        }
        // UIKit and UIImageView .contentMode don't play well with a CIImage-only
        // backing, so we need to back the returned UIImage with a CGImage
        let context = CIContext()
        // crop to the original extent because the blur changed the size of the image
        guard let final = context.createCGImage(output, from: beginImage.extent) else {
            return nil
        }
        return UIImage(cgImage: final)
    }

    func blurredImageWithClippedEdges(inputRadius: CGFloat) -> UIImage? {
        guard let currentFilter = CIFilter(name: "CIGaussianBlur") else {
            return nil
        }
        guard let beginImage = CIImage(image: self) else {
            return nil
        }
        currentFilter.setValue(beginImage, forKey: kCIInputImageKey)
        currentFilter.setValue(inputRadius, forKey: "inputRadius")
        guard let output = currentFilter.outputImage else {
            return nil
        }
        // UIKit and UIImageView .contentMode don't play well with a CIImage-only
        // backing, so we need to back the returned UIImage with a CGImage
        let context = CIContext()
        // to clear the blurred edges, use a fromRect that is the original image
        // extent inset by (negative) 1/2 of the new extent's origins
        let newExtent = beginImage.extent.insetBy(dx: -output.extent.origin.x * 0.5, dy: -output.extent.origin.y * 0.5)
        guard let final = context.createCGImage(output, from: newExtent) else {
            return nil
        }
        return UIImage(cgImage: final)
    }
}
and here is an example View Controller showing how to use it, and the different results:
class BlurTestViewController: UIViewController {

    let imgViewA = UIImageView()
    let imgViewB = UIImageView()
    let imgViewC = UIImageView()

    override func viewDidLoad() {
        super.viewDidLoad()

        let stackView = UIStackView()
        stackView.axis = .vertical
        stackView.alignment = .fill
        stackView.distribution = .fillEqually
        stackView.spacing = 8
        stackView.translatesAutoresizingMaskIntoConstraints = false
        view.addSubview(stackView)

        NSLayoutConstraint.activate([
            stackView.widthAnchor.constraint(equalToConstant: 200.0),
            stackView.centerXAnchor.constraint(equalTo: view.centerXAnchor),
            stackView.centerYAnchor.constraint(equalTo: view.centerYAnchor),
        ])

        [imgViewA, imgViewB, imgViewC].forEach { v in
            v.backgroundColor = .red
            v.contentMode = .scaleAspectFill
            v.clipsToBounds = true
            // square image views (1:1 ratio)
            v.heightAnchor.constraint(equalTo: v.widthAnchor, multiplier: 1.0).isActive = true
            stackView.addArrangedSubview(v)
        }
    }

    override func viewDidAppear(_ animated: Bool) {
        super.viewDidAppear(animated)

        guard let imgA = UIImage(named: "bkg640x360") else {
            fatalError("Could not load image!")
        }
        guard let imgB = imgA.blurredImageWithBlurredEdges(inputRadius: 6.5) else {
            fatalError("Could not create Blurred image with Blurred Edges")
        }
        guard let imgC = imgA.blurredImageWithClippedEdges(inputRadius: 6.5) else {
            fatalError("Could not create Blurred image with Clipped Edges")
        }

        imgViewA.image = imgA
        imgViewB.image = imgB
        imgViewC.image = imgC
    }
}
Using the original 640x360 image with 200 x 200 image views, the output shows, top to bottom, the original, the blurred-edges version, and the clipped-edges version (the screenshots are not reproduced here).
Also worth mentioning, although I'm sure you've already noticed: these functions run very slowly on the simulator but very quickly on an actual device.
I believe your issue is that the convolution kernel of the CIFilter creates additional data as it applies the blur at the edges of the image. The CIContext isn't strictly bounded to the input's extent and can use the area around the image to fully process the output. So rather than using output.extent in createCGImage, use the size of the input image (converted to a CGRect).
To account for the blurred alpha channel along the image edges, you can use the CIImage methods unpremultiplyingAlpha().settingAlphaOne(in:) to flatten the image before returning.
func getImageWithBlur(image: UIImage) -> UIImage? {
    let context = CIContext(options: nil)
    guard let currentFilter = CIFilter(name: "CIGaussianBlur") else { return nil }
    let beginImage = CIImage(image: image)
    currentFilter.setValue(beginImage, forKey: kCIInputImageKey)
    currentFilter.setValue(6.5, forKey: "inputRadius")
    let rect = CGRect(x: 0.0, y: 0.0, width: image.size.width, height: image.size.height)
    guard let output = currentFilter.outputImage?.unpremultiplyingAlpha().settingAlphaOne(in: rect) else { return nil }
    guard let cgimg = context.createCGImage(output, from: rect) else { return nil }
    print("image.size: \(image.size)")
    print("output.extent: \(output.extent)")
    return UIImage(cgImage: cgimg)
}
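With this version the output has exactly the same pixel size as the input, so an aspect-fill image view positions the blurred image just like the original. Usage might look like this (imageView and originalImage are assumed names, not from the answer above):

imageView.contentMode = .scaleAspectFill
imageView.clipsToBounds = true
imageView.image = getImageWithBlur(image: originalImage)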
I have a question about converting a UIImage to a base64 string. I already resized my image to 1024 x 768, then converted it to a base64 string. But when I turn that base64 string back into a UIImage, its width and height are not what I resized to; they are 2304.0 x 3072.0. How do I make the image behind my base64 string the correct size?
guard let image = image else { return }
print("image => \(image)")
// image => <UIImage:0x283cce7f0 anonymous {768, 1024}>

guard let base64ImageString = image.toBase64(format: .jpeg(0.2)) else { return }

let dataDecoded: Data = Data(base64Encoded: base64ImageString, options: .ignoreUnknownCharacters)!
let decodedimage = UIImage(data: dataDecoded)
let height = decodedimage?.size.height
let width = decodedimage?.size.width
print("====) \(String(describing: width)) / \(String(describing: height))")
// ====) Optional(2304.0) / Optional(3072.0)
public enum ImageFormat {
    case png
    case jpeg(CGFloat)
}

extension UIImage {
    public func toBase64(format: ImageFormat) -> String? {
        var imageData: Data?
        switch format {
        case .png:
            imageData = self.pngData()
        case .jpeg(let compression):
            imageData = self.jpegData(compressionQuality: compression)
        }
        return imageData?.base64EncodedString()
    }
}
Try it this way: the code below resizes your image, and it should run before you convert the image to base64.
guard let image = image else { return }
print("image => \(image)")
// image => <UIImage:0x283cce7f0 anonymous {768, 1024}>

let desiredSize = CGSize(width: 768, height: 1024)
UIGraphicsBeginImageContextWithOptions(desiredSize, false, 1.0)
image.draw(in: CGRect(origin: .zero, size: desiredSize))
// UIGraphicsGetImageFromCurrentImageContext() returns an optional, so unwrap it
guard let resizedImage = UIGraphicsGetImageFromCurrentImageContext() else { return }
UIGraphicsEndImageContext()

guard let base64ImageString = resizedImage.toBase64(format: .jpeg(0.2)) else { return }
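The underlying cause of the size mismatch is UIImage.scale: a 768 x 1024-point image at scale 3 is 2304 x 3072 pixels, JPEG data stores pixels, and the decoded image comes back at scale 1, so it reports the pixel size. Passing 1.0 as the scale when drawing, as above, makes points equal pixels. On iOS 10 and later, the same resize can also be written with UIGraphicsImageRenderer, which returns a non-optional image. A sketch using the same names:

let desiredSize = CGSize(width: 768, height: 1024)
let format = UIGraphicsImageRendererFormat.default()
format.scale = 1.0 // render at 1x so the encoded pixel size matches desiredSize
let resizedImage = UIGraphicsImageRenderer(size: desiredSize, format: format).image { _ in
    image.draw(in: CGRect(origin: .zero, size: desiredSize))
}
guard let base64ImageString = resizedImage.toBase64(format: .jpeg(0.2)) else { return }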
I am adding text to an array of images using Swift. Below is my code for the loop:
var holderClass = HolderClass()
var newImages = [UIImage]()
var counter = 0

for oldImage in holderClass.oldImages {
    let newImage = drawTextAtLocation(text: "testing", image: oldImage)
    newImages.append(newImage) // append; subscripting an empty array would crash
    counter += 1
    if counter == self.storyModule.getImageCount() {
        completionHandler(newImages)
    }
}
Here is the text-drawing function:
func drawTextAtLocation(text: String, image: UIImage) -> UIImage {
    let textColor = UIColor.white
    let textFont = UIFont(name: "Helvetica Bold", size: 12)!
    let scale = UIScreen.main.scale
    UIGraphicsBeginImageContextWithOptions(image.size, false, scale)
    let textFontAttributes: [NSAttributedString.Key: Any] = [
        .font: textFont,
        .foregroundColor: textColor,
    ]
    image.draw(in: CGRect(origin: .zero, size: image.size))
    let rect = CGRect(origin: .zero, size: image.size)
    text.draw(in: rect, withAttributes: textFontAttributes)
    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return newImage ?? image
}
Intuitively, this loop should use constant storage. However, as the memory graph shows, it uses linear storage and causes memory errors with larger numbers of images. How can I do this operation in constant memory?
edit: I think I may have oversimplified this a bit. The oldImages array is really an array of images inside an object, so the initialization of the old images looks like this:
class HolderClass {
    private var oldImages: [UIImage]!

    init() {
        oldImages = [UIImage(), UIImage()]
    }
}
edit 2:
This is how the image data is loaded. The following code is in viewDidLoad.
var holderClass = HolderClass()
DispatchQueue.global(qos: .background).async {
    var dataIntermediate = [StoryData]()
    let dataRequest: NSFetchRequest<StoryData> = StoryData.fetchRequest()
    do {
        dataIntermediate = try self.managedObjectContext.fetch(dataRequest)
        for storyData in dataIntermediate {
            var retrievedImageArray = [UIImage]()
            if let loadedImage = DocumentSaveManager.loadImageFromPath(imageId: Int(storyData.id)) {
                retrievedImageArray.append(loadedImage)
            }
            holderClass.oldImages = retrievedImageArray
        }
    } catch {
        print("Failed to load data \(error.localizedDescription)")
    }
}
This is the DocumentSaveManager class.
static func documentDirectoryURL() -> URL {
let documentsURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
return documentsURL
}
static func saveInDocumentsDirectory(foldername:String, filename: String) -> URL {
let fileURL = documentDirectoryURL().appendingPathComponent(foldername).appendingPathComponent(filename)
return fileURL
}
static func loadImageFromPath(moduleId: Int, imageId: Int) -> UIImage? {
let path = saveInDocumentsDirectory(foldername: String(describing: moduleId), filename: String(describing: imageId)).path
let image = UIImage(contentsOfFile: path)
if image == nil {
return nil // Remember to alert user
}
return image
}
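One common way to keep memory flat in a loop like this (a sketch, not from the original thread) is to wrap each iteration in autoreleasepool, so the autoreleased bitmaps produced by UIGraphicsGetImageFromCurrentImageContext are released every pass instead of accumulating until the run loop drains:

for oldImage in holderClass.oldImages {
    // Drain each iteration's autoreleased images immediately
    autoreleasepool {
        let newImage = drawTextAtLocation(text: "testing", image: oldImage)
        newImages.append(newImage)
    }
    counter += 1
    if counter == self.storyModule.getImageCount() {
        completionHandler(newImages)
    }
}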
I am using CIDetector to detect a face in a UIImage. I get the face rect correctly, but when I crop the image to the detected face rect, it does not show in my image view.
I have already checked that my image is not nil.
Here is my code:
@IBAction func detectFaceOnImageView(_: UIButton) {
    let image = myImageView.getFaceImage()
    myImageView.image = image
}
extension UIView {
    func getFaceImage() -> UIImage? {
        let faceDetectorOptions: [String: AnyObject] = [CIDetectorAccuracy: CIDetectorAccuracyHigh as AnyObject]
        let faceDetector: CIDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: faceDetectorOptions)!
        let viewScreenShotImage = generateScreenShot(scaleTo: 1.0)
        if viewScreenShotImage.cgImage != nil {
            let sourceImage = CIImage(cgImage: viewScreenShotImage.cgImage!)
            let features = faceDetector.features(in: sourceImage)
            if features.count > 0 {
                var faceBounds = CGRect.zero
                var faceImage: UIImage?
                for feature in features as! [CIFaceFeature] {
                    faceBounds = feature.bounds
                    let faceCroped: CIImage = sourceImage.cropping(to: faceBounds)
                    faceImage = UIImage(ciImage: faceCroped)
                }
                return faceImage
            } else {
                return nil
            }
        } else {
            return nil
        }
    }

    func generateScreenShot(scaleTo: CGFloat = 3.0) -> UIImage {
        let rect = self.bounds
        UIGraphicsBeginImageContextWithOptions(rect.size, false, 0.0)
        let context = UIGraphicsGetCurrentContext()
        self.layer.render(in: context!)
        let screenShotImage = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        let aspectRatio = screenShotImage.size.width / screenShotImage.size.height
        let resizedScreenShotImage = screenShotImage.scaleImage(toSize: CGSize(width: self.bounds.size.height * aspectRatio * scaleTo, height: self.bounds.size.height * scaleTo))
        return resizedScreenShotImage!
    }
}
For more information, I am attaching screenshots of the values.
Try this:
let faceCroped: CIImage = sourceImage.cropping(to: faceBounds)
//faceImage = UIImage(ciImage: faceCroped)
let cgImage: CGImage = {
    let context = CIContext(options: nil)
    return context.createCGImage(faceCroped, from: faceCroped.extent)!
}()
faceImage = UIImage(cgImage: cgImage)
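The reason this works: a UIImage created with UIImage(ciImage:) has no bitmap backing, only a recipe for producing one, and UIImageView does not render it in many situations. Rendering the CIImage through a CIContext into a CGImage first gives the UIImage real bitmap data, which the image view can display.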
I am trying to generate a QR code using the iOS Core Image API:
func createQRForString(#data: NSData) -> CIImage! {
    var qrFilter = CIFilter(name: "CIQRCodeGenerator")
    qrFilter.setValue(data, forKey: "inputMessage")
    qrFilter.setValue("H", forKey: "inputCorrectionLevel")
    return qrFilter.outputImage
}

func createNonInterpolatedImageFromCIImage(image: CIImage, withScale scale: CGFloat) -> UIImage {
    let cgImage = CIContext(options: nil).createCGImage(image, fromRect: image.extent())
    UIGraphicsBeginImageContext(CGSizeMake(image.extent().size.width * scale, image.extent().size.height * scale))
    let context = UIGraphicsGetCurrentContext()
    CGContextSetInterpolationQuality(context, kCGInterpolationNone)
    let scaledImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return scaledImage
}
And the following code in the viewDidLoad method:
let data = "Hello World".dataUsingEncoding(NSUTF8StringEncoding)
if let image = createQRForString(data: data!) {
    let uiimage = createNonInterpolatedImageFromCIImage(image, withScale: 1.0)
    imageView.image = uiimage
} else {
    println("Error loading image")
}
But it neither prints "Error loading image" nor shows the QR code in the imageView.
Here is the solution:
override func viewDidLoad() {
    super.viewDidLoad()
    self.imgView.image = generateCode()
}

func generateCode() -> UIImage {
    let filter = CIFilter(name: "CIQRCodeGenerator")
    let data = "Hello World".dataUsingEncoding(NSUTF8StringEncoding)
    filter.setValue("H", forKey: "inputCorrectionLevel")
    filter.setValue(data, forKey: "inputMessage")
    let outputImage = filter.outputImage
    let context = CIContext(options: nil)
    let cgImage = context.createCGImage(outputImage, fromRect: outputImage.extent())
    let image = UIImage(CGImage: cgImage, scale: 1.0, orientation: UIImageOrientation.Up)
    let resized = resizeImage(image!, withQuality: kCGInterpolationNone, rate: 5.0)
    return resized
}

func resizeImage(image: UIImage, withQuality quality: CGInterpolationQuality, rate: CGFloat) -> UIImage {
    let width = image.size.width * rate
    let height = image.size.height * rate
    UIGraphicsBeginImageContextWithOptions(CGSizeMake(width, height), true, 0)
    let context = UIGraphicsGetCurrentContext()
    CGContextSetInterpolationQuality(context, quality)
    image.drawInRect(CGRectMake(0, 0, width, height))
    let resized = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return resized
}
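Note that this Q&A predates Swift 3, so the code above won't compile as-is today. For reference, a modern-Swift sketch of the same non-interpolated resize (an illustration under those assumptions, not a drop-in replacement):

func resizeImage(_ image: UIImage, rate: CGFloat) -> UIImage {
    let size = CGSize(width: image.size.width * rate, height: image.size.height * rate)
    let format = UIGraphicsImageRendererFormat.default()
    format.scale = 1
    return UIGraphicsImageRenderer(size: size, format: format).image { ctx in
        ctx.cgContext.interpolationQuality = .none // keep the QR modules crisp
        image.draw(in: CGRect(origin: .zero, size: size))
    }
}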