I am adding text to an array of images using Swift. Below is my code for the loop:
let holderClass = HolderClass()
var newImages = [UIImage]()
var counter = 0
for oldImage in holderClass.oldImages {
    let newImage = drawTextAtLocation(text: "testing", image: oldImage)
    newImages.append(newImage)  // accumulating every result keeps all images in memory
    counter += 1
    if counter == self.storyModule.getImageCount() {
        completionHandler(newImages)
    }
}
Here is the text-drawing function:
func drawTextAtLocation(text: String, image: UIImage) -> UIImage {
    let textColor = UIColor.white
    let textFont = UIFont(name: "Helvetica Bold", size: 12)!
    let scale = UIScreen.main.scale
    UIGraphicsBeginImageContextWithOptions(image.size, false, scale)

    let textFontAttributes: [NSAttributedString.Key: Any] = [
        .font: textFont,
        .foregroundColor: textColor
    ]
    image.draw(in: CGRect(origin: .zero, size: image.size))
    let rect = CGRect(origin: .zero, size: image.size)
    text.draw(in: rect, withAttributes: textFontAttributes)

    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return newImage ?? image
}
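As an aside, the same drawing can be expressed with UIGraphicsImageRenderer (iOS 10+), which creates and cleans up the context automatically; a minimal equivalent sketch:

// Sketch: equivalent text drawing using UIGraphicsImageRenderer (iOS 10+).
func drawTextWithRenderer(text: String, image: UIImage) -> UIImage {
    let renderer = UIGraphicsImageRenderer(size: image.size)
    return renderer.image { _ in
        image.draw(in: CGRect(origin: .zero, size: image.size))
        let attributes: [NSAttributedString.Key: Any] = [
            .font: UIFont(name: "Helvetica Bold", size: 12)!,
            .foregroundColor: UIColor.white
        ]
        (text as NSString).draw(in: CGRect(origin: .zero, size: image.size),
                                withAttributes: attributes)
    }
}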
Intuitively, this loop should take constant storage; however, as shown in the following image, it takes linear storage and causes memory errors with larger numbers of images.
How can I perform such an operation in constant memory?
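One pattern worth noting, sketched here as an illustration rather than a guaranteed fix: drain an autorelease pool on each iteration so temporary image buffers are released before the next image is processed. Note that accumulating every rendered image in newImages is itself linear in the image count, so truly constant memory also requires consuming each result (for example, writing it to disk) instead of keeping it:

// Sketch: keep per-iteration allocations from accumulating across the loop.
for (index, oldImage) in holderClass.oldImages.enumerated() {
    autoreleasepool {
        let newImage = drawTextAtLocation(text: "testing", image: oldImage)
        save(newImage, at: index)  // hypothetical helper that writes the result to disk
    }
}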
Edit: I think I may have oversimplified this a bit. The oldImages array is really an array of images inside an object, so the initialization of the old images looks like this:
class HolderClass {
    var oldImages: [UIImage]!  // not private, so the loop above can read it

    init() {
        oldImages = [UIImage(), UIImage()]
    }
}
Edit 2: This is how the image data is loaded. The following code is in viewDidLoad:
let holderClass = HolderClass()
DispatchQueue.global(qos: .background).async {
    var dataIntermediate = [StoryData]()
    let dataRequest: NSFetchRequest<StoryData> = StoryData.fetchRequest()
    do {
        dataIntermediate = try self.managedObjectContext.fetch(dataRequest)
        for storyData in dataIntermediate {
            var retrievedImageArray = [UIImage]()
            // Note: the loadImageFromPath signature below also expects a moduleId.
            if let loadedImage = DocumentSaveManager.loadImageFromPath(imageId: Int(storyData.id)) {
                retrievedImageArray.append(loadedImage)
            }
            holderClass.oldImages = retrievedImageArray
        }
    } catch {
        print("Failed to load data \(error.localizedDescription)")
    }
}
This is the DocumentSaveManager class.
static func documentDirectoryURL() -> URL {
    let documentsURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)[0]
    return documentsURL
}

static func saveInDocumentsDirectory(foldername: String, filename: String) -> URL {
    let fileURL = documentDirectoryURL().appendingPathComponent(foldername).appendingPathComponent(filename)
    return fileURL
}

static func loadImageFromPath(moduleId: Int, imageId: Int) -> UIImage? {
    let path = saveInDocumentsDirectory(foldername: String(describing: moduleId),
                                        filename: String(describing: imageId)).path
    guard let image = UIImage(contentsOfFile: path) else {
        return nil // Remember to alert user
    }
    return image
}
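For reference, a call site would look like this (the ids here are illustrative):

if let image = DocumentSaveManager.loadImageFromPath(moduleId: 1, imageId: 42) {
    // use the loaded image
}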
Related question:
If I create a UIImage 'image' from a JPEG file selection in iOS using
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
    let info = convertFromUIImagePickerControllerInfoKeyDictionary(info)
    if let image = info[convertFromUIImagePickerControllerInfoKey(UIImagePickerController.InfoKey.originalImage)] as? UIImage {
        // ...
    }
}
And then create a data object from the image using
imageData = image.jpegData(compressionQuality: 1.0)
and then add some properties using
func addImageProperties(imageData: Data, properties: CFDictionary) -> Data {
    if let source = CGImageSourceCreateWithData(imageData as CFData, nil) {
        if let uti = CGImageSourceGetType(source) {
            let destinationData = NSMutableData()
            if let destination = CGImageDestinationCreateWithData(destinationData, uti, 1, nil) {
                CGImageDestinationAddImageFromSource(destination, source, 0, properties)
                if CGImageDestinationFinalize(destination) == false {
                    return imageData // return input data if error
                }
                return destinationData as Data
            }
        }
    }
    return imageData // return input data if error
}
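A call might look like the following; the metadata keys chosen here are illustrative, not from the original code:

// Illustrative: attach an EXIF user comment to the JPEG data.
let exif = [kCGImagePropertyExifUserComment as String: "Edited copy"]
let properties = [kCGImagePropertyExifDictionary as String: exif] as CFDictionary
let taggedData = addImageProperties(imageData: imageData, properties: properties)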
and then save the data to a file using
func saveImageDataAsImage(_ data: Data) {
    var newImageIdentifier: String!

    PHPhotoLibrary.shared().performChanges({
        let assetRequest = PHAssetCreationRequest.forAsset()
        assetRequest.addResource(with: .photo, data: data, options: nil)
        newImageIdentifier = assetRequest.placeholderForCreatedAsset!.localIdentifier
    }, completionHandler: { success, error in
        DispatchQueue.main.async {
            if success, let newAsset = PHAsset.fetchAssets(withLocalIdentifiers: [newImageIdentifier], options: nil).firstObject {
                // ...
            } else {
                // ...
            }
        }
    })
}
The resulting file has twice the width and height in pixels of the original selected file. How can I retain the original file width and height while adding the required metadata?
The loaded image (with UIImage.scale = 1.0) was being processed before saving by the following code, which reset the UIImage.scale value to 2.0. Resetting UIImage.scale after this did not change the image.cgImage size, but changing the line let scale = UIScreen.main.scale to let scale: CGFloat = 1.0 in the code resulted in the saved image having the same pixel dimensions as the input image.
func textToImage(drawText: String, inImage: UIImage, atPoint: CGPoint, textColor: UIColor, textFont: UIFont, backColor: UIColor) -> UIImage {
    // Set up the image context using the passed image
    let scale = UIScreen.main.scale // change to let scale: CGFloat = 1.0 to fix the problem
    UIGraphicsBeginImageContextWithOptions(inImage.size, false, scale)

    // Set up the font attributes that dictate how the text is drawn
    let textFontAttributes: [NSAttributedString.Key: Any] = [
        .font: textFont,
        .foregroundColor: textColor,
        .backgroundColor: backColor
    ]

    // Draw the original image into a rectangle as large as the image itself
    inImage.draw(in: CGRect(x: 0, y: 0, width: inImage.size.width, height: inImage.size.height))

    // Draw the text into a rectangle anchored at the requested point
    let rect = CGRect(x: atPoint.x, y: atPoint.y, width: inImage.size.width, height: inImage.size.height)
    drawText.draw(in: rect, withAttributes: textFontAttributes)

    // Grab the composited image and end the context
    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return newImage ?? inImage
}
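To see why the scale line matters: a UIImage's pixel dimensions are its point size multiplied by its scale, so rendering a 1.0-scale image into a context created at screen scale (2.0 on Retina displays) doubles the pixel count in each dimension. A quick illustrative check:

// Illustrative: a UIImage's pixel dimensions are its point size times its scale.
func pixelSize(of image: UIImage) -> CGSize {
    return CGSize(width: image.size.width * image.scale,
                  height: image.size.height * image.scale)
}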
I have this code to convert a view to an image, but I want the image to be in JPEG format. What changes do I need to make to this code to get the image in JPEG format? I am not showing the view on the device.
extension UIView {
    func createTransparentImage() -> UIImage {
        let renderFormat = UIGraphicsImageRendererFormat.default()
        renderFormat.opaque = false
        self.isOpaque = false
        self.layer.isOpaque = true
        self.backgroundColor = UIColor.clear
        self.layer.backgroundColor = UIColor.clear.cgColor

        let renderer = UIGraphicsImageRenderer(size: bounds.size, format: renderFormat)
        return renderer.image { context in
            layer.render(in: context.cgContext)
        }
    }
}
With your extension you get back a UIImage; if you want, you can save it as JPEG or PNG by converting it to Data (NSData in Objective-C):
let viewSnapshot = view.createTransparentImage()

// Save as JPEG:
if let data = viewSnapshot.jpegData(compressionQuality: 0.8) {
    // "data" now holds the JPEG image at 80% quality
}

// Save as PNG:
if let data = viewSnapshot.pngData() {
    // "data" now holds the PNG data of the snapshot
}
If you wanted to save this image, you can do the following inside the if let data block:
let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
if let documentsDirectory = paths.first {
    let fileName = documentsDirectory.appendingPathComponent("myImage.jpg")
    try? data.write(to: fileName)
}
Given that you do not need a UIImage but only the JPEG data, just make a slight modification to your extension as follows:
extension UIView {
    func createImageData(quality: CGFloat = 0.8) -> Data {
        let renderFormat = UIGraphicsImageRendererFormat.default()
        renderFormat.opaque = false
        self.isOpaque = false
        self.layer.isOpaque = true
        self.backgroundColor = UIColor.clear
        self.layer.backgroundColor = UIColor.clear.cgColor

        let renderer = UIGraphicsImageRenderer(size: bounds.size, format: renderFormat)
        return renderer.jpegData(withCompressionQuality: quality, actions: { context in
            layer.render(in: context.cgContext)
        })
    }
}
Notice the final line was changed to:
return renderer.jpegData(withCompressionQuality: quality, actions: { context in
Thus to use this you can simply do:
let viewSnapshotData = view.createImageData(quality: 0.9)
As per LeoDabus, you can use the following extension to get a snapshot of a UIView with alpha transparency, if needed:
extension UIView {
    var snapshot: UIImage {
        isOpaque = false
        backgroundColor = .clear
        let format = UIGraphicsImageRendererFormat.default()
        format.opaque = false
        return UIGraphicsImageRenderer(size: bounds.size, format: format).image { _ in
            drawHierarchy(in: bounds, afterScreenUpdates: true)
        }
    }
}
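Usage is then a simple property access (the view name is illustrative):

let transparentSnapshot = myView.snapshot  // myView is any UIView instance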
extension UIView {
    func captureViewAsImage() -> UIImage? {
        UIGraphicsBeginImageContextWithOptions(layer.frame.size, false, UIScreen.main.scale)
        guard let context = UIGraphicsGetCurrentContext() else { return nil }
        layer.render(in: context)
        let screenshotImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return screenshotImage
    }
}
and use this function as follows:
let img = view.captureViewAsImage()
I am using a UITapGestureRecognizer to get coordinates in a UIImageView, then using those coordinates as the position at which to add text to the image. The problem is that the text is not added exactly where I touch the UIImageView.
Could you help me solve this problem or give me some suggestions?
@IBOutlet var imageView: UIImageView!

override func viewDidLoad() {
    super.viewDidLoad()
    let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
    imageView.image = UIImage(named: documentsPath + "/bach.jpg")

    let imageViewTapped = UITapGestureRecognizer(target: self, action: #selector(self.tapAction(_:)))
    imageViewTapped.numberOfTouchesRequired = 1
    imageViewTapped.delegate = self
    imageView.addGestureRecognizer(imageViewTapped)
}

@objc func tapAction(_ sender: UITapGestureRecognizer) {
    let point = sender.location(in: self.imageView)
    print("x \(point.x) y \(point.y)")
    let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
    let image = UIImage(named: documentsPath + "/bach.jpg")!
    let newImage = textToImage(drawText: "㊗️Hello", inImage: image, atPoint: CGPoint(x: point.x, y: point.y))
    imageView.image = newImage
}

// Add text to image
func textToImage(drawText text: NSString, inImage image: UIImage, atPoint point: CGPoint) -> UIImage {
    let textColor = UIColor.white
    let textFont = UIFont(name: "Helvetica Bold", size: 300)!
    let scale = UIScreen.main.scale
    UIGraphicsBeginImageContextWithOptions(image.size, false, scale)

    let textFontAttributes: [NSAttributedString.Key: Any] = [
        .font: textFont,
        .foregroundColor: textColor
    ]
    image.draw(in: CGRect(origin: .zero, size: image.size))
    text.draw(at: point, withAttributes: textFontAttributes)

    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return newImage ?? image
}
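For context, the mismatch usually comes from the tap point being in the image view's coordinate space while the text is drawn in the image's own, typically much larger, pixel space. A conversion sketch, assuming the image fills the view exactly (no aspect-fit letterboxing):

// Illustrative: map a tap point from view coordinates to image coordinates.
func imagePoint(from viewPoint: CGPoint, in imageView: UIImageView, for image: UIImage) -> CGPoint {
    let scaleX = image.size.width / imageView.bounds.width
    let scaleY = image.size.height / imageView.bounds.height
    return CGPoint(x: viewPoint.x * scaleX, y: viewPoint.y * scaleY)
}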
class ViewController1: UIViewController {
    let lblNew = UILabel()

    @IBOutlet weak var imageView: UIImageView!

    override func viewDidLoad() {
        super.viewDidLoad()
        let documentsPath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0]
        imageView.image = UIImage(named: documentsPath + "/bach.jpg")
        imageView.isUserInteractionEnabled = true

        let imageViewTapped = UITapGestureRecognizer(target: self, action: #selector(self.tapAction(_:)))
        imageViewTapped.numberOfTouchesRequired = 1
        imageViewTapped.delegate = self as? UIGestureRecognizerDelegate
        imageView.addGestureRecognizer(imageViewTapped)
    }

    @objc func tapAction(_ sender: UITapGestureRecognizer) {
        let point = sender.location(in: self.imageView)
        print("x \(point.x) y \(point.y)")
        textToImage(drawText: "㊗️Hello", atPoint: CGPoint(x: point.x, y: point.y))
    }

    // Add text to the image by overlaying a label at the tapped point
    func textToImage(drawText text: NSString, atPoint point: CGPoint) {
        lblNew.frame = CGRect(x: point.x, y: point.y, width: 200.0, height: 10)
        lblNew.textAlignment = .left
        lblNew.text = text as String
        lblNew.textColor = UIColor.white
        imageView.addSubview(lblNew)
    }
}
Code to save an image to the documents directory in PNG format. If you want to save it in JPEG format instead, just change the pngData() line to jpegData(compressionQuality: 0.9).
func saveImage() {
    if let image = UIImage(named: "test.png"),
       let data = image.pngData() {
        let filename = getDocumentsDirectory().appendingPathComponent("copy.png")
        try? data.write(to: filename)
    }
}

func getDocumentsDirectory() -> URL {
    let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
    return paths[0]
}
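For example, the JPEG variant would be:

// JPEG variant of saveImage: encode at 90% quality and use a .jpg filename.
func saveImageAsJPEG() {
    if let image = UIImage(named: "test.png"),
       let data = image.jpegData(compressionQuality: 0.9) {
        let filename = getDocumentsDirectory().appendingPathComponent("copy.jpg")
        try? data.write(to: filename)
    }
}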
Replace your saveImage method with the following code snippet.
func saveImage() {
    UIGraphicsBeginImageContextWithOptions(imageView.bounds.size, false, UIScreen.main.scale)
    imageView.layer.render(in: UIGraphicsGetCurrentContext()!)
    let image = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()

    if let data = image?.pngData() {
        let filename = getDocumentsDirectory().appendingPathComponent("copy.png")
        try? data.write(to: filename)
    }
}
I am using CIDetector to detect a face in a UIImage. I am getting the face rect correctly, but when I crop the image to the detected face rect, it does not show in my image view.
I have already checked that my image is not nil.
Here is my code:
@IBAction func detectFaceOnImageView(_: UIButton) {
    let image = myImageView.getFaceImage()
    myImageView.image = image
}

extension UIView {
    func getFaceImage() -> UIImage? {
        let faceDetectorOptions: [String: AnyObject] = [CIDetectorAccuracy: CIDetectorAccuracyHigh as AnyObject]
        let faceDetector: CIDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: faceDetectorOptions)!
        let viewScreenShotImage = generateScreenShot(scaleTo: 1.0)
        guard let cgImage = viewScreenShotImage.cgImage else { return nil }

        let sourceImage = CIImage(cgImage: cgImage)
        let features = faceDetector.features(in: sourceImage)
        guard !features.isEmpty else { return nil }

        var faceBounds = CGRect.zero
        var faceImage: UIImage?
        for feature in features as! [CIFaceFeature] {
            faceBounds = feature.bounds
            let faceCropped: CIImage = sourceImage.cropped(to: faceBounds)
            faceImage = UIImage(ciImage: faceCropped)
        }
        return faceImage
    }

    func generateScreenShot(scaleTo: CGFloat = 3.0) -> UIImage {
        let rect = self.bounds
        UIGraphicsBeginImageContextWithOptions(rect.size, false, 0.0)
        let context = UIGraphicsGetCurrentContext()
        self.layer.render(in: context!)
        let screenShotImage = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()

        let aspectRatio = screenShotImage.size.width / screenShotImage.size.height
        // scaleImage(toSize:) is a custom UIImage extension not shown in the question.
        let resizedScreenShotImage = screenShotImage.scaleImage(toSize: CGSize(width: self.bounds.size.height * aspectRatio * scaleTo,
                                                                               height: self.bounds.size.height * scaleTo))
        return resizedScreenShotImage!
    }
}
Try this. A UIImage created with init(ciImage:) is not backed by a bitmap, so a UIImageView will often fail to display it; render the cropped CIImage through a CIContext into a CGImage first:
let faceCropped: CIImage = sourceImage.cropped(to: faceBounds)
//faceImage = UIImage(ciImage: faceCropped)
let cgImage: CGImage = {
    let context = CIContext(options: nil)
    return context.createCGImage(faceCropped, from: faceCropped.extent)!
}()
faceImage = UIImage(cgImage: cgImage)
I'm having issues with a UIImage crashing my app whenever I run it. I think the images may somehow be corrupt. I ran the debugger and everything seems to run fine up until this line below:
let image = UIImage(named: photo!)?.decompressedImage
The image key shows up and everything runs as it should. It is the next line that seems to make the app crash:
self.init(caption: caption!, comment: comment!, image: image!)
I know I am force-unwrapping the image; however, I know that the image exists. Do you agree that the image may be corrupt? Is there a way to double-check this, or perhaps fix it? The images are in JPG format.
Code:
import UIKit

class Photo {
    class func allPhotos() -> [Photo] {
        var photos = [Photo]()
        if let url = Bundle.main.url(forResource: "Photos", withExtension: "plist") {
            if let photosFromPlist = NSArray(contentsOf: url) {
                for dictionary in photosFromPlist {
                    let photo = Photo(dictionary: dictionary as! NSDictionary)
                    photos.append(photo)
                }
            }
        }
        return photos
    }

    var caption: String
    var comment: String
    var image: UIImage

    init(caption: String, comment: String, image: UIImage) {
        self.caption = caption
        self.comment = comment
        self.image = image
    }

    convenience init(dictionary: NSDictionary) {
        let caption = dictionary["Caption"] as? String
        let comment = dictionary["Comment"] as? String
        let photo = dictionary["Photo"] as? String
        // decompressedImage is a custom UIImage extension not shown here.
        let image = UIImage(named: photo!)?.decompressedImage
        self.init(caption: caption!, comment: comment!, image: image!)
    }

    func heightForComment(font: UIFont, width: CGFloat) -> CGFloat {
        let rect = NSString(string: comment).boundingRect(with: CGSize(width: width, height: .greatestFiniteMagnitude),
                                                          options: .usesLineFragmentOrigin,
                                                          attributes: [.font: font],
                                                          context: nil)
        return ceil(rect.height)
    }
}
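As a way to double-check, one option is a failable variant of the convenience initializer that reports which value failed instead of crashing; this is an illustrative sketch, not the original author's code:

// Illustrative failable variant: surfaces the failing key instead of crashing.
convenience init?(dictionary: NSDictionary) {
    guard let caption = dictionary["Caption"] as? String,
          let comment = dictionary["Comment"] as? String,
          let photo = dictionary["Photo"] as? String else {
        print("Photo plist entry is missing a key: \(dictionary)")
        return nil
    }
    guard let image = UIImage(named: photo) else {
        print("UIImage(named:) could not load \(photo); check that the file exists and decodes")
        return nil
    }
    self.init(caption: caption, comment: comment, image: image)
}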