Image data rotating 45 degrees after resizing - ios

I have a snippet of code that utilizes the ImageIO library to resize an image to make it smaller when uploading to Parse. Upon uploading, the picture becomes rotated 45 degrees counterclockwise.
let imageBytes = UIImageJPEGRepresentation(imageView.image, 1.0)
let size = CGSizeMake(1024, 1024)
if let imageSource = CGImageSourceCreateWithData(imageBytes, nil) {
    let options: [NSString: AnyObject] = [
        kCGImageSourceThumbnailMaxPixelSize: NSNumber(double: Double(max(size.width, size.height)) / 2.0),
        kCGImageSourceCreateThumbnailFromImageIfAbsent: true,
        kCGImageSourceCreateThumbnailWithTransform: false
    ]
    // 2. Recreate the image that has been scaled
    let scaledImage = UIImage(CGImage: CGImageSourceCreateThumbnailAtIndex(imageSource, 0, options))
    let scaledImageBytes = UIImageJPEGRepresentation(scaledImage, 1.0)
    let metaData = [
        "imageBytes": scaledImageBytes, // for some reason, scaledImageBytes causes a 90 degree rotation on the picture. Using unscaled image for now.
    ]
    // 3. Upload the image to Parse
    PFCloud.callFunctionInBackground("uploadImage", withParameters: metaData) { message, error in
        if let error = error {
            println("error uploading: \(error)")
        } else if let message = message as? String {
            println("success: \(message)")
        }
    }
}
How can I resolve that problem?

Check this function to normalize your image's orientation before resizing:
func normalizeImage(raw: UIImage) -> UIImage {
    if raw.imageOrientation == UIImageOrientation.Up {
        return raw
    }
    UIGraphicsBeginImageContextWithOptions(raw.size, false, raw.scale)
    raw.drawInRect(CGRect(origin: CGPointZero, size: raw.size))
    let normalizedImage: UIImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return normalizedImage
}
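The rotation happens because JPEG data carries an EXIF orientation flag that UIImage(CGImage:) ignores. Alternatively, you can let ImageIO apply the orientation itself while it builds the thumbnail; a minimal sketch in current Swift, assuming you start from the JPEG Data:

import ImageIO
import UIKit

func makeThumbnail(from data: Data, maxPixelSize: CGFloat) -> UIImage? {
    guard let source = CGImageSourceCreateWithData(data as CFData, nil) else { return nil }
    let options: [CFString: Any] = [
        // Always generate a fresh thumbnail rather than reusing an embedded one
        kCGImageSourceCreateThumbnailFromImageAlways: true,
        // true makes ImageIO rotate the thumbnail to match the EXIF orientation
        kCGImageSourceCreateThumbnailWithTransform: true,
        kCGImageSourceThumbnailMaxPixelSize: maxPixelSize
    ]
    guard let cgImage = CGImageSourceCreateThumbnailAtIndex(source, 0, options as CFDictionary) else { return nil }
    return UIImage(cgImage: cgImage)
}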

Related

How to make .accurate request for CoreML Segmentation

I'm working on a background removal app, where I'm doing the task with this function, but it is not very accurate. I found that I'm not requesting accurate output here. I need to add these two lines to my function.
The two lines:
var segmentationRequest = VNGeneratePersonSegmentationRequest()
segmentationRequest.qualityLevel = .accurate
My function:
func removeBackground(image: UIImage) -> UIImage? {
    let resizedImage = image.resized(to: CGSize(width: 513, height: 513))
    if let pixelBuffer = resizedImage.pixelBuffer(width: Int(resizedImage.size.width),
                                                  height: Int(resizedImage.size.height)) {
        if let outputImage = (try? modelCore.prediction(image: pixelBuffer))?
            .semanticPredictions.image(min: 0, max: 1, axes: (0, 0, 1)),
           let outputCIImage = CIImage(image: outputImage) {
            if let maskImage = removeWhitePixels(image: outputCIImage),
               let resizedCIImage = CIImage(image: resizedImage),
               let compositedImage = composite(image: resizedCIImage, mask: maskImage) {
                return UIImage(ciImage: compositedImage)
                    .resized(to: CGSize(width: image.size.width, height: image.size.height))
            }
        }
    }
    return nil
}
You can use the .accurate quality level in your request like this. Keep in mind that .accurate takes noticeably longer to produce output, and I believe the quality level applies only to Vision's built-in request, not to custom models.
func generatePhoto(image: Image?) {
    // Generate an instance of the request
    let request = VNGeneratePersonSegmentationRequest()
    guard
        // Convert the input image to a CGImage
        let originalImage = image?.foregroundImage.cgImage else {
        print("missing required image")
        return
    }
    // Set the quality level
    request.qualityLevel = .accurate
    request.revision = VNGeneratePersonSegmentationRequestRevision1
    request.outputPixelFormat = kCVPixelFormatType_OneComponent8
    // Create the request handler
    let requestHandler = VNImageRequestHandler(cgImage: originalImage, options: [:])
    do {
        // Process the request
        try requestHandler.perform([request])
        guard let mask = request.results?.first else {
            print("error")
            return
        }
        // Convert the pixel buffer to a CIImage to get the mask image
        let maskImage = CIImage(cvPixelBuffer: mask.pixelBuffer)
        print(maskImage)
        self.mskImage = maskImage
        let foreground = CIImage(cgImage: originalImage).oriented(.up)
        guard let output = blendImages(foreground: foreground, mask: maskImage) else {
            print("Error4")
            return
        }
        // Update photoOutput
        print(output)
        // Convert the CIImage to a UIImage
        if let photoResult = renderAsUIImage(output) {
            print("output", photoResult.size)
            self.outputImage = photoResult
            print(outputImage)
        }
    } catch {
        print("Error processing person segmentation request")
    }
}
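blendImages(foreground:mask:) isn't shown above; a minimal sketch of one way to implement it with Core Image's built-in blend-with-mask filter (upscaling the mask first is an assumption, since the segmentation mask is usually smaller than the source image):

import CoreImage
import CoreImage.CIFilterBuiltins

// Hypothetical implementation of the blendImages(foreground:mask:) helper used above.
func blendImages(foreground: CIImage, mask: CIImage) -> CIImage? {
    // Scale the mask up so it covers the foreground's extent.
    let scaleX = foreground.extent.width / mask.extent.width
    let scaleY = foreground.extent.height / mask.extent.height
    let scaledMask = mask.transformed(by: CGAffineTransform(scaleX: scaleX, y: scaleY))

    let filter = CIFilter.blendWithMask()
    filter.inputImage = foreground
    filter.maskImage = scaledMask
    // No background image: everything outside the mask becomes transparent.
    return filter.outputImage
}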

CGImageDestinationCreateWithData doubles pixel dimension

If I create a UIImage 'image' from a JPEG file selection in iOS using
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey: Any]) {
    let info = convertFromUIImagePickerControllerInfoKeyDictionary(info)
    if let image = info[convertFromUIImagePickerControllerInfoKey(UIImagePickerController.InfoKey.originalImage)] as? UIImage {
    }
}
And then create a data object from image using
imageData = image.jpegData(compressionQuality:1.0)
and then add some properties using
func addImageProperties(imageData: Data, properties: CFDictionary) -> Data {
    if let source = CGImageSourceCreateWithData(imageData as CFData, nil) {
        if let uti = CGImageSourceGetType(source) {
            let destinationData = NSMutableData()
            if let destination = CGImageDestinationCreateWithData(destinationData, uti, 1, nil) {
                CGImageDestinationAddImageFromSource(destination, source, 0, properties)
                if CGImageDestinationFinalize(destination) == false {
                    return imageData // return input data if error
                }
                return destinationData as Data
            }
        }
    }
    return imageData // return input data if error
}
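For example, calling it with an EXIF user comment (the keys are standard ImageIO constants; the comment value here is just a placeholder):

let exif: [CFString: Any] = [kCGImagePropertyExifUserComment: "placeholder comment"]
let properties: [CFString: Any] = [kCGImagePropertyExifDictionary: exif]
let taggedData = addImageProperties(imageData: imageData, properties: properties as CFDictionary)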
and then save the data to a file using
func saveImageDataAsImage(_ data: Data) {
    var newImageIdentifier: String!
    PHPhotoLibrary.shared().performChanges {
        let assetRequest = PHAssetCreationRequest.forAsset()
        assetRequest.addResource(with: .photo, data: data, options: nil)
        newImageIdentifier = assetRequest.placeholderForCreatedAsset!.localIdentifier
    } completionHandler: { (success, error) in
        DispatchQueue.main.async {
            if success, let newAsset = PHAsset.fetchAssets(withLocalIdentifiers: [newImageIdentifier], options: nil).firstObject {
                // ...
            } else {
                // ...
            }
        }
    }
}
The resulting file has twice the width and height in pixels of the original selected file. How can I retain the original file width and height while adding the required metadata?
The loaded image (with UIImage.scale = 1.0) was being processed before saving by the following code, which reset the UIImage.scale value to 2.0. Resetting UIImage.scale afterwards did not change the image.cgImage size, but changing the line let scale = UIScreen.main.scale to let scale: CGFloat = 1.0 resulted in the saved image having the same pixel dimensions as the input image.
func textToImage(drawText: String, inImage: UIImage, atPoint: CGPoint, textColor: UIColor, textFont: UIFont, backColor: UIColor) -> UIImage {
    // Set up the image context using the passed image
    let scale = UIScreen.main.scale // change to let scale: CGFloat = 1.0 to fix the problem
    UIGraphicsBeginImageContextWithOptions(inImage.size, false, scale)
    // Set up the font attributes that dictate how the text should be drawn
    let textFontAttributes = [
        NSAttributedString.Key.font: textFont,
        NSAttributedString.Key.foregroundColor: textColor,
        NSAttributedString.Key.backgroundColor: backColor,
    ]
    // Draw the image into a rectangle as large as the original image
    inImage.draw(in: CGRect(x: 0, y: 0, width: inImage.size.width, height: inImage.size.height))
    // Create a rect within the space that is as big as the image
    let rect = CGRect(x: atPoint.x, y: atPoint.y, width: inImage.size.width, height: inImage.size.height)
    // Draw the text into the image
    drawText.draw(in: rect, withAttributes: textFontAttributes)
    // Create a new image out of what we have drawn
    let newImage = UIGraphicsGetImageFromCurrentImageContext()
    // End the context now that we have the image we need
    UIGraphicsEndImageContext()
    // Pass the image back up to the caller
    return newImage!
}
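If you can target iOS 10 or later, the same fix falls out naturally from UIGraphicsImageRenderer, where the scale is explicit rather than inherited from the screen; a sketch replacing the context setup and drawing above:

let format = UIGraphicsImageRendererFormat()
// Explicit 1.0 scale: the output bitmap keeps the input image's pixel
// dimensions instead of multiplying by the screen scale.
format.scale = 1.0
let renderer = UIGraphicsImageRenderer(size: inImage.size, format: format)
let newImage = renderer.image { _ in
    inImage.draw(in: CGRect(origin: .zero, size: inImage.size))
    drawText.draw(in: rect, withAttributes: textFontAttributes)
}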

How to get thumbnail and original image from UIImagePickerController?

After capturing a photo from the camera, I was compressing the image (to 400 KB and 1 MB); it took almost 3 seconds on an iPhone 6 and less than a second on an iPhone 6s.
Is there any way to get a thumbnail and the original image without doing manual compression?
Code used for image compression:
Extension for UIImage:
extension UIImage {
    // MARK: - UIImage+Resize
    func compressTo(_ expectedSizeInMb: Int) -> Data? {
        let sizeInBytes = expectedSizeInMb * 1024 * 1024
        var needCompress: Bool = true
        var imgData: Data?
        var compressingValue: CGFloat = 1.0
        while needCompress && compressingValue > 0.0 {
            if let data: Data = jpegData(compressionQuality: compressingValue) {
                if data.count < sizeInBytes {
                    needCompress = false
                    imgData = data
                } else {
                    compressingValue -= 0.1
                }
            }
        }
        if let data = imgData, data.count < sizeInBytes {
            return data
        }
        return nil
    }
}
Usage:
if let imageData = image.compressTo(1) {
    print(imageData)
}
For images saved in the Photos Library, try:
let phAsset = info[UIImagePickerController.InfoKey.phAsset] as! PHAsset
let options = PHImageRequestOptions()
options.deliveryMode = .fastFormat
options.isSynchronous = false
// You can change the target size to CGSize(width: Int, height: Int), any size you want.
PHImageManager.default().requestImage(for: phAsset, targetSize: PHImageManagerMaximumSize, contentMode: .default, options: options, resultHandler: { image, _ in
    let thumbnail = image
    // use your thumbnail
})
For images captured from the camera, you can get the pixel dimensions without recalculating the data count:
let image = info[UIImagePickerController.InfoKey.originalImage] as! UIImage
// Pixel dimensions are the same for each device's camera
let widthPixels = image.size.width * image.scale
let heightPixels = image.size.height * image.scale
let sizeInBytes = 1024 * 1024
var thumbnail: UIImage! = nil
if Int(widthPixels * heightPixels) > sizeInBytes {
    // Assign the custom width and height you need
    let rect = CGRect(x: 0.0, y: 0.0, width: 100, height: 100)
    UIGraphicsBeginImageContextWithOptions(rect.size, false, 1)
    let context = UIGraphicsGetCurrentContext()
    context?.interpolationQuality = .low
    image.draw(in: rect)
    let resizedImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    thumbnail = resizedImage
} else {
    thumbnail = image
}
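On iOS 15 and later, UIKit can also produce the thumbnail for you in a single decode pass; a minimal sketch, assuming the 100 × 100 target from above:

let image = info[UIImagePickerController.InfoKey.originalImage] as! UIImage
// preparingThumbnail(of:) decodes and downsamples in one step, avoiding
// the repeated jpegData(compressionQuality:) loop entirely.
let thumbnail = image.preparingThumbnail(of: CGSize(width: 100, height: 100))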

How to set the width and height of an image converted from a base64 string?

I have a question about converting a UIImage to a base64 string. I already resized my image's width and height (1024 x 768), then converted the image to a base64 string. But when I turn that base64 string back into a UIImage, its width and height are not what I resized it to before (2304.0 x 3072.0). How can I make the image in the base64 string keep the correct size?
guard let image = image else { return }
print("image => \(image)")
// image => <UIImage:0x283cce7f0 anonymous {768, 1024}>
guard let base64ImageString = image.toBase64(format: .jpeg(0.2)) else { return }
let dataDecoded: Data = Data(base64Encoded: base64ImageString, options: .ignoreUnknownCharacters)!
let decodedimage = UIImage(data: dataDecoded)
let height = decodedimage?.size.height
let width = decodedimage?.size.width
print("====) \(String(describing: width)) / \(String(describing: height))")
// ====) Optional(2304.0) / Optional(3072.0)
public enum ImageFormat {
    case png
    case jpeg(CGFloat)
}

extension UIImage {
    public func toBase64(format: ImageFormat) -> String? {
        var imageData: Data?
        switch format {
        case .png:
            imageData = self.pngData()
        case .jpeg(let compression):
            imageData = self.jpegData(compressionQuality: compression)
        }
        return imageData?.base64EncodedString()
    }
}
Try this: the code below resizes your image and should run before you convert it to base64. The key is the context's scale of 1.0. Your earlier resize most likely used the screen scale (3.0 here), so the 768 × 1024-point image was actually 2304 × 3072 pixels, and a UIImage decoded from data reports its size in pixels.
guard let image = image else { return }
print("image => \(image)")
// image => <UIImage:0x283cce7f0 anonymous {768, 1024}>
let desiredSize = CGSize(width: 768, height: 1024)
UIGraphicsBeginImageContextWithOptions(desiredSize, false, 1.0)
image.draw(in: CGRect(origin: .zero, size: desiredSize))
let resizedImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
guard let base64ImageString = resizedImage?.toBase64(format: .jpeg(0.2)) else { return }
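To confirm the fix, decode the string again; because the context scale was 1.0, the decoded image now reports its size in real pixels:

if let data = Data(base64Encoded: base64ImageString),
   let decoded = UIImage(data: data) {
    print(decoded.size) // now (768.0, 1024.0) instead of (2304.0, 3072.0)
}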

CIDetector: detected face image is not showing?

I am using CIDetector to detect a face in a UIImage. I am getting the face rect correctly, but when I crop the image to the detected face rect, it is not showing in my image view.
I have already checked that my image is not nil.
Here is my code:
@IBAction func detectFaceOnImageView(_: UIButton) {
    let image = myImageView.getFaceImage()
    myImageView.image = image
}

extension UIView {
    func getFaceImage() -> UIImage? {
        let faceDetectorOptions: [String: AnyObject] = [CIDetectorAccuracy: CIDetectorAccuracyHigh as AnyObject]
        let faceDetector: CIDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: faceDetectorOptions)!
        let viewScreenShotImage = generateScreenShot(scaleTo: 1.0)
        if viewScreenShotImage.cgImage != nil {
            let sourceImage = CIImage(cgImage: viewScreenShotImage.cgImage!)
            let features = faceDetector.features(in: sourceImage)
            if features.count > 0 {
                var faceBounds = CGRect.zero
                var faceImage: UIImage?
                for feature in features as! [CIFaceFeature] {
                    faceBounds = feature.bounds
                    let faceCroped: CIImage = sourceImage.cropping(to: faceBounds)
                    faceImage = UIImage(ciImage: faceCroped)
                }
                return faceImage
            } else {
                return nil
            }
        } else {
            return nil
        }
    }

    func generateScreenShot(scaleTo: CGFloat = 3.0) -> UIImage {
        let rect = self.bounds
        UIGraphicsBeginImageContextWithOptions(rect.size, false, 0.0)
        let context = UIGraphicsGetCurrentContext()
        self.layer.render(in: context!)
        let screenShotImage = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        let aspectRatio = screenShotImage.size.width / screenShotImage.size.height
        let resizedScreenShotImage = screenShotImage.scaleImage(toSize: CGSize(width: self.bounds.size.height * aspectRatio * scaleTo, height: self.bounds.size.height * scaleTo))
        return resizedScreenShotImage!
    }
}
For more information, I am attaching screenshots of the values (Screenshot 1, Screenshot 2, Screenshot 3).
Try this. A UIImage created directly from a CIImage has no bitmap backing, so a UIImageView often cannot display it; render the CIImage into a CGImage through a CIContext first:
let faceCroped: CIImage = sourceImage.cropping(to: faceBounds)
// faceImage = UIImage(ciImage: faceCroped)
let cgImage: CGImage = {
    let context = CIContext(options: nil)
    return context.createCGImage(faceCroped, from: faceCroped.extent)!
}()
faceImage = UIImage(cgImage: cgImage)
