UIImage Orientation Swift (iOS)

I have written this code to capture an image using the AVFoundation library in Swift:
@IBAction func cameraButtonWasPressed(sender: AnyObject) {
    if let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo) {
        stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection) {
            (imageSampleBuffer: CMSampleBuffer!, _) in
            let imageDataJpeg = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(imageSampleBuffer)
            let pickedImage = UIImage(data: imageDataJpeg)!
            let library = ALAssetsLibrary()
            library.writeImageToSavedPhotosAlbum(pickedImage.CGImage,
                metadata: nil,
                completionBlock: nil)
        }
    }
}
It works fine, but when I go to the photo library the image shows up rotated 90 degrees counterclockwise.
Can someone give me a hint on where to dig to fix this?

Maybe this Swift code can help you.
// correctlyOrientedImage.swift
import UIKit

extension UIImage {
    // Redraws the image into a fresh context so the pixel data itself is
    // upright and imageOrientation becomes .Up.
    public func correctlyOrientedImage() -> UIImage {
        if self.imageOrientation == UIImageOrientation.Up {
            return self
        }
        UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
        self.drawInRect(CGRectMake(0, 0, self.size.width, self.size.height))
        let normalizedImage: UIImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return normalizedImage
    }
}
I saw it somewhere on Stack Overflow and incorporated it into my project as well.
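For example, in your capture callback you could normalize the image before saving it (a sketch reusing the variables from your question):
// Hypothetical usage inside the capture completion block from the question:
// redraw the pixels upright before handing the CGImage to ALAssetsLibrary.
let pickedImage = UIImage(data: imageDataJpeg)!.correctlyOrientedImage()
library.writeImageToSavedPhotosAlbum(pickedImage.CGImage,
    metadata: nil,
    completionBlock: nil)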

You should be using a slightly different writeImage method:
(1) get the orientation from the UIImage imageOrientation property (an enum), and cast it to ALAssetOrientation (an enum with the same Int values as UIImageOrientation)
let orientation = ALAssetOrientation(rawValue: pickedImage.imageOrientation.rawValue)!
(2) use a similar-but-different method on ALAssetsLibrary
library.writeImageToSavedPhotosAlbum(
    pickedImage.CGImage,
    orientation: orientation,
    completionBlock: nil)
This works for me in Objective-C. I have had a quick go at translating it to Swift (as above), but I am getting a compiler error:
Cannot invoke 'writeImageToSavedPhotosAlbum' with an argument list of type '(CGImage!, orientation: ALAssetOrientation, completionBlock: NilLiteralConvertible)'
Perhaps you can get it to compile (I don't have the time to construct a full AVFoundation pipeline in Swift to test this definitively).
If you can't get that to work, the other solution is to extract the EXIF metadata from the sample buffer and pass it through to the method you are already using:
library.writeImageToSavedPhotosAlbum(pickedImage.CGImage,
    metadata: nil,
    completionBlock: nil)
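Something along these lines might do it (a rough sketch only; I haven't tested it, and the exact Core Media bridging varies between Swift versions):
// Sketch: copy the EXIF/TIFF attachments (which include the orientation)
// straight off the sample buffer, then pass them as the metadata argument.
if let attachments = CMCopyDictionaryOfAttachments(kCFAllocatorDefault,
        imageSampleBuffer, CMAttachmentMode(kCMAttachmentMode_ShouldPropagate)) {
    let metadata = attachments.takeRetainedValue() as NSDictionary
    library.writeImageToSavedPhotosAlbum(pickedImage.CGImage,
        metadata: metadata as [NSObject : AnyObject],
        completionBlock: nil)
}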

Swift 5
extension UIImage {
    public func correctlyOrientedImage() -> UIImage {
        if self.imageOrientation == UIImage.Orientation.up {
            return self
        }
        UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
        self.draw(in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
        let normalizedImage = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return normalizedImage
    }
}
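An alternative sketch for iOS 10+ using UIGraphicsImageRenderer, which manages the bitmap context for you (normalizedWithRenderer is a placeholder name, not an API):
import UIKit

extension UIImage {
    // Sketch: UIGraphicsImageRenderer creates and tears down the drawing
    // context for us; the format preserves this image's scale.
    func normalizedWithRenderer() -> UIImage {
        guard imageOrientation != .up else { return self }
        let format = UIGraphicsImageRendererFormat()
        format.scale = scale
        return UIGraphicsImageRenderer(size: size, format: format).image { _ in
            draw(in: CGRect(origin: .zero, size: size))
        }
    }
}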

Related

Rotating UIImage for Google ML Vision framework on Swift 4

When an image gets captured, it defaults to left orientation. So when you feed it into the textDetector inside the Google Vision framework, the text comes back all jumbled, unless you take the photo oriented left (home button on the right). I want my app to support both orientations.
let visionImage = VisionImage(image: image)
self.textDetector?.detect(in: visionImage, completion: { (features, error) in
    // 2
    guard error == nil, let features = features, !features.isEmpty else {
        print("Could not recognize any text")
        self.dismiss(animated: true, completion: nil)
        return
    }
    // 3
    print("Detected Text Has \(features.count) Blocks:\n\n")
    for block in features {
        // 4
        print("\(block.text)\n\n")
    }
})
I have tried recreating the image with a new orientation, but that doesn't change anything.
Does anyone know what to do?
I have tried all of the suggestions here:
How to rotate image in Swift?
Try normalizing the UIImage object, using this function:
import UIKit

extension UIImage {
    public func normalizeImage() -> UIImage {
        if self.imageOrientation == UIImageOrientation.up {
            return self
        }
        UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
        self.draw(in: CGRect(x: 0, y: 0, width: self.size.width, height: self.size.height))
        let normalizedImage: UIImage = UIGraphicsGetImageFromCurrentImageContext()!
        UIGraphicsEndImageContext()
        return normalizedImage
    }
}
I hope this helps; I usually use this whenever there's an orientation problem.
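For example, with the detection code from the question (hypothetical usage; image and textDetector come from the question):
// Redraw the pixels upright first so the text detector sees the image
// the way the user saw it.
let visionImage = VisionImage(image: image.normalizeImage())
self.textDetector?.detect(in: visionImage, completion: { (features, error) in
    // ... handle the detected blocks as before
})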

Distorted CVImageBuffer to UIImage

I have the following function that converts a CVImageBuffer to a UIImage. The image that comes out is always a little bit distorted. I display the return value of this function in a UIImageView, which is set to 'aspect fill'. What gives?
private func convert(buffer: CVImageBuffer) -> UIImage? {
    let ciImage: CIImage = CIImage(cvPixelBuffer: buffer)
    let context: CIContext = CIContext(options: nil)
    if let cgImage: CGImage = context.createCGImage(ciImage, from: ciImage.extent) {
        return UIImage(cgImage: cgImage)
    }
    return nil
}
The CVImageBuffer doesn't contain orientation information; maybe that's why the final UIImage is distorted.
The default orientation of a CVImageBuffer is always landscape (as if the iPhone's Home button were on the right side), regardless of whether you capture the video in portrait or not.
So we need to add the correct orientation information to the image:
extension CIImage {
    func orientationCorrectedImage() -> UIImage? {
        // Map the current interface orientation to the orientation the
        // buffer's pixels should be displayed with.
        var imageOrientation = UIImageOrientation.up
        switch UIApplication.shared.statusBarOrientation {
        case UIInterfaceOrientation.portrait:
            imageOrientation = UIImageOrientation.right
        case UIInterfaceOrientation.landscapeLeft:
            imageOrientation = UIImageOrientation.down
        case UIInterfaceOrientation.landscapeRight:
            imageOrientation = UIImageOrientation.up
        case UIInterfaceOrientation.portraitUpsideDown:
            imageOrientation = UIImageOrientation.left
        default:
            break
        }

        var w = self.extent.size.width
        var h = self.extent.size.height
        // A 90-degree rotation swaps the drawing canvas's dimensions.
        if imageOrientation == .left || imageOrientation == .right || imageOrientation == .leftMirrored || imageOrientation == .rightMirrored {
            swap(&w, &h)
        }

        UIGraphicsBeginImageContext(CGSize(width: w, height: h))
        UIImage(ciImage: self, scale: 1.0, orientation: imageOrientation).draw(in: CGRect(x: 0, y: 0, width: w, height: h))
        let uiImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()
        return uiImage
    }
}
Then use it with your code:
private func convert(buffer: CVImageBuffer) -> UIImage? {
    let ciImage: CIImage = CIImage(cvPixelBuffer: buffer)
    return ciImage.orientationCorrectedImage()
}

CGImageCreateWithImageInRect() returning nil

I'm trying to crop an image into a square, but the line that actually performs the crop, the call to CGImageCreateWithImageInRect(), crashes. I set breakpoints and made sure that the arguments passed into this function are not nil.
I'm fairly new to programming and Swift, but have searched around and haven't found any solution to my problem.
The failure reason:
fatal error: unexpectedly found nil while unwrapping an Optional value
func cropImageToSquare(imageData: NSData) -> NSData {
    let image = UIImage(data: imageData)
    let contextImage: UIImage = UIImage(CGImage: image!.CGImage!)
    let contextSize: CGSize = contextImage.size
    let imageDimension: CGFloat = contextSize.height
    let posY: CGFloat = (contextSize.height + (contextSize.width - contextSize.height) / 2)
    let rect: CGRect = CGRectMake(0, posY, imageDimension, imageDimension)
    // error on the line below: fatal error: unexpectedly found nil while unwrapping an Optional value
    let imageRef: CGImageRef = CGImageCreateWithImageInRect(contextImage.CGImage, rect)!
    let croppedImage: UIImage = UIImage(CGImage: imageRef, scale: 1.0, orientation: image!.imageOrientation)
    let croppedImageData = UIImageJPEGRepresentation(croppedImage, 1.0)
    return croppedImageData!
}
Your code uses a lot of force-unwrapping with !s. I would recommend avoiding this — the compiler is trying to help you write code that won't crash. Use optional chaining with ?, and if let / guard let, instead.
The ! on that particular line is hiding an issue where CGImageCreateWithImageInRect might return nil. The documentation explains that this happens when the rect is not correctly inside the image bounds. Your code works for images in portrait orientation, but not landscape.
Furthermore, there's a convenient function provided by AVFoundation which can automatically find the right rectangle for you to use, called AVMakeRectWithAspectRatioInsideRect. No need to do the calculations manually :-)
Here's what I would recommend:
import AVFoundation

extension UIImage
{
    func croppedToSquare() -> UIImage
    {
        guard let cgImage = self.CGImage else { return self }

        // Note: self.size depends on self.imageOrientation, so we use
        // CGImageGetWidth/Height here.
        let boundingRect = CGRect(
            x: 0, y: 0,
            width: CGImageGetWidth(cgImage),
            height: CGImageGetHeight(cgImage))

        // Crop to square (1:1 aspect ratio) and round the resulting
        // rectangle to integer coordinates.
        var cropRect = AVMakeRectWithAspectRatioInsideRect(CGSize(width: 1, height: 1), boundingRect)
        cropRect.origin.x = ceil(cropRect.origin.x)
        cropRect.origin.y = ceil(cropRect.origin.y)
        cropRect.size.width = floor(cropRect.size.width)
        cropRect.size.height = floor(cropRect.size.height)

        guard let croppedImage = CGImageCreateWithImageInRect(cgImage, cropRect) else {
            assertionFailure("cropRect \(cropRect) was not inside \(boundingRect)")
            return self
        }

        return UIImage(CGImage: croppedImage, scale: self.scale, orientation: self.imageOrientation)
    }
}

// then:
let croppedImage = myUIImage.croppedToSquare()

Tesseract OCR w/ iOS & Swift returns error or gibberish

I used this tutorial to get Tesseract OCR working with Swift: http://www.piterwilson.com/blog/2014/10/18/minimal-tesseact-ocr-setup-in-swift/
It works fine if I upload the demo image and call
tesseract.image = UIImage(named: "image_sample.jpg");
But if I use my camera code and take a picture of that same image and call
tesseract.image = self.image.blackAndWhite();
the result is either gibberish like
s I 5E251 :Ec
‘-. —7.//:E*髧
a g :_{:7 IC‘
J 7 iii—1553‘
: fizzle —‘;-—:
; ~:~./: -:-‘-
‘- :~£:': _-'~‘:
: 37%; §:‘—_
: ::::E 7,;.
1f:,:~ ——,
Or it returns an EXC_BAD_ACCESS error. I haven't been able to pin down why it gives the error or the gibberish. This is the code of my camera capture (photoTaken()) and the processing step (nextStepTapped()):
@IBAction func photoTaken(sender: UIButton) {
    let videoConnection = stillImageOutput.connectionWithMediaType(AVMediaTypeVideo)
    if videoConnection != nil {
        // Show next step button
        self.view.bringSubviewToFront(self.nextStep)
        self.nextStep.hidden = false
        // Secure image
        stillImageOutput.captureStillImageAsynchronouslyFromConnection(videoConnection) {
            (imageDataSampleBuffer, error) -> Void in
            let imageData = AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(imageDataSampleBuffer)
            self.image = UIImage(data: imageData)
            //var dataProvider = CGDataProviderCreateWithCFData(imageData)
            //var cgImageRef = CGImageCreateWithJPEGDataProvider(dataProvider, nil, true, kCGRenderingIntentDefault)
            //self.image = UIImage(CGImage: cgImageRef, scale: 1.0, orientation: UIImageOrientation.Right)
        }
        // Freeze camera preview
        captureSession.stopRunning()
    }
}

@IBAction func nextStepTapped(sender: UIButton) {
    // Save to camera roll & proceed
    //UIImageWriteToSavedPhotosAlbum(self.image.blackAndWhite(), nil, nil, nil)
    //UIImageWriteToSavedPhotosAlbum(self.image, nil, nil, nil)
    // OCR
    let tesseract: Tesseract = Tesseract()
    tesseract.language = "eng"
    tesseract.delegate = self
    tesseract.image = self.image.blackAndWhite()
    tesseract.recognize()
    NSLog("%@", tesseract.recognizedText)
}
The image saves to the Camera Roll and is completely legible if I uncomment the commented lines. Not sure why it won't work. It has no problem reading the text on the image if it's uploaded directly into Xcode as a supporting file, but if I take a picture of the exact same image on my screen then it can't read it.
Stumbled upon this tutorial: http://www.raywenderlich.com/93276/implementing-tesseract-ocr-ios
It happened to mention scaling the image; they chose 640 as the max dimension. I was taking my pictures at 640x480, so I figured I didn't need to scale them, but I think this code essentially redraws the image. For some reason my photos now OCR fairly well. I still need to work on image processing for smaller text, but it works perfectly for large text. I run my image through this scaling function and I'm good to go:
func scaleImage(image: UIImage, maxDimension: CGFloat) -> UIImage {
    var scaledSize = CGSize(width: maxDimension, height: maxDimension)
    var scaleFactor: CGFloat

    if image.size.width > image.size.height {
        scaleFactor = image.size.height / image.size.width
        scaledSize.width = maxDimension
        scaledSize.height = scaledSize.width * scaleFactor
    } else {
        scaleFactor = image.size.width / image.size.height
        scaledSize.height = maxDimension
        scaledSize.width = scaledSize.height * scaleFactor
    }

    UIGraphicsBeginImageContext(scaledSize)
    image.drawInRect(CGRectMake(0, 0, scaledSize.width, scaledSize.height))
    let scaledImage = UIGraphicsGetImageFromCurrentImageContext()
    UIGraphicsEndImageContext()
    return scaledImage
}
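For example, in my nextStepTapped() above that becomes (a sketch; 640 is the max dimension the tutorial uses, and blackAndWhite() is my own helper from the question):
// Scale (and thereby redraw) the photo before handing it to Tesseract.
tesseract.image = scaleImage(self.image.blackAndWhite(), maxDimension: 640)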

Custom CIFilter subclass returns image with scale out of whack

I'm new to writing CIFilters, and I'm stuck on this problem. Here is my source image being displayed in a UIImageView with contentMode set to Aspect Fit:
Here is the image returned from my CIFilter object being displayed in the same UIImageView:
I've tried copying over the original scale and orientation from my source image to the UIImage being constructed from the CIImage returned from the filter with no luck.
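Roughly what I tried, as a sketch (filter, sourceImage and imageView are placeholders for my actual objects):
// Rebuild the UIImage from the filter's CIImage output, carrying over
// the source image's scale and orientation (this didn't fix it).
if let output = filter.outputImage() {
    imageView.image = UIImage(CIImage: output,
        scale: sourceImage.scale,
        orientation: sourceImage.imageOrientation)
}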
What might be causing this?
I'm thinking I'm doing something wrong in my CIFilter class. I am starting to suspect something in my outputImage method:
func outputImage() -> CIImage? {
    if let inputImage = inputImage {
        if let kernel = kernel {
            let args = [inputImage as AnyObject]
            let dod = inputImage.extent().rectByInsetting(dx: -1, dy: -1)
            return kernel.applyWithExtent(dod, roiCallback: {
                (index, rect) in
                return rect.rectByInsetting(dx: -1, dy: -1)
            }, arguments: args)
        }
    }
    return nil
}
