Camera view rotate 90 degree in Swift - ios

I'm making a customized camera app in Swift.
But when I try to access the camera, the view in the camera preview is rotated 90 degrees. I tried to find a solution; one solution I found was adding a fixOrientation function to fix the view, but it is not working...
Here is my full code:
/// Name of the Core Image hue-rotation filter.
let CIHueAdjust = "CIHueAdjust"
/// A CIHueAdjust filter pre-configured with a fixed hue angle of 1.24 radians.
let CIHueAdjustFilter = CIFilter(name: CIHueAdjust, withInputParameters: ["inputAngle" : 1.24])
/// Display-name -> filter lookup backing the segmented control.
let Filters = [CIHueAdjust: CIHueAdjustFilter]
/// Filter names in a stable sorted order for the UI segments.
let FilterNames = [String](Filters.keys).sort()
/// Live camera screen: streams video frames, runs each frame through the
/// Core Image filter selected in a segmented control, and shows the filtered
/// result in an image view.
/// NOTE(review): written against Swift 2 / iOS 9-era AVFoundation APIs.
class LiveCamViewController : UIViewController,AVCaptureVideoDataOutputSampleBufferDelegate{
// Vertical stack containing the filtered preview and the filter picker.
let mainGroup = UIStackView()
// Displays the filtered frames produced in the sample-buffer callback below.
let imageView = UIImageView(frame: CGRectZero)
// One segment per entry in the global FilterNames list.
let filtersControl = UISegmentedControl(items: FilterNames)
/// Builds the UI and starts an AVCaptureSession that delivers sample
/// buffers to this controller on a private serial queue.
override func viewDidLoad()
{
super.viewDidLoad()
view.addSubview(mainGroup)
mainGroup.axis = UILayoutConstraintAxis.Vertical
mainGroup.distribution = UIStackViewDistribution.Fill
mainGroup.addArrangedSubview(imageView)
mainGroup.addArrangedSubview(filtersControl)
imageView.contentMode = UIViewContentMode.ScaleAspectFit
filtersControl.selectedSegmentIndex = 0
let captureSession = AVCaptureSession()
captureSession.sessionPreset = AVCaptureSessionPresetPhoto
// Default video device; despite the variable name this is whatever device
// the system returns for video - TODO confirm the back camera is intended.
let backCamera = AVCaptureDevice.defaultDeviceWithMediaType(AVMediaTypeVideo)
do
{
let input = try AVCaptureDeviceInput(device: backCamera)
captureSession.addInput(input)
}
catch
{
print("can't access camera")
return
}
//get captureOutput invoked
// NOTE(review): this preview layer is added without a frame and duplicates
// the filtered imageView preview - confirm it is actually needed.
let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
view.layer.addSublayer(previewLayer)
let videoOutput = AVCaptureVideoDataOutput()
// Frames are delivered on this private serial queue, not the main queue.
videoOutput.setSampleBufferDelegate(self, queue: dispatch_queue_create("sample buffer delegate", DISPATCH_QUEUE_SERIAL))
if captureSession.canAddOutput(videoOutput)
{
captureSession.addOutput(videoOutput)
}
captureSession.startRunning()
}
/// Per-frame callback (background queue): applies the currently selected
/// filter, normalizes orientation, and pushes the result to the image view
/// on the main queue.
func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, fromConnection connection: AVCaptureConnection!)
{
guard let filter = Filters[FilterNames[filtersControl.selectedSegmentIndex]] else
{
return
}
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
let cameraImage = CIImage(CVPixelBuffer: pixelBuffer!)
filter!.setValue(cameraImage, forKey: kCIInputImageKey)
let filteredImage = UIImage(CIImage: filter!.valueForKey(kCIOutputImageKey) as! CIImage!)
// NOTE(review): per the question text, execution only reaches the .Up
// early-return inside fixOrientation - a UIImage built straight from a
// CIImage reports .Up, so this call returns the image unchanged.
let fixedImage = fixOrientation(filteredImage)
dispatch_async(dispatch_get_main_queue())
{
self.imageView.image = fixedImage
}
}
/// Redraws `image` so its pixels are upright, based on image.imageOrientation.
/// Returns the image unchanged when the orientation is already .Up.
func fixOrientation(image: UIImage) -> UIImage {
if (image.imageOrientation == UIImageOrientation.Up) {
return image;
}
print(image.imageOrientation)
// Step 1: rotation/translation that makes the raw pixels upright.
var transform = CGAffineTransformIdentity
switch (image.imageOrientation) {
case .Down, .DownMirrored:
transform = CGAffineTransformTranslate(transform, image.size.width, image.size.height)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI))
break
case .Left, .LeftMirrored:
transform = CGAffineTransformTranslate(transform, image.size.width, 0)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI_2))
break
case .Right, .RightMirrored:
transform = CGAffineTransformTranslate(transform, 0, image.size.height)
transform = CGAffineTransformRotate(transform, CGFloat(-M_PI_2))
break
case .Up, .UpMirrored:
break
}
// Step 2: horizontal flip for the mirrored variants.
switch (image.imageOrientation) {
case .UpMirrored, .DownMirrored:
transform = CGAffineTransformTranslate(transform, image.size.width, 0)
transform = CGAffineTransformScale(transform, -1, 1)
break
case .LeftMirrored, .RightMirrored:
transform = CGAffineTransformTranslate(transform, image.size.height, 0)
transform = CGAffineTransformScale(transform, -1, 1)
break
case .Up, .Down, .Left, .Right:
break
}
//Draw the underlying CGImage into a new context, applying the transform
let ctx = CGBitmapContextCreate(nil, Int(image.size.width), Int(image.size.height), CGImageGetBitsPerComponent(image.CGImage), 0, CGImageGetColorSpace(image.CGImage), UInt32(CGImageGetBitmapInfo(image.CGImage).rawValue))
CGContextConcatCTM(ctx, transform);
switch (image.imageOrientation) {
case .Left, .LeftMirrored, .Right, .RightMirrored:
// 90-degree-rotated sources: swap width/height when drawing.
CGContextDrawImage(ctx, CGRectMake(0, 0, image.size.height, image.size.width), image.CGImage)
break
default:
CGContextDrawImage(ctx, CGRectMake(0, 0, image.size.width, image.size.height), image.CGImage)
break
}
let cgimg = CGBitmapContextCreateImage(ctx)
let img = UIImage(CGImage:cgimg!)
return img
}
/// Pins the stack view to a hard-coded frame - TODO replace with constraints.
override func viewDidLayoutSubviews()
{
mainGroup.frame = CGRect(x: 37, y: 115, width: 301, height: 481)
}
}
I set a breakpoint to test, the code seems only run until
if (image.imageOrientation == UIImageOrientation.Up) {
return image;
}
then it returns the same view...
Can anyone help me?
Thanks!!!

In capture() where you get your CIImage from CMSampleBuffer do this after:
yourCIImage = yourCIImage.applyingOrientation(6)
Change the number from 1-8 depending on how you want to rotate and mirror.
https://developer.apple.com/documentation/imageio/kcgimagepropertyorientation

For those who need the code from #LoVo in Swift 3:
/// Redraws `imageSource` so its pixels are upright (orientation `.up`) and,
/// when either side exceeds `maxResolution`, scales it down proportionally.
/// - Parameters:
///   - imageSource: Image whose EXIF orientation should be baked into the pixels.
///   - maxResolution: Upper bound for the longer output side, in pixels.
/// - Returns: The re-oriented (and possibly downscaled) image, or `imageSource`
///   unchanged when no backing CGImage or graphics context is available.
func rotateCameraImageToProperOrientation(imageSource : UIImage, maxResolution : CGFloat) -> UIImage {
    guard let imgRef = imageSource.cgImage else {
        return imageSource
    }
    // Pixel dimensions of the backing CGImage.
    let width = CGFloat(imgRef.width)
    let height = CGFloat(imgRef.height)
    var bounds = CGRect(x: 0, y: 0, width: width, height: height)
    // Proportionally shrink the output bounds when either side is too large.
    var scaleRatio : CGFloat = 1
    if (width > maxResolution || height > maxResolution) {
        scaleRatio = min(maxResolution / bounds.size.width, maxResolution / bounds.size.height)
        bounds.size.height = bounds.size.height * scaleRatio
        bounds.size.width = bounds.size.width * scaleRatio
    }
    // Transform that maps the stored pixels to an upright image; the 90-degree
    // cases also swap the output bounds.
    var transform = CGAffineTransform.identity
    let orient = imageSource.imageOrientation
    let imageSize = CGSize(width: CGFloat(imgRef.width), height: CGFloat(imgRef.height))
    switch(imageSource.imageOrientation) {
    case .up :
        transform = CGAffineTransform.identity
    case .upMirrored :
        transform = CGAffineTransform(translationX: imageSize.width, y: 0.0)
        transform = transform.scaledBy(x: -1.0, y: 1.0)
    case .down :
        transform = CGAffineTransform(translationX: imageSize.width, y: imageSize.height)
        transform = transform.rotated(by: CGFloat(M_PI))
    case .downMirrored :
        transform = CGAffineTransform(translationX: 0.0, y: imageSize.height)
        transform = transform.scaledBy(x: 1.0, y: -1.0)
    case .left :
        let storedHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = storedHeight
        transform = CGAffineTransform(translationX: 0.0, y: imageSize.width)
        transform = transform.rotated(by: 3.0 * CGFloat(M_PI) / 2.0)
    case .leftMirrored :
        let storedHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = storedHeight
        transform = CGAffineTransform(translationX: imageSize.height, y: imageSize.width)
        transform = transform.scaledBy(x: -1.0, y: 1.0)
        transform = transform.rotated(by: 3.0 * CGFloat(M_PI) / 2.0)
    case .right :
        let storedHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = storedHeight
        transform = CGAffineTransform(translationX: imageSize.height, y: 0.0)
        transform = transform.rotated(by: CGFloat(M_PI) / 2.0)
    case .rightMirrored :
        let storedHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = storedHeight
        transform = CGAffineTransform(scaleX: -1.0, y: 1.0)
        transform = transform.rotated(by: CGFloat(M_PI) / 2.0)
    }
    UIGraphicsBeginImageContext(bounds.size)
    // Fix: always balance Begin/End. The original only called
    // UIGraphicsEndImageContext() on the success path, leaking the pushed
    // context whenever either guard below returned early.
    defer { UIGraphicsEndImageContext() }
    guard let context = UIGraphicsGetCurrentContext() else {
        return imageSource
    }
    // Flip into CG's coordinate system; left/right sources also need a
    // horizontal mirror here.
    if orient == .right || orient == .left {
        context.scaleBy(x: -scaleRatio, y: scaleRatio)
        context.translateBy(x: -height, y: 0)
    } else {
        context.scaleBy(x: scaleRatio, y: -scaleRatio)
        context.translateBy(x: 0, y: -height)
    }
    context.concatenate(transform)
    context.draw(imgRef, in: CGRect(x: 0, y: 0, width: width, height: height))
    guard let imageCopy = UIGraphicsGetImageFromCurrentImageContext() else {
        return imageSource
    }
    return imageCopy
}

Try this method instead: (taken from here)
import Darwin
/// Scales `imageSource` so neither side exceeds `maxResolution` and redraws
/// it upright, baking the EXIF orientation into the pixels (Swift 2 APIs).
/// - Returns: The scaled, correctly oriented image.
class func rotateCameraImageToProperOrientation(imageSource : UIImage, maxResolution : CGFloat) -> UIImage {
let imgRef = imageSource.CGImage;
// Pixel dimensions of the backing CGImage.
let width = CGFloat(CGImageGetWidth(imgRef));
let height = CGFloat(CGImageGetHeight(imgRef));
var bounds = CGRectMake(0, 0, width, height)
// Proportionally shrink the output bounds when either side is too large.
var scaleRatio : CGFloat = 1
if (width > maxResolution || height > maxResolution) {
scaleRatio = min(maxResolution / bounds.size.width, maxResolution / bounds.size.height)
bounds.size.height = bounds.size.height * scaleRatio
bounds.size.width = bounds.size.width * scaleRatio
}
// Transform mapping the stored pixels to an upright image; 90-degree
// orientations also swap the output bounds.
var transform = CGAffineTransformIdentity
let orient = imageSource.imageOrientation
let imageSize = CGSizeMake(CGFloat(CGImageGetWidth(imgRef)), CGFloat(CGImageGetHeight(imgRef)))
switch(imageSource.imageOrientation) {
case .Up :
transform = CGAffineTransformIdentity
case .UpMirrored :
transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
case .Down :
transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
transform = CGAffineTransformRotate(transform, CGFloat(M_PI));
case .DownMirrored :
transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
transform = CGAffineTransformScale(transform, 1.0, -1.0);
case .Left :
let storedHeight = bounds.size.height
bounds.size.height = bounds.size.width;
bounds.size.width = storedHeight;
transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
transform = CGAffineTransformRotate(transform, 3.0 * CGFloat(M_PI) / 2.0);
case .LeftMirrored :
let storedHeight = bounds.size.height
bounds.size.height = bounds.size.width;
bounds.size.width = storedHeight;
transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width);
transform = CGAffineTransformScale(transform, -1.0, 1.0);
transform = CGAffineTransformRotate(transform, 3.0 * CGFloat(M_PI) / 2.0);
case .Right :
let storedHeight = bounds.size.height
bounds.size.height = bounds.size.width;
bounds.size.width = storedHeight;
transform = CGAffineTransformMakeTranslation(imageSize.height, 0.0);
transform = CGAffineTransformRotate(transform, CGFloat(M_PI) / 2.0);
case .RightMirrored :
let storedHeight = bounds.size.height
bounds.size.height = bounds.size.width;
bounds.size.width = storedHeight;
transform = CGAffineTransformMakeScale(-1.0, 1.0);
transform = CGAffineTransformRotate(transform, CGFloat(M_PI) / 2.0);
default : ()
}
UIGraphicsBeginImageContext(bounds.size)
let context = UIGraphicsGetCurrentContext()
// Flip into CG's coordinate system; left/right sources also get mirrored.
if orient == .Right || orient == .Left {
CGContextScaleCTM(context, -scaleRatio, scaleRatio);
CGContextTranslateCTM(context, -height, 0);
} else {
CGContextScaleCTM(context, scaleRatio, -scaleRatio);
CGContextTranslateCTM(context, 0, -height);
}
CGContextConcatCTM(context, transform);
CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef);
let imageCopy = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();
return imageCopy;
}

Related

Slow image uploading on s3 via iOS amazon sdk

Uploading images on multiple threads to S3 via the Amazon SDK takes a very long time. Let me know if you are facing the same problem and have found any solution.
Without your code, it is difficult to answer this. But as you said that it takes a lot of time to upload, I think your issue is with the size of the image. When you pick an image from an iPhone, the image quality is high. Its high resolution and file size is the reason for slow uploading. So before uploading to AWS bucket, compress the image and reduce the resolution as per your requirement.
When you pick the image,
/// UIImagePickerController delegate: downscales the picked image, writes a
/// JPEG copy to Documents/image.jpg, and shows it in the profile image view.
/// - Parameters:
///   - picker: The presenting image picker.
///   - info: Picker payload; only `.originalImage` is read here.
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {
    let fileManager = FileManager.default
    let documentsPath = fileManager.urls(for: .documentDirectory, in: .userDomainMask).first
    let imagePath = documentsPath?.appendingPathComponent("image.jpg")
    // Extract the image from the picker, downscale it, and save it.
    // Fix: the original force-unwrapped the picker payload and used
    // `try!` on the disk write, crashing when no original image is present
    // or when the write fails; handle both safely instead.
    if let original = info[UIImagePickerController.InfoKey.originalImage] as? UIImage,
       let pickedImage = self.scale(image: original, toLessThan: 320) {
        if let imageData = pickedImage.jpegData(compressionQuality: 0.5), let imagePath = imagePath {
            do {
                try imageData.write(to: imagePath)
            } catch {
                print("Failed to save picked image: \(error)")
            }
        }
        DispatchQueue.main.async {
            self.imgProfileImageView.image = pickedImage
        }
    }
    picker.dismiss(animated: true) {
    }
}
You can reduce the resolution by using this function
/// Downscales `originalImage` so neither side exceeds `maxResolution` and
/// redraws it upright, baking the EXIF orientation into the pixel data.
/// - Returns: The scaled, upright image, or nil when there is no backing
///   CGImage or no graphics context.
private func scale(image originalImage: UIImage, toLessThan maxResolution: CGFloat) -> UIImage? {
guard let imageReference = originalImage.cgImage else { return nil }
let rotate90 = CGFloat.pi/2.0 // Radians
let rotate180 = CGFloat.pi // Radians
let rotate270 = 3.0*CGFloat.pi/2.0 // Radians
// Work in the CGImage's pixel dimensions throughout.
let originalWidth = CGFloat(imageReference.width)
let originalHeight = CGFloat(imageReference.height)
let originalOrientation = originalImage.imageOrientation
var newWidth = originalWidth
var newHeight = originalHeight
// Proportionally clamp the longer side to maxResolution.
if originalWidth > maxResolution || originalHeight > maxResolution {
let aspectRatio: CGFloat = originalWidth / originalHeight
newWidth = aspectRatio > 1 ? maxResolution : maxResolution * aspectRatio
newHeight = aspectRatio > 1 ? maxResolution / aspectRatio : maxResolution
}
// Base transform: scale and flip vertically (CG's origin is bottom-left);
// the .left/.right cases below replace it with a mirrored variant.
let scaleRatio: CGFloat = newWidth / originalWidth
var scale: CGAffineTransform = .init(scaleX: scaleRatio, y: -scaleRatio)
scale = scale.translatedBy(x: 0.0, y: -originalHeight)
// Per-orientation rotation/mirror; 90-degree cases swap the output size.
var rotateAndMirror: CGAffineTransform
switch originalOrientation {
case .up:
rotateAndMirror = .identity
case .upMirrored:
rotateAndMirror = .init(translationX: originalWidth, y: 0.0)
rotateAndMirror = rotateAndMirror.scaledBy(x: -1.0, y: 1.0)
case .down:
rotateAndMirror = .init(translationX: originalWidth, y: originalHeight)
rotateAndMirror = rotateAndMirror.rotated(by: rotate180 )
case .downMirrored:
rotateAndMirror = .init(translationX: 0.0, y: originalHeight)
rotateAndMirror = rotateAndMirror.scaledBy(x: 1.0, y: -1.0)
case .left:
(newWidth, newHeight) = (newHeight, newWidth)
rotateAndMirror = .init(translationX: 0.0, y: originalWidth)
rotateAndMirror = rotateAndMirror.rotated(by: rotate270)
scale = .init(scaleX: -scaleRatio, y: scaleRatio)
scale = scale.translatedBy(x: -originalHeight, y: 0.0)
case .leftMirrored:
(newWidth, newHeight) = (newHeight, newWidth)
rotateAndMirror = .init(translationX: originalHeight, y: originalWidth)
rotateAndMirror = rotateAndMirror.scaledBy(x: -1.0, y: 1.0)
rotateAndMirror = rotateAndMirror.rotated(by: rotate270)
case .right:
(newWidth, newHeight) = (newHeight, newWidth)
rotateAndMirror = .init(translationX: originalHeight, y: 0.0)
rotateAndMirror = rotateAndMirror.rotated(by: rotate90)
scale = .init(scaleX: -scaleRatio, y: scaleRatio)
scale = scale.translatedBy(x: -originalHeight, y: 0.0)
case .rightMirrored:
(newWidth, newHeight) = (newHeight, newWidth)
rotateAndMirror = .init(scaleX: -1.0, y: 1.0)
rotateAndMirror = rotateAndMirror.rotated(by: CGFloat.pi/2.0)
}
UIGraphicsBeginImageContext(CGSize(width: newWidth, height: newHeight))
// NOTE(review): the early `return nil` below leaves the begun context
// unbalanced (no matching UIGraphicsEndImageContext) - confirm acceptable.
guard let context = UIGraphicsGetCurrentContext() else { return nil }
context.concatenate(scale)
context.concatenate(rotateAndMirror)
context.draw(imageReference, in: CGRect(x: 0, y: 0, width: originalWidth, height: originalHeight))
let copy = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return copy
}

UIImage from camera is rotated 90 degrees - using extensions

When Image data is returned and applied to a UIImage, if the data comes from the camera then the image appears rotated 90 degrees.
I tried adding the
extension UIImage {
    /// Returns a copy of the image re-drawn so its orientation is `.up`,
    /// baking any stored EXIF rotation into the pixel data.
    /// - Returns: An upright copy, or `self` when already upright or when
    ///   no image can be produced from the context.
    func correctlyOrientedImage() -> UIImage {
        // Already upright - nothing to do.
        if self.imageOrientation == UIImageOrientation.up {
            return self
        }
        UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
        // Fix: always balance Begin/End, and avoid the original's force
        // unwrap of UIGraphicsGetImageFromCurrentImageContext(), which
        // crashes when no image is available (e.g. a zero-sized image).
        defer { UIGraphicsEndImageContext() }
        // UIImage.draw honors imageOrientation, producing upright pixels.
        self.draw(in: CGRect(origin: CGPoint(x: 0, y: 0), size: CGSize(width: self.size.width, height: self.size.height)))
        guard let normalizedImage = UIGraphicsGetImageFromCurrentImageContext() else {
            return self
        }
        return normalizedImage
    }
}
in my code, I check to see if I have data saved for a specific user and if so I load the image data into profile_image, a UIImageView.
//profile_image
if let profile_imager = UserDefaults.standard.object(forKey: String(UserID)) as? Data {
let data = UserDefaults.standard.object(forKey: String(UserID)) as? Data
print("profile_imager: \(profile_imager)")
profile_image.image = UIImage(data: data!)
profile_image.backgroundColor = .white
}
How would I go about to use correctlyOrientedImage correctly
Thank you
Swift 4.2,
Created an UIImage extension to keep the image in the original position,
extension UIImage {
    /// Returns a copy of the image whose pixels are redrawn upright so that
    /// `imageOrientation` becomes `.up`.
    /// - Returns: The upright copy, or `self` when the image is already
    ///   upright or a bitmap context cannot be created.
    func fixedOrientation() -> UIImage {
        if imageOrientation == .up {
            return self
        }
        // Step 1: rotation/translation that makes the raw pixels upright.
        var transform: CGAffineTransform = CGAffineTransform.identity
        switch imageOrientation {
        case .down, .downMirrored:
            transform = transform.translatedBy(x: size.width, y: size.height)
            transform = transform.rotated(by: CGFloat.pi)
        case .left, .leftMirrored:
            transform = transform.translatedBy(x: size.width, y: 0)
            transform = transform.rotated(by: CGFloat.pi / 2)
        case .right, .rightMirrored:
            transform = transform.translatedBy(x: 0, y: size.height)
            transform = transform.rotated(by: CGFloat.pi / -2)
        case .up, .upMirrored:
            break
        }
        // Step 2: horizontal flip for the mirrored variants.
        // Fix: the original discarded the results of translatedBy/scaledBy
        // (CGAffineTransform methods return a new transform rather than
        // mutating), so mirrored orientations were never actually flipped.
        switch imageOrientation {
        case .upMirrored, .downMirrored:
            transform = transform.translatedBy(x: size.width, y: 0)
            transform = transform.scaledBy(x: -1, y: 1)
        case .leftMirrored, .rightMirrored:
            transform = transform.translatedBy(x: size.height, y: 0)
            transform = transform.scaledBy(x: -1, y: 1)
        case .up, .down, .left, .right:
            break
        }
        // Draw the underlying CGImage into a fresh bitmap context with the
        // computed transform applied.
        if let cgImage = self.cgImage, let colorSpace = cgImage.colorSpace,
            let ctx: CGContext = CGContext(data: nil, width: Int(size.width), height: Int(size.height), bitsPerComponent: cgImage.bitsPerComponent, bytesPerRow: 0, space: colorSpace, bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue) {
            ctx.concatenate(transform)
            switch imageOrientation {
            case .left, .leftMirrored, .right, .rightMirrored:
                // 90-degree-rotated sources: swap width/height when drawing.
                ctx.draw(cgImage, in: CGRect(x: 0, y: 0, width: size.height, height: size.width))
            default:
                ctx.draw(cgImage, in: CGRect(x: 0, y: 0, width: size.width, height: size.height))
            }
            if let ctxImage: CGImage = ctx.makeImage() {
                return UIImage(cgImage: ctxImage)
            } else {
                return self
            }
        } else {
            return self
        }
    }
}
Then set profile_image from fixedOrientation() method,
//profile_image
if let profile_imager = UserDefaults.standard.object(forKey: String(UserID)) as? Data {
let data = UserDefaults.standard.object(forKey: String(UserID)) as? Data
print("profile_imager: \(profile_imager)")
let actualImage = UIImage(data: data!)?.fixedOrientation()
profile_image.image = actualImage
profile_image.backgroundColor = .white
}

camera image resizing

I have an app I'm working on that has a function that allows the user to either take a photo or pick an image from the photo library. The code works but the image always displays larger than the UIImageView box. The box is set to "scale aspect fit" in the storyboard but the camera image refuses to conform.
Here's the code:
/// Picker delegate: installs the chosen photo into `imageField`, then
/// dismisses the picker in every case.
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
    // Dismiss regardless of whether a usable image was delivered.
    defer { self.dismiss(animated: true, completion: nil) }
    guard let chosenImage = info[UIImagePickerControllerOriginalImage] as? UIImage else {
        //error message goes here
        return
    }
    imageField.image = chosenImage
}
How do I get chosenImage to conform to the box or to aspect fit?
Try this way please :
/// Picker delegate: installs the chosen photo into `imageField`, stretching
/// it to fill the view and clipping overflow, then dismisses the picker.
func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
    // Dismiss regardless of whether a usable image was delivered.
    defer { self.dismiss(animated: true, completion: nil) }
    guard let chosenImage = info[UIImagePickerControllerOriginalImage] as? UIImage else {
        //error message goes here
        return
    }
    imageField.image = chosenImage
    imageField.contentMode = UIViewContentMode.scaleToFill
    imageField.clipsToBounds = true
}
why ScaleAspectFill and clipsToBounds = true have look this question Why scale to fill give bigger image than UIImageVIew size? (using swift)
if you want to more learn then check this How to scale a UIImageView proportionally?
The below code will resize the image captured by camera which is compatible to swift 4.0.
It checks the EXIF orientation property of the image using UIImageOrientation, and according to the orientation value it transforms and scales the image, so the returned image has the same orientation as your camera view.
/// Scales `image` so neither side exceeds `iIntMaxResolution` and redraws it
/// upright, baking the EXIF orientation into the pixels.
/// - Parameters:
///   - image: Source image; must have a backing CGImage.
///   - iIntMaxResolution: Upper bound for the longer output side, in pixels.
/// - Returns: The scaled, correctly oriented image.
func scaleAndRotateImage(image: UIImage, MaxResolution iIntMaxResolution: Int) -> UIImage {
    let kMaxResolution = iIntMaxResolution
    let imgRef = image.cgImage!
    let width: CGFloat = CGFloat(imgRef.width)
    let height: CGFloat = CGFloat(imgRef.height)
    var transform = CGAffineTransform.identity
    var bounds = CGRect.init(x: 0, y: 0, width: width, height: height)
    // Clamp the output bounds while preserving the aspect ratio.
    if Int(width) > kMaxResolution || Int(height) > kMaxResolution {
        let ratio: CGFloat = width / height
        if ratio > 1 {
            bounds.size.width = CGFloat(kMaxResolution)
            bounds.size.height = bounds.size.width / ratio
        }
        else {
            bounds.size.height = CGFloat(kMaxResolution)
            bounds.size.width = bounds.size.height * ratio
        }
    }
    let scaleRatio: CGFloat = bounds.size.width / width
    let imageSize = CGSize.init(width: CGFloat(imgRef.width), height: CGFloat(imgRef.height))
    var boundHeight: CGFloat
    let orient = image.imageOrientation
    // Build the transform that maps the stored pixels to an upright image.
    // Fix: the original snippet was truncated/garbled - `.down` rotated by
    // pi/2 instead of pi, `.leftMirrored` rotated by pi/4, and the `.left`,
    // `.right` and `.rightMirrored` cases were missing entirely (they fell
    // into an error-printing default). Restored the canonical transforms.
    switch orient {
    case .up:
        //EXIF = 1
        transform = CGAffineTransform.identity
    case .upMirrored:
        //EXIF = 2
        transform = CGAffineTransform.init(translationX: imageSize.width, y: 0.0)
        transform = transform.scaledBy(x: -1.0, y: 1.0)
    case .down:
        //EXIF = 3
        transform = CGAffineTransform.init(translationX: imageSize.width, y: imageSize.height)
        transform = transform.rotated(by: CGFloat(Double.pi))
    case .downMirrored:
        //EXIF = 4
        transform = CGAffineTransform.init(translationX: 0.0, y: imageSize.height)
        transform = transform.scaledBy(x: 1.0, y: -1.0)
    case .leftMirrored:
        //EXIF = 5 - 90-degree source, so swap the output bounds.
        boundHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = boundHeight
        transform = CGAffineTransform.init(translationX: imageSize.height, y: imageSize.width)
        transform = transform.scaledBy(x: -1.0, y: 1.0)
        transform = transform.rotated(by: 3.0 * CGFloat(Double.pi) / 2.0)
    case .left:
        //EXIF = 6
        boundHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = boundHeight
        transform = CGAffineTransform.init(translationX: 0.0, y: imageSize.width)
        transform = transform.rotated(by: 3.0 * CGFloat(Double.pi) / 2.0)
    case .rightMirrored:
        //EXIF = 7
        boundHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = boundHeight
        transform = CGAffineTransform.init(scaleX: -1.0, y: 1.0)
        transform = transform.rotated(by: CGFloat(Double.pi) / 2.0)
    case .right:
        //EXIF = 8
        boundHeight = bounds.size.height
        bounds.size.height = bounds.size.width
        bounds.size.width = boundHeight
        transform = CGAffineTransform.init(translationX: imageSize.height, y: 0.0)
        transform = transform.rotated(by: CGFloat(Double.pi) / 2.0)
    }
    UIGraphicsBeginImageContext(bounds.size)
    defer { UIGraphicsEndImageContext() }
    let context = UIGraphicsGetCurrentContext()
    // Flip into CG's coordinate system; left/right sources also get mirrored.
    if orient == .right || orient == .left {
        context?.scaleBy(x: -scaleRatio, y: scaleRatio)
        context?.translateBy(x: -height, y: 0)
    }
    else {
        context?.scaleBy(x: scaleRatio, y: -scaleRatio)
        context?.translateBy(x: 0, y: -height)
    }
    context?.concatenate(transform)
    context?.draw(imgRef, in: CGRect.init(x: 0, y: 0, width: width, height: height))
    let imageCopy = UIGraphicsGetImageFromCurrentImageContext()
    return imageCopy!
}

Resizing Images using Core Graphics / Quartz 2D in Swift

I'd like to use Core Graphics to resize some images because I have an image heavy app. I'm using this method below:
/// Redraws `image` into a same-sized bitmap context and returns the result.
/// NOTE(review): this works directly on image.CGImage and never consults
/// image.imageOrientation, so photos whose rotation is stored as EXIF
/// metadata (typical portrait camera shots) come out sideways - which is
/// exactly the symptom described in the question above.
func createContext(image: UIImage) -> UIImage? {
let cgImage = image.CGImage
let width = Int(image.size.width)
let height = Int(image.size.height)
// Reuse the source image's pixel layout for the destination context.
let bitsPerComponent = CGImageGetBitsPerComponent(cgImage)
let bytesPerRow = CGImageGetBytesPerRow(cgImage)
let colorSpace = CGImageGetColorSpace(cgImage)
let bitmapInfo = CGImageGetBitmapInfo(cgImage)
let context = CGBitmapContextCreate(nil, width, height, bitsPerComponent, bytesPerRow, colorSpace, bitmapInfo.rawValue)
CGContextSetInterpolationQuality(context, .High)
CGContextDrawImage(context, CGRect(origin: CGPointZero, size: CGSize(width: image.size.width, height: image.size.height)), cgImage)
// Wrap the drawn bitmap back into a UIImage (nil if context creation failed).
let scaledImage = CGBitmapContextCreateImage(context).flatMap { UIImage(CGImage: $0) }
return scaledImage
}
and the returned image looks great on landscape images, but for portrait images, it looks like this:
I can change the code to bring in UIKit elements:
/// Resize variant that mixes a UIKit image context with a manual Core
/// Graphics bitmap context.
/// NOTE(review): the returned image comes from the UIKit context (whose
/// image.drawInRect call honors imageOrientation); the half-size
/// CGBitmapContext below is drawn into but its result is never returned -
/// confirm whether that code path can be removed.
func createContext(image: UIImage) -> UIImage? {
let cgImage = image.CGImage
let rect = CGRectMake(0, 0, image.size.width, image.size.height)
let size = image.size
let scale: CGFloat = 0.0 // 0 = use the device's main-screen scale.
// Half-pixel dimensions used only for the unused bitmap context below.
let width = CGImageGetWidth(cgImage) / 2
let height = CGImageGetHeight(cgImage) / 2
let bitsPerComponent = CGImageGetBitsPerComponent(cgImage)
let bytesPerRow = CGImageGetBytesPerRow(cgImage)
let colorSpace = CGImageGetColorSpace(cgImage)
let bitmapInfo = CGImageGetBitmapInfo(cgImage)
UIGraphicsBeginImageContextWithOptions(size, false, scale)
// UIImage.drawInRect respects imageOrientation, unlike CGContextDrawImage.
image.drawInRect(CGRect(origin: CGPointZero, size: size))
let context = CGBitmapContextCreate(nil, width, height, bitsPerComponent, bytesPerRow, colorSpace, bitmapInfo.rawValue)
CGContextSetInterpolationQuality(context, .High)
let myGeneratedImage = CGBitmapContextCreateImage(context)
CGContextDrawImage(context, rect, myGeneratedImage)
let scaledImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return scaledImage
}
and it looks like this, which is what I want.
However, I want to do my best to avoid bringing in UIKit and keep the code lower level by using only Core Graphics, can someone point out what I'm doing incorrectly? Feel free to reply in objC or Swift code if you're able to see my errors.
Here's the solution:
/// Redraws the image so its pixels are upright (orientation .Up).
/// NOTE(review): uses bare `imageOrientation`/`size`/`CGImage`, so this is
/// presumably declared inside a UIImage extension - confirm.
func fixOrientation() -> UIImage {
if imageOrientation == .Up {
return self
}
// Step 1: rotation/translation that makes the raw pixels upright.
var transform = CGAffineTransformIdentity
switch imageOrientation {
case .Down, .DownMirrored:
transform = CGAffineTransformTranslate(transform, size.width, size.height)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI))
case .Left, .LeftMirrored:
transform = CGAffineTransformTranslate(transform, size.width, 0)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI_2))
case .Right, .RightMirrored:
transform = CGAffineTransformTranslate(transform, 0, size.height)
transform = CGAffineTransformRotate(transform, -CGFloat(M_PI_2))
case .Up, .UpMirrored:
break
}
// Step 2: horizontal flip for the mirrored variants.
switch imageOrientation {
case .UpMirrored, .DownMirrored:
transform = CGAffineTransformTranslate(transform, size.width, 0)
transform = CGAffineTransformScale(transform, -1, 1)
case .LeftMirrored, .RightMirrored:
transform = CGAffineTransformTranslate(transform, size.height, 0)
transform = CGAffineTransformScale(transform, -1, 1)
case .Up, .Down, .Left, .Right:
break
}
// Draw the underlying CGImage into a fresh context with the transform.
let context = CGBitmapContextCreate(nil, Int(size.width), Int(size.height), CGImageGetBitsPerComponent(CGImage), 0, CGImageGetColorSpace(CGImage), CGImageGetBitmapInfo(CGImage).rawValue)
CGContextConcatCTM(context, transform)
switch imageOrientation {
case .Left, .LeftMirrored, .Right, .RightMirrored:
// 90-degree-rotated sources: swap width/height when drawing.
CGContextDrawImage(context, CGRectMake(0, 0, size.height, size.width), CGImage)
default:
CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), CGImage)
break
}
guard let cgImage = CGBitmapContextCreateImage(context) else {
return UIImage()
}
let image = UIImage(CGImage: cgImage)
return image
}

AVFoundation Image orientation off by 90 degrees in the preview but fine in Camera roll

Something really strange is happening, I am trying to capture an image using AVFoundation, the Camera roll image seems just fine, but the image preview has the image rotated by 90 degrees.
This is the code I am using to capture an image
AVCaptureConnection *videoConnection = nil;
for (AVCaptureConnection *connection in stillImageOutput.connections)
{
for (AVCaptureInputPort *port in [connection inputPorts])
{
if ([[port mediaType] isEqual:AVMediaTypeVideo] )
{
videoConnection = connection;
break;
}
}
if (videoConnection)
{
break;
}
}
//NSLog(#"about to request a capture from: %#", stillImageOutput);
[stillImageOutput captureStillImageAsynchronouslyFromConnection:videoConnection completionHandler: ^(CMSampleBufferRef imageSampleBuffer, NSError *error)
{
CFDictionaryRef exifAttachments = CMGetAttachment( imageSampleBuffer, kCGImagePropertyExifDictionary, NULL);
if (exifAttachments)
{
// Do something with the attachments.
//NSLog(#"attachements: %#", exifAttachments);
} else {
NSLog(#"no attachments");
}
NSData *imageData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageSampleBuffer];
UIImage *image = [[UIImage alloc] initWithData:imageData];
self.vImage.image = image;
UIImageWriteToSavedPhotosAlbum(image, nil, nil, nil);
}];
Yes, it happens when you capture an image in the portrait orientation of your device and use that image in your application, because the default orientation of the stored image data is landscape on any iOS device, so you need to fix the orientation of the image after picking it before using it in your app.
I have put code to achieve this
Objective-C code
/// Returns a copy of `image` redrawn so its pixels are upright
/// (UIImageOrientationUp), baking the stored orientation into the bitmap.
- (UIImage *)fixOrientationOfImage:(UIImage *)image {
// No-op if the orientation is already correct
if (image.imageOrientation == UIImageOrientationUp) return image;
// We need to calculate the proper transformation to make the image upright.
// We do it in 2 steps: Rotate if Left/Right/Down, and then flip if Mirrored.
CGAffineTransform transform = CGAffineTransformIdentity;
switch (image.imageOrientation) {
case UIImageOrientationDown:
case UIImageOrientationDownMirrored:
transform = CGAffineTransformTranslate(transform, image.size.width, image.size.height);
transform = CGAffineTransformRotate(transform, M_PI);
break;
case UIImageOrientationLeft:
case UIImageOrientationLeftMirrored:
transform = CGAffineTransformTranslate(transform, image.size.width, 0);
transform = CGAffineTransformRotate(transform, M_PI_2);
break;
case UIImageOrientationRight:
case UIImageOrientationRightMirrored:
transform = CGAffineTransformTranslate(transform, 0, image.size.height);
transform = CGAffineTransformRotate(transform, -M_PI_2);
break;
case UIImageOrientationUp:
case UIImageOrientationUpMirrored:
break;
}
// Second pass: horizontal flip for the mirrored variants.
switch (image.imageOrientation) {
case UIImageOrientationUpMirrored:
case UIImageOrientationDownMirrored:
transform = CGAffineTransformTranslate(transform, image.size.width, 0);
transform = CGAffineTransformScale(transform, -1, 1);
break;
case UIImageOrientationLeftMirrored:
case UIImageOrientationRightMirrored:
transform = CGAffineTransformTranslate(transform, image.size.height, 0);
transform = CGAffineTransformScale(transform, -1, 1);
break;
case UIImageOrientationUp:
case UIImageOrientationDown:
case UIImageOrientationLeft:
case UIImageOrientationRight:
break;
}
// Now we draw the underlying CGImage into a new context, applying the transform
// calculated above.
CGContextRef ctx = CGBitmapContextCreate(NULL, image.size.width, image.size.height,
CGImageGetBitsPerComponent(image.CGImage), 0,
CGImageGetColorSpace(image.CGImage),
CGImageGetBitmapInfo(image.CGImage));
CGContextConcatCTM(ctx, transform);
switch (image.imageOrientation) {
case UIImageOrientationLeft:
case UIImageOrientationLeftMirrored:
case UIImageOrientationRight:
case UIImageOrientationRightMirrored:
// Grr... 90-degree-rotated sources need width/height swapped when drawing.
CGContextDrawImage(ctx, CGRectMake(0,0,image.size.height,image.size.width), image.CGImage);
break;
default:
CGContextDrawImage(ctx, CGRectMake(0,0,image.size.width,image.size.height), image.CGImage);
break;
}
// And now we just create a new UIImage from the drawing context
CGImageRef cgimg = CGBitmapContextCreateImage(ctx);
UIImage *img = [UIImage imageWithCGImage:cgimg];
// Manual-retain-count cleanup: release the CG objects created above.
CGContextRelease(ctx);
CGImageRelease(cgimg);
return img;
}
Swift code
/// Returns a copy of `image` redrawn so its pixels are upright (.Up),
/// or nil when a bitmap context or result image cannot be created (Swift 2).
func fixOrientationOfImage(image: UIImage) -> UIImage? {
if image.imageOrientation == .Up {
return image
}
// We need to calculate the proper transformation to make the image upright.
// We do it in 2 steps: Rotate if Left/Right/Down, and then flip if Mirrored.
var transform = CGAffineTransformIdentity
switch image.imageOrientation {
case .Down, .DownMirrored:
transform = CGAffineTransformTranslate(transform, image.size.width, image.size.height)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI))
case .Left, .LeftMirrored:
transform = CGAffineTransformTranslate(transform, image.size.width, 0)
transform = CGAffineTransformRotate(transform, CGFloat(M_PI_2))
case .Right, .RightMirrored:
transform = CGAffineTransformTranslate(transform, 0, image.size.height)
transform = CGAffineTransformRotate(transform, -CGFloat(M_PI_2))
default:
break
}
// Second pass: horizontal flip for the mirrored variants.
switch image.imageOrientation {
case .UpMirrored, .DownMirrored:
transform = CGAffineTransformTranslate(transform, image.size.width, 0)
transform = CGAffineTransformScale(transform, -1, 1)
case .LeftMirrored, .RightMirrored:
transform = CGAffineTransformTranslate(transform, image.size.height, 0)
transform = CGAffineTransformScale(transform, -1, 1)
default:
break
}
// Now we draw the underlying CGImage into a new context, applying the transform
// calculated above.
guard let context = CGBitmapContextCreate(nil, Int(image.size.width), Int(image.size.height), CGImageGetBitsPerComponent(image.CGImage), 0, CGImageGetColorSpace(image.CGImage), CGImageGetBitmapInfo(image.CGImage).rawValue) else {
return nil
}
CGContextConcatCTM(context, transform)
switch image.imageOrientation {
case .Left, .LeftMirrored, .Right, .RightMirrored:
// 90-degree-rotated sources: swap width/height when drawing.
CGContextDrawImage(context, CGRect(x: 0, y: 0, width: image.size.height, height: image.size.width), image.CGImage)
default:
CGContextDrawImage(context, CGRect(origin: .zero, size: image.size), image.CGImage)
}
// And now we just create a new UIImage from the drawing context
guard let CGImage = CGBitmapContextCreateImage(context) else {
return nil
}
return UIImage(CGImage: CGImage)
}
Swift 3.0
/// Redraws `image` so its pixel data is stored upright, baking the
/// `imageOrientation` flag into the bitmap itself.
///
/// The fix happens in two steps: rotate for `.down`/`.left`/`.right`,
/// then apply a horizontal flip for the `*Mirrored` variants.
///
/// - Parameter image: Source image, typically straight from the camera.
/// - Returns: An upright copy, or `nil` if the image has no backing
///   `CGImage`/color space or a bitmap context could not be created.
func fixOrientationOfImage(image: UIImage) -> UIImage? {
if image.imageOrientation == .up {
// Pixels already upright — nothing to redraw.
return image
}
// Avoid force-unwrapping: a CIImage-backed UIImage has no cgImage,
// and some CGImages (e.g. masks) have no color space. Bail out safely.
guard let cgImage = image.cgImage, let colorSpace = cgImage.colorSpace else {
return nil
}
// Step 1: rotation needed to make the content upright.
var transform = CGAffineTransform.identity
switch image.imageOrientation {
case .down, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: image.size.height)
transform = transform.rotated(by: CGFloat(Double.pi))
case .left, .leftMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.rotated(by: CGFloat(Double.pi / 2))
case .right, .rightMirrored:
transform = transform.translatedBy(x: 0, y: image.size.height)
transform = transform.rotated(by: -CGFloat(Double.pi / 2))
default:
break
}
// Step 2: undo any mirroring with a horizontal flip.
switch image.imageOrientation {
case .upMirrored, .downMirrored:
transform = transform.translatedBy(x: image.size.width, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
case .leftMirrored, .rightMirrored:
transform = transform.translatedBy(x: image.size.height, y: 0)
transform = transform.scaledBy(x: -1, y: 1)
default:
break
}
// Draw the underlying CGImage into a new context with the transform
// applied. bytesPerRow == 0 lets CoreGraphics choose an optimal stride.
guard let context = CGContext(data: nil, width: Int(image.size.width), height: Int(image.size.height), bitsPerComponent: cgImage.bitsPerComponent, bytesPerRow: 0, space: colorSpace, bitmapInfo: cgImage.bitmapInfo.rawValue) else {
return nil
}
context.concatenate(transform)
switch image.imageOrientation {
case .left, .leftMirrored, .right, .rightMirrored:
// Quarter-turn cases: source width/height are swapped in the destination.
context.draw(cgImage, in: CGRect(x: 0, y: 0, width: image.size.height, height: image.size.width))
default:
context.draw(cgImage, in: CGRect(origin: .zero, size: image.size))
}
// And now we just create a new UIImage from the drawing context.
guard let fixedCGImage = context.makeImage() else {
return nil
}
return UIImage(cgImage: fixedCGImage)
}
Swift 5.5 +
You should set the orientation of the output connection before you capture the image.
// set the image orientation in output so the captured photo is already upright
// NOTE(review): videoPreviewLayerOrientation is force-unwrapped below —
// confirm it is non-nil (i.e. the preview layer exists) before capturing.
if let photoOutputConnection = self.photoOutput.connection(with: .video) {
photoOutputConnection.videoOrientation = videoPreviewLayerOrientation!
}
self.photoOutput.capturePhoto(with: photoSettings, delegate: photoCaptureProcessor) // capture image
The accepted answer works, but is much more complicated than it needs to be. You can use the following to rotate the image.
// Crops `image` to `rect`, compensating for the image's orientation flag so
// the crop rectangle is interpreted in upright (display) coordinates.
// Pass the image's full bounds as `rect` to "rotate" without cropping.
- (UIImage *)cropImage:(UIImage*)image toRect:(CGRect)rect {
    // Map the upright crop rect into the raw bitmap's coordinate space by
    // rotating/translating according to the stored orientation.
    CGAffineTransform cropTransform;
    switch (image.imageOrientation) {
        case UIImageOrientationLeft:
            cropTransform = CGAffineTransformTranslate(CGAffineTransformMakeRotation((CGFloat)M_PI_2), 0, -image.size.height);
            break;
        case UIImageOrientationRight:
            cropTransform = CGAffineTransformTranslate(CGAffineTransformMakeRotation(-(CGFloat)M_PI_2), -image.size.width, 0);
            break;
        case UIImageOrientationDown:
            cropTransform = CGAffineTransformTranslate(CGAffineTransformMakeRotation(-(CGFloat)M_PI), -image.size.width, -image.size.height);
            break;
        default:
            cropTransform = CGAffineTransformIdentity;
            break;
    };
    // Points -> pixels: honour the image scale (e.g. @2x / @3x assets).
    cropTransform = CGAffineTransformScale(cropTransform, image.scale, image.scale);
    // Shift the requested rect into bitmap space and crop there.
    CGRect bitmapCropRect = CGRectApplyAffineTransform(rect, cropTransform);
    CGImageRef croppedRef = CGImageCreateWithImageInRect(image.CGImage, bitmapCropRect);
    // Re-wrap with the original scale and orientation so UIKit displays it upright.
    UIImage *cropped = [UIImage imageWithCGImage:croppedRef scale:image.scale orientation:image.imageOrientation];
    // CGImageCreateWithImageInRect follows the Create rule — release it here.
    CGImageRelease(croppedRef);
    return cropped;
}
To just rotate the image and not crop, you can simply call it like this:
UIImage *image;
[self cropImage:image toRect:rect.bounds];
Dipen Panchasara's answer is great, but there could be one problem with it. When you process big images (for example, from an iPhone X), there will be a massive memory peak, which can be an issue in some cases.
So, maybe you want to change that line:
context.draw(image.cgImage!, in: CGRect(x: 0, y: 0, width: image.size.height, height: image.size.width))
for something with some memory optimization. For example, the following breaks the image drawing into 16 (4×4) steps, which decreases memory consumption significantly:
// Draw the image in 4x4 = 16 tiles instead of one big draw call to keep the
// peak memory footprint down for very large source images.
// NOTE(review): assumes the image's width and height are exactly divisible by
// partInAxis — any remainder pixels would be dropped. Confirm for your inputs.
let partInAxis: CGFloat = 4
// Tile size in the destination space: width derives from the source height and
// vice versa, matching the quarter-turn draw this snippet replaces.
let partWidth = image.size.height/partInAxis
let partHeight = image.size.width/partInAxis
for i in 0...Int(partInAxis)-1 {
for j in 0...Int(partInAxis)-1 {
// Crop one tile out of the source bitmap...
let partialImage = image.cgImage?.cropping(to: CGRect(x: CGFloat(i)*partWidth, y: CGFloat(j)*partHeight, width: partWidth, height: partHeight))
// ...and draw it. The row index is inverted (partInAxis-1-j), apparently to
// account for the context's flipped y-axis — verify against your context setup.
context.draw(partialImage!, in: CGRect(x: CGFloat(i)*partWidth, y: CGFloat(Int(partInAxis)-1-j)*partHeight, width: partWidth, height: partHeight))
}
}
Be aware that in this case the height and width of the image must be evenly divisible by 4.
I'd recommend using Yodagama's answer, since it's a one-liner and much cleaner. However, if you need to correct the orientation in another way, here's Dipen's answer without forced optionals, as a UIImage extension. For better memory optimization, mix it with this answer.
extension UIImage {
    /// A copy of this image redrawn so its pixels are stored upright and the
    /// orientation flag is no longer needed; `nil` if redrawing fails.
    ///
    /// Built in two stages: a rotation for `.down`/`.left`/`.right`, then a
    /// horizontal flip for the `*Mirrored` variants.
    var fixedOrientation: UIImage? {
        // Already upright — hand back the receiver untouched.
        guard imageOrientation != .up else { return self }
        // No backing CGImage (CIImage-backed) or no color space: give up safely.
        guard let cgImage = cgImage, let colorSpace = cgImage.colorSpace else {
            return nil
        }
        // Stage 1: rotation.
        var uprightTransform = CGAffineTransform.identity
        switch imageOrientation {
        case .down, .downMirrored:
            uprightTransform = uprightTransform
                .translatedBy(x: size.width, y: size.height)
                .rotated(by: .pi)
        case .left, .leftMirrored:
            uprightTransform = uprightTransform
                .translatedBy(x: size.width, y: 0)
                .rotated(by: .pi / 2)
        case .right, .rightMirrored:
            uprightTransform = uprightTransform
                .translatedBy(x: 0, y: size.height)
                .rotated(by: -.pi / 2)
        default:
            break
        }
        // Stage 2: mirror flip.
        switch imageOrientation {
        case .upMirrored, .downMirrored:
            uprightTransform = uprightTransform
                .translatedBy(x: size.width, y: 0)
                .scaledBy(x: -1, y: 1)
        case .leftMirrored, .rightMirrored:
            uprightTransform = uprightTransform
                .translatedBy(x: size.height, y: 0)
                .scaledBy(x: -1, y: 1)
        default:
            break
        }
        // Redraw into a fresh context with the transform applied.
        // bytesPerRow == 0 lets CoreGraphics pick the row stride.
        guard let context = CGContext(
            data: nil,
            width: Int(size.width),
            height: Int(size.height),
            bitsPerComponent: cgImage.bitsPerComponent,
            bytesPerRow: 0,
            space: colorSpace,
            bitmapInfo: cgImage.bitmapInfo.rawValue
        ) else {
            return nil
        }
        context.concatenate(uprightTransform)
        // Quarter-turn orientations swap the source's width and height.
        let drawRect: CGRect
        switch imageOrientation {
        case .left, .leftMirrored, .right, .rightMirrored:
            drawRect = CGRect(x: 0, y: 0, width: size.height, height: size.width)
        default:
            drawRect = CGRect(origin: .zero, size: size)
        }
        context.draw(cgImage, in: drawRect)
        // Wrap the freshly drawn bitmap back into a UIImage.
        guard let uprightCGImage = context.makeImage() else {
            return nil
        }
        return UIImage(cgImage: uprightCGImage)
    }
}

Resources