How to add an image as the title in UIAlertController? - ios

I am new to iOS. I included an image as the title along with a message; both are displayed, but the message is not laid out properly. How do I place the message exactly below the title image? I tried but could not get it to work. Can anyone help me?
Here is how it is called:
CommonMethods.alertWithImage(msg:"Allow XXX to access your device's location?",
Icon: UIImage(named: "loc.png")!,
fromView: self)
Here is my code:
let alrt = UIAlertController(title:nil, message: msg, preferredStyle:.actionSheet)
alrt.setMessage(font: UIFont(name: "Poppins-medium", size: 20), color: UIColor.white)
let btn1 = UIAlertAction(title: "While using the app.", style: .default,handler: nil)
let btn2 = UIAlertAction(title: "Only this time.", style: .default,handler: nil)
let btn3 = UIAlertAction(title: "Deny", style: .default,handler: nil)
alrt.addAction(btn1)
alrt.addAction(btn2)
alrt.addAction(btn3)
alrt.view.tintColor = UIColor.blue
// Reach into the alert's private view hierarchy to darken its background.
alrt.view.subviews.first?.subviews.first?.subviews.first?.backgroundColor = UIColor.black
let imageView = UIImageView(frame: CGRect(x: 180, y: 0, width: 35, height: 35))
imageView.image = Icon
alrt.view.addSubview(imageView)
fromView.present(alrt, animated: true, completion: nil)

As @Larme said, you can use a custom alert (a third-party library), or you can customize UIAlertController yourself by subclassing it.
Here is a solution that customizes UIAlertController:
class Alert: UIAlertController{
let offset: CGFloat = 20
override func viewDidLayoutSubviews() {
super.viewDidLayoutSubviews()
if let title = getLabel(for: view){
var f = title.frame
f.origin = CGPoint(x: f.origin.x, y: f.origin.y - offset)
title.frame = f
}
}
func getLabel(for v: UIView?) -> UILabel?{
guard let vue = v else{ return nil }
var result: UILabel?
for sub in vue.subviews{
sub.clipsToBounds = false
if sub.isKind(of: UILabel.self){
return sub as? UILabel
}
else {
result = getLabel(for: sub)
if result != nil{
return result
}
}
}
return result
}
}
Use it like this:
let alrt = Alert(title:nil, message: msg, preferredStyle:.actionSheet)
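If you also want the icon to sit reliably above the message without hard-coded coordinates, here is a minimal sketch of a common workaround (not from the answer above): pad the message with leading newlines to reserve space, then pin the icon with Auto Layout. The function name and the amount of padding are illustrative assumptions you would tune.
import UIKit

// Sketch only: reserve space with newline padding, then constrain the icon
// so the message always renders below it.
func presentLocationAlert(msg: String, icon: UIImage, from presenter: UIViewController) {
    // Leading newlines push the message text down, leaving room for the icon above it.
    let alert = UIAlertController(title: nil, message: "\n\n" + msg, preferredStyle: .actionSheet)
    alert.addAction(UIAlertAction(title: "While using the app.", style: .default, handler: nil))
    alert.addAction(UIAlertAction(title: "Only this time.", style: .default, handler: nil))
    alert.addAction(UIAlertAction(title: "Deny", style: .default, handler: nil))

    let imageView = UIImageView(image: icon)
    imageView.translatesAutoresizingMaskIntoConstraints = false
    alert.view.addSubview(imageView)
    // Center the icon horizontally instead of using a fixed x offset.
    NSLayoutConstraint.activate([
        imageView.centerXAnchor.constraint(equalTo: alert.view.centerXAnchor),
        imageView.topAnchor.constraint(equalTo: alert.view.topAnchor, constant: 12),
        imageView.widthAnchor.constraint(equalToConstant: 35),
        imageView.heightAnchor.constraint(equalToConstant: 35)
    ])

    presenter.present(alert, animated: true, completion: nil)
}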

Related

Text classification / extraction from an image: how to get a single text frame and string using Core ML from an image

I need to draw recognition boxes around each string and then retrieve that string when it is tapped.
import UIKit
import Vision
class ViewController: UIViewController, ImageGet {
//MARK: OUTLETS
@IBOutlet weak var selectButton: UIButton!
//MARK: VARIABLES
var objU = UtilityClass()
var image:UIImage?
var str:String?
var uiButton : UIButton?
var arrayString = [String]()
var imageView : UIImageView = UIImageView()
//MARK: DELEGATE FUNCTION
func img(image: UIImage) {
self.image = image
imageView.image = image
setUp()
}
override func viewDidLoad() {
super.viewDidLoad()
imageView.isUserInteractionEnabled = true
// Do any additional setup after loading the view.
}
//MARK: SETUPUI
func setUp() {
let realImg = resizeImage(image: (imageView.image!) , targetSize:CGSize(width: view.frame.width, height: view.frame.height) )
self.image = realImg
self.imageView .image = self.image
imageView.isUserInteractionEnabled = true
self.imageView.frame = CGRect(x: 0, y: 0, width: realImg.size.width, height: realImg.size.height)
view.addSubview(imageView)
guard let cgimg = realImg.cgImage else {return}
let requestHandler = VNImageRequestHandler(cgImage: cgimg)
let req = VNRecognizeTextRequest(completionHandler: recognizeTextHandler)
req.recognitionLevel = .accurate
do {
try requestHandler.perform([req])
} catch {
print("Unable to perform the request: \(error)")
}
}
//MARK: SELECT THE IMAGE
@IBAction func selectButtontapped(_ sender: Any) {
objU.delegate = self
objU.obj = self
objU.ImageGet()
}
func recognizeTextHandler(request : VNRequest , error:Error?) {
guard let observation = request.results as? [VNRecognizedTextObservation], error == nil else {
return
}
_ = observation.compactMap({
$0.topCandidates(1).first?.string
}).joined(separator: "\n")
for subView in imageView.subviews {
subView.removeFromSuperview()
}
let boundingRect :[CGRect] = observation.compactMap{
observation in
guard let candidate = observation.topCandidates(1).first else {return .zero}
//find the bounding box observation
let stringRange = candidate.string.startIndex..<candidate.string.endIndex
let boxObservation = try? candidate.boundingBox(for: stringRange)
let boundingBox = boxObservation?.boundingBox ?? .zero
str = candidate.string
self.arrayString.append(str!)
let rectInImg = VNImageRectForNormalizedRect(boundingBox, Int((imageView.frame.size.width)), Int((imageView.frame.size.height)))
let convertedRect = self.getConvertedRect(boundingBox: observation.boundingBox, inImage:image!.size , containedIn: (imageView.bounds.size))
drawBoundBox(rect: convertedRect)
return rectInImg
}
print(arrayString)
print(boundingRect)
}
func drawBoundBox(rect: CGRect) {
uiButton = UIButton(type: .custom)
uiButton?.frame = rect
uiButton?.layer.borderColor = UIColor.systemPink.cgColor
uiButton?.setTitle("", for: .normal)
uiButton?.layer.borderWidth = 2
uiButton?.tag = arrayString.count
imageView.addSubview(uiButton ?? UIButton())
uiButton?.addTarget(self, action: #selector(pressed(_:)), for: .touchUpInside)
}
@objc func pressed(_ sender : UIButton) {
alert(key: arrayString[sender.tag - 1])
}
//MARK: CONVERT THE NORMALISED BOUNDING RECT
func getConvertedRect(boundingBox: CGRect, inImage imageSize: CGSize, containedIn containerSize: CGSize) -> CGRect {
let rectOfImage: CGRect
let imageAspect = imageSize.width / imageSize.height
let containerAspect = containerSize.width / containerSize.height
if imageAspect > containerAspect { /// image extends left and right
let newImageWidth = containerSize.height * imageAspect /// the width of the overflowing image
let newX = -(newImageWidth - containerSize.width) / 2
rectOfImage = CGRect(x: newX, y: 0, width: newImageWidth, height: containerSize.height)
} else { /// image extends top and bottom
let newImageHeight = containerSize.width * (1 / imageAspect) /// the height of the overflowing image
let newY = -(newImageHeight - containerSize.height) / 2
rectOfImage = CGRect(x: 0, y: newY, width: containerSize.width, height: newImageHeight)
}
let newOriginBoundingBox = CGRect(
x: boundingBox.origin.x,
y: 1 - boundingBox.origin.y - boundingBox.height,
width: boundingBox.width,
height: boundingBox.height
)
var convertedRect = VNImageRectForNormalizedRect(newOriginBoundingBox, Int(rectOfImage.width), Int(rectOfImage.height))
/// add the margins
convertedRect.origin.x += rectOfImage.origin.x
convertedRect.origin.y += rectOfImage.origin.y
return convertedRect
}
//MARK: RESIZE THE IMAGE ACCORDING TO DEVICE
func resizeImage(image: UIImage, targetSize: CGSize) -> UIImage {
let size = image.size
let widthRatio = targetSize.width / image.size.width
let heightRatio = targetSize.height / image.size.height
// Figure out what our orientation is, and use that to form the rectangle
var newSize: CGSize
if(widthRatio > heightRatio) {
newSize = CGSize(width: size.width * heightRatio, height: size.height * heightRatio)
} else {
newSize = CGSize(width: size.width * widthRatio, height: size.height * widthRatio)
}
// This is the rect that we've calculated out and this is what is actually used below
let rect = CGRect(x: 0, y: 0, width: newSize.width, height: newSize.height)
// Actually do the resizing to the rect using the ImageContext stuff
UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
image.draw(in: rect)
let newImage = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
return newImage!
}
//MARK: POPPING ALERT WITH STRING
func alert(key:String){
let alertController = UIAlertController(title: "String", message: key, preferredStyle: .alert)
let OKAction = UIAlertAction(title: "OK", style: .default) {
(action: UIAlertAction!) in
// Code in this block will trigger when OK button tapped.
}
let copyAction = UIAlertAction(title: "Copy", style: .default) {
(action: UIAlertAction!) in
UIPasteboard.general.string = key
}
alertController.addAction(copyAction)
alertController.addAction(OKAction)
self.present(alertController, animated: true, completion: nil)
}
}
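As an aside, the Vision call itself can be tested in isolation from all the UI code above. Here is a minimal sketch (the helper name recognizeText is an illustrative assumption) that just returns the recognized strings:
import UIKit
import Vision

// Sketch only: run text recognition on a UIImage and hand back the strings.
// Note the completion may be called on a background queue.
func recognizeText(in image: UIImage, completion: @escaping ([String]) -> Void) {
    guard let cgImage = image.cgImage else { completion([]); return }
    let request = VNRecognizeTextRequest { request, _ in
        let observations = (request.results as? [VNRecognizedTextObservation]) ?? []
        completion(observations.compactMap { $0.topCandidates(1).first?.string })
    }
    request.recognitionLevel = .accurate
    // Language correction helps prose but can hurt codes/serial numbers; tune as needed.
    request.usesLanguageCorrection = true
    DispatchQueue.global(qos: .userInitiated).async {
        do {
            try VNImageRequestHandler(cgImage: cgImage).perform([request])
        } catch {
            print("Unable to perform the request: \(error)")
            completion([])
        }
    }
}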

Image/video sharing on iOS: what component is this "Preparing" alert progress box in the Swift library?

I always see apps showing this "Preparing" screen to indicate progress whenever I share a big video. I know Swift has UIAlertController, which can show a popup, but it is bare, which leads me to think this "Preparing" alert box must be an Apple built-in component, because many other apps show exactly the same UI when sharing a large asset. Does anyone know what this alert box is called?
Preparing screen
There is no default dialog box that displays a progress HUD, but you can customise a UIAlertController; other apps do the same.
@IBAction func showPopover(_ sender: UIBarButtonItem) {
let alertTitle = "Preparing...\n\n\n\n\n"
let alertController = UIAlertController(title: alertTitle, message: nil, preferredStyle: .alert)
let rect = CGRect(x: 10, y: 50, width: 250, height: 110)
let customView = UIView(frame: rect)
alertController.view.addSubview(customView)
let progressLabel = ProgressLabel()
let kLabelSize: CGFloat = 60.0
progressLabel.frame = CGRect(origin: CGPoint(x: (customView.frame.width/2.0)-(kLabelSize/2.0), y: (customView.frame.height/2.0)-(kLabelSize/2.0)), size: CGSize(width: kLabelSize, height: kLabelSize))
progressLabel.font = .systemFont(ofSize: 12)
progressLabel.textAlignment = .center
progressLabel.layer.cornerRadius = 30
progressLabel.layer.borderColor = UIColor.gray.cgColor
progressLabel.layer.borderWidth = 3
progressLabel.layer.masksToBounds = true
customView.addSubview(progressLabel)
let cancelAction = UIAlertAction(title: "Cancel", style: .cancel, handler: {(alert: UIAlertAction!) in print("cancel")})
alertController.addAction(cancelAction)
self.present(alertController, animated: true, completion:{})
updateProgress(progressLabel)
}
var progress = 1.0
func updateProgress(_ label: ProgressLabel) {
DispatchQueue.main.asyncAfter(deadline: .now()+1) {
label.text = "\(Int(self.progress))%"
self.progress += 1.0
if self.progress <= 100.0 {
self.updateProgress(label)
}
}
}
}
class ProgressLabel: UILabel {
var value: CGFloat = 0.0
// Create your own circle shape layer to render the progress
private var progressLayer: CAShapeLayer!
override func draw(_ rect: CGRect) {
super.draw(rect)
}
}
Note:
Here, you have to account for the y value of the custom view based on the size of the alert controller's title and message text; see the sketch below.
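A minimal sketch of doing that positioning with Auto Layout instead of a hand-tuned frame, so the custom view follows the alert's actual size (the constants here are assumptions to tune):
// Sketch only: replace the fixed CGRect with constraints on the alert's view.
let customView = UIView()
customView.translatesAutoresizingMaskIntoConstraints = false
alertController.view.addSubview(customView)
NSLayoutConstraint.activate([
    customView.centerXAnchor.constraint(equalTo: alertController.view.centerXAnchor),
    // Small downward offset so the circle sits under the "Preparing..." title;
    // the 10-point constant is an assumption to tune.
    customView.centerYAnchor.constraint(equalTo: alertController.view.centerYAnchor, constant: 10),
    customView.widthAnchor.constraint(equalToConstant: 250),
    customView.heightAnchor.constraint(equalToConstant: 110)
])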

Swift 3: Can you save the position of a cropped image and access it later when the full image is displayed?

Language: Swift 3
Task: Allow users to crop their profile image on upload
Question: Initially I was going to figure out how to save both the cropped image and the full image to my server, because I need both, but then this question came up: is there a way to save the cropped position of the full image to my MySQL database (PHP) and then access it when I need to display the cropped image?
I'm using
self.imagePicker.allowsEditing = true
to allow users to crop the photo:
let chooseAction = UIAlertAction(title: "Choose Image", style: .default, handler: { (alert: UIAlertAction!) -> Void in
if UIImagePickerController.isSourceTypeAvailable(.savedPhotosAlbum) {
self.imagePicker.delegate = self
self.imagePicker.sourceType = .savedPhotosAlbum
self.imagePicker.allowsEditing = true
self.present(self.imagePicker, animated: true, completion: nil) }
})
let takeAction = UIAlertAction(title: "Take Image", style: .default, handler: { (alert: UIAlertAction!) -> Void in
if UIImagePickerController.isSourceTypeAvailable(.camera) {
self.imagePicker.delegate = self
self.imagePicker.sourceType = .camera
self.imagePicker.allowsEditing = true
self.present(self.imagePicker, animated: true, completion: nil) }
})
let cancelAction = UIAlertAction(title: "Cancel", style: .cancel, handler: { (alert: UIAlertAction!) -> Void in })
optionMenu.addAction(chooseAction)
optionMenu.addAction(takeAction)
optionMenu.addAction(cancelAction)
self.present(optionMenu, animated: true, completion: nil)
}
EDIT
I am saving the full image to the server.
I need the simplest way to get the cropped square position, if possible, and then store that position in a variable (I will store this on my server); see the sketch below.
On certain views where the image is displayed, I only want to show the cropped part of the full image. I will retrieve the crop position saved on upload and then show only that part of the image.
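Side note, not part of the answer below: because the question already uses allowsEditing, the picker reports the chosen crop rectangle through the UIImagePickerControllerCropRect info key, which may be the simplest thing to store. A Swift 3 sketch of capturing and normalizing it (dividing by the original image size is an approximation that assumes the crop rect is reported in that image's coordinate space):
// Sketch only: grab the crop rect the picker already computed and normalize it
// so four floats can be stored server-side next to the full image.
func imagePickerController(_ picker: UIImagePickerController,
                           didFinishPickingMediaWithInfo info: [String : Any]) {
    if let original = info[UIImagePickerControllerOriginalImage] as? UIImage,
       let cropValue = info[UIImagePickerControllerCropRect] as? NSValue {
        let crop = cropValue.cgRectValue // in the original image's coordinate space
        let normalized = CGRect(x: crop.origin.x / original.size.width,
                                y: crop.origin.y / original.size.height,
                                width: crop.width / original.size.width,
                                height: crop.height / original.size.height)
        print(normalized) // store x, y, width, height (0...1) in MySQL
    }
    picker.dismiss(animated: true, completion: nil)
}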
I decided to try this out, since there are a few issues that can occur when doing this. I made this example:
class ViewController: UIViewController {
private let scrollViewPanel: HitForwardingView = HitForwardingView()
private let scrollView: UIScrollView = UIScrollView()
private let imageView: UIImageView = UIImageView()
private var image: UIImage?
override func viewDidLoad() {
super.viewDidLoad()
scrollViewPanel.frame = view.bounds
scrollViewPanel.listenerView = scrollView
view.addSubview(scrollViewPanel)
scrollView.frame = CGRect(x: 0.0, y: 0.0, width: 200.0, height: 200.0)
scrollView.center = CGPoint(x: scrollViewPanel.bounds.midX, y: scrollViewPanel.bounds.midY)
scrollViewPanel.addSubview(scrollView)
scrollView.clipsToBounds = false
scrollView.delegate = self
scrollView.minimumZoomScale = 0.2
scrollView.maximumZoomScale = 5.0
if let image = UIImage(named: "testImage") {
setupWithImage(image)
scrollToCenter()
}
// Just some masking to make things look nicer
scrollViewPanel.addSubview({
let view = UIView(frame: CGRect(x: 0.0, y: 0.0, width: scrollViewPanel.bounds.width, height: scrollView.frame.minY))
view.isUserInteractionEnabled = false
view.backgroundColor = UIColor.black.withAlphaComponent(0.6)
return view
}())
scrollViewPanel.addSubview({
let view = UIView(frame: CGRect(x: 0.0, y: scrollView.frame.minY, width: scrollView.frame.minX, height: scrollView.frame.height))
view.isUserInteractionEnabled = false
view.backgroundColor = UIColor.black.withAlphaComponent(0.6)
return view
}())
scrollViewPanel.addSubview({
let view = UIView(frame: CGRect(x: scrollView.frame.maxX, y: scrollView.frame.minY, width: scrollViewPanel.frame.width - scrollView.frame.maxX, height: scrollView.frame.height))
view.isUserInteractionEnabled = false
view.backgroundColor = UIColor.black.withAlphaComponent(0.6)
return view
}())
scrollViewPanel.addSubview({
let view = UIView(frame: CGRect(x: 0.0, y: scrollView.frame.maxY, width: scrollViewPanel.bounds.width, height: scrollViewPanel.frame.height - scrollView.frame.maxY))
view.isUserInteractionEnabled = false
view.backgroundColor = UIColor.black.withAlphaComponent(0.6)
return view
}())
// Add a trigger to show crop
view.addGestureRecognizer({
let recognizer = UITapGestureRecognizer(target: self, action: #selector(debugSnapshot))
recognizer.numberOfTapsRequired = 2
return recognizer
}())
}
@objc private func debugSnapshot() {
// Going to use relative coordinates depending on the original image. The result should be a frame normalized depending on original:
// x=0 is left-most
// x=1 is right-most
// y=0 is top-most
// y=1 is bottom-most
let convertedScrollViewFrame = scrollView.convert(scrollView.bounds, to: scrollViewPanel)
let convertedImageViewFrame = imageView.convert(imageView.bounds, to: scrollViewPanel)
var frame = convertedScrollViewFrame
frame.origin.x -= convertedImageViewFrame.origin.x
frame.origin.y -= convertedImageViewFrame.origin.y
frame.origin.x /= convertedImageViewFrame.width
frame.origin.y /= convertedImageViewFrame.height
frame.size.width /= convertedImageViewFrame.width
frame.size.height /= convertedImageViewFrame.height
print(frame)
// Do a reconstruction
let previewPanelView = UIView(frame: self.view.bounds)
previewPanelView.backgroundColor = UIColor.black.withAlphaComponent(0.8)
previewPanelView.addSubview({
let panel = UIView(frame: CGRect(x: 0.0, y: 0.0, width: 300.0, height: 300.0))
panel.center = CGPoint(x: previewPanelView.bounds.midX, y: previewPanelView.bounds.midY)
panel.clipsToBounds = true
let imageView = UIImageView(image: self.imageView.image)
// Frame now relative to where we display it (panel)
imageView.frame = CGRect(x: -frame.origin.x * panel.bounds.width / frame.size.width,
y: -frame.origin.y * panel.bounds.height / frame.size.height,
width: panel.bounds.width / frame.size.width,
height: panel.bounds.height / frame.size.height)
panel.addSubview(imageView)
return panel
}())
view.addSubview(previewPanelView)
DispatchQueue.main.asyncAfter(deadline: .now() + 2.0) {
previewPanelView.removeFromSuperview()
}
}
private func setupWithImage(_ image: UIImage) {
imageView.image = image
imageView.frame = CGRect(x: 0.0, y: 0.0, width: image.size.width, height: image.size.height)
scrollView.addSubview(imageView)
scrollView.contentSize = imageView.frame.size
}
private func scrollToCenter() {
scrollView.zoomScale = 1.0
scrollView.contentOffset = CGPoint(x: scrollView.contentSize.width*0.5 - scrollView.bounds.size.width*0.5,
y: scrollView.contentSize.height*0.5 - scrollView.bounds.size.height*0.5)
}
}
extension ViewController: UIScrollViewDelegate {
func viewForZooming(in scrollView: UIScrollView) -> UIView? {
return imageView
}
}
private extension ViewController {
class HitForwardingView: UIView {
weak var listenerView: UIView?
override func hitTest(_ point: CGPoint, with event: UIEvent?) -> UIView? {
let original = super.hitTest(point, with: event)
if (original == self || original == nil) && self.bounds.contains(point) {
return listenerView
} else {
return original
}
}
}
}
To make it work you can simply create a new project and copy this code into your ViewController. You also need a test image named "testImage". You can scroll the image around and zoom it in or out. If you double-tap, a new overlay appears showing the cropped image.
Explanations:
The whole view controller is done in code just to make it easier to explain. In reality all the views would be done in a storyboard with constraints.
Because we use a "small" scroll view, we need to forward touch events to the scroll view even when you drag outside of it. To do so, HitForwardingView is introduced; it collects touch events and forwards them to the scroll view.
Both "position" construction and reconstruction are demonstrated in debugSnapshot, with a small storage sketch after this list. You would send the frame computed in this method to your backend, and later use that same frame to position the image so it mimics the cropped image.
I hope the rest is pretty much self-explanatory from the code. If there are any questions about it, please go ahead.
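A minimal sketch (Swift 3 compatible; the key names are assumptions) of packing that normalized frame into JSON for the backend and unpacking it later, before feeding it into the reconstruction shown in debugSnapshot:
import UIKit

// Sketch only: serialize/deserialize the normalized crop frame.
func jsonData(for frame: CGRect) -> Data? {
    let payload: [String: Double] = ["x": Double(frame.origin.x),
                                     "y": Double(frame.origin.y),
                                     "w": Double(frame.size.width),
                                     "h": Double(frame.size.height)]
    return try? JSONSerialization.data(withJSONObject: payload, options: [])
}

func frame(from data: Data) -> CGRect? {
    guard let dict = (try? JSONSerialization.jsonObject(with: data, options: [])) as? [String: Double],
          let x = dict["x"], let y = dict["y"], let w = dict["w"], let h = dict["h"]
    else { return nil }
    return CGRect(x: x, y: y, width: w, height: h)
}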

Annotation (image & text) subviews change position after saving

I have created a PDF-based application. First, I get the PDF from the document directory, convert it to images, and then lay the images out vertically using UIImageView and a UIScrollView.
Then I add different types of annotations, such as a signature and text. The annotated images look fine on screen, but when I finalize them and convert them back to PDF I get a different result, as shown in the images below.
Screen 1: the original image is scaled to the UIImageView size with aspect fit (because the screen should look like this). When I convert the original size to the screen size, I store the image's original size and use it when converting the images to PDF after adding annotations; the result looks like screen 2.
Screen 2: when I finalize and convert to PDF, the annotation/signature position has changed.
let drawImage = jotViewController.renderImage() //sign image
signImg = drawImage
gestureView = UIView(frame: CGRect(x: 50, y: 300, width: 200, height: 110))
counter = counter + 1
gestureView.tag = counter
annotationImg = UIImageView(frame: CGRect(x: 0, y: 0, width:170, height: 80))
annotationImg.image = signImg
annotationImg.layer.cornerRadius = 5.0
// gestureView.frame = annotationImg.frame
cancelBtn.frame = CGRect(x: (annotationImg.frame.minX-10), y: (annotationImg.frame.maxY-10), width: 20, height: 20)
cancelBtn.setImage(#imageLiteral(resourceName: "cancel.png"), for: .normal)
cancelBtn.addTarget(self, action: #selector(cancelBtnAction), for: .touchUpInside)
gestureView.addSubview(cancelBtn)
zoomBtn.frame = CGRect(x: (annotationImg.frame.width-10), y: (annotationImg.frame.height-10), width: 20, height: 20)
zoomBtn.setImage(#imageLiteral(resourceName: "zoom.png"), for: .normal)
gestureView.addSubview(zoomBtn)
let zoomBtnpanGesture: UIPanGestureRecognizer = UIPanGestureRecognizer(target: self, action: #selector(ViewController.zoomBtnPanGesture(_:)))
zoomBtnpanGesture.minimumNumberOfTouches = 1
zoomBtnpanGesture.maximumNumberOfTouches = 1
zoomBtn.addGestureRecognizer(zoomBtnpanGesture)
let panGesture = UIPanGestureRecognizer(target: self, action: #selector(self.handlePanGesture))
panGesture.minimumNumberOfTouches = 1
panGesture.maximumNumberOfTouches = 1
annotationImg.backgroundColor = UIColor.lightGray.withAlphaComponent(0.5)
gestureView.addGestureRecognizer(panGesture)
annotationImg.clipsToBounds = true
gestureView.addSubview(annotationImg)
for getUIImageView in pdfUIImageViewArr {
if tag==0 {
let alert = UIAlertController(title: "Alert!!", message: "Please Select Signature Page.", preferredStyle: .alert)
alert.addAction(UIAlertAction(title: "OK", style: .default, handler: nil))
present(alert, animated: true, completion: nil)
}
if getUIImageView.tag == tag && tag != 0 {
getUIImageView.addSubview(gestureView)
}
}
let tapGesture = UITapGestureRecognizer(target: self, action: #selector(gestureViewTapped(sender:)))
tapGesture.numberOfTapsRequired = 1
gestureView.addGestureRecognizer(tapGesture)
imageViewArr.append(annotationImg)
gestureViewArr.append(gestureView)
UIDevice.current.setValue(UIInterfaceOrientation.portrait.rawValue, forKey: "orientation")
@IBAction func storeAsPdfAction(_ sender: AnyObject) {
self.storeToDocumentDir()
let userObj = self.storyboard?.instantiateViewController(withIdentifier: "DocumentVC") as? DocumentVC
self.navigationController?.pushViewController(userObj!, animated: true)
}
func storeToDocumentDir(){
print(pdfImage.frame)
print(view.frame)
print(gestureView.frame)
for getTextViewLbl in textViewLabelArr{
getTextViewLbl.backgroundColor = UIColor.clear
}
gestureViewArr.removeFirst()
for gestureView in gestureViewArr {
print(gestureView.frame)
zoomBtn.isHidden = true
cancelBtn.isHidden = true
// pdfImage.addSubview(gestureView)
}
pdfUIImageViewArr.remove(at: 0)
var i = 1
for getPDFImage in pdfUIImageViewArr {
getPDFImage.frame = CGRect(x: 0, y: 0, width: pdfImage.frame.size.width, height: pdfImage.frame.size.height)
getPDFImage.frame = CGRect(x: 0, y: 0, width: pageRect.size.width, height: pageRect.size.height)
getPDFImage.contentMode = UIViewContentMode.scaleToFill
//
let getImage = self.imageWithView(getPDFImage)
let fileManager = FileManager.default
let paths = (NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true)[0] as NSString).appendingPathComponent("img\(i).jpg")
i += 1
let image = getImage
print(paths)
let imageData = UIImageJPEGRepresentation(image, 0.5)
fileManager.createFile(atPath: paths as String, contents: imageData, attributes: nil)
setPdfImgArr.append(getImage)
}
self.createPdfFromView(pdfImage, imageArrAdd: setPdfImgArr, saveToDocumentsWithFileName: "\((setFileName)!)")
let pdfUrl = URL(fileURLWithPath:documentsFileName)
self.setImagesInScrollView(pdfUrl)
}
Problem solved by using UIScrollView zoom:
override func viewWillLayoutSubviews() {
super.viewWillLayoutSubviews()
self.configureZoomScale()
}
func configureZoomScale() {
let xZoomScale: CGFloat = self.scrollView.bounds.size.width / self.pdfImage.bounds.size.width
let yZoomScale: CGFloat = self.scrollView.bounds.size.height / self.pdfImage.bounds.size.height
let minZoomScale: CGFloat = min(xZoomScale, yZoomScale)
//set minimumZoomScale to the minimum zoom scale we calculated
//this means that the image can't be smaller than full screen
self.scrollView.minimumZoomScale = minZoomScale
//allow up to 4x zoom
self.scrollView.maximumZoomScale = 4
//set the starting zoom scale
self.scrollView.zoomScale = minZoomScale
}
func viewForZooming(in scrollView: UIScrollView) -> UIView?{
return pdfFinalImage
}

Image from View larger than device screen

I have a UIView that I created in a storyboard and want to use to print a form. I instantiate it, render the view, and everything works; however, on a device with a smaller screen, such as an iPhone, the output is cut off at the edge of the screen. I have no idea where I need to adjust the size (or anything else) to avoid this. Right now this is what I'm using to scale it up for high quality (the issue was happening before I added the scaling for quality). Should I be setting the size when I instantiate it?
func generatePDF() {
let pdfData = NSMutableData()
let bounds = self.view.bounds
UIGraphicsBeginPDFContextToData(pdfData, bounds, nil)
UIGraphicsBeginPDFPage()
guard let pdfContext = UIGraphicsGetCurrentContext() else { return }
let rescale : CGFloat = 4
func scaler(v: UIView) {
if !v.isKindOfClass(UIStackView.self) {
v.contentScaleFactor = 8
}
for sv in v.subviews {
scaler(sv)
}
}
scaler(pdfView)
let bigSize = CGSize(width: pdfView.frame.size.width*rescale, height: pdfView.frame.size.width*rescale)
UIGraphicsBeginImageContextWithOptions(bigSize, true, 1)
let context = UIGraphicsGetCurrentContext()!
CGContextSetFillColorWithColor(context, UIColor.whiteColor().CGColor)
CGContextFillRect(context, CGRect(origin: CGPoint(x: 0, y: 0), size: bigSize))
CGContextScaleCTM(context, rescale, rescale)
pdfView.layer.renderInContext(context)
let image = UIGraphicsGetImageFromCurrentImageContext()
UIGraphicsEndImageContext()
CGContextSaveGState(pdfContext)
CGContextTranslateCTM(pdfContext, pdfView.frame.origin.x, pdfView.frame.origin.y)
CGContextScaleCTM(pdfContext, 1/rescale, 1/rescale)
let frame = CGRect(origin: CGPoint(x: 0, y: 0), size: bigSize)
image!.drawInRect(frame)
CGContextRestoreGState(pdfContext)
let info : UIPrintInfo = UIPrintInfo(dictionary: nil)
info.orientation = UIPrintInfoOrientation.Portrait
info.outputType = UIPrintInfoOutputType.Grayscale
info.jobName = "Work Order"
if NSUserDefaults.standardUserDefaults().URLForKey("printer") != nil {
let printer = UIPrinter(URL: NSUserDefaults.standardUserDefaults().URLForKey("printer")!)
printer.contactPrinter { (available) in
if available {
let printInteraction = UIPrintInteractionController.sharedPrintController()
printInteraction.printingItem = image
printInteraction.printInfo = info
printInteraction.printToPrinter(printer, completionHandler: { (printerController, completed, error) in
if completed {
let alert = UIAlertController(title: "Printed!", message: nil, preferredStyle: .Alert)
self.presentViewController(alert, animated: true, completion: nil)
let delay = 1.0 * Double(NSEC_PER_SEC)
let time = dispatch_time(DISPATCH_TIME_NOW, Int64(delay))
dispatch_after(time, dispatch_get_main_queue(), {
alert.dismissViewControllerAnimated(true, completion: {
self.dismissViewControllerAnimated(false, completion: nil)
})
})
}
})
}
}
} else {
self.dismissViewControllerAnimated(false, completion: nil)
let alert = UIAlertController(title: "Select Printer", message: "\nPlease select a printer in the More section and then try your print again", preferredStyle: .Alert)
let okayButton = UIAlertAction(title: "Okay", style: .Default, handler: nil)
alert.addAction(okayButton)
UIApplication.sharedApplication().keyWindow?.rootViewController?.presentViewController(alert, animated: true, completion: nil)
}
}
Try setting the boundaries of the UIView to be the boundaries of the screen like this:
let screenSize: CGRect = UIScreen.main.bounds
view.frame = CGRect(x: 0.0, y: 0.0, width: screenSize.width, height: screenSize.height)
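If the form view has a fixed design size that is larger than the device screen, another option (modern syntax, iOS 10+; the function name and formSize parameter are illustrative assumptions) is to lay it out and render it off-screen at that size, so nothing is clipped to the screen bounds:
import UIKit

// Sketch only: lay the form view out at its full intended size off-screen,
// then render it, instead of letting the device bounds clip it.
func renderFormImage(_ formView: UIView, formSize: CGSize) -> UIImage {
    formView.frame = CGRect(origin: .zero, size: formSize)
    formView.setNeedsLayout()
    formView.layoutIfNeeded()

    let format = UIGraphicsImageRendererFormat()
    format.scale = 4 // oversample for print quality
    let renderer = UIGraphicsImageRenderer(size: formSize, format: format)
    return renderer.image { context in
        formView.layer.render(in: context.cgContext)
    }
}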
